; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2,FALLBACK0
; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse4.2 | FileCheck %s --check-prefixes=SSE,SSE42,FALLBACK1
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1-ONLY,FALLBACK2
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX2,AVX2-SLOW,FALLBACK3
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX2,AVX2-FAST-PERLANE,FALLBACK4
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX2,AVX2-FAST,FALLBACK5
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX512F,AVX512F-SLOW,FALLBACK6
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512F,AVX512F-FAST,FALLBACK7
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX512BW,AVX512BW-SLOW,FALLBACK8
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512BW,AVX512BW-FAST,FALLBACK9
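; Each test below loads two 64-byte vectors, adds them byte-wise (paddb), keeps
; only the low bytes named in the function name, zero-extends those elements by
; the given factor (a shufflevector against zeroinitializer), pads the result
; back out to 64 bytes, adds a third 64-byte vector, and stores 64 bytes.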
define void @vec16_v2i8_to_v1i16_factor2(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
; SSE2-LABEL: vec16_v2i8_to_v1i16_factor2:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm0
; SSE2-NEXT: paddb (%rsi), %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: paddb (%rdx), %xmm0
; SSE2-NEXT: movdqa %xmm0, (%rcx)
; SSE2-NEXT: retq
;
; SSE42-LABEL: vec16_v2i8_to_v1i16_factor2:
; SSE42: # %bb.0:
; SSE42-NEXT: movdqa (%rdi), %xmm0
; SSE42-NEXT: paddb (%rsi), %xmm0
; SSE42-NEXT: pmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; SSE42-NEXT: paddb (%rdx), %xmm0
; SSE42-NEXT: movdqa %xmm0, (%rcx)
; SSE42-NEXT: retq
;
; AVX-LABEL: vec16_v2i8_to_v1i16_factor2:
; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm0
; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0
; AVX-NEXT: vmovdqa %xmm0, (%rcx)
; AVX-NEXT: retq
;
; AVX2-LABEL: vec16_v2i8_to_v1i16_factor2:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %xmm0
; AVX2-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX2-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0
; AVX2-NEXT: vmovdqa %ymm0, (%rcx)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F-LABEL: vec16_v2i8_to_v1i16_factor2:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX512F-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: vec16_v2i8_to_v1i16_factor2:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX512BW-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
  %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
  %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
  %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
  %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <2 x i32> <i32 0, i32 1>
  %zextd.vec = shufflevector <2 x i8> %in.vec.trunc, <2 x i8> zeroinitializer, <2 x i32> <i32 0, i32 3>
  %out.bytevec.padded = shufflevector <2 x i8> %zextd.vec, <2 x i8> poison, <64 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
  %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
  store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
  ret void
}

define void @vec32_v4i8_to_v2i16_factor2(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
; SSE2-LABEL: vec32_v4i8_to_v2i16_factor2:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm0
; SSE2-NEXT: paddb (%rsi), %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE2-NEXT: paddb (%rdx), %xmm0
; SSE2-NEXT: movdqa %xmm0, (%rcx)
; SSE2-NEXT: retq
;
; SSE42-LABEL: vec32_v4i8_to_v2i16_factor2:
; SSE42: # %bb.0:
; SSE42-NEXT: movdqa (%rdi), %xmm0
; SSE42-NEXT: paddb (%rsi), %xmm0
; SSE42-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; SSE42-NEXT: paddb (%rdx), %xmm0
; SSE42-NEXT: movdqa %xmm0, (%rcx)
; SSE42-NEXT: retq
;
; AVX-LABEL: vec32_v4i8_to_v2i16_factor2:
; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm0
; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0
; AVX-NEXT: vmovdqa %xmm0, (%rcx)
; AVX-NEXT: retq
;
; AVX2-LABEL: vec32_v4i8_to_v2i16_factor2:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %xmm0
; AVX2-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX2-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0
; AVX2-NEXT: vmovdqa %ymm0, (%rcx)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F-LABEL: vec32_v4i8_to_v2i16_factor2:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX512F-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: vec32_v4i8_to_v2i16_factor2:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
  %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
  %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
  %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
  %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %zextd.vec = shufflevector <4 x i8> %in.vec.trunc, <4 x i8> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 1, i32 7>
  %out.bytevec.padded = shufflevector <4 x i8> %zextd.vec, <4 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
  %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
  store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
  ret void
}

define void @vec32_v4i8_to_v1i32_factor4(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
; SSE2-LABEL: vec32_v4i8_to_v1i32_factor4:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm0
; SSE2-NEXT: paddb (%rsi), %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: paddb (%rdx), %xmm0
; SSE2-NEXT: movdqa %xmm0, (%rcx)
; SSE2-NEXT: retq
;
; SSE42-LABEL: vec32_v4i8_to_v1i32_factor4:
; SSE42: # %bb.0:
; SSE42-NEXT: movdqa (%rdi), %xmm0
; SSE42-NEXT: paddb (%rsi), %xmm0
; SSE42-NEXT: pmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; SSE42-NEXT: paddb (%rdx), %xmm0
; SSE42-NEXT: movdqa %xmm0, (%rcx)
; SSE42-NEXT: retq
;
; AVX-LABEL: vec32_v4i8_to_v1i32_factor4:
; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm0
; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0
; AVX-NEXT: vmovdqa %xmm0, (%rcx)
; AVX-NEXT: retq
;
; AVX2-LABEL: vec32_v4i8_to_v1i32_factor4:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %xmm0
; AVX2-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX2-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0
; AVX2-NEXT: vmovdqa %ymm0, (%rcx)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F-LABEL: vec32_v4i8_to_v1i32_factor4:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX512F-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: vec32_v4i8_to_v1i32_factor4:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX512BW-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
  %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
  %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
  %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
  %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %zextd.vec = shufflevector <4 x i8> %in.vec.trunc, <4 x i8> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
  %out.bytevec.padded = shufflevector <4 x i8> %zextd.vec, <4 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
  %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
  store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
  ret void
}

define void @vec32_v2i16_to_v1i32_factor2(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
; SSE2-LABEL: vec32_v2i16_to_v1i32_factor2:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm0
; SSE2-NEXT: paddb (%rsi), %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: paddb (%rdx), %xmm0
; SSE2-NEXT: movdqa %xmm0, (%rcx)
; SSE2-NEXT: retq
;
; SSE42-LABEL: vec32_v2i16_to_v1i32_factor2:
; SSE42: # %bb.0:
; SSE42-NEXT: movdqa (%rdi), %xmm0
; SSE42-NEXT: paddb (%rsi), %xmm0
; SSE42-NEXT: pmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; SSE42-NEXT: paddb (%rdx), %xmm0
; SSE42-NEXT: movdqa %xmm0, (%rcx)
; SSE42-NEXT: retq
;
; AVX-LABEL: vec32_v2i16_to_v1i32_factor2:
; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm0
; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0
; AVX-NEXT: vmovdqa %xmm0, (%rcx)
; AVX-NEXT: retq
;
; AVX2-LABEL: vec32_v2i16_to_v1i32_factor2:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %xmm0
; AVX2-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX2-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0
; AVX2-NEXT: vmovdqa %ymm0, (%rcx)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F-LABEL: vec32_v2i16_to_v1i32_factor2:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX512F-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: vec32_v2i16_to_v1i32_factor2:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX512BW-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
  %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
  %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
  %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
  %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %in.vec.cast = bitcast <4 x i8> %in.vec.trunc to <2 x i16>
  %zextd.vec = shufflevector <2 x i16> %in.vec.cast, <2 x i16> zeroinitializer, <2 x i32> <i32 0, i32 3>
  %out.bytevec = bitcast <2 x i16> %zextd.vec to <4 x i8>
  %out.bytevec.padded = shufflevector <4 x i8> %out.bytevec, <4 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
  %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
  store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
  ret void
}

define void @vec64_v8i8_to_v4i16_factor2(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
; SSE2-LABEL: vec64_v8i8_to_v4i16_factor2:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm0
; SSE2-NEXT: paddb (%rsi), %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE2-NEXT: paddb (%rdx), %xmm0
; SSE2-NEXT: movdqa %xmm0, (%rcx)
; SSE2-NEXT: retq
;
; SSE42-LABEL: vec64_v8i8_to_v4i16_factor2:
; SSE42: # %bb.0:
; SSE42-NEXT: movdqa (%rdi), %xmm0
; SSE42-NEXT: paddb (%rsi), %xmm0
; SSE42-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; SSE42-NEXT: paddb (%rdx), %xmm0
; SSE42-NEXT: movdqa %xmm0, (%rcx)
; SSE42-NEXT: retq
;
; AVX-LABEL: vec64_v8i8_to_v4i16_factor2:
; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm0
; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0
; AVX-NEXT: vmovdqa %xmm0, (%rcx)
; AVX-NEXT: retq
;
; AVX2-LABEL: vec64_v8i8_to_v4i16_factor2:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %xmm0
; AVX2-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX2-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0
; AVX2-NEXT: vmovdqa %ymm0, (%rcx)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F-LABEL: vec64_v8i8_to_v4i16_factor2:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX512F-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: vec64_v8i8_to_v4i16_factor2:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
  %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
  %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
  %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
  %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %zextd.vec = shufflevector <8 x i8> %in.vec.trunc, <8 x i8> zeroinitializer, <8 x i32> <i32 0, i32 9, i32 1, i32 11, i32 2, i32 13, i32 3, i32 15>
  %out.bytevec.padded = shufflevector <8 x i8> %zextd.vec, <8 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
  %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
  store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
  ret void
}

define void @vec64_v8i8_to_v2i32_factor4(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
; SSE2-LABEL: vec64_v8i8_to_v2i32_factor4:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm0
; SSE2-NEXT: paddb (%rsi), %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE2-NEXT: paddb (%rdx), %xmm0
; SSE2-NEXT: movdqa %xmm0, (%rcx)
; SSE2-NEXT: retq
;
; SSE42-LABEL: vec64_v8i8_to_v2i32_factor4:
; SSE42: # %bb.0:
; SSE42-NEXT: movdqa (%rdi), %xmm0
; SSE42-NEXT: paddb (%rsi), %xmm0
; SSE42-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; SSE42-NEXT: paddb (%rdx), %xmm0
; SSE42-NEXT: movdqa %xmm0, (%rcx)
; SSE42-NEXT: retq
;
; AVX-LABEL: vec64_v8i8_to_v2i32_factor4:
; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm0
; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0
; AVX-NEXT: vmovdqa %xmm0, (%rcx)
; AVX-NEXT: retq
;
; AVX2-LABEL: vec64_v8i8_to_v2i32_factor4:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %xmm0
; AVX2-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0
; AVX2-NEXT: vmovdqa %ymm0, (%rcx)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F-LABEL: vec64_v8i8_to_v2i32_factor4:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX512F-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: vec64_v8i8_to_v2i32_factor4:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX512BW-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
  %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
  %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
  %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
  %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %zextd.vec = shufflevector <8 x i8> %in.vec.trunc, <8 x i8> zeroinitializer, <8 x i32> <i32 0, i32 9, i32 10, i32 11, i32 1, i32 13, i32 14, i32 15>
  %out.bytevec.padded = shufflevector <8 x i8> %zextd.vec, <8 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
  %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
  store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
  ret void
}

define void @vec64_v8i8_to_v1i64_factor8(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
; SSE2-LABEL: vec64_v8i8_to_v1i64_factor8:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm0
; SSE2-NEXT: paddb (%rsi), %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: paddb (%rdx), %xmm0
; SSE2-NEXT: movdqa %xmm0, (%rcx)
; SSE2-NEXT: retq
;
; SSE42-LABEL: vec64_v8i8_to_v1i64_factor8:
; SSE42: # %bb.0:
; SSE42-NEXT: movdqa (%rdi), %xmm0
; SSE42-NEXT: paddb (%rsi), %xmm0
; SSE42-NEXT: pmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; SSE42-NEXT: paddb (%rdx), %xmm0
; SSE42-NEXT: movdqa %xmm0, (%rcx)
; SSE42-NEXT: retq
;
; AVX-LABEL: vec64_v8i8_to_v1i64_factor8:
; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm0
; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0
; AVX-NEXT: vmovdqa %xmm0, (%rcx)
; AVX-NEXT: retq
;
; AVX2-LABEL: vec64_v8i8_to_v1i64_factor8:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %xmm0
; AVX2-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX2-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0
; AVX2-NEXT: vmovdqa %ymm0, (%rcx)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F-LABEL: vec64_v8i8_to_v1i64_factor8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX512F-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: vec64_v8i8_to_v1i64_factor8:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX512BW-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
  %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
  %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
  %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
  %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %zextd.vec = shufflevector <8 x i8> %in.vec.trunc, <8 x i8> zeroinitializer, <8 x i32> <i32 0, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  %out.bytevec.padded = shufflevector <8 x i8> %zextd.vec, <8 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
  %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
  store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
  ret void
}

define void @vec64_v4i16_to_v2i32_factor2(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
; SSE2-LABEL: vec64_v4i16_to_v2i32_factor2:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm0
; SSE2-NEXT: paddb (%rsi), %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE2-NEXT: paddb (%rdx), %xmm0
; SSE2-NEXT: movdqa %xmm0, (%rcx)
; SSE2-NEXT: retq
;
; SSE42-LABEL: vec64_v4i16_to_v2i32_factor2:
; SSE42: # %bb.0:
; SSE42-NEXT: movdqa (%rdi), %xmm0
; SSE42-NEXT: paddb (%rsi), %xmm0
; SSE42-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; SSE42-NEXT: paddb (%rdx), %xmm0
; SSE42-NEXT: movdqa %xmm0, (%rcx)
; SSE42-NEXT: retq
;
; AVX-LABEL: vec64_v4i16_to_v2i32_factor2:
; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm0
; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0
; AVX-NEXT: vmovdqa %xmm0, (%rcx)
; AVX-NEXT: retq
;
; AVX2-LABEL: vec64_v4i16_to_v2i32_factor2:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %xmm0
; AVX2-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX2-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0
; AVX2-NEXT: vmovdqa %ymm0, (%rcx)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F-LABEL: vec64_v4i16_to_v2i32_factor2:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX512F-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: vec64_v4i16_to_v2i32_factor2:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX512BW-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
  %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
  %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
  %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
  %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %in.vec.cast = bitcast <8 x i8> %in.vec.trunc to <4 x i16>
  %zextd.vec = shufflevector <4 x i16> %in.vec.cast, <4 x i16> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 1, i32 7>
  %out.bytevec = bitcast <4 x i16> %zextd.vec to <8 x i8>
  %out.bytevec.padded = shufflevector <8 x i8> %out.bytevec, <8 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
  %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
  store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
  ret void
}

define void @vec64_v4i16_to_v1i64_factor4(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
; SSE2-LABEL: vec64_v4i16_to_v1i64_factor4:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm0
; SSE2-NEXT: paddb (%rsi), %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: paddb (%rdx), %xmm0
; SSE2-NEXT: movdqa %xmm0, (%rcx)
; SSE2-NEXT: retq
;
; SSE42-LABEL: vec64_v4i16_to_v1i64_factor4:
; SSE42: # %bb.0:
; SSE42-NEXT: movdqa (%rdi), %xmm0
; SSE42-NEXT: paddb (%rsi), %xmm0
; SSE42-NEXT: pmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; SSE42-NEXT: paddb (%rdx), %xmm0
; SSE42-NEXT: movdqa %xmm0, (%rcx)
; SSE42-NEXT: retq
;
; AVX-LABEL: vec64_v4i16_to_v1i64_factor4:
; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm0
; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0
; AVX-NEXT: vmovdqa %xmm0, (%rcx)
; AVX-NEXT: retq
;
; AVX2-LABEL: vec64_v4i16_to_v1i64_factor4:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %xmm0
; AVX2-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX2-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0
; AVX2-NEXT: vmovdqa %ymm0, (%rcx)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F-LABEL: vec64_v4i16_to_v1i64_factor4:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX512F-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: vec64_v4i16_to_v1i64_factor4:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX512BW-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
  %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
  %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
  %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
  %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %in.vec.cast = bitcast <8 x i8> %in.vec.trunc to <4 x i16>
  %zextd.vec = shufflevector <4 x i16> %in.vec.cast, <4 x i16> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
  %out.bytevec = bitcast <4 x i16> %zextd.vec to <8 x i8>
  %out.bytevec.padded = shufflevector <8 x i8> %out.bytevec, <8 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
  %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
  store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
  ret void
}

define void @vec64_v2i32_to_v1i64_factor2(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
; SSE2-LABEL: vec64_v2i32_to_v1i64_factor2:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm0
; SSE2-NEXT: paddb (%rsi), %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: paddb (%rdx), %xmm0
; SSE2-NEXT: movdqa %xmm0, (%rcx)
; SSE2-NEXT: retq
;
; SSE42-LABEL: vec64_v2i32_to_v1i64_factor2:
; SSE42: # %bb.0:
; SSE42-NEXT: movdqa (%rdi), %xmm0
; SSE42-NEXT: paddb (%rsi), %xmm0
; SSE42-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; SSE42-NEXT: paddb (%rdx), %xmm0
; SSE42-NEXT: movdqa %xmm0, (%rcx)
; SSE42-NEXT: retq
;
; AVX-LABEL: vec64_v2i32_to_v1i64_factor2:
; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm0
; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0
; AVX-NEXT: vmovdqa %xmm0, (%rcx)
; AVX-NEXT: retq
;
; AVX2-LABEL: vec64_v2i32_to_v1i64_factor2:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %xmm0
; AVX2-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0
; AVX2-NEXT: vmovdqa %ymm0, (%rcx)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F-LABEL: vec64_v2i32_to_v1i64_factor2:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX512F-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: vec64_v2i32_to_v1i64_factor2:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX512BW-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
  %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
  %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
  %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
  %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %in.vec.cast = bitcast <8 x i8> %in.vec.trunc to <2 x i32>
  %zextd.vec = shufflevector <2 x i32> %in.vec.cast, <2 x i32> zeroinitializer, <2 x i32> <i32 0, i32 3>
  %out.bytevec = bitcast <2 x i32> %zextd.vec to <8 x i8>
  %out.bytevec.padded = shufflevector <8 x i8> %out.bytevec, <8 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
  %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
  store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
  ret void
}

define void @vec128_v16i8_to_v8i16_factor2(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
; SSE2-LABEL: vec128_v16i8_to_v8i16_factor2:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm0
; SSE2-NEXT: paddb (%rsi), %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE2-NEXT: paddb (%rdx), %xmm0
; SSE2-NEXT: movdqa %xmm0, (%rcx)
; SSE2-NEXT: retq
;
; SSE42-LABEL: vec128_v16i8_to_v8i16_factor2:
; SSE42: # %bb.0:
; SSE42-NEXT: movdqa (%rdi), %xmm0
; SSE42-NEXT: paddb (%rsi), %xmm0
; SSE42-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; SSE42-NEXT: paddb (%rdx), %xmm0
; SSE42-NEXT: movdqa %xmm0, (%rcx)
; SSE42-NEXT: retq
;
; AVX-LABEL: vec128_v16i8_to_v8i16_factor2:
; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm0
; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0
; AVX-NEXT: vmovdqa %xmm0, (%rcx)
; AVX-NEXT: retq
;
; AVX2-LABEL: vec128_v16i8_to_v8i16_factor2:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %xmm0
; AVX2-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX2-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0
; AVX2-NEXT: vmovdqa %ymm0, (%rcx)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F-LABEL: vec128_v16i8_to_v8i16_factor2:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX512F-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: vec128_v16i8_to_v8i16_factor2:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
  %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
  %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
  %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
  %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  %zextd.vec = shufflevector <16 x i8> %in.vec.trunc, <16 x i8> zeroinitializer, <16 x i32> <i32 0, i32 17, i32 1, i32 19, i32 2, i32 21, i32 3, i32 23, i32 4, i32 25, i32 5, i32 27, i32 6, i32 29, i32 7, i32 31>
  %out.bytevec.padded = shufflevector <16 x i8> %zextd.vec, <16 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
  %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
  store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
  ret void
}

define void @vec128_v16i8_to_v4i32_factor4(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
; SSE2-LABEL: vec128_v16i8_to_v4i32_factor4:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm0
; SSE2-NEXT: paddb (%rsi), %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE2-NEXT: paddb (%rdx), %xmm0
; SSE2-NEXT: movdqa %xmm0, (%rcx)
; SSE2-NEXT: retq
;
; SSE42-LABEL: vec128_v16i8_to_v4i32_factor4:
; SSE42: # %bb.0:
; SSE42-NEXT: movdqa (%rdi), %xmm0
; SSE42-NEXT: paddb (%rsi), %xmm0
; SSE42-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; SSE42-NEXT: paddb (%rdx), %xmm0
; SSE42-NEXT: movdqa %xmm0, (%rcx)
; SSE42-NEXT: retq
;
; AVX-LABEL: vec128_v16i8_to_v4i32_factor4:
; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm0
; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0
; AVX-NEXT: vmovdqa %xmm0, (%rcx)
; AVX-NEXT: retq
;
; AVX2-LABEL: vec128_v16i8_to_v4i32_factor4:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %xmm0
; AVX2-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0
; AVX2-NEXT: vmovdqa %ymm0, (%rcx)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F-LABEL: vec128_v16i8_to_v4i32_factor4:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX512F-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: vec128_v16i8_to_v4i32_factor4:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX512BW-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
  %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
  %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
  %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
  %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  %zextd.vec = shufflevector <16 x i8> %in.vec.trunc, <16 x i8> zeroinitializer, <16 x i32> <i32 0, i32 17, i32 18, i32 19, i32 1, i32 21, i32 22, i32 23, i32 2, i32 25, i32 26, i32 27, i32 3, i32 29, i32 30, i32 31>
  %out.bytevec.padded = shufflevector <16 x i8> %zextd.vec, <16 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
  %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
  store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
  ret void
}

define void @vec128_v16i8_to_v2i64_factor8(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
; SSE2-LABEL: vec128_v16i8_to_v2i64_factor8:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm0
; SSE2-NEXT: paddb (%rsi), %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: paddb (%rdx), %xmm0
; SSE2-NEXT: movdqa %xmm0, (%rcx)
; SSE2-NEXT: retq
;
; SSE42-LABEL: vec128_v16i8_to_v2i64_factor8:
; SSE42: # %bb.0:
; SSE42-NEXT: movdqa (%rdi), %xmm0
; SSE42-NEXT: paddb (%rsi), %xmm0
; SSE42-NEXT: pmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; SSE42-NEXT: paddb (%rdx), %xmm0
; SSE42-NEXT: movdqa %xmm0, (%rcx)
; SSE42-NEXT: retq
;
; AVX-LABEL: vec128_v16i8_to_v2i64_factor8:
; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm0
; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0
; AVX-NEXT: vmovdqa %xmm0, (%rcx)
; AVX-NEXT: retq
;
; AVX2-LABEL: vec128_v16i8_to_v2i64_factor8:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %xmm0
; AVX2-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX2-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0
; AVX2-NEXT: vmovdqa %ymm0, (%rcx)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F-LABEL: vec128_v16i8_to_v2i64_factor8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX512F-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: vec128_v16i8_to_v2i64_factor8:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX512BW-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
  %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
  %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
  %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
  %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  %zextd.vec = shufflevector <16 x i8> %in.vec.trunc, <16 x i8> zeroinitializer, <16 x i32> <i32 0, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 1, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
  %out.bytevec.padded = shufflevector <16 x i8> %zextd.vec, <16 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
  %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
  store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
  ret void
}

define void @vec128_v16i8_to_v1i128_factor16(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
; SSE-LABEL: vec128_v16i8_to_v1i128_factor16:
; SSE: # %bb.0:
; SSE-NEXT: movdqa (%rdi), %xmm0
; SSE-NEXT: paddb (%rsi), %xmm0
; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT: paddb (%rdx), %xmm0
; SSE-NEXT: movdqa %xmm0, (%rcx)
; SSE-NEXT: retq
;
; AVX-LABEL: vec128_v16i8_to_v1i128_factor16:
; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm0
; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0
; AVX-NEXT: vmovdqa %xmm0, (%rcx)
; AVX-NEXT: retq
;
; AVX2-LABEL: vec128_v16i8_to_v1i128_factor16:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %xmm0
; AVX2-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0
; AVX2-NEXT: vmovdqa %ymm0, (%rcx)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F-LABEL: vec128_v16i8_to_v1i128_factor16:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: vec128_v16i8_to_v1i128_factor16:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
  %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
  %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
  %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
  %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  %zextd.vec = shufflevector <16 x i8> %in.vec.trunc, <16 x i8> zeroinitializer, <16 x i32> <i32 0, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
  %out.bytevec.padded = shufflevector <16 x i8> %zextd.vec, <16 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
  %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
  store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
  ret void
}

1003 define void @vec128_v8i16_to_v4i32_factor2(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
1004 ; SSE2-LABEL: vec128_v8i16_to_v4i32_factor2:
1006 ; SSE2-NEXT: movdqa (%rdi), %xmm0
1007 ; SSE2-NEXT: paddb (%rsi), %xmm0
1008 ; SSE2-NEXT: pxor %xmm1, %xmm1
1009 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
1010 ; SSE2-NEXT: paddb (%rdx), %xmm0
1011 ; SSE2-NEXT: movdqa %xmm0, (%rcx)
1014 ; SSE42-LABEL: vec128_v8i16_to_v4i32_factor2:
1016 ; SSE42-NEXT: movdqa (%rdi), %xmm0
1017 ; SSE42-NEXT: paddb (%rsi), %xmm0
1018 ; SSE42-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
1019 ; SSE42-NEXT: paddb (%rdx), %xmm0
1020 ; SSE42-NEXT: movdqa %xmm0, (%rcx)
1023 ; AVX-LABEL: vec128_v8i16_to_v4i32_factor2:
1025 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
1026 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
1027 ; AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
1028 ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0
1029 ; AVX-NEXT: vmovdqa %xmm0, (%rcx)
1032 ; AVX2-LABEL: vec128_v8i16_to_v4i32_factor2:
1034 ; AVX2-NEXT: vmovdqa (%rdi), %xmm0
1035 ; AVX2-NEXT: vpaddb (%rsi), %xmm0, %xmm0
1036 ; AVX2-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
1037 ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0
1038 ; AVX2-NEXT: vmovdqa %ymm0, (%rcx)
1039 ; AVX2-NEXT: vzeroupper
1042 ; AVX512F-LABEL: vec128_v8i16_to_v4i32_factor2:
1044 ; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
1045 ; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0
1046 ; AVX512F-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
1047 ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
1048 ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
1049 ; AVX512F-NEXT: vzeroupper
1050 ; AVX512F-NEXT: retq
1052 ; AVX512BW-LABEL: vec128_v8i16_to_v4i32_factor2:
1053 ; AVX512BW: # %bb.0:
1054 ; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
1055 ; AVX512BW-NEXT: vpaddb (%rsi), %xmm0, %xmm0
1056 ; AVX512BW-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
1057 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
1058 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
1059 ; AVX512BW-NEXT: vzeroupper
1060 ; AVX512BW-NEXT: retq
1061 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
1062 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
1063 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
1064 %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
1065 %in.vec.cast = bitcast <16 x i8> %in.vec.trunc to <8 x i16>
1066 %zextd.vec = shufflevector <8 x i16> %in.vec.cast, <8 x i16> zeroinitializer, <8 x i32> <i32 0, i32 9, i32 1, i32 11, i32 2, i32 13, i32 3, i32 15>
1067 %out.bytevec = bitcast <8 x i16> %zextd.vec to <16 x i8>
1068 %out.bytevec.padded = shufflevector <16 x i8> %out.bytevec, <16 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
1069 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
1070 %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
1071 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
  ret void
}
1075 define void @vec128_v8i16_to_v2i64_factor4(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
1076 ; SSE2-LABEL: vec128_v8i16_to_v2i64_factor4:
1078 ; SSE2-NEXT: movdqa (%rdi), %xmm0
1079 ; SSE2-NEXT: paddb (%rsi), %xmm0
1080 ; SSE2-NEXT: pxor %xmm1, %xmm1
1081 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
1082 ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
1083 ; SSE2-NEXT: paddb (%rdx), %xmm0
1084 ; SSE2-NEXT: movdqa %xmm0, (%rcx)
1087 ; SSE42-LABEL: vec128_v8i16_to_v2i64_factor4:
1089 ; SSE42-NEXT: movdqa (%rdi), %xmm0
1090 ; SSE42-NEXT: paddb (%rsi), %xmm0
1091 ; SSE42-NEXT: pmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
1092 ; SSE42-NEXT: paddb (%rdx), %xmm0
1093 ; SSE42-NEXT: movdqa %xmm0, (%rcx)
1096 ; AVX-LABEL: vec128_v8i16_to_v2i64_factor4:
1098 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
1099 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
1100 ; AVX-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
1101 ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0
1102 ; AVX-NEXT: vmovdqa %xmm0, (%rcx)
1105 ; AVX2-LABEL: vec128_v8i16_to_v2i64_factor4:
1107 ; AVX2-NEXT: vmovdqa (%rdi), %xmm0
1108 ; AVX2-NEXT: vpaddb (%rsi), %xmm0, %xmm0
1109 ; AVX2-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
1110 ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0
1111 ; AVX2-NEXT: vmovdqa %ymm0, (%rcx)
1112 ; AVX2-NEXT: vzeroupper
1115 ; AVX512F-LABEL: vec128_v8i16_to_v2i64_factor4:
1117 ; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
1118 ; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0
1119 ; AVX512F-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
1120 ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
1121 ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
1122 ; AVX512F-NEXT: vzeroupper
1123 ; AVX512F-NEXT: retq
1125 ; AVX512BW-LABEL: vec128_v8i16_to_v2i64_factor4:
1126 ; AVX512BW: # %bb.0:
1127 ; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
1128 ; AVX512BW-NEXT: vpaddb (%rsi), %xmm0, %xmm0
1129 ; AVX512BW-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
1130 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
1131 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
1132 ; AVX512BW-NEXT: vzeroupper
1133 ; AVX512BW-NEXT: retq
1134 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
1135 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
1136 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
1137 %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
1138 %in.vec.cast = bitcast <16 x i8> %in.vec.trunc to <8 x i16>
1139 %zextd.vec = shufflevector <8 x i16> %in.vec.cast, <8 x i16> zeroinitializer, <8 x i32> <i32 0, i32 9, i32 10, i32 11, i32 1, i32 13, i32 14, i32 15>
1140 %out.bytevec = bitcast <8 x i16> %zextd.vec to <16 x i8>
1141 %out.bytevec.padded = shufflevector <16 x i8> %out.bytevec, <16 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
1142 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
1143 %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
1144 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
  ret void
}
1148 define void @vec128_v8i16_to_v1i128_factor8(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
1149 ; SSE2-LABEL: vec128_v8i16_to_v1i128_factor8:
1151 ; SSE2-NEXT: movdqa (%rdi), %xmm0
1152 ; SSE2-NEXT: paddb (%rsi), %xmm0
1153 ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
1154 ; SSE2-NEXT: paddb (%rdx), %xmm0
1155 ; SSE2-NEXT: movdqa %xmm0, (%rcx)
1158 ; SSE42-LABEL: vec128_v8i16_to_v1i128_factor8:
1160 ; SSE42-NEXT: movdqa (%rdi), %xmm0
1161 ; SSE42-NEXT: paddb (%rsi), %xmm0
1162 ; SSE42-NEXT: pxor %xmm1, %xmm1
1163 ; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5,6,7]
1164 ; SSE42-NEXT: paddb (%rdx), %xmm1
1165 ; SSE42-NEXT: movdqa %xmm1, (%rcx)
1168 ; AVX-LABEL: vec128_v8i16_to_v1i128_factor8:
1170 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
1171 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
1172 ; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
1173 ; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3,4,5,6,7]
1174 ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0
1175 ; AVX-NEXT: vmovdqa %xmm0, (%rcx)
1178 ; AVX2-LABEL: vec128_v8i16_to_v1i128_factor8:
1180 ; AVX2-NEXT: vmovdqa (%rdi), %xmm0
1181 ; AVX2-NEXT: vpaddb (%rsi), %xmm0, %xmm0
1182 ; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
1183 ; AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3,4,5,6,7]
1184 ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0
1185 ; AVX2-NEXT: vmovdqa %ymm0, (%rcx)
1186 ; AVX2-NEXT: vzeroupper
1189 ; AVX512F-LABEL: vec128_v8i16_to_v1i128_factor8:
1191 ; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
1192 ; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0
1193 ; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
1194 ; AVX512F-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3,4,5,6,7]
1195 ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
1196 ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
1197 ; AVX512F-NEXT: vzeroupper
1198 ; AVX512F-NEXT: retq
1200 ; AVX512BW-LABEL: vec128_v8i16_to_v1i128_factor8:
1201 ; AVX512BW: # %bb.0:
1202 ; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
1203 ; AVX512BW-NEXT: vpaddb (%rsi), %xmm0, %xmm0
1204 ; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
1205 ; AVX512BW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3,4,5,6,7]
1206 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
1207 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
1208 ; AVX512BW-NEXT: vzeroupper
1209 ; AVX512BW-NEXT: retq
1210 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
1211 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
1212 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
1213 %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
1214 %in.vec.cast = bitcast <16 x i8> %in.vec.trunc to <8 x i16>
1215 %zextd.vec = shufflevector <8 x i16> %in.vec.cast, <8 x i16> zeroinitializer, <8 x i32> <i32 0, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
1216 %out.bytevec = bitcast <8 x i16> %zextd.vec to <16 x i8>
1217 %out.bytevec.padded = shufflevector <16 x i8> %out.bytevec, <16 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
1218 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
1219 %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
1220 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
  ret void
}
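; Same pattern with i32 source elements: the low four dwords are spread into
; i64 lanes (factor 2) or a single i128 lane (factor 4), checked as punpckldq /
; movss on SSE2 and pmovzxdq / pblendw on newer subtargets.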
1224 define void @vec128_v4i32_to_v2i64_factor2(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
1225 ; SSE2-LABEL: vec128_v4i32_to_v2i64_factor2:
1227 ; SSE2-NEXT: movdqa (%rdi), %xmm0
1228 ; SSE2-NEXT: paddb (%rsi), %xmm0
1229 ; SSE2-NEXT: pxor %xmm1, %xmm1
1230 ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
1231 ; SSE2-NEXT: paddb (%rdx), %xmm0
1232 ; SSE2-NEXT: movdqa %xmm0, (%rcx)
1235 ; SSE42-LABEL: vec128_v4i32_to_v2i64_factor2:
1237 ; SSE42-NEXT: movdqa (%rdi), %xmm0
1238 ; SSE42-NEXT: paddb (%rsi), %xmm0
1239 ; SSE42-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
1240 ; SSE42-NEXT: paddb (%rdx), %xmm0
1241 ; SSE42-NEXT: movdqa %xmm0, (%rcx)
1244 ; AVX-LABEL: vec128_v4i32_to_v2i64_factor2:
1246 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
1247 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
1248 ; AVX-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
1249 ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0
1250 ; AVX-NEXT: vmovdqa %xmm0, (%rcx)
1253 ; AVX2-LABEL: vec128_v4i32_to_v2i64_factor2:
1255 ; AVX2-NEXT: vmovdqa (%rdi), %xmm0
1256 ; AVX2-NEXT: vpaddb (%rsi), %xmm0, %xmm0
1257 ; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
1258 ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0
1259 ; AVX2-NEXT: vmovdqa %ymm0, (%rcx)
1260 ; AVX2-NEXT: vzeroupper
1263 ; AVX512F-LABEL: vec128_v4i32_to_v2i64_factor2:
1265 ; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
1266 ; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0
1267 ; AVX512F-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
1268 ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
1269 ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
1270 ; AVX512F-NEXT: vzeroupper
1271 ; AVX512F-NEXT: retq
1273 ; AVX512BW-LABEL: vec128_v4i32_to_v2i64_factor2:
1274 ; AVX512BW: # %bb.0:
1275 ; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
1276 ; AVX512BW-NEXT: vpaddb (%rsi), %xmm0, %xmm0
1277 ; AVX512BW-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
1278 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
1279 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
1280 ; AVX512BW-NEXT: vzeroupper
1281 ; AVX512BW-NEXT: retq
1282 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
1283 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
1284 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
1285 %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
1286 %in.vec.cast = bitcast <16 x i8> %in.vec.trunc to <4 x i32>
1287 %zextd.vec = shufflevector <4 x i32> %in.vec.cast, <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 1, i32 7>
1288 %out.bytevec = bitcast <4 x i32> %zextd.vec to <16 x i8>
1289 %out.bytevec.padded = shufflevector <16 x i8> %out.bytevec, <16 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
1290 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
1291 %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
1292 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
  ret void
}
1296 define void @vec128_v4i32_to_v1i128_factor4(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
1297 ; SSE2-LABEL: vec128_v4i32_to_v1i128_factor4:
1299 ; SSE2-NEXT: movdqa (%rdi), %xmm0
1300 ; SSE2-NEXT: paddb (%rsi), %xmm0
1301 ; SSE2-NEXT: xorps %xmm1, %xmm1
1302 ; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
1303 ; SSE2-NEXT: paddb (%rdx), %xmm1
1304 ; SSE2-NEXT: movdqa %xmm1, (%rcx)
1307 ; SSE42-LABEL: vec128_v4i32_to_v1i128_factor4:
1309 ; SSE42-NEXT: movdqa (%rdi), %xmm0
1310 ; SSE42-NEXT: paddb (%rsi), %xmm0
1311 ; SSE42-NEXT: pxor %xmm1, %xmm1
1312 ; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3,4,5,6,7]
1313 ; SSE42-NEXT: paddb (%rdx), %xmm1
1314 ; SSE42-NEXT: movdqa %xmm1, (%rcx)
1317 ; AVX-LABEL: vec128_v4i32_to_v1i128_factor4:
1319 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
1320 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
1321 ; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
1322 ; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
1323 ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0
1324 ; AVX-NEXT: vmovdqa %xmm0, (%rcx)
1327 ; AVX2-LABEL: vec128_v4i32_to_v1i128_factor4:
1329 ; AVX2-NEXT: vmovdqa (%rdi), %xmm0
1330 ; AVX2-NEXT: vpaddb (%rsi), %xmm0, %xmm0
1331 ; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
1332 ; AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
1333 ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0
1334 ; AVX2-NEXT: vmovdqa %ymm0, (%rcx)
1335 ; AVX2-NEXT: vzeroupper
1338 ; AVX512F-LABEL: vec128_v4i32_to_v1i128_factor4:
1340 ; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
1341 ; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0
1342 ; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
1343 ; AVX512F-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
1344 ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
1345 ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
1346 ; AVX512F-NEXT: vzeroupper
1347 ; AVX512F-NEXT: retq
1349 ; AVX512BW-LABEL: vec128_v4i32_to_v1i128_factor4:
1350 ; AVX512BW: # %bb.0:
1351 ; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
1352 ; AVX512BW-NEXT: vpaddb (%rsi), %xmm0, %xmm0
1353 ; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
1354 ; AVX512BW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
1355 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
1356 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
1357 ; AVX512BW-NEXT: vzeroupper
1358 ; AVX512BW-NEXT: retq
1359 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
1360 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
1361 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
1362 %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
1363 %in.vec.cast = bitcast <16 x i8> %in.vec.trunc to <4 x i32>
1364 %zextd.vec = shufflevector <4 x i32> %in.vec.cast, <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
1365 %out.bytevec = bitcast <4 x i32> %zextd.vec to <16 x i8>
1366 %out.bytevec.padded = shufflevector <16 x i8> %out.bytevec, <16 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
1367 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
1368 %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
1369 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
  ret void
}
1373 define void @vec128_v2i64_to_v1i128_factor2(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
1374 ; SSE-LABEL: vec128_v2i64_to_v1i128_factor2:
1376 ; SSE-NEXT: movdqa (%rdi), %xmm0
1377 ; SSE-NEXT: paddb (%rsi), %xmm0
1378 ; SSE-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
1379 ; SSE-NEXT: paddb (%rdx), %xmm0
1380 ; SSE-NEXT: movdqa %xmm0, (%rcx)
1383 ; AVX-LABEL: vec128_v2i64_to_v1i128_factor2:
1385 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
1386 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
1387 ; AVX-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
1388 ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0
1389 ; AVX-NEXT: vmovdqa %xmm0, (%rcx)
1392 ; AVX2-LABEL: vec128_v2i64_to_v1i128_factor2:
1394 ; AVX2-NEXT: vmovdqa (%rdi), %xmm0
1395 ; AVX2-NEXT: vpaddb (%rsi), %xmm0, %xmm0
1396 ; AVX2-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
1397 ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0
1398 ; AVX2-NEXT: vmovdqa %ymm0, (%rcx)
1399 ; AVX2-NEXT: vzeroupper
1402 ; AVX512F-LABEL: vec128_v2i64_to_v1i128_factor2:
1404 ; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
1405 ; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0
1406 ; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
1407 ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
1408 ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
1409 ; AVX512F-NEXT: vzeroupper
1410 ; AVX512F-NEXT: retq
1412 ; AVX512BW-LABEL: vec128_v2i64_to_v1i128_factor2:
1413 ; AVX512BW: # %bb.0:
1414 ; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
1415 ; AVX512BW-NEXT: vpaddb (%rsi), %xmm0, %xmm0
1416 ; AVX512BW-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
1417 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
1418 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
1419 ; AVX512BW-NEXT: vzeroupper
1420 ; AVX512BW-NEXT: retq
1421 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
1422 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
1423 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
1424 %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
1425 %in.vec.cast = bitcast <16 x i8> %in.vec.trunc to <2 x i64>
1426 %zextd.vec = shufflevector <2 x i64> %in.vec.cast, <2 x i64> zeroinitializer, <2 x i32> <i32 0, i32 3>
1427 %out.bytevec = bitcast <2 x i64> %zextd.vec to <16 x i8>
1428 %out.bytevec.padded = shufflevector <16 x i8> %out.bytevec, <16 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
1429 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
1430 %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
1431 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
  ret void
}
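; The vec256_* tests below produce a 256-bit result: the pre-AVX2 lowerings build
; and store two 16-byte halves at (%rcx) and 16(%rcx), while AVX2 and AVX-512
; operate on a single ymm/zmm register.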
1435 define void @vec256_v32i8_to_v16i16_factor2(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
1436 ; SSE2-LABEL: vec256_v32i8_to_v16i16_factor2:
1438 ; SSE2-NEXT: movdqa (%rdi), %xmm0
1439 ; SSE2-NEXT: paddb (%rsi), %xmm0
1440 ; SSE2-NEXT: pxor %xmm1, %xmm1
1441 ; SSE2-NEXT: movdqa %xmm0, %xmm2
1442 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
1443 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
1444 ; SSE2-NEXT: paddb 16(%rdx), %xmm0
1445 ; SSE2-NEXT: paddb (%rdx), %xmm2
1446 ; SSE2-NEXT: movdqa %xmm2, (%rcx)
1447 ; SSE2-NEXT: movdqa %xmm0, 16(%rcx)
1450 ; SSE42-LABEL: vec256_v32i8_to_v16i16_factor2:
1452 ; SSE42-NEXT: movdqa (%rdi), %xmm0
1453 ; SSE42-NEXT: paddb (%rsi), %xmm0
1454 ; SSE42-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
1455 ; SSE42-NEXT: pxor %xmm2, %xmm2
1456 ; SSE42-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
1457 ; SSE42-NEXT: paddb 16(%rdx), %xmm0
1458 ; SSE42-NEXT: paddb (%rdx), %xmm1
1459 ; SSE42-NEXT: movdqa %xmm1, (%rcx)
1460 ; SSE42-NEXT: movdqa %xmm0, 16(%rcx)
1463 ; AVX-LABEL: vec256_v32i8_to_v16i16_factor2:
1465 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
1466 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
1467 ; AVX-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
1468 ; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
1469 ; AVX-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
1470 ; AVX-NEXT: vpaddb 16(%rdx), %xmm0, %xmm0
1471 ; AVX-NEXT: vpaddb (%rdx), %xmm1, %xmm1
1472 ; AVX-NEXT: vmovdqa %xmm1, (%rcx)
1473 ; AVX-NEXT: vmovdqa %xmm0, 16(%rcx)
1476 ; AVX2-LABEL: vec256_v32i8_to_v16i16_factor2:
1478 ; AVX2-NEXT: vmovdqa (%rdi), %xmm0
1479 ; AVX2-NEXT: vpaddb (%rsi), %xmm0, %xmm0
1480 ; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
1481 ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0
1482 ; AVX2-NEXT: vmovdqa %ymm0, (%rcx)
1483 ; AVX2-NEXT: vzeroupper
1486 ; AVX512F-LABEL: vec256_v32i8_to_v16i16_factor2:
1488 ; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
1489 ; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0
1490 ; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
1491 ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
1492 ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
1493 ; AVX512F-NEXT: vzeroupper
1494 ; AVX512F-NEXT: retq
1496 ; AVX512BW-LABEL: vec256_v32i8_to_v16i16_factor2:
1497 ; AVX512BW: # %bb.0:
1498 ; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
1499 ; AVX512BW-NEXT: vpaddb (%rsi), %xmm0, %xmm0
1500 ; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
1501 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
1502 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
1503 ; AVX512BW-NEXT: vzeroupper
1504 ; AVX512BW-NEXT: retq
1505 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
1506 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
1507 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
1508 %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
1509 %zextd.vec = shufflevector <32 x i8> %in.vec.trunc, <32 x i8> zeroinitializer, <32 x i32> <i32 0, i32 33, i32 1, i32 35, i32 2, i32 37, i32 3, i32 39, i32 4, i32 41, i32 5, i32 43, i32 6, i32 45, i32 7, i32 47, i32 8, i32 49, i32 9, i32 51, i32 10, i32 53, i32 11, i32 55, i32 12, i32 57, i32 13, i32 59, i32 14, i32 61, i32 15, i32 63>
1510 %out.bytevec.padded = shufflevector <32 x i8> %zextd.vec, <32 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
1511 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
1512 %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
1513 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
  ret void
}
1517 define void @vec256_v32i8_to_v8i32_factor4(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
1518 ; SSE2-LABEL: vec256_v32i8_to_v8i32_factor4:
1520 ; SSE2-NEXT: movdqa (%rdi), %xmm0
1521 ; SSE2-NEXT: paddb (%rsi), %xmm0
1522 ; SSE2-NEXT: pxor %xmm1, %xmm1
1523 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
1524 ; SSE2-NEXT: movdqa %xmm0, %xmm2
1525 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
1526 ; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
1527 ; SSE2-NEXT: paddb 16(%rdx), %xmm0
1528 ; SSE2-NEXT: paddb (%rdx), %xmm2
1529 ; SSE2-NEXT: movdqa %xmm2, (%rcx)
1530 ; SSE2-NEXT: movdqa %xmm0, 16(%rcx)
1533 ; SSE42-LABEL: vec256_v32i8_to_v8i32_factor4:
1535 ; SSE42-NEXT: movdqa (%rdi), %xmm0
1536 ; SSE42-NEXT: paddb (%rsi), %xmm0
1537 ; SSE42-NEXT: pmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
1538 ; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
1539 ; SSE42-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
1540 ; SSE42-NEXT: paddb 16(%rdx), %xmm0
1541 ; SSE42-NEXT: paddb (%rdx), %xmm1
1542 ; SSE42-NEXT: movdqa %xmm1, (%rcx)
1543 ; SSE42-NEXT: movdqa %xmm0, 16(%rcx)
1546 ; AVX-LABEL: vec256_v32i8_to_v8i32_factor4:
1548 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
1549 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
1550 ; AVX-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
1551 ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
1552 ; AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
1553 ; AVX-NEXT: vpaddb 16(%rdx), %xmm0, %xmm0
1554 ; AVX-NEXT: vpaddb (%rdx), %xmm1, %xmm1
1555 ; AVX-NEXT: vmovdqa %xmm1, (%rcx)
1556 ; AVX-NEXT: vmovdqa %xmm0, 16(%rcx)
1559 ; AVX2-LABEL: vec256_v32i8_to_v8i32_factor4:
1561 ; AVX2-NEXT: vmovdqa (%rdi), %xmm0
1562 ; AVX2-NEXT: vpaddb (%rsi), %xmm0, %xmm0
1563 ; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
1564 ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0
1565 ; AVX2-NEXT: vmovdqa %ymm0, (%rcx)
1566 ; AVX2-NEXT: vzeroupper
1569 ; AVX512F-LABEL: vec256_v32i8_to_v8i32_factor4:
1571 ; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
1572 ; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0
1573 ; AVX512F-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
1574 ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
1575 ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
1576 ; AVX512F-NEXT: vzeroupper
1577 ; AVX512F-NEXT: retq
1579 ; AVX512BW-LABEL: vec256_v32i8_to_v8i32_factor4:
1580 ; AVX512BW: # %bb.0:
1581 ; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
1582 ; AVX512BW-NEXT: vpaddb (%rsi), %xmm0, %xmm0
1583 ; AVX512BW-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
1584 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
1585 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
1586 ; AVX512BW-NEXT: vzeroupper
1587 ; AVX512BW-NEXT: retq
1588 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
1589 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
1590 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
1591 %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
1592 %zextd.vec = shufflevector <32 x i8> %in.vec.trunc, <32 x i8> zeroinitializer, <32 x i32> <i32 0, i32 33, i32 34, i32 35, i32 1, i32 37, i32 38, i32 39, i32 2, i32 41, i32 42, i32 43, i32 3, i32 45, i32 46, i32 47, i32 4, i32 49, i32 50, i32 51, i32 5, i32 53, i32 54, i32 55, i32 6, i32 57, i32 58, i32 59, i32 7, i32 61, i32 62, i32 63>
1593 %out.bytevec.padded = shufflevector <32 x i8> %zextd.vec, <32 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
1594 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
1595 %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
1596 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
  ret void
}
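; Factor-8 byte widening: SSE2 needs a punpcklbw/punpcklwd/punpckldq chain,
; SSE4.2 and AVX use two pmovzxbq steps around a psrld, and AVX2 onwards a
; single 256-bit pmovzxbq.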
1600 define void @vec256_v32i8_to_v4i64_factor8(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
1601 ; SSE2-LABEL: vec256_v32i8_to_v4i64_factor8:
1603 ; SSE2-NEXT: movdqa (%rdi), %xmm0
1604 ; SSE2-NEXT: paddb (%rsi), %xmm0
1605 ; SSE2-NEXT: pxor %xmm1, %xmm1
1606 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
1607 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
1608 ; SSE2-NEXT: movdqa %xmm0, %xmm2
1609 ; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
1610 ; SSE2-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
1611 ; SSE2-NEXT: paddb 16(%rdx), %xmm0
1612 ; SSE2-NEXT: paddb (%rdx), %xmm2
1613 ; SSE2-NEXT: movdqa %xmm2, (%rcx)
1614 ; SSE2-NEXT: movdqa %xmm0, 16(%rcx)
1617 ; SSE42-LABEL: vec256_v32i8_to_v4i64_factor8:
1619 ; SSE42-NEXT: movdqa (%rdi), %xmm0
1620 ; SSE42-NEXT: paddb (%rsi), %xmm0
1621 ; SSE42-NEXT: pmovzxbq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
1622 ; SSE42-NEXT: psrld $16, %xmm0
1623 ; SSE42-NEXT: pmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
1624 ; SSE42-NEXT: paddb 16(%rdx), %xmm0
1625 ; SSE42-NEXT: paddb (%rdx), %xmm1
1626 ; SSE42-NEXT: movdqa %xmm1, (%rcx)
1627 ; SSE42-NEXT: movdqa %xmm0, 16(%rcx)
1630 ; AVX-LABEL: vec256_v32i8_to_v4i64_factor8:
1632 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
1633 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
1634 ; AVX-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
1635 ; AVX-NEXT: vpsrld $16, %xmm0, %xmm0
1636 ; AVX-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
1637 ; AVX-NEXT: vpaddb 16(%rdx), %xmm0, %xmm0
1638 ; AVX-NEXT: vpaddb (%rdx), %xmm1, %xmm1
1639 ; AVX-NEXT: vmovdqa %xmm1, (%rcx)
1640 ; AVX-NEXT: vmovdqa %xmm0, 16(%rcx)
1643 ; AVX2-LABEL: vec256_v32i8_to_v4i64_factor8:
1645 ; AVX2-NEXT: vmovdqa (%rdi), %xmm0
1646 ; AVX2-NEXT: vpaddb (%rsi), %xmm0, %xmm0
1647 ; AVX2-NEXT: vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
1648 ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0
1649 ; AVX2-NEXT: vmovdqa %ymm0, (%rcx)
1650 ; AVX2-NEXT: vzeroupper
1653 ; AVX512F-LABEL: vec256_v32i8_to_v4i64_factor8:
1655 ; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
1656 ; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0
1657 ; AVX512F-NEXT: vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
1658 ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
1659 ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
1660 ; AVX512F-NEXT: vzeroupper
1661 ; AVX512F-NEXT: retq
1663 ; AVX512BW-LABEL: vec256_v32i8_to_v4i64_factor8:
1664 ; AVX512BW: # %bb.0:
1665 ; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
1666 ; AVX512BW-NEXT: vpaddb (%rsi), %xmm0, %xmm0
1667 ; AVX512BW-NEXT: vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
1668 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
1669 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
1670 ; AVX512BW-NEXT: vzeroupper
1671 ; AVX512BW-NEXT: retq
1672 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
1673 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
1674 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
1675 %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
1676 %zextd.vec = shufflevector <32 x i8> %in.vec.trunc, <32 x i8> zeroinitializer, <32 x i32> <i32 0, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 1, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 2, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 3, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
1677 %out.bytevec.padded = shufflevector <32 x i8> %zextd.vec, <32 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
1678 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
1679 %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
1680 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
  ret void
}
1684 define void @vec256_v32i8_to_v2i128_factor16(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
1685 ; SSE-LABEL: vec256_v32i8_to_v2i128_factor16:
1687 ; SSE-NEXT: movdqa (%rdi), %xmm0
1688 ; SSE-NEXT: paddb (%rsi), %xmm0
1689 ; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,0,0,0]
1690 ; SSE-NEXT: pand %xmm0, %xmm1
1691 ; SSE-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1]
1692 ; SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
1693 ; SSE-NEXT: paddb 16(%rdx), %xmm0
1694 ; SSE-NEXT: paddb (%rdx), %xmm1
1695 ; SSE-NEXT: movdqa %xmm1, (%rcx)
1696 ; SSE-NEXT: movdqa %xmm0, 16(%rcx)
1699 ; AVX-LABEL: vec256_v32i8_to_v2i128_factor16:
1701 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
1702 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
1703 ; AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
1704 ; AVX-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1]
1705 ; AVX-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
1706 ; AVX-NEXT: vpaddb 16(%rdx), %xmm0, %xmm0
1707 ; AVX-NEXT: vpaddb (%rdx), %xmm1, %xmm1
1708 ; AVX-NEXT: vmovdqa %xmm1, (%rcx)
1709 ; AVX-NEXT: vmovdqa %xmm0, 16(%rcx)
1712 ; AVX2-LABEL: vec256_v32i8_to_v2i128_factor16:
1714 ; AVX2-NEXT: vmovdqa (%rdi), %xmm0
1715 ; AVX2-NEXT: vpaddb (%rsi), %xmm0, %xmm0
1716 ; AVX2-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
1717 ; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,1,3]
1718 ; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
1719 ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0
1720 ; AVX2-NEXT: vmovdqa %ymm0, (%rcx)
1721 ; AVX2-NEXT: vzeroupper
1724 ; AVX512F-LABEL: vec256_v32i8_to_v2i128_factor16:
1726 ; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
1727 ; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0
1728 ; AVX512F-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
1729 ; AVX512F-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,1,3]
1730 ; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
1731 ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
1732 ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
1733 ; AVX512F-NEXT: vzeroupper
1734 ; AVX512F-NEXT: retq
1736 ; AVX512BW-LABEL: vec256_v32i8_to_v2i128_factor16:
1737 ; AVX512BW: # %bb.0:
1738 ; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
1739 ; AVX512BW-NEXT: vpaddb (%rsi), %xmm0, %xmm0
1740 ; AVX512BW-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
1741 ; AVX512BW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,1,3]
1742 ; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
1743 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
1744 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
1745 ; AVX512BW-NEXT: vzeroupper
1746 ; AVX512BW-NEXT: retq
1747 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
1748 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
1749 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
1750 %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
1751 %zextd.vec = shufflevector <32 x i8> %in.vec.trunc, <32 x i8> zeroinitializer, <32 x i32> <i32 0, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 1, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
1752 %out.bytevec.padded = shufflevector <32 x i8> %zextd.vec, <32 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
1753 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
1754 %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
1755 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
  ret void
}
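; Factor-32 keeps only byte 0 of the input: the scalar lanes are cleared with a
; [255,0,0,0] mask and the upper 16 output bytes reduce to a plain copy of the
; bias vector.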
1759 define void @vec256_v32i8_to_v1i256_factor32(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
1760 ; SSE-LABEL: vec256_v32i8_to_v1i256_factor32:
1762 ; SSE-NEXT: movdqa (%rdi), %xmm0
1763 ; SSE-NEXT: paddb (%rsi), %xmm0
1764 ; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
1765 ; SSE-NEXT: movaps 16(%rdx), %xmm1
1766 ; SSE-NEXT: paddb (%rdx), %xmm0
1767 ; SSE-NEXT: movaps %xmm1, 16(%rcx)
1768 ; SSE-NEXT: movdqa %xmm0, (%rcx)
1771 ; AVX-LABEL: vec256_v32i8_to_v1i256_factor32:
1773 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
1774 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
1775 ; AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
1776 ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0
1777 ; AVX-NEXT: vmovaps 16(%rdx), %xmm1
1778 ; AVX-NEXT: vmovaps %xmm1, 16(%rcx)
1779 ; AVX-NEXT: vmovdqa %xmm0, (%rcx)
1782 ; AVX2-LABEL: vec256_v32i8_to_v1i256_factor32:
1784 ; AVX2-NEXT: vmovdqa (%rdi), %ymm0
1785 ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0
1786 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [255,0,0,0]
1787 ; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
1788 ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0
1789 ; AVX2-NEXT: vmovdqa %ymm0, (%rcx)
1790 ; AVX2-NEXT: vzeroupper
1793 ; AVX512F-LABEL: vec256_v32i8_to_v1i256_factor32:
1795 ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
1796 ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0
1797 ; AVX512F-NEXT: vmovdqa {{.*#+}} xmm1 = [255,0,0,0]
1798 ; AVX512F-NEXT: vpand %ymm1, %ymm0, %ymm0
1799 ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
1800 ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
1801 ; AVX512F-NEXT: vzeroupper
1802 ; AVX512F-NEXT: retq
1804 ; AVX512BW-LABEL: vec256_v32i8_to_v1i256_factor32:
1805 ; AVX512BW: # %bb.0:
1806 ; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
1807 ; AVX512BW-NEXT: vpaddb (%rsi), %ymm0, %ymm0
1808 ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = [255,0,0,0]
1809 ; AVX512BW-NEXT: vpand %ymm1, %ymm0, %ymm0
1810 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
1811 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
1812 ; AVX512BW-NEXT: vzeroupper
1813 ; AVX512BW-NEXT: retq
1814 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
1815 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
1816 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
1817 %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
1818 %zextd.vec = shufflevector <32 x i8> %in.vec.trunc, <32 x i8> zeroinitializer, <32 x i32> <i32 0, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
1819 %out.bytevec.padded = shufflevector <32 x i8> %zextd.vec, <32 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
1820 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
1821 %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
1822 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
  ret void
}
1826 define void @vec256_v16i16_to_v8i32_factor2(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
1827 ; SSE2-LABEL: vec256_v16i16_to_v8i32_factor2:
1829 ; SSE2-NEXT: movdqa (%rdi), %xmm0
1830 ; SSE2-NEXT: paddb (%rsi), %xmm0
1831 ; SSE2-NEXT: pxor %xmm1, %xmm1
1832 ; SSE2-NEXT: movdqa %xmm0, %xmm2
1833 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
1834 ; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
1835 ; SSE2-NEXT: paddb 16(%rdx), %xmm0
1836 ; SSE2-NEXT: paddb (%rdx), %xmm2
1837 ; SSE2-NEXT: movdqa %xmm2, (%rcx)
1838 ; SSE2-NEXT: movdqa %xmm0, 16(%rcx)
1841 ; SSE42-LABEL: vec256_v16i16_to_v8i32_factor2:
1843 ; SSE42-NEXT: movdqa (%rdi), %xmm0
1844 ; SSE42-NEXT: paddb (%rsi), %xmm0
1845 ; SSE42-NEXT: pxor %xmm1, %xmm1
1846 ; SSE42-NEXT: pmovzxwd {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
1847 ; SSE42-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
1848 ; SSE42-NEXT: paddb 16(%rdx), %xmm0
1849 ; SSE42-NEXT: paddb (%rdx), %xmm2
1850 ; SSE42-NEXT: movdqa %xmm2, (%rcx)
1851 ; SSE42-NEXT: movdqa %xmm0, 16(%rcx)
1854 ; AVX-LABEL: vec256_v16i16_to_v8i32_factor2:
1856 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
1857 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
1858 ; AVX-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
1859 ; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
1860 ; AVX-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
1861 ; AVX-NEXT: vpaddb 16(%rdx), %xmm0, %xmm0
1862 ; AVX-NEXT: vpaddb (%rdx), %xmm1, %xmm1
1863 ; AVX-NEXT: vmovdqa %xmm1, (%rcx)
1864 ; AVX-NEXT: vmovdqa %xmm0, 16(%rcx)
1867 ; AVX2-LABEL: vec256_v16i16_to_v8i32_factor2:
1869 ; AVX2-NEXT: vmovdqa (%rdi), %xmm0
1870 ; AVX2-NEXT: vpaddb (%rsi), %xmm0, %xmm0
1871 ; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
1872 ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0
1873 ; AVX2-NEXT: vmovdqa %ymm0, (%rcx)
1874 ; AVX2-NEXT: vzeroupper
1877 ; AVX512F-LABEL: vec256_v16i16_to_v8i32_factor2:
1879 ; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
1880 ; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0
1881 ; AVX512F-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
1882 ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
1883 ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
1884 ; AVX512F-NEXT: vzeroupper
1885 ; AVX512F-NEXT: retq
1887 ; AVX512BW-LABEL: vec256_v16i16_to_v8i32_factor2:
1888 ; AVX512BW: # %bb.0:
1889 ; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
1890 ; AVX512BW-NEXT: vpaddb (%rsi), %xmm0, %xmm0
1891 ; AVX512BW-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
1892 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
1893 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
1894 ; AVX512BW-NEXT: vzeroupper
1895 ; AVX512BW-NEXT: retq
1896 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
1897 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
1898 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
1899 %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
1900 %in.vec.cast = bitcast <32 x i8> %in.vec.trunc to <16 x i16>
1901 %zextd.vec = shufflevector <16 x i16> %in.vec.cast, <16 x i16> zeroinitializer, <16 x i32> <i32 0, i32 17, i32 1, i32 19, i32 2, i32 21, i32 3, i32 23, i32 4, i32 25, i32 5, i32 27, i32 6, i32 29, i32 7, i32 31>
1902 %out.bytevec = bitcast <16 x i16> %zextd.vec to <32 x i8>
1903 %out.bytevec.padded = shufflevector <32 x i8> %out.bytevec, <32 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
1904 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
1905 %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
1906 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
  ret void
}
1910 define void @vec256_v16i16_to_v4i64_factor4(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
1911 ; SSE2-LABEL: vec256_v16i16_to_v4i64_factor4:
1913 ; SSE2-NEXT: movdqa (%rdi), %xmm0
1914 ; SSE2-NEXT: paddb (%rsi), %xmm0
1915 ; SSE2-NEXT: pxor %xmm1, %xmm1
1916 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
1917 ; SSE2-NEXT: movdqa %xmm0, %xmm2
1918 ; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
1919 ; SSE2-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
1920 ; SSE2-NEXT: paddb 16(%rdx), %xmm0
1921 ; SSE2-NEXT: paddb (%rdx), %xmm2
1922 ; SSE2-NEXT: movdqa %xmm2, (%rcx)
1923 ; SSE2-NEXT: movdqa %xmm0, 16(%rcx)
1926 ; SSE42-LABEL: vec256_v16i16_to_v4i64_factor4:
1928 ; SSE42-NEXT: movdqa (%rdi), %xmm0
1929 ; SSE42-NEXT: paddb (%rsi), %xmm0
1930 ; SSE42-NEXT: pmovzxwq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
1931 ; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
1932 ; SSE42-NEXT: pmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
1933 ; SSE42-NEXT: paddb 16(%rdx), %xmm0
1934 ; SSE42-NEXT: paddb (%rdx), %xmm1
1935 ; SSE42-NEXT: movdqa %xmm1, (%rcx)
1936 ; SSE42-NEXT: movdqa %xmm0, 16(%rcx)
1939 ; AVX-LABEL: vec256_v16i16_to_v4i64_factor4:
1941 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
1942 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
1943 ; AVX-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
1944 ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
1945 ; AVX-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
1946 ; AVX-NEXT: vpaddb 16(%rdx), %xmm0, %xmm0
1947 ; AVX-NEXT: vpaddb (%rdx), %xmm1, %xmm1
1948 ; AVX-NEXT: vmovdqa %xmm1, (%rcx)
1949 ; AVX-NEXT: vmovdqa %xmm0, 16(%rcx)
1952 ; AVX2-LABEL: vec256_v16i16_to_v4i64_factor4:
1954 ; AVX2-NEXT: vmovdqa (%rdi), %xmm0
1955 ; AVX2-NEXT: vpaddb (%rsi), %xmm0, %xmm0
1956 ; AVX2-NEXT: vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
1957 ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0
1958 ; AVX2-NEXT: vmovdqa %ymm0, (%rcx)
1959 ; AVX2-NEXT: vzeroupper
1962 ; AVX512F-LABEL: vec256_v16i16_to_v4i64_factor4:
1964 ; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
1965 ; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0
1966 ; AVX512F-NEXT: vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
1967 ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
1968 ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
1969 ; AVX512F-NEXT: vzeroupper
1970 ; AVX512F-NEXT: retq
1972 ; AVX512BW-LABEL: vec256_v16i16_to_v4i64_factor4:
1973 ; AVX512BW: # %bb.0:
1974 ; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
1975 ; AVX512BW-NEXT: vpaddb (%rsi), %xmm0, %xmm0
1976 ; AVX512BW-NEXT: vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
1977 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
1978 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
1979 ; AVX512BW-NEXT: vzeroupper
1980 ; AVX512BW-NEXT: retq
1981 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
1982 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
1983 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
1984 %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
1985 %in.vec.cast = bitcast <32 x i8> %in.vec.trunc to <16 x i16>
1986 %zextd.vec = shufflevector <16 x i16> %in.vec.cast, <16 x i16> zeroinitializer, <16 x i32> <i32 0, i32 17, i32 18, i32 19, i32 1, i32 21, i32 22, i32 23, i32 2, i32 25, i32 26, i32 27, i32 3, i32 29, i32 30, i32 31>
1987 %out.bytevec = bitcast <16 x i16> %zextd.vec to <32 x i8>
1988 %out.bytevec.padded = shufflevector <32 x i8> %out.bytevec, <32 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
1989 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
1990 %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
1991 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
  ret void
}
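; Factor-8 word widening into two i128 lanes keeps input words 0 and 1 at bit
; offsets 0 and 128. AVX-512BW can do this with one vpermt2w into a zeroed
; vector; the other subtargets blend and shift within 128-bit halves.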
1995 define void @vec256_v16i16_to_v2i128_factor8(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
1996 ; SSE2-LABEL: vec256_v16i16_to_v2i128_factor8:
1998 ; SSE2-NEXT: movdqa (%rdi), %xmm0
1999 ; SSE2-NEXT: paddb (%rsi), %xmm0
2000 ; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [65535,0,0,0]
2001 ; SSE2-NEXT: pand %xmm0, %xmm1
2002 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
2003 ; SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
2004 ; SSE2-NEXT: paddb 16(%rdx), %xmm0
2005 ; SSE2-NEXT: paddb (%rdx), %xmm1
2006 ; SSE2-NEXT: movdqa %xmm1, (%rcx)
2007 ; SSE2-NEXT: movdqa %xmm0, 16(%rcx)
2010 ; SSE42-LABEL: vec256_v16i16_to_v2i128_factor8:
2012 ; SSE42-NEXT: movdqa (%rdi), %xmm0
2013 ; SSE42-NEXT: paddb (%rsi), %xmm0
2014 ; SSE42-NEXT: pxor %xmm1, %xmm1
2015 ; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5,6,7]
2016 ; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
2017 ; SSE42-NEXT: psrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
2018 ; SSE42-NEXT: paddb 16(%rdx), %xmm0
2019 ; SSE42-NEXT: paddb (%rdx), %xmm1
2020 ; SSE42-NEXT: movdqa %xmm1, (%rcx)
2021 ; SSE42-NEXT: movdqa %xmm0, 16(%rcx)
2024 ; AVX-LABEL: vec256_v16i16_to_v2i128_factor8:
2026 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
2027 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
2028 ; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
2029 ; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5,6,7]
2030 ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
2031 ; AVX-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
2032 ; AVX-NEXT: vpaddb 16(%rdx), %xmm0, %xmm0
2033 ; AVX-NEXT: vpaddb (%rdx), %xmm1, %xmm1
2034 ; AVX-NEXT: vmovdqa %xmm1, (%rcx)
2035 ; AVX-NEXT: vmovdqa %xmm0, 16(%rcx)
2038 ; AVX2-LABEL: vec256_v16i16_to_v2i128_factor8:
2040 ; AVX2-NEXT: vmovdqa (%rdi), %xmm0
2041 ; AVX2-NEXT: vpaddb (%rsi), %xmm0, %xmm0
2042 ; AVX2-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
2043 ; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,1,3]
2044 ; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
2045 ; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7],ymm0[8],ymm1[9,10,11,12,13,14,15]
2046 ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0
2047 ; AVX2-NEXT: vmovdqa %ymm0, (%rcx)
2048 ; AVX2-NEXT: vzeroupper
2051 ; AVX512F-LABEL: vec256_v16i16_to_v2i128_factor8:
2053 ; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
2054 ; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0
2055 ; AVX512F-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
2056 ; AVX512F-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,1,3]
2057 ; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
2058 ; AVX512F-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7],ymm0[8],ymm1[9,10,11,12,13,14,15]
2059 ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
2060 ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
2061 ; AVX512F-NEXT: vzeroupper
2062 ; AVX512F-NEXT: retq
2064 ; AVX512BW-LABEL: vec256_v16i16_to_v2i128_factor8:
2065 ; AVX512BW: # %bb.0:
2066 ; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
2067 ; AVX512BW-NEXT: vpaddb (%rsi), %ymm0, %ymm0
2068 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [16,1,2,3,4,5,6,7,17,9,10,11,12,13,14,15]
2069 ; AVX512BW-NEXT: vpxor %xmm2, %xmm2, %xmm2
2070 ; AVX512BW-NEXT: vpermt2w %ymm0, %ymm1, %ymm2
2071 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm2, %zmm0
2072 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
2073 ; AVX512BW-NEXT: vzeroupper
2074 ; AVX512BW-NEXT: retq
2075 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
2076 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
2077 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
2078 %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
2079 %in.vec.cast = bitcast <32 x i8> %in.vec.trunc to <16 x i16>
2080 %zextd.vec = shufflevector <16 x i16> %in.vec.cast, <16 x i16> zeroinitializer, <16 x i32> <i32 0, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 1, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
2081 %out.bytevec = bitcast <16 x i16> %zextd.vec to <32 x i8>
2082 %out.bytevec.padded = shufflevector <32 x i8> %out.bytevec, <32 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
2083 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
2084 %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
2085 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
2086 ret void
2087 }
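; Only i16 element 0 survives, zero extended into a single 256-bit value (factor 16).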
2089 define void @vec256_v16i16_to_v1i256_factor16(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
2090 ; SSE2-LABEL: vec256_v16i16_to_v1i256_factor16:
2092 ; SSE2-NEXT: movdqa (%rdi), %xmm0
2093 ; SSE2-NEXT: paddb (%rsi), %xmm0
2094 ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
2095 ; SSE2-NEXT: movaps 16(%rdx), %xmm1
2096 ; SSE2-NEXT: paddb (%rdx), %xmm0
2097 ; SSE2-NEXT: movaps %xmm1, 16(%rcx)
2098 ; SSE2-NEXT: movdqa %xmm0, (%rcx)
2101 ; SSE42-LABEL: vec256_v16i16_to_v1i256_factor16:
2103 ; SSE42-NEXT: movdqa (%rdi), %xmm0
2104 ; SSE42-NEXT: paddb (%rsi), %xmm0
2105 ; SSE42-NEXT: pxor %xmm1, %xmm1
2106 ; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5,6,7]
2107 ; SSE42-NEXT: movaps 16(%rdx), %xmm0
2108 ; SSE42-NEXT: paddb (%rdx), %xmm1
2109 ; SSE42-NEXT: movaps %xmm0, 16(%rcx)
2110 ; SSE42-NEXT: movdqa %xmm1, (%rcx)
2113 ; AVX-LABEL: vec256_v16i16_to_v1i256_factor16:
2115 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
2116 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
2117 ; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
2118 ; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3,4,5,6,7]
2119 ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0
2120 ; AVX-NEXT: vmovaps 16(%rdx), %xmm1
2121 ; AVX-NEXT: vmovaps %xmm1, 16(%rcx)
2122 ; AVX-NEXT: vmovdqa %xmm0, (%rcx)
2125 ; AVX2-LABEL: vec256_v16i16_to_v1i256_factor16:
2127 ; AVX2-NEXT: vmovdqa (%rdi), %ymm0
2128 ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0
2129 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [65535,0,0,0]
2130 ; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
2131 ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0
2132 ; AVX2-NEXT: vmovdqa %ymm0, (%rcx)
2133 ; AVX2-NEXT: vzeroupper
2136 ; AVX512F-LABEL: vec256_v16i16_to_v1i256_factor16:
2138 ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
2139 ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0
2140 ; AVX512F-NEXT: vmovdqa {{.*#+}} xmm1 = [65535,0,0,0]
2141 ; AVX512F-NEXT: vpand %ymm1, %ymm0, %ymm0
2142 ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
2143 ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
2144 ; AVX512F-NEXT: vzeroupper
2145 ; AVX512F-NEXT: retq
2147 ; AVX512BW-LABEL: vec256_v16i16_to_v1i256_factor16:
2148 ; AVX512BW: # %bb.0:
2149 ; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
2150 ; AVX512BW-NEXT: vpaddb (%rsi), %ymm0, %ymm0
2151 ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = [65535,0,0,0]
2152 ; AVX512BW-NEXT: vpand %ymm1, %ymm0, %ymm0
2153 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
2154 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
2155 ; AVX512BW-NEXT: vzeroupper
2156 ; AVX512BW-NEXT: retq
2157 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
2158 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
2159 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
2160 %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
2161 %in.vec.cast = bitcast <32 x i8> %in.vec.trunc to <16 x i16>
2162 %zextd.vec = shufflevector <16 x i16> %in.vec.cast, <16 x i16> zeroinitializer, <16 x i32> <i32 0, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
2163 %out.bytevec = bitcast <16 x i16> %zextd.vec to <32 x i8>
2164 %out.bytevec.padded = shufflevector <32 x i8> %out.bytevec, <32 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
2165 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
2166 %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
2167 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
2168 ret void
2169 }
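; i32 elements 0-3 are zero extended to four i64 lanes (factor 2).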
2171 define void @vec256_v8i32_to_v4i64_factor2(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
2172 ; SSE2-LABEL: vec256_v8i32_to_v4i64_factor2:
2174 ; SSE2-NEXT: movdqa (%rdi), %xmm0
2175 ; SSE2-NEXT: paddb (%rsi), %xmm0
2176 ; SSE2-NEXT: pxor %xmm1, %xmm1
2177 ; SSE2-NEXT: movdqa %xmm0, %xmm2
2178 ; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
2179 ; SSE2-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
2180 ; SSE2-NEXT: paddb 16(%rdx), %xmm0
2181 ; SSE2-NEXT: paddb (%rdx), %xmm2
2182 ; SSE2-NEXT: movdqa %xmm2, (%rcx)
2183 ; SSE2-NEXT: movdqa %xmm0, 16(%rcx)
2186 ; SSE42-LABEL: vec256_v8i32_to_v4i64_factor2:
2188 ; SSE42-NEXT: movdqa (%rdi), %xmm0
2189 ; SSE42-NEXT: paddb (%rsi), %xmm0
2190 ; SSE42-NEXT: pxor %xmm1, %xmm1
2191 ; SSE42-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
2192 ; SSE42-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
2193 ; SSE42-NEXT: paddb 16(%rdx), %xmm0
2194 ; SSE42-NEXT: paddb (%rdx), %xmm2
2195 ; SSE42-NEXT: movdqa %xmm2, (%rcx)
2196 ; SSE42-NEXT: movdqa %xmm0, 16(%rcx)
2199 ; AVX-LABEL: vec256_v8i32_to_v4i64_factor2:
2201 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
2202 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
2203 ; AVX-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero
2204 ; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
2205 ; AVX-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
2206 ; AVX-NEXT: vpaddb 16(%rdx), %xmm0, %xmm0
2207 ; AVX-NEXT: vpaddb (%rdx), %xmm1, %xmm1
2208 ; AVX-NEXT: vmovdqa %xmm1, (%rcx)
2209 ; AVX-NEXT: vmovdqa %xmm0, 16(%rcx)
2212 ; AVX2-LABEL: vec256_v8i32_to_v4i64_factor2:
2214 ; AVX2-NEXT: vmovdqa (%rdi), %xmm0
2215 ; AVX2-NEXT: vpaddb (%rsi), %xmm0, %xmm0
2216 ; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
2217 ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0
2218 ; AVX2-NEXT: vmovdqa %ymm0, (%rcx)
2219 ; AVX2-NEXT: vzeroupper
2222 ; AVX512F-LABEL: vec256_v8i32_to_v4i64_factor2:
2224 ; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
2225 ; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0
2226 ; AVX512F-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
2227 ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
2228 ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
2229 ; AVX512F-NEXT: vzeroupper
2230 ; AVX512F-NEXT: retq
2232 ; AVX512BW-LABEL: vec256_v8i32_to_v4i64_factor2:
2233 ; AVX512BW: # %bb.0:
2234 ; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
2235 ; AVX512BW-NEXT: vpaddb (%rsi), %xmm0, %xmm0
2236 ; AVX512BW-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
2237 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
2238 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
2239 ; AVX512BW-NEXT: vzeroupper
2240 ; AVX512BW-NEXT: retq
2241 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
2242 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
2243 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
2244 %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
2245 %in.vec.cast = bitcast <32 x i8> %in.vec.trunc to <8 x i32>
2246 %zextd.vec = shufflevector <8 x i32> %in.vec.cast, <8 x i32> zeroinitializer, <8 x i32> <i32 0, i32 9, i32 1, i32 11, i32 2, i32 13, i32 3, i32 15>
2247 %out.bytevec = bitcast <8 x i32> %zextd.vec to <32 x i8>
2248 %out.bytevec.padded = shufflevector <32 x i8> %out.bytevec, <32 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
2249 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
2250 %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
2251 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
2252 ret void
2253 }
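; i32 elements 0 and 1 are zero extended into the low 32 bits of each 128-bit half (factor 4).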
2255 define void @vec256_v8i32_to_v2i128_factor4(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
2256 ; SSE2-LABEL: vec256_v8i32_to_v2i128_factor4:
2258 ; SSE2-NEXT: movdqa (%rdi), %xmm0
2259 ; SSE2-NEXT: paddb (%rsi), %xmm0
2260 ; SSE2-NEXT: xorps %xmm1, %xmm1
2261 ; SSE2-NEXT: xorps %xmm2, %xmm2
2262 ; SSE2-NEXT: movss {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3]
2263 ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[1,0]
2264 ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[2,3]
2265 ; SSE2-NEXT: paddb 16(%rdx), %xmm0
2266 ; SSE2-NEXT: paddb (%rdx), %xmm2
2267 ; SSE2-NEXT: movdqa %xmm2, (%rcx)
2268 ; SSE2-NEXT: movdqa %xmm0, 16(%rcx)
2271 ; SSE42-LABEL: vec256_v8i32_to_v2i128_factor4:
2273 ; SSE42-NEXT: movdqa (%rdi), %xmm0
2274 ; SSE42-NEXT: paddb (%rsi), %xmm0
2275 ; SSE42-NEXT: pxor %xmm1, %xmm1
2276 ; SSE42-NEXT: pxor %xmm2, %xmm2
2277 ; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1],xmm2[2,3,4,5,6,7]
2278 ; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
2279 ; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
2280 ; SSE42-NEXT: paddb 16(%rdx), %xmm0
2281 ; SSE42-NEXT: paddb (%rdx), %xmm2
2282 ; SSE42-NEXT: movdqa %xmm2, (%rcx)
2283 ; SSE42-NEXT: movdqa %xmm0, 16(%rcx)
2286 ; AVX-LABEL: vec256_v8i32_to_v2i128_factor4:
2288 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
2289 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
2290 ; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
2291 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
2292 ; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
2293 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
2294 ; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
2295 ; AVX-NEXT: vpaddb 16(%rdx), %xmm1, %xmm1
2296 ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0
2297 ; AVX-NEXT: vmovdqa %xmm0, (%rcx)
2298 ; AVX-NEXT: vmovdqa %xmm1, 16(%rcx)
2299 ; AVX-NEXT: vzeroupper
2302 ; AVX2-SLOW-LABEL: vec256_v8i32_to_v2i128_factor4:
2303 ; AVX2-SLOW: # %bb.0:
2304 ; AVX2-SLOW-NEXT: vmovdqa (%rdi), %xmm0
2305 ; AVX2-SLOW-NEXT: vpaddb (%rsi), %xmm0, %xmm0
2306 ; AVX2-SLOW-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
2307 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,1,3]
2308 ; AVX2-SLOW-NEXT: vpxor %xmm1, %xmm1, %xmm1
2309 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
2310 ; AVX2-SLOW-NEXT: vpaddb (%rdx), %ymm0, %ymm0
2311 ; AVX2-SLOW-NEXT: vmovdqa %ymm0, (%rcx)
2312 ; AVX2-SLOW-NEXT: vzeroupper
2313 ; AVX2-SLOW-NEXT: retq
2315 ; AVX2-FAST-PERLANE-LABEL: vec256_v8i32_to_v2i128_factor4:
2316 ; AVX2-FAST-PERLANE: # %bb.0:
2317 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %xmm0
2318 ; AVX2-FAST-PERLANE-NEXT: vpaddb (%rsi), %xmm0, %xmm0
2319 ; AVX2-FAST-PERLANE-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
2320 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,1,3]
2321 ; AVX2-FAST-PERLANE-NEXT: vpxor %xmm1, %xmm1, %xmm1
2322 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
2323 ; AVX2-FAST-PERLANE-NEXT: vpaddb (%rdx), %ymm0, %ymm0
2324 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm0, (%rcx)
2325 ; AVX2-FAST-PERLANE-NEXT: vzeroupper
2326 ; AVX2-FAST-PERLANE-NEXT: retq
2328 ; AVX2-FAST-LABEL: vec256_v8i32_to_v2i128_factor4:
2329 ; AVX2-FAST: # %bb.0:
2330 ; AVX2-FAST-NEXT: vmovdqa (%rdi), %ymm0
2331 ; AVX2-FAST-NEXT: vpaddb (%rsi), %ymm0, %ymm0
2332 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm1 = [0,u,u,u,1,u,u,u]
2333 ; AVX2-FAST-NEXT: vpermd %ymm0, %ymm1, %ymm0
2334 ; AVX2-FAST-NEXT: vpxor %xmm1, %xmm1, %xmm1
2335 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
2336 ; AVX2-FAST-NEXT: vpaddb (%rdx), %ymm0, %ymm0
2337 ; AVX2-FAST-NEXT: vmovdqa %ymm0, (%rcx)
2338 ; AVX2-FAST-NEXT: vzeroupper
2339 ; AVX2-FAST-NEXT: retq
2341 ; AVX512F-LABEL: vec256_v8i32_to_v2i128_factor4:
2343 ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
2344 ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0
2345 ; AVX512F-NEXT: movb $17, %al
2346 ; AVX512F-NEXT: kmovw %eax, %k1
2347 ; AVX512F-NEXT: vpexpandd %ymm0, %ymm0 {%k1} {z}
2348 ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
2349 ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
2350 ; AVX512F-NEXT: vzeroupper
2351 ; AVX512F-NEXT: retq
2353 ; AVX512BW-LABEL: vec256_v8i32_to_v2i128_factor4:
2354 ; AVX512BW: # %bb.0:
2355 ; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
2356 ; AVX512BW-NEXT: vpaddb (%rsi), %ymm0, %ymm0
2357 ; AVX512BW-NEXT: movb $17, %al
2358 ; AVX512BW-NEXT: kmovd %eax, %k1
2359 ; AVX512BW-NEXT: vpexpandd %ymm0, %ymm0 {%k1} {z}
2360 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
2361 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
2362 ; AVX512BW-NEXT: vzeroupper
2363 ; AVX512BW-NEXT: retq
2364 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
2365 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
2366 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
2367 %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
2368 %in.vec.cast = bitcast <32 x i8> %in.vec.trunc to <8 x i32>
2369 %zextd.vec = shufflevector <8 x i32> %in.vec.cast, <8 x i32> zeroinitializer, <8 x i32> <i32 0, i32 9, i32 10, i32 11, i32 1, i32 13, i32 14, i32 15>
2370 %out.bytevec = bitcast <8 x i32> %zextd.vec to <32 x i8>
2371 %out.bytevec.padded = shufflevector <32 x i8> %out.bytevec, <32 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
2372 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
2373 %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
2374 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
2375 ret void
2376 }
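; Only i32 element 0 survives, zero extended into a single 256-bit value (factor 8).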
2378 define void @vec256_v8i32_to_v1i256_factor8(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
2379 ; SSE2-LABEL: vec256_v8i32_to_v1i256_factor8:
2381 ; SSE2-NEXT: movdqa (%rdi), %xmm0
2382 ; SSE2-NEXT: paddb (%rsi), %xmm0
2383 ; SSE2-NEXT: xorps %xmm1, %xmm1
2384 ; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
2385 ; SSE2-NEXT: movaps 16(%rdx), %xmm0
2386 ; SSE2-NEXT: paddb (%rdx), %xmm1
2387 ; SSE2-NEXT: movaps %xmm0, 16(%rcx)
2388 ; SSE2-NEXT: movdqa %xmm1, (%rcx)
2391 ; SSE42-LABEL: vec256_v8i32_to_v1i256_factor8:
2393 ; SSE42-NEXT: movdqa (%rdi), %xmm0
2394 ; SSE42-NEXT: paddb (%rsi), %xmm0
2395 ; SSE42-NEXT: pxor %xmm1, %xmm1
2396 ; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3,4,5,6,7]
2397 ; SSE42-NEXT: movaps 16(%rdx), %xmm0
2398 ; SSE42-NEXT: paddb (%rdx), %xmm1
2399 ; SSE42-NEXT: movaps %xmm0, 16(%rcx)
2400 ; SSE42-NEXT: movdqa %xmm1, (%rcx)
2403 ; AVX-LABEL: vec256_v8i32_to_v1i256_factor8:
2405 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
2406 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
2407 ; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
2408 ; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
2409 ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0
2410 ; AVX-NEXT: vmovaps 16(%rdx), %xmm1
2411 ; AVX-NEXT: vmovaps %xmm1, 16(%rcx)
2412 ; AVX-NEXT: vmovdqa %xmm0, (%rcx)
2415 ; AVX2-LABEL: vec256_v8i32_to_v1i256_factor8:
2417 ; AVX2-NEXT: vmovdqa (%rdi), %ymm0
2418 ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0
2419 ; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
2420 ; AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
2421 ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0
2422 ; AVX2-NEXT: vmovdqa %ymm0, (%rcx)
2423 ; AVX2-NEXT: vzeroupper
2426 ; AVX512F-LABEL: vec256_v8i32_to_v1i256_factor8:
2428 ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
2429 ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0
2430 ; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
2431 ; AVX512F-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
2432 ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
2433 ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
2434 ; AVX512F-NEXT: vzeroupper
2435 ; AVX512F-NEXT: retq
2437 ; AVX512BW-LABEL: vec256_v8i32_to_v1i256_factor8:
2438 ; AVX512BW: # %bb.0:
2439 ; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
2440 ; AVX512BW-NEXT: vpaddb (%rsi), %ymm0, %ymm0
2441 ; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
2442 ; AVX512BW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
2443 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
2444 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
2445 ; AVX512BW-NEXT: vzeroupper
2446 ; AVX512BW-NEXT: retq
2447 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
2448 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
2449 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
2450 %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
2451 %in.vec.cast = bitcast <32 x i8> %in.vec.trunc to <8 x i32>
2452 %zextd.vec = shufflevector <8 x i32> %in.vec.cast, <8 x i32> zeroinitializer, <8 x i32> <i32 0, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
2453 %out.bytevec = bitcast <8 x i32> %zextd.vec to <32 x i8>
2454 %out.bytevec.padded = shufflevector <32 x i8> %out.bytevec, <32 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
2455 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
2456 %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
2457 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
2458 ret void
2459 }
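; i64 elements 0 and 1 are zero extended into the low 64 bits of each 128-bit half (factor 2).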
2461 define void @vec256_v4i64_to_v2i128_factor2(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
2462 ; SSE-LABEL: vec256_v4i64_to_v2i128_factor2:
2464 ; SSE-NEXT: movdqa (%rdi), %xmm0
2465 ; SSE-NEXT: paddb (%rsi), %xmm0
2466 ; SSE-NEXT: movq {{.*#+}} xmm1 = xmm0[0],zero
2467 ; SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero
2468 ; SSE-NEXT: paddb 16(%rdx), %xmm0
2469 ; SSE-NEXT: paddb (%rdx), %xmm1
2470 ; SSE-NEXT: movdqa %xmm1, (%rcx)
2471 ; SSE-NEXT: movdqa %xmm0, 16(%rcx)
2474 ; AVX-LABEL: vec256_v4i64_to_v2i128_factor2:
2476 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
2477 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
2478 ; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
2479 ; AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1
2480 ; AVX-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[3],ymm1[3]
2481 ; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
2482 ; AVX-NEXT: vpaddb 16(%rdx), %xmm1, %xmm1
2483 ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0
2484 ; AVX-NEXT: vmovdqa %xmm0, (%rcx)
2485 ; AVX-NEXT: vmovdqa %xmm1, 16(%rcx)
2486 ; AVX-NEXT: vzeroupper
2489 ; AVX2-LABEL: vec256_v4i64_to_v2i128_factor2:
2491 ; AVX2-NEXT: vmovdqa (%rdi), %ymm0
2492 ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0
2493 ; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,1,3]
2494 ; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
2495 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
2496 ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0
2497 ; AVX2-NEXT: vmovdqa %ymm0, (%rcx)
2498 ; AVX2-NEXT: vzeroupper
2501 ; AVX512F-LABEL: vec256_v4i64_to_v2i128_factor2:
2503 ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
2504 ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0
2505 ; AVX512F-NEXT: movb $5, %al
2506 ; AVX512F-NEXT: kmovw %eax, %k1
2507 ; AVX512F-NEXT: vpexpandq %ymm0, %ymm0 {%k1} {z}
2508 ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
2509 ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
2510 ; AVX512F-NEXT: vzeroupper
2511 ; AVX512F-NEXT: retq
2513 ; AVX512BW-LABEL: vec256_v4i64_to_v2i128_factor2:
2514 ; AVX512BW: # %bb.0:
2515 ; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
2516 ; AVX512BW-NEXT: vpaddb (%rsi), %ymm0, %ymm0
2517 ; AVX512BW-NEXT: movb $5, %al
2518 ; AVX512BW-NEXT: kmovd %eax, %k1
2519 ; AVX512BW-NEXT: vpexpandq %ymm0, %ymm0 {%k1} {z}
2520 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
2521 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
2522 ; AVX512BW-NEXT: vzeroupper
2523 ; AVX512BW-NEXT: retq
2524 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
2525 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
2526 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
2527 %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
2528 %in.vec.cast = bitcast <32 x i8> %in.vec.trunc to <4 x i64>
2529 %zextd.vec = shufflevector <4 x i64> %in.vec.cast, <4 x i64> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 1, i32 7>
2530 %out.bytevec = bitcast <4 x i64> %zextd.vec to <32 x i8>
2531 %out.bytevec.padded = shufflevector <32 x i8> %out.bytevec, <32 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
2532 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
2533 %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
2534 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
2535 ret void
2536 }
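; Only i64 element 0 survives, zero extended into a single 256-bit value (factor 4).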
2538 define void @vec256_v4i64_to_v1i256_factor4(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
2539 ; SSE-LABEL: vec256_v4i64_to_v1i256_factor4:
2541 ; SSE-NEXT: movdqa (%rdi), %xmm0
2542 ; SSE-NEXT: paddb (%rsi), %xmm0
2543 ; SSE-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
2544 ; SSE-NEXT: movaps 16(%rdx), %xmm1
2545 ; SSE-NEXT: paddb (%rdx), %xmm0
2546 ; SSE-NEXT: movaps %xmm1, 16(%rcx)
2547 ; SSE-NEXT: movdqa %xmm0, (%rcx)
2550 ; AVX-LABEL: vec256_v4i64_to_v1i256_factor4:
2552 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
2553 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
2554 ; AVX-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
2555 ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0
2556 ; AVX-NEXT: vmovaps 16(%rdx), %xmm1
2557 ; AVX-NEXT: vmovaps %xmm1, 16(%rcx)
2558 ; AVX-NEXT: vmovdqa %xmm0, (%rcx)
2561 ; AVX2-LABEL: vec256_v4i64_to_v1i256_factor4:
2563 ; AVX2-NEXT: vmovdqa (%rdi), %ymm0
2564 ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0
2565 ; AVX2-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
2566 ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0
2567 ; AVX2-NEXT: vmovdqa %ymm0, (%rcx)
2568 ; AVX2-NEXT: vzeroupper
2571 ; AVX512F-LABEL: vec256_v4i64_to_v1i256_factor4:
2573 ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
2574 ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0
2575 ; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
2576 ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
2577 ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
2578 ; AVX512F-NEXT: vzeroupper
2579 ; AVX512F-NEXT: retq
2581 ; AVX512BW-LABEL: vec256_v4i64_to_v1i256_factor4:
2582 ; AVX512BW: # %bb.0:
2583 ; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
2584 ; AVX512BW-NEXT: vpaddb (%rsi), %ymm0, %ymm0
2585 ; AVX512BW-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
2586 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
2587 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
2588 ; AVX512BW-NEXT: vzeroupper
2589 ; AVX512BW-NEXT: retq
2590 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
2591 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
2592 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
2593 %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
2594 %in.vec.cast = bitcast <32 x i8> %in.vec.trunc to <4 x i64>
2595 %zextd.vec = shufflevector <4 x i64> %in.vec.cast, <4 x i64> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
2596 %out.bytevec = bitcast <4 x i64> %zextd.vec to <32 x i8>
2597 %out.bytevec.padded = shufflevector <32 x i8> %out.bytevec, <32 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
2598 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
2599 %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
2600 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
2601 ret void
2602 }
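; The low 128-bit half is kept and the high half is zeroed (factor 2).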
2604 define void @vec256_v2i128_to_v1i256_factor2(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
2605 ; SSE-LABEL: vec256_v2i128_to_v1i256_factor2:
2607 ; SSE-NEXT: movdqa (%rdi), %xmm0
2608 ; SSE-NEXT: paddb (%rsi), %xmm0
2609 ; SSE-NEXT: movaps 16(%rdx), %xmm1
2610 ; SSE-NEXT: paddb (%rdx), %xmm0
2611 ; SSE-NEXT: movaps %xmm1, 16(%rcx)
2612 ; SSE-NEXT: movdqa %xmm0, (%rcx)
2615 ; AVX-LABEL: vec256_v2i128_to_v1i256_factor2:
2617 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
2618 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
2619 ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0
2620 ; AVX-NEXT: vmovaps 16(%rdx), %xmm1
2621 ; AVX-NEXT: vmovaps %xmm1, 16(%rcx)
2622 ; AVX-NEXT: vmovdqa %xmm0, (%rcx)
2625 ; AVX2-LABEL: vec256_v2i128_to_v1i256_factor2:
2627 ; AVX2-NEXT: vmovdqa (%rdi), %xmm0
2628 ; AVX2-NEXT: vpaddb (%rsi), %xmm0, %xmm0
2629 ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0
2630 ; AVX2-NEXT: vmovdqa %ymm0, (%rcx)
2631 ; AVX2-NEXT: vzeroupper
2634 ; AVX512F-LABEL: vec256_v2i128_to_v1i256_factor2:
2636 ; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
2637 ; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0
2638 ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
2639 ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
2640 ; AVX512F-NEXT: vzeroupper
2641 ; AVX512F-NEXT: retq
2643 ; AVX512BW-LABEL: vec256_v2i128_to_v1i256_factor2:
2644 ; AVX512BW: # %bb.0:
2645 ; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
2646 ; AVX512BW-NEXT: vpaddb (%rsi), %xmm0, %xmm0
2647 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
2648 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
2649 ; AVX512BW-NEXT: vzeroupper
2650 ; AVX512BW-NEXT: retq
2651 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
2652 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
2653 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
2654 %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
2655 %in.vec.cast = bitcast <32 x i8> %in.vec.trunc to <2 x i128>
2656 %zextd.vec = shufflevector <2 x i128> %in.vec.cast, <2 x i128> zeroinitializer, <2 x i32> <i32 0, i32 3>
2657 %out.bytevec = bitcast <2 x i128> %zextd.vec to <32 x i8>
2658 %out.bytevec.padded = shufflevector <32 x i8> %out.bytevec, <32 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
2659 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
2660 %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
2661 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
2662 ret void
2663 }
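; 384-bit variants use the low 48 bytes of the biased input. Here bytes 0-23 are zero extended to 24 x i16 (factor 2).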
2665 define void @vec384_v48i8_to_v24i16_factor2(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
2666 ; SSE2-LABEL: vec384_v48i8_to_v24i16_factor2:
2668 ; SSE2-NEXT: movdqa (%rdi), %xmm0
2669 ; SSE2-NEXT: movdqa 16(%rdi), %xmm1
2670 ; SSE2-NEXT: paddb (%rsi), %xmm0
2671 ; SSE2-NEXT: paddb 16(%rsi), %xmm1
2672 ; SSE2-NEXT: pxor %xmm2, %xmm2
2673 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
2674 ; SSE2-NEXT: movdqa %xmm0, %xmm3
2675 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
2676 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
2677 ; SSE2-NEXT: paddb 16(%rdx), %xmm0
2678 ; SSE2-NEXT: paddb (%rdx), %xmm3
2679 ; SSE2-NEXT: paddb 32(%rdx), %xmm1
2680 ; SSE2-NEXT: movdqa %xmm1, 32(%rcx)
2681 ; SSE2-NEXT: movdqa %xmm3, (%rcx)
2682 ; SSE2-NEXT: movdqa %xmm0, 16(%rcx)
2685 ; SSE42-LABEL: vec384_v48i8_to_v24i16_factor2:
2687 ; SSE42-NEXT: movdqa (%rdi), %xmm0
2688 ; SSE42-NEXT: movdqa 16(%rdi), %xmm1
2689 ; SSE42-NEXT: paddb (%rsi), %xmm0
2690 ; SSE42-NEXT: paddb 16(%rsi), %xmm1
2691 ; SSE42-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
2692 ; SSE42-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
2693 ; SSE42-NEXT: pxor %xmm3, %xmm3
2694 ; SSE42-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm3[8],xmm0[9],xmm3[9],xmm0[10],xmm3[10],xmm0[11],xmm3[11],xmm0[12],xmm3[12],xmm0[13],xmm3[13],xmm0[14],xmm3[14],xmm0[15],xmm3[15]
2695 ; SSE42-NEXT: paddb 16(%rdx), %xmm0
2696 ; SSE42-NEXT: paddb (%rdx), %xmm2
2697 ; SSE42-NEXT: paddb 32(%rdx), %xmm1
2698 ; SSE42-NEXT: movdqa %xmm1, 32(%rcx)
2699 ; SSE42-NEXT: movdqa %xmm2, (%rcx)
2700 ; SSE42-NEXT: movdqa %xmm0, 16(%rcx)
2703 ; AVX-LABEL: vec384_v48i8_to_v24i16_factor2:
2705 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
2706 ; AVX-NEXT: vmovdqa 16(%rdi), %xmm1
2707 ; AVX-NEXT: vpaddb 16(%rsi), %xmm1, %xmm1
2708 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
2709 ; AVX-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
2710 ; AVX-NEXT: vpxor %xmm3, %xmm3, %xmm3
2711 ; AVX-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm3[8],xmm0[9],xmm3[9],xmm0[10],xmm3[10],xmm0[11],xmm3[11],xmm0[12],xmm3[12],xmm0[13],xmm3[13],xmm0[14],xmm3[14],xmm0[15],xmm3[15]
2712 ; AVX-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
2713 ; AVX-NEXT: vpaddb 32(%rdx), %xmm1, %xmm1
2714 ; AVX-NEXT: vpaddb 16(%rdx), %xmm0, %xmm0
2715 ; AVX-NEXT: vpaddb (%rdx), %xmm2, %xmm2
2716 ; AVX-NEXT: vmovdqa %xmm2, (%rcx)
2717 ; AVX-NEXT: vmovdqa %xmm0, 16(%rcx)
2718 ; AVX-NEXT: vmovdqa %xmm1, 32(%rcx)
2721 ; AVX2-LABEL: vec384_v48i8_to_v24i16_factor2:
2723 ; AVX2-NEXT: vmovdqa (%rdi), %ymm0
2724 ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0
2725 ; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
2726 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
2727 ; AVX2-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
2728 ; AVX2-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
2729 ; AVX2-NEXT: vpaddb (%rdx), %ymm1, %ymm1
2730 ; AVX2-NEXT: vmovdqa %ymm1, (%rcx)
2731 ; AVX2-NEXT: vmovdqa %ymm0, 32(%rcx)
2732 ; AVX2-NEXT: vzeroupper
2735 ; AVX512F-LABEL: vec384_v48i8_to_v24i16_factor2:
2737 ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
2738 ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0
2739 ; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
2740 ; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm0
2741 ; AVX512F-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
2742 ; AVX512F-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
2743 ; AVX512F-NEXT: vpaddb (%rdx), %ymm1, %ymm1
2744 ; AVX512F-NEXT: vmovdqa %ymm1, (%rcx)
2745 ; AVX512F-NEXT: vmovdqa %ymm0, 32(%rcx)
2746 ; AVX512F-NEXT: vzeroupper
2747 ; AVX512F-NEXT: retq
2749 ; AVX512BW-LABEL: vec384_v48i8_to_v24i16_factor2:
2750 ; AVX512BW: # %bb.0:
2751 ; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
2752 ; AVX512BW-NEXT: vpaddb (%rsi), %ymm0, %ymm0
2753 ; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
2754 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
2755 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
2756 ; AVX512BW-NEXT: vzeroupper
2757 ; AVX512BW-NEXT: retq
2758 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
2759 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
2760 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
2761 %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <48 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47>
2762 %zextd.vec = shufflevector <48 x i8> %in.vec.trunc, <48 x i8> zeroinitializer, <48 x i32> <i32 0, i32 49, i32 1, i32 51, i32 2, i32 53, i32 3, i32 55, i32 4, i32 57, i32 5, i32 59, i32 6, i32 61, i32 7, i32 63, i32 8, i32 65, i32 9, i32 67, i32 10, i32 69, i32 11, i32 71, i32 12, i32 73, i32 13, i32 75, i32 14, i32 77, i32 15, i32 79, i32 16, i32 81, i32 17, i32 83, i32 18, i32 85, i32 19, i32 87, i32 20, i32 89, i32 21, i32 91, i32 22, i32 93, i32 23, i32 95>
2763 %out.bytevec.padded = shufflevector <48 x i8> %zextd.vec, <48 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
2764 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
2765 %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
2766 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
2767 ret void
2768 }
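; Bytes 0-15 each start a 3-byte (i24) group; the other two bytes of every group are zero (factor 3).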
2770 define void @vec384_v48i8_to_v16i24_factor3(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
2771 ; SSE2-LABEL: vec384_v48i8_to_v16i24_factor3:
2773 ; SSE2-NEXT: movdqa (%rdi), %xmm0
2774 ; SSE2-NEXT: paddb (%rsi), %xmm0
2775 ; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm0[3,3,3,3,4,5,6,7]
2776 ; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,6,5]
2777 ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
2778 ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
2779 ; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,1,2,2,4,5,6,7]
2780 ; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,7,7,7]
2781 ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
2782 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
2783 ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,1,4,5,6,7]
2784 ; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,6,6]
2785 ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
2786 ; SSE2-NEXT: paddb (%rdx), %xmm0
2787 ; SSE2-NEXT: paddb 32(%rdx), %xmm2
2788 ; SSE2-NEXT: paddb 16(%rdx), %xmm1
2789 ; SSE2-NEXT: movdqa %xmm1, 16(%rcx)
2790 ; SSE2-NEXT: movdqa %xmm2, 32(%rcx)
2791 ; SSE2-NEXT: movdqa %xmm0, (%rcx)
2794 ; SSE42-LABEL: vec384_v48i8_to_v16i24_factor3:
2796 ; SSE42-NEXT: movdqa (%rdi), %xmm0
2797 ; SSE42-NEXT: paddb (%rsi), %xmm0
2798 ; SSE42-NEXT: movdqa %xmm0, %xmm1
2799 ; SSE42-NEXT: pshufb {{.*#+}} xmm1 = zero,xmm1[11],zero,zero,xmm1[12],zero,zero,xmm1[13],zero,zero,xmm1[14],zero,zero,xmm1[15],zero,zero
2800 ; SSE42-NEXT: movdqa %xmm0, %xmm2
2801 ; SSE42-NEXT: pshufb {{.*#+}} xmm2 = xmm2[0],zero,zero,xmm2[1],zero,zero,xmm2[2],zero,zero,xmm2[3],zero,zero,xmm2[4],zero,zero,xmm2[5]
2802 ; SSE42-NEXT: pshufb {{.*#+}} xmm0 = zero,zero,xmm0[6],zero,zero,xmm0[7],zero,zero,xmm0[8],zero,zero,xmm0[9],zero,zero,xmm0[10],zero
2803 ; SSE42-NEXT: paddb 16(%rdx), %xmm0
2804 ; SSE42-NEXT: paddb (%rdx), %xmm2
2805 ; SSE42-NEXT: paddb 32(%rdx), %xmm1
2806 ; SSE42-NEXT: movdqa %xmm1, 32(%rcx)
2807 ; SSE42-NEXT: movdqa %xmm2, (%rcx)
2808 ; SSE42-NEXT: movdqa %xmm0, 16(%rcx)
2811 ; AVX-LABEL: vec384_v48i8_to_v16i24_factor3:
2813 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
2814 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
2815 ; AVX-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0],zero,zero,xmm0[1],zero,zero,xmm0[2],zero,zero,xmm0[3],zero,zero,xmm0[4],zero,zero,xmm0[5]
2816 ; AVX-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,xmm0[6],zero,zero,xmm0[7],zero,zero,xmm0[8],zero,zero,xmm0[9],zero,zero,xmm0[10],zero
2817 ; AVX-NEXT: vpshufb {{.*#+}} xmm0 = zero,xmm0[11],zero,zero,xmm0[12],zero,zero,xmm0[13],zero,zero,xmm0[14],zero,zero,xmm0[15],zero,zero
2818 ; AVX-NEXT: vpaddb 32(%rdx), %xmm0, %xmm0
2819 ; AVX-NEXT: vpaddb 16(%rdx), %xmm2, %xmm2
2820 ; AVX-NEXT: vpaddb (%rdx), %xmm1, %xmm1
2821 ; AVX-NEXT: vmovdqa %xmm1, (%rcx)
2822 ; AVX-NEXT: vmovdqa %xmm2, 16(%rcx)
2823 ; AVX-NEXT: vmovdqa %xmm0, 32(%rcx)
2826 ; AVX2-LABEL: vec384_v48i8_to_v16i24_factor3:
2828 ; AVX2-NEXT: vmovdqa (%rdi), %ymm0
2829 ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0
2830 ; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm0[0,1,0,1]
2831 ; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[0],zero,zero,ymm1[1],zero,zero,ymm1[2],zero,zero,ymm1[3],zero,zero,ymm1[4],zero,zero,ymm1[5],zero,zero,ymm1[22],zero,zero,ymm1[23],zero,zero,ymm1[24],zero,zero,ymm1[25],zero,zero,ymm1[26],zero
2832 ; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = zero,xmm0[11],zero,zero,xmm0[12],zero,zero,xmm0[13],zero,zero,xmm0[14],zero,zero,xmm0[15],zero,zero
2833 ; AVX2-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
2834 ; AVX2-NEXT: vpaddb (%rdx), %ymm1, %ymm1
2835 ; AVX2-NEXT: vmovdqa %ymm1, (%rcx)
2836 ; AVX2-NEXT: vmovdqa %ymm0, 32(%rcx)
2837 ; AVX2-NEXT: vzeroupper
2840 ; AVX512F-LABEL: vec384_v48i8_to_v16i24_factor3:
2842 ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
2843 ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0
2844 ; AVX512F-NEXT: vpermq {{.*#+}} ymm1 = ymm0[0,1,0,1]
2845 ; AVX512F-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[0],zero,zero,ymm1[1],zero,zero,ymm1[2],zero,zero,ymm1[3],zero,zero,ymm1[4],zero,zero,ymm1[5],zero,zero,ymm1[22],zero,zero,ymm1[23],zero,zero,ymm1[24],zero,zero,ymm1[25],zero,zero,ymm1[26],zero
2846 ; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = zero,xmm0[11],zero,zero,xmm0[12],zero,zero,xmm0[13],zero,zero,xmm0[14],zero,zero,xmm0[15],zero,zero
2847 ; AVX512F-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
2848 ; AVX512F-NEXT: vpaddb (%rdx), %ymm1, %ymm1
2849 ; AVX512F-NEXT: vmovdqa %ymm1, (%rcx)
2850 ; AVX512F-NEXT: vmovdqa %ymm0, 32(%rcx)
2851 ; AVX512F-NEXT: vzeroupper
2852 ; AVX512F-NEXT: retq
2854 ; AVX512BW-LABEL: vec384_v48i8_to_v16i24_factor3:
2855 ; AVX512BW: # %bb.0:
2856 ; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
2857 ; AVX512BW-NEXT: vpaddb (%rsi), %ymm0, %ymm0
2858 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [0,0,u,1,1,u,2,2,u,3,3,u,4,4,u,5]
2859 ; AVX512BW-NEXT: vpermw %ymm0, %ymm1, %ymm1
2860 ; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
2861 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = zero,xmm0[11],zero,zero,xmm0[12],zero,zero,xmm0[13],zero,zero,xmm0[14],zero,zero,xmm0[15],zero,zero
2862 ; AVX512BW-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0
2863 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
2864 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
2865 ; AVX512BW-NEXT: vzeroupper
2866 ; AVX512BW-NEXT: retq
2867 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
2868 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
2869 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
2870 %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <48 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47>
2871 %zextd.vec = shufflevector <48 x i8> %in.vec.trunc, <48 x i8> zeroinitializer, <48 x i32> <i32 0, i32 49, i32 50, i32 1, i32 52, i32 53, i32 2, i32 55, i32 56, i32 3, i32 58, i32 59, i32 4, i32 61, i32 62, i32 5, i32 64, i32 65, i32 6, i32 67, i32 68, i32 7, i32 70, i32 71, i32 8, i32 73, i32 74, i32 9, i32 76, i32 77, i32 10, i32 79, i32 80, i32 11, i32 82, i32 83, i32 12, i32 85, i32 86, i32 13, i32 88, i32 89, i32 14, i32 91, i32 92, i32 15, i32 94, i32 95>
2872 %out.bytevec.padded = shufflevector <48 x i8> %zextd.vec, <48 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
2873 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
2874 %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
2875 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
2876 ret void
2877 }
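; Bytes 0-11 are zero extended to 12 x i32 (factor 4).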
2879 define void @vec384_v48i8_to_v12i32_factor4(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
2880 ; SSE2-LABEL: vec384_v48i8_to_v12i32_factor4:
2882 ; SSE2-NEXT: movdqa (%rdi), %xmm0
2883 ; SSE2-NEXT: paddb (%rsi), %xmm0
2884 ; SSE2-NEXT: pxor %xmm1, %xmm1
2885 ; SSE2-NEXT: movdqa %xmm0, %xmm2
2886 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
2887 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
2888 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
2889 ; SSE2-NEXT: movdqa %xmm0, %xmm3
2890 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
2891 ; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
2892 ; SSE2-NEXT: paddb 16(%rdx), %xmm0
2893 ; SSE2-NEXT: paddb (%rdx), %xmm3
2894 ; SSE2-NEXT: paddb 32(%rdx), %xmm2
2895 ; SSE2-NEXT: movdqa %xmm2, 32(%rcx)
2896 ; SSE2-NEXT: movdqa %xmm3, (%rcx)
2897 ; SSE2-NEXT: movdqa %xmm0, 16(%rcx)
2900 ; SSE42-LABEL: vec384_v48i8_to_v12i32_factor4:
2902 ; SSE42-NEXT: movdqa (%rdi), %xmm0
2903 ; SSE42-NEXT: paddb (%rsi), %xmm0
2904 ; SSE42-NEXT: pmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
2905 ; SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
2906 ; SSE42-NEXT: pmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
2907 ; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
2908 ; SSE42-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
2909 ; SSE42-NEXT: paddb 16(%rdx), %xmm0
2910 ; SSE42-NEXT: paddb 32(%rdx), %xmm2
2911 ; SSE42-NEXT: paddb (%rdx), %xmm1
2912 ; SSE42-NEXT: movdqa %xmm1, (%rcx)
2913 ; SSE42-NEXT: movdqa %xmm2, 32(%rcx)
2914 ; SSE42-NEXT: movdqa %xmm0, 16(%rcx)
2917 ; AVX-LABEL: vec384_v48i8_to_v12i32_factor4:
2919 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
2920 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
2921 ; AVX-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
2922 ; AVX-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,1,1]
2923 ; AVX-NEXT: vpmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
2924 ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
2925 ; AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
2926 ; AVX-NEXT: vpaddb 32(%rdx), %xmm0, %xmm0
2927 ; AVX-NEXT: vpaddb 16(%rdx), %xmm2, %xmm2
2928 ; AVX-NEXT: vpaddb (%rdx), %xmm1, %xmm1
2929 ; AVX-NEXT: vmovdqa %xmm1, (%rcx)
2930 ; AVX-NEXT: vmovdqa %xmm2, 16(%rcx)
2931 ; AVX-NEXT: vmovdqa %xmm0, 32(%rcx)
2934 ; AVX2-SLOW-LABEL: vec384_v48i8_to_v12i32_factor4:
2935 ; AVX2-SLOW: # %bb.0:
2936 ; AVX2-SLOW-NEXT: vmovdqa (%rdi), %xmm0
2937 ; AVX2-SLOW-NEXT: vpaddb (%rsi), %xmm0, %xmm0
2938 ; AVX2-SLOW-NEXT: vpmovzxbd {{.*#+}} ymm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
2939 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
2940 ; AVX2-SLOW-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
2941 ; AVX2-SLOW-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
2942 ; AVX2-SLOW-NEXT: vpaddb (%rdx), %ymm1, %ymm1
2943 ; AVX2-SLOW-NEXT: vmovdqa %ymm1, (%rcx)
2944 ; AVX2-SLOW-NEXT: vmovdqa %ymm0, 32(%rcx)
2945 ; AVX2-SLOW-NEXT: vzeroupper
2946 ; AVX2-SLOW-NEXT: retq
2948 ; AVX2-FAST-PERLANE-LABEL: vec384_v48i8_to_v12i32_factor4:
2949 ; AVX2-FAST-PERLANE: # %bb.0:
2950 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %xmm0
2951 ; AVX2-FAST-PERLANE-NEXT: vpaddb (%rsi), %xmm0, %xmm0
2952 ; AVX2-FAST-PERLANE-NEXT: vpmovzxbd {{.*#+}} ymm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
2953 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero
2954 ; AVX2-FAST-PERLANE-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
2955 ; AVX2-FAST-PERLANE-NEXT: vpaddb (%rdx), %ymm1, %ymm1
2956 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm1, (%rcx)
2957 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm0, 32(%rcx)
2958 ; AVX2-FAST-PERLANE-NEXT: vzeroupper
2959 ; AVX2-FAST-PERLANE-NEXT: retq
2961 ; AVX2-FAST-LABEL: vec384_v48i8_to_v12i32_factor4:
2962 ; AVX2-FAST: # %bb.0:
2963 ; AVX2-FAST-NEXT: vmovdqa (%rdi), %xmm0
2964 ; AVX2-FAST-NEXT: vpaddb (%rsi), %xmm0, %xmm0
2965 ; AVX2-FAST-NEXT: vpmovzxbd {{.*#+}} ymm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
2966 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero
2967 ; AVX2-FAST-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
2968 ; AVX2-FAST-NEXT: vpaddb (%rdx), %ymm1, %ymm1
2969 ; AVX2-FAST-NEXT: vmovdqa %ymm1, (%rcx)
2970 ; AVX2-FAST-NEXT: vmovdqa %ymm0, 32(%rcx)
2971 ; AVX2-FAST-NEXT: vzeroupper
2972 ; AVX2-FAST-NEXT: retq
2974 ; AVX512F-LABEL: vec384_v48i8_to_v12i32_factor4:
2976 ; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
2977 ; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0
2978 ; AVX512F-NEXT: vpmovzxbd {{.*#+}} ymm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
2979 ; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero
2980 ; AVX512F-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
2981 ; AVX512F-NEXT: vpaddb (%rdx), %ymm1, %ymm1
2982 ; AVX512F-NEXT: vmovdqa %ymm1, (%rcx)
2983 ; AVX512F-NEXT: vmovdqa %ymm0, 32(%rcx)
2984 ; AVX512F-NEXT: vzeroupper
2985 ; AVX512F-NEXT: retq
2987 ; AVX512BW-LABEL: vec384_v48i8_to_v12i32_factor4:
2988 ; AVX512BW: # %bb.0:
2989 ; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
2990 ; AVX512BW-NEXT: vpaddb (%rsi), %xmm0, %xmm0
2991 ; AVX512BW-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
2992 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
2993 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
2994 ; AVX512BW-NEXT: vzeroupper
2995 ; AVX512BW-NEXT: retq
2996 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
2997 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
2998 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
2999 %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <48 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47>
3000 %zextd.vec = shufflevector <48 x i8> %in.vec.trunc, <48 x i8> zeroinitializer, <48 x i32> <i32 0, i32 49, i32 50, i32 51, i32 1, i32 53, i32 54, i32 55, i32 2, i32 57, i32 58, i32 59, i32 3, i32 61, i32 62, i32 63, i32 4, i32 65, i32 66, i32 67, i32 5, i32 69, i32 70, i32 71, i32 6, i32 73, i32 74, i32 75, i32 7, i32 77, i32 78, i32 79, i32 8, i32 81, i32 82, i32 83, i32 9, i32 85, i32 86, i32 87, i32 10, i32 89, i32 90, i32 91, i32 11, i32 93, i32 94, i32 95>
3001 %out.bytevec.padded = shufflevector <48 x i8> %zextd.vec, <48 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
3002 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
3003 %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
3004 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
3005 ret void
3006 }
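; Zero-extend by factor 6: input byte k is placed at byte 6*k of the 48-byte result, all other bytes zeroed, then the output bias is added.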
3008 define void @vec384_v48i8_to_v8i48_factor6(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
3009 ; SSE2-LABEL: vec384_v48i8_to_v8i48_factor6:
3010 ; SSE2: # %bb.0:
3011 ; SSE2-NEXT: movdqa (%rdi), %xmm0
3012 ; SSE2-NEXT: paddb (%rsi), %xmm0
3013 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
3014 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,3,3,3]
3015 ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
3016 ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,0,1,1]
3017 ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
3018 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,2]
3019 ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
3020 ; SSE2-NEXT: paddb 16(%rdx), %xmm0
3021 ; SSE2-NEXT: paddb (%rdx), %xmm2
3022 ; SSE2-NEXT: paddb 32(%rdx), %xmm1
3023 ; SSE2-NEXT: movdqa %xmm1, 32(%rcx)
3024 ; SSE2-NEXT: movdqa %xmm2, (%rcx)
3025 ; SSE2-NEXT: movdqa %xmm0, 16(%rcx)
3026 ; SSE2-NEXT: retq
3028 ; SSE42-LABEL: vec384_v48i8_to_v8i48_factor6:
3029 ; SSE42: # %bb.0:
3030 ; SSE42-NEXT: movdqa (%rdi), %xmm0
3031 ; SSE42-NEXT: paddb (%rsi), %xmm0
3032 ; SSE42-NEXT: movdqa %xmm0, %xmm1
3033 ; SSE42-NEXT: pshufb {{.*#+}} xmm1 = zero,zero,zero,zero,xmm1[6],zero,zero,zero,zero,zero,xmm1[7],zero,zero,zero,zero,zero
3034 ; SSE42-NEXT: movdqa %xmm0, %xmm2
3035 ; SSE42-NEXT: pshufb {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,xmm2[2],zero,zero,zero
3036 ; SSE42-NEXT: pshufb {{.*#+}} xmm0 = zero,zero,xmm0[3],zero,zero,zero,zero,zero,xmm0[4],zero,zero,zero,zero,zero,xmm0[5],zero
3037 ; SSE42-NEXT: paddb 16(%rdx), %xmm0
3038 ; SSE42-NEXT: paddb (%rdx), %xmm2
3039 ; SSE42-NEXT: paddb 32(%rdx), %xmm1
3040 ; SSE42-NEXT: movdqa %xmm1, 32(%rcx)
3041 ; SSE42-NEXT: movdqa %xmm2, (%rcx)
3042 ; SSE42-NEXT: movdqa %xmm0, 16(%rcx)
3043 ; SSE42-NEXT: retq
3045 ; AVX-LABEL: vec384_v48i8_to_v8i48_factor6:
3046 ; AVX: # %bb.0:
3047 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
3048 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
3049 ; AVX-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero
3050 ; AVX-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,xmm0[3],zero,zero,zero,zero,zero,xmm0[4],zero,zero,zero,zero,zero,xmm0[5],zero
3051 ; AVX-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[6],zero,zero,zero,zero,zero,xmm0[7],zero,zero,zero,zero,zero
3052 ; AVX-NEXT: vpaddb 32(%rdx), %xmm0, %xmm0
3053 ; AVX-NEXT: vpaddb 16(%rdx), %xmm2, %xmm2
3054 ; AVX-NEXT: vpaddb (%rdx), %xmm1, %xmm1
3055 ; AVX-NEXT: vmovdqa %xmm1, (%rcx)
3056 ; AVX-NEXT: vmovdqa %xmm2, 16(%rcx)
3057 ; AVX-NEXT: vmovdqa %xmm0, 32(%rcx)
3058 ; AVX-NEXT: retq
3060 ; AVX2-LABEL: vec384_v48i8_to_v8i48_factor6:
3061 ; AVX2: # %bb.0:
3062 ; AVX2-NEXT: vmovdqa (%rdi), %xmm0
3063 ; AVX2-NEXT: vpaddb (%rsi), %xmm0, %xmm0
3064 ; AVX2-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,zero,zero,xmm0[6],zero,zero,zero,zero,zero,xmm0[7],zero,zero,zero,zero,zero
3065 ; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,u,3,u,2,u,1,u,4,u,5,u,6,u,5,u]
3066 ; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1]
3067 ; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
3068 ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0
3069 ; AVX2-NEXT: vpaddb 32(%rdx), %ymm1, %ymm1
3070 ; AVX2-NEXT: vmovdqa %ymm1, 32(%rcx)
3071 ; AVX2-NEXT: vmovdqa %ymm0, (%rcx)
3072 ; AVX2-NEXT: vzeroupper
3073 ; AVX2-NEXT: retq
3075 ; AVX512F-LABEL: vec384_v48i8_to_v8i48_factor6:
3076 ; AVX512F: # %bb.0:
3077 ; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
3078 ; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0
3079 ; AVX512F-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,zero,zero,xmm0[6],zero,zero,zero,zero,zero,xmm0[7],zero,zero,zero,zero,zero
3080 ; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,u,3,u,2,u,1,u,4,u,5,u,6,u,5,u]
3081 ; AVX512F-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1]
3082 ; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
3083 ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
3084 ; AVX512F-NEXT: vpaddb 32(%rdx), %ymm1, %ymm1
3085 ; AVX512F-NEXT: vmovdqa %ymm1, 32(%rcx)
3086 ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
3087 ; AVX512F-NEXT: vzeroupper
3088 ; AVX512F-NEXT: retq
3090 ; AVX512BW-LABEL: vec384_v48i8_to_v8i48_factor6:
3091 ; AVX512BW: # %bb.0:
3092 ; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
3093 ; AVX512BW-NEXT: vpaddb (%rsi), %xmm0, %xmm0
3094 ; AVX512BW-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
3095 ; AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [0,3,0,1,4,0,2,5,0,3,0,1,4,0,2,5]
3096 ; AVX512BW-NEXT: # ymm2 = mem[0,1,0,1]
3097 ; AVX512BW-NEXT: vpermw %ymm1, %ymm2, %ymm1
3098 ; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
3099 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[6],zero,zero,zero,zero,zero,xmm0[7],zero,zero,zero,zero,zero
3100 ; AVX512BW-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0
3101 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
3102 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
3103 ; AVX512BW-NEXT: vzeroupper
3104 ; AVX512BW-NEXT: retq
3105 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
3106 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
3107 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
3108 %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <48 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47>
3109 %zextd.vec = shufflevector <48 x i8> %in.vec.trunc, <48 x i8> zeroinitializer, <48 x i32> <i32 0, i32 49, i32 50, i32 51, i32 52, i32 53, i32 1, i32 55, i32 56, i32 57, i32 58, i32 59, i32 2, i32 61, i32 62, i32 63, i32 64, i32 65, i32 3, i32 67, i32 68, i32 69, i32 70, i32 71, i32 4, i32 73, i32 74, i32 75, i32 76, i32 77, i32 5, i32 79, i32 80, i32 81, i32 82, i32 83, i32 6, i32 85, i32 86, i32 87, i32 88, i32 89, i32 7, i32 91, i32 92, i32 93, i32 94, i32 95>
3110 %out.bytevec.padded = shufflevector <48 x i8> %zextd.vec, <48 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
3111 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
3112 %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
3113 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
3114 ret void
3115 }
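; Zero-extend by factor 8: input byte k is placed at byte 8*k, i.e. each of the low six bytes becomes the low byte of an i64 lane.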
3117 define void @vec384_v48i8_to_v6i64_factor8(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
3118 ; SSE2-LABEL: vec384_v48i8_to_v6i64_factor8:
3119 ; SSE2: # %bb.0:
3120 ; SSE2-NEXT: movdqa (%rdi), %xmm0
3121 ; SSE2-NEXT: paddb (%rsi), %xmm0
3122 ; SSE2-NEXT: pxor %xmm1, %xmm1
3123 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
3124 ; SSE2-NEXT: movdqa %xmm0, %xmm2
3125 ; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
3126 ; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
3127 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
3128 ; SSE2-NEXT: movdqa %xmm0, %xmm3
3129 ; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
3130 ; SSE2-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
3131 ; SSE2-NEXT: paddb 16(%rdx), %xmm0
3132 ; SSE2-NEXT: paddb (%rdx), %xmm3
3133 ; SSE2-NEXT: paddb 32(%rdx), %xmm2
3134 ; SSE2-NEXT: movdqa %xmm2, 32(%rcx)
3135 ; SSE2-NEXT: movdqa %xmm3, (%rcx)
3136 ; SSE2-NEXT: movdqa %xmm0, 16(%rcx)
3137 ; SSE2-NEXT: retq
3139 ; SSE42-LABEL: vec384_v48i8_to_v6i64_factor8:
3140 ; SSE42: # %bb.0:
3141 ; SSE42-NEXT: movdqa (%rdi), %xmm0
3142 ; SSE42-NEXT: paddb (%rsi), %xmm0
3143 ; SSE42-NEXT: pmovzxbq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
3144 ; SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,1,1]
3145 ; SSE42-NEXT: pmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
3146 ; SSE42-NEXT: psrld $16, %xmm0
3147 ; SSE42-NEXT: pmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
3148 ; SSE42-NEXT: paddb 16(%rdx), %xmm0
3149 ; SSE42-NEXT: paddb 32(%rdx), %xmm2
3150 ; SSE42-NEXT: paddb (%rdx), %xmm1
3151 ; SSE42-NEXT: movdqa %xmm1, (%rcx)
3152 ; SSE42-NEXT: movdqa %xmm2, 32(%rcx)
3153 ; SSE42-NEXT: movdqa %xmm0, 16(%rcx)
3154 ; SSE42-NEXT: retq
3156 ; AVX-LABEL: vec384_v48i8_to_v6i64_factor8:
3157 ; AVX: # %bb.0:
3158 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
3159 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
3160 ; AVX-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
3161 ; AVX-NEXT: vpsrld $16, %xmm0, %xmm2
3162 ; AVX-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
3163 ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
3164 ; AVX-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
3165 ; AVX-NEXT: vpaddb 32(%rdx), %xmm0, %xmm0
3166 ; AVX-NEXT: vpaddb 16(%rdx), %xmm2, %xmm2
3167 ; AVX-NEXT: vpaddb (%rdx), %xmm1, %xmm1
3168 ; AVX-NEXT: vmovdqa %xmm1, (%rcx)
3169 ; AVX-NEXT: vmovdqa %xmm2, 16(%rcx)
3170 ; AVX-NEXT: vmovdqa %xmm0, 32(%rcx)
3171 ; AVX-NEXT: retq
3173 ; AVX2-SLOW-LABEL: vec384_v48i8_to_v6i64_factor8:
3174 ; AVX2-SLOW: # %bb.0:
3175 ; AVX2-SLOW-NEXT: vmovdqa (%rdi), %xmm0
3176 ; AVX2-SLOW-NEXT: vpaddb (%rsi), %xmm0, %xmm0
3177 ; AVX2-SLOW-NEXT: vpmovzxbq {{.*#+}} ymm1 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
3178 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
3179 ; AVX2-SLOW-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
3180 ; AVX2-SLOW-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
3181 ; AVX2-SLOW-NEXT: vpaddb (%rdx), %ymm1, %ymm1
3182 ; AVX2-SLOW-NEXT: vmovdqa %ymm1, (%rcx)
3183 ; AVX2-SLOW-NEXT: vmovdqa %ymm0, 32(%rcx)
3184 ; AVX2-SLOW-NEXT: vzeroupper
3185 ; AVX2-SLOW-NEXT: retq
3187 ; AVX2-FAST-PERLANE-LABEL: vec384_v48i8_to_v6i64_factor8:
3188 ; AVX2-FAST-PERLANE: # %bb.0:
3189 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %xmm0
3190 ; AVX2-FAST-PERLANE-NEXT: vpaddb (%rsi), %xmm0, %xmm0
3191 ; AVX2-FAST-PERLANE-NEXT: vpmovzxbq {{.*#+}} ymm1 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
3192 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4],zero,zero,zero,zero,zero,zero,zero,xmm0[5],zero,zero,zero,zero,zero,zero,zero
3193 ; AVX2-FAST-PERLANE-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
3194 ; AVX2-FAST-PERLANE-NEXT: vpaddb (%rdx), %ymm1, %ymm1
3195 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm1, (%rcx)
3196 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm0, 32(%rcx)
3197 ; AVX2-FAST-PERLANE-NEXT: vzeroupper
3198 ; AVX2-FAST-PERLANE-NEXT: retq
3200 ; AVX2-FAST-LABEL: vec384_v48i8_to_v6i64_factor8:
3201 ; AVX2-FAST: # %bb.0:
3202 ; AVX2-FAST-NEXT: vmovdqa (%rdi), %xmm0
3203 ; AVX2-FAST-NEXT: vpaddb (%rsi), %xmm0, %xmm0
3204 ; AVX2-FAST-NEXT: vpmovzxbq {{.*#+}} ymm1 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
3205 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4],zero,zero,zero,zero,zero,zero,zero,xmm0[5],zero,zero,zero,zero,zero,zero,zero
3206 ; AVX2-FAST-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
3207 ; AVX2-FAST-NEXT: vpaddb (%rdx), %ymm1, %ymm1
3208 ; AVX2-FAST-NEXT: vmovdqa %ymm1, (%rcx)
3209 ; AVX2-FAST-NEXT: vmovdqa %ymm0, 32(%rcx)
3210 ; AVX2-FAST-NEXT: vzeroupper
3211 ; AVX2-FAST-NEXT: retq
3213 ; AVX512F-LABEL: vec384_v48i8_to_v6i64_factor8:
3214 ; AVX512F: # %bb.0:
3215 ; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
3216 ; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0
3217 ; AVX512F-NEXT: vpmovzxbq {{.*#+}} ymm1 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
3218 ; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4],zero,zero,zero,zero,zero,zero,zero,xmm0[5],zero,zero,zero,zero,zero,zero,zero
3219 ; AVX512F-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
3220 ; AVX512F-NEXT: vpaddb (%rdx), %ymm1, %ymm1
3221 ; AVX512F-NEXT: vmovdqa %ymm1, (%rcx)
3222 ; AVX512F-NEXT: vmovdqa %ymm0, 32(%rcx)
3223 ; AVX512F-NEXT: vzeroupper
3224 ; AVX512F-NEXT: retq
3226 ; AVX512BW-LABEL: vec384_v48i8_to_v6i64_factor8:
3227 ; AVX512BW: # %bb.0:
3228 ; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
3229 ; AVX512BW-NEXT: vpaddb (%rsi), %xmm0, %xmm0
3230 ; AVX512BW-NEXT: vpmovzxbq {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero,xmm0[4],zero,zero,zero,zero,zero,zero,zero,xmm0[5],zero,zero,zero,zero,zero,zero,zero,xmm0[6],zero,zero,zero,zero,zero,zero,zero,xmm0[7],zero,zero,zero,zero,zero,zero,zero
3231 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
3232 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
3233 ; AVX512BW-NEXT: vzeroupper
3234 ; AVX512BW-NEXT: retq
3235 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
3236 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
3237 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
3238 %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <48 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47>
3239 %zextd.vec = shufflevector <48 x i8> %in.vec.trunc, <48 x i8> zeroinitializer, <48 x i32> <i32 0, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 1, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 2, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 3, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 4, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 5, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95>
3240 %out.bytevec.padded = shufflevector <48 x i8> %zextd.vec, <48 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
3241 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
3242 %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
3243 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
3244 ret void
3245 }
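; Zero-extend by factor 12: input byte k is placed at byte 12*k, filling four 96-bit lanes.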
3247 define void @vec384_v48i8_to_v4i96_factor12(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
3248 ; SSE2-LABEL: vec384_v48i8_to_v4i96_factor12:
3249 ; SSE2: # %bb.0:
3250 ; SSE2-NEXT: movdqa (%rdi), %xmm0
3251 ; SSE2-NEXT: paddb (%rsi), %xmm0
3252 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,0,0]
3253 ; SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
3254 ; SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,xmm1[0,1,2,3,4,5,6,7,8,9,10,11]
3255 ; SSE2-NEXT: movdqa %xmm0, %xmm2
3256 ; SSE2-NEXT: pslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,1,2]
3257 ; SSE2-NEXT: psrldq {{.*#+}} xmm2 = xmm2[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
3258 ; SSE2-NEXT: pslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,1,2,3,4,5,6,7]
3259 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
3260 ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,1,3,4,5,6,7]
3261 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1]
3262 ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
3263 ; SSE2-NEXT: paddb (%rdx), %xmm0
3264 ; SSE2-NEXT: paddb 16(%rdx), %xmm2
3265 ; SSE2-NEXT: paddb 32(%rdx), %xmm1
3266 ; SSE2-NEXT: movdqa %xmm1, 32(%rcx)
3267 ; SSE2-NEXT: movdqa %xmm2, 16(%rcx)
3268 ; SSE2-NEXT: movdqa %xmm0, (%rcx)
3269 ; SSE2-NEXT: retq
3271 ; SSE42-LABEL: vec384_v48i8_to_v4i96_factor12:
3272 ; SSE42: # %bb.0:
3273 ; SSE42-NEXT: movdqa (%rdi), %xmm0
3274 ; SSE42-NEXT: paddb (%rsi), %xmm0
3275 ; SSE42-NEXT: movdqa %xmm0, %xmm1
3276 ; SSE42-NEXT: pshufb {{.*#+}} xmm1 = zero,zero,zero,zero,xmm1[3],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
3277 ; SSE42-NEXT: movdqa %xmm0, %xmm2
3278 ; SSE42-NEXT: pshufb {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero
3279 ; SSE42-NEXT: pshufb {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero
3280 ; SSE42-NEXT: paddb 16(%rdx), %xmm0
3281 ; SSE42-NEXT: paddb (%rdx), %xmm2
3282 ; SSE42-NEXT: paddb 32(%rdx), %xmm1
3283 ; SSE42-NEXT: movdqa %xmm1, 32(%rcx)
3284 ; SSE42-NEXT: movdqa %xmm2, (%rcx)
3285 ; SSE42-NEXT: movdqa %xmm0, 16(%rcx)
3286 ; SSE42-NEXT: retq
3288 ; AVX-LABEL: vec384_v48i8_to_v4i96_factor12:
3289 ; AVX: # %bb.0:
3290 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
3291 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
3292 ; AVX-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero
3293 ; AVX-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero
3294 ; AVX-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
3295 ; AVX-NEXT: vpaddb 32(%rdx), %xmm0, %xmm0
3296 ; AVX-NEXT: vpaddb 16(%rdx), %xmm2, %xmm2
3297 ; AVX-NEXT: vpaddb (%rdx), %xmm1, %xmm1
3298 ; AVX-NEXT: vmovdqa %xmm1, (%rcx)
3299 ; AVX-NEXT: vmovdqa %xmm2, 16(%rcx)
3300 ; AVX-NEXT: vmovdqa %xmm0, 32(%rcx)
3301 ; AVX-NEXT: retq
3303 ; AVX2-LABEL: vec384_v48i8_to_v4i96_factor12:
3304 ; AVX2: # %bb.0:
3305 ; AVX2-NEXT: vmovdqa (%rdi), %xmm0
3306 ; AVX2-NEXT: vpaddb (%rsi), %xmm0, %xmm0
3307 ; AVX2-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
3308 ; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
3309 ; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,2,1]
3310 ; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
3311 ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0
3312 ; AVX2-NEXT: vpaddb 32(%rdx), %ymm1, %ymm1
3313 ; AVX2-NEXT: vmovdqa %ymm1, 32(%rcx)
3314 ; AVX2-NEXT: vmovdqa %ymm0, (%rcx)
3315 ; AVX2-NEXT: vzeroupper
3316 ; AVX2-NEXT: retq
3318 ; AVX512F-LABEL: vec384_v48i8_to_v4i96_factor12:
3319 ; AVX512F: # %bb.0:
3320 ; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
3321 ; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0
3322 ; AVX512F-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
3323 ; AVX512F-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
3324 ; AVX512F-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,2,1]
3325 ; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
3326 ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
3327 ; AVX512F-NEXT: vpaddb 32(%rdx), %ymm1, %ymm1
3328 ; AVX512F-NEXT: vmovdqa %ymm1, 32(%rcx)
3329 ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
3330 ; AVX512F-NEXT: vzeroupper
3331 ; AVX512F-NEXT: retq
3333 ; AVX512BW-LABEL: vec384_v48i8_to_v4i96_factor12:
3334 ; AVX512BW: # %bb.0:
3335 ; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
3336 ; AVX512BW-NEXT: vpaddb (%rsi), %xmm0, %xmm0
3337 ; AVX512BW-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
3338 ; AVX512BW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,2,1]
3339 ; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
3340 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
3341 ; AVX512BW-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0
3342 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
3343 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
3344 ; AVX512BW-NEXT: vzeroupper
3345 ; AVX512BW-NEXT: retq
3346 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
3347 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
3348 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
3349 %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <48 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47>
3350 %zextd.vec = shufflevector <48 x i8> %in.vec.trunc, <48 x i8> zeroinitializer, <48 x i32> <i32 0, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 1, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 2, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 3, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95>
3351 %out.bytevec.padded = shufflevector <48 x i8> %zextd.vec, <48 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
3352 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
3353 %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
3354 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
3355 ret void
3356 }
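; Zero-extend by factor 16: input byte k is placed at byte 16*k, filling three 128-bit lanes.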
3358 define void @vec384_v48i8_to_v3i128_factor16(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
3359 ; SSE-LABEL: vec384_v48i8_to_v3i128_factor16:
3360 ; SSE: # %bb.0:
3361 ; SSE-NEXT: movdqa (%rdi), %xmm0
3362 ; SSE-NEXT: paddb (%rsi), %xmm0
3363 ; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,0,0,0]
3364 ; SSE-NEXT: pand %xmm0, %xmm1
3365 ; SSE-NEXT: movdqa %xmm0, %xmm2
3366 ; SSE-NEXT: pslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,1,2]
3367 ; SSE-NEXT: psrldq {{.*#+}} xmm2 = xmm2[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
3368 ; SSE-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1]
3369 ; SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
3370 ; SSE-NEXT: paddb 16(%rdx), %xmm0
3371 ; SSE-NEXT: paddb 32(%rdx), %xmm2
3372 ; SSE-NEXT: paddb (%rdx), %xmm1
3373 ; SSE-NEXT: movdqa %xmm1, (%rcx)
3374 ; SSE-NEXT: movdqa %xmm2, 32(%rcx)
3375 ; SSE-NEXT: movdqa %xmm0, 16(%rcx)
3376 ; SSE-NEXT: retq
3378 ; AVX-LABEL: vec384_v48i8_to_v3i128_factor16:
3379 ; AVX: # %bb.0:
3380 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
3381 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
3382 ; AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
3383 ; AVX-NEXT: vpslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1]
3384 ; AVX-NEXT: vpsrldq {{.*#+}} xmm2 = xmm2[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
3385 ; AVX-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2]
3386 ; AVX-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
3387 ; AVX-NEXT: vpaddb 32(%rdx), %xmm0, %xmm0
3388 ; AVX-NEXT: vpaddb 16(%rdx), %xmm2, %xmm2
3389 ; AVX-NEXT: vpaddb (%rdx), %xmm1, %xmm1
3390 ; AVX-NEXT: vmovdqa %xmm1, (%rcx)
3391 ; AVX-NEXT: vmovdqa %xmm2, 16(%rcx)
3392 ; AVX-NEXT: vmovdqa %xmm0, 32(%rcx)
3393 ; AVX-NEXT: retq
3395 ; AVX2-SLOW-LABEL: vec384_v48i8_to_v3i128_factor16:
3396 ; AVX2-SLOW: # %bb.0:
3397 ; AVX2-SLOW-NEXT: vmovdqa (%rdi), %xmm0
3398 ; AVX2-SLOW-NEXT: vpaddb (%rsi), %xmm0, %xmm0
3399 ; AVX2-SLOW-NEXT: vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2]
3400 ; AVX2-SLOW-NEXT: vpsrldq {{.*#+}} xmm1 = xmm1[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
3401 ; AVX2-SLOW-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
3402 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,1,3]
3403 ; AVX2-SLOW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
3404 ; AVX2-SLOW-NEXT: vpaddb (%rdx), %ymm0, %ymm0
3405 ; AVX2-SLOW-NEXT: vpaddb 32(%rdx), %ymm1, %ymm1
3406 ; AVX2-SLOW-NEXT: vmovdqa %ymm1, 32(%rcx)
3407 ; AVX2-SLOW-NEXT: vmovdqa %ymm0, (%rcx)
3408 ; AVX2-SLOW-NEXT: vzeroupper
3409 ; AVX2-SLOW-NEXT: retq
3411 ; AVX2-FAST-PERLANE-LABEL: vec384_v48i8_to_v3i128_factor16:
3412 ; AVX2-FAST-PERLANE: # %bb.0:
3413 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %xmm0
3414 ; AVX2-FAST-PERLANE-NEXT: vpaddb (%rsi), %xmm0, %xmm0
3415 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[2],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
3416 ; AVX2-FAST-PERLANE-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
3417 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,1,3]
3418 ; AVX2-FAST-PERLANE-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
3419 ; AVX2-FAST-PERLANE-NEXT: vpaddb (%rdx), %ymm0, %ymm0
3420 ; AVX2-FAST-PERLANE-NEXT: vpaddb 32(%rdx), %ymm1, %ymm1
3421 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm1, 32(%rcx)
3422 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm0, (%rcx)
3423 ; AVX2-FAST-PERLANE-NEXT: vzeroupper
3424 ; AVX2-FAST-PERLANE-NEXT: retq
3426 ; AVX2-FAST-LABEL: vec384_v48i8_to_v3i128_factor16:
3427 ; AVX2-FAST: # %bb.0:
3428 ; AVX2-FAST-NEXT: vmovdqa (%rdi), %xmm0
3429 ; AVX2-FAST-NEXT: vpaddb (%rsi), %xmm0, %xmm0
3430 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[2],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
3431 ; AVX2-FAST-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
3432 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,1,3]
3433 ; AVX2-FAST-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
3434 ; AVX2-FAST-NEXT: vpaddb (%rdx), %ymm0, %ymm0
3435 ; AVX2-FAST-NEXT: vpaddb 32(%rdx), %ymm1, %ymm1
3436 ; AVX2-FAST-NEXT: vmovdqa %ymm1, 32(%rcx)
3437 ; AVX2-FAST-NEXT: vmovdqa %ymm0, (%rcx)
3438 ; AVX2-FAST-NEXT: vzeroupper
3439 ; AVX2-FAST-NEXT: retq
3441 ; AVX512F-LABEL: vec384_v48i8_to_v3i128_factor16:
3442 ; AVX512F: # %bb.0:
3443 ; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
3444 ; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0
3445 ; AVX512F-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[2],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
3446 ; AVX512F-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
3447 ; AVX512F-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,1,3]
3448 ; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
3449 ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
3450 ; AVX512F-NEXT: vpaddb 32(%rdx), %ymm1, %ymm1
3451 ; AVX512F-NEXT: vmovdqa %ymm1, 32(%rcx)
3452 ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
3453 ; AVX512F-NEXT: vzeroupper
3454 ; AVX512F-NEXT: retq
3456 ; AVX512BW-LABEL: vec384_v48i8_to_v3i128_factor16:
3457 ; AVX512BW: # %bb.0:
3458 ; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
3459 ; AVX512BW-NEXT: vpaddb (%rsi), %xmm0, %xmm0
3460 ; AVX512BW-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
3461 ; AVX512BW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,1,3]
3462 ; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
3463 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
3464 ; AVX512BW-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0
3465 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
3466 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
3467 ; AVX512BW-NEXT: vzeroupper
3468 ; AVX512BW-NEXT: retq
3469 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
3470 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
3471 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
3472 %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <48 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47>
3473 %zextd.vec = shufflevector <48 x i8> %in.vec.trunc, <48 x i8> zeroinitializer, <48 x i32> <i32 0, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 1, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 2, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95>
3474 %out.bytevec.padded = shufflevector <48 x i8> %zextd.vec, <48 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
3475 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
3476 %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
3477 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
3478 ret void
3479 }
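; Zero-extend by factor 24: input bytes 0 and 1 land at bytes 0 and 24; the rest of the 48-byte result is zero.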
3481 define void @vec384_v48i8_to_v2i192_factor24(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
3482 ; SSE2-LABEL: vec384_v48i8_to_v2i192_factor24:
3483 ; SSE2: # %bb.0:
3484 ; SSE2-NEXT: movdqa (%rdi), %xmm0
3485 ; SSE2-NEXT: paddb (%rsi), %xmm0
3486 ; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [255,0,0,0]
3487 ; SSE2-NEXT: pand %xmm0, %xmm1
3488 ; SSE2-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1]
3489 ; SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
3490 ; SSE2-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
3491 ; SSE2-NEXT: movaps 32(%rdx), %xmm2
3492 ; SSE2-NEXT: paddb 16(%rdx), %xmm0
3493 ; SSE2-NEXT: paddb (%rdx), %xmm1
3494 ; SSE2-NEXT: movaps %xmm2, 32(%rcx)
3495 ; SSE2-NEXT: movdqa %xmm1, (%rcx)
3496 ; SSE2-NEXT: movdqa %xmm0, 16(%rcx)
3497 ; SSE2-NEXT: retq
3499 ; SSE42-LABEL: vec384_v48i8_to_v2i192_factor24:
3500 ; SSE42: # %bb.0:
3501 ; SSE42-NEXT: movdqa (%rdi), %xmm0
3502 ; SSE42-NEXT: paddb (%rsi), %xmm0
3503 ; SSE42-NEXT: movdqa %xmm0, %xmm1
3504 ; SSE42-NEXT: pshufb {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
3505 ; SSE42-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
3506 ; SSE42-NEXT: movaps 32(%rdx), %xmm2
3507 ; SSE42-NEXT: paddb (%rdx), %xmm0
3508 ; SSE42-NEXT: paddb 16(%rdx), %xmm1
3509 ; SSE42-NEXT: movaps %xmm2, 32(%rcx)
3510 ; SSE42-NEXT: movdqa %xmm1, 16(%rcx)
3511 ; SSE42-NEXT: movdqa %xmm0, (%rcx)
3512 ; SSE42-NEXT: retq
3514 ; AVX-LABEL: vec384_v48i8_to_v2i192_factor24:
3515 ; AVX: # %bb.0:
3516 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
3517 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
3518 ; AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
3519 ; AVX-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
3520 ; AVX-NEXT: vmovaps 32(%rdx), %ymm2
3521 ; AVX-NEXT: vpaddb 16(%rdx), %xmm0, %xmm0
3522 ; AVX-NEXT: vpaddb (%rdx), %xmm1, %xmm1
3523 ; AVX-NEXT: vmovaps %ymm2, 32(%rcx)
3524 ; AVX-NEXT: vmovdqa %xmm1, (%rcx)
3525 ; AVX-NEXT: vmovdqa %xmm0, 16(%rcx)
3526 ; AVX-NEXT: vzeroupper
3527 ; AVX-NEXT: retq
3529 ; AVX2-LABEL: vec384_v48i8_to_v2i192_factor24:
3530 ; AVX2: # %bb.0:
3531 ; AVX2-NEXT: vmovdqa (%rdi), %xmm0
3532 ; AVX2-NEXT: vpaddb (%rsi), %xmm0, %xmm0
3533 ; AVX2-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
3534 ; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,2,1]
3535 ; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
3536 ; AVX2-NEXT: vmovaps 32(%rdx), %ymm1
3537 ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0
3538 ; AVX2-NEXT: vmovaps %ymm1, 32(%rcx)
3539 ; AVX2-NEXT: vmovdqa %ymm0, (%rcx)
3540 ; AVX2-NEXT: vzeroupper
3541 ; AVX2-NEXT: retq
3543 ; AVX512F-LABEL: vec384_v48i8_to_v2i192_factor24:
3544 ; AVX512F: # %bb.0:
3545 ; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
3546 ; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0
3547 ; AVX512F-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
3548 ; AVX512F-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,2,1]
3549 ; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
3550 ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
3551 ; AVX512F-NEXT: vmovaps 32(%rdx), %ymm1
3552 ; AVX512F-NEXT: vmovaps %ymm1, 32(%rcx)
3553 ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
3554 ; AVX512F-NEXT: vzeroupper
3555 ; AVX512F-NEXT: retq
3557 ; AVX512BW-LABEL: vec384_v48i8_to_v2i192_factor24:
3558 ; AVX512BW: # %bb.0:
3559 ; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
3560 ; AVX512BW-NEXT: vpaddb (%rsi), %xmm0, %xmm0
3561 ; AVX512BW-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
3562 ; AVX512BW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,2,1]
3563 ; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
3564 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
3565 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
3566 ; AVX512BW-NEXT: vzeroupper
3567 ; AVX512BW-NEXT: retq
3568 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
3569 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
3570 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
3571 %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <48 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47>
3572 %zextd.vec = shufflevector <48 x i8> %in.vec.trunc, <48 x i8> zeroinitializer, <48 x i32> <i32 0, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 1, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95>
3573 %out.bytevec.padded = shufflevector <48 x i8> %zextd.vec, <48 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
3574 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
3575 %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
3576 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
3577 ret void
3578 }
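; Zero-extend by factor 48: only input byte 0 is kept (at byte 0); the other 47 bytes of the result are zero.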
3580 define void @vec384_v48i8_to_v1i384_factor48(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
3581 ; SSE-LABEL: vec384_v48i8_to_v1i384_factor48:
3582 ; SSE: # %bb.0:
3583 ; SSE-NEXT: movdqa (%rdi), %xmm0
3584 ; SSE-NEXT: paddb (%rsi), %xmm0
3585 ; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
3586 ; SSE-NEXT: movaps 16(%rdx), %xmm1
3587 ; SSE-NEXT: movaps 32(%rdx), %xmm2
3588 ; SSE-NEXT: paddb (%rdx), %xmm0
3589 ; SSE-NEXT: movaps %xmm1, 16(%rcx)
3590 ; SSE-NEXT: movaps %xmm2, 32(%rcx)
3591 ; SSE-NEXT: movdqa %xmm0, (%rcx)
3592 ; SSE-NEXT: retq
3594 ; AVX-LABEL: vec384_v48i8_to_v1i384_factor48:
3595 ; AVX: # %bb.0:
3596 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
3597 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
3598 ; AVX-NEXT: vmovaps 32(%rdx), %ymm1
3599 ; AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
3600 ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0
3601 ; AVX-NEXT: vmovaps 16(%rdx), %xmm2
3602 ; AVX-NEXT: vmovaps %xmm2, 16(%rcx)
3603 ; AVX-NEXT: vmovaps %ymm1, 32(%rcx)
3604 ; AVX-NEXT: vmovdqa %xmm0, (%rcx)
3605 ; AVX-NEXT: vzeroupper
3606 ; AVX-NEXT: retq
3608 ; AVX2-LABEL: vec384_v48i8_to_v1i384_factor48:
3609 ; AVX2: # %bb.0:
3610 ; AVX2-NEXT: vmovdqa (%rdi), %ymm0
3611 ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0
3612 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [255,0,0,0]
3613 ; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
3614 ; AVX2-NEXT: vmovaps 32(%rdx), %ymm1
3615 ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0
3616 ; AVX2-NEXT: vmovaps %ymm1, 32(%rcx)
3617 ; AVX2-NEXT: vmovdqa %ymm0, (%rcx)
3618 ; AVX2-NEXT: vzeroupper
3619 ; AVX2-NEXT: retq
3621 ; AVX512F-LABEL: vec384_v48i8_to_v1i384_factor48:
3622 ; AVX512F: # %bb.0:
3623 ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
3624 ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0
3625 ; AVX512F-NEXT: vmovdqa {{.*#+}} xmm1 = [255,0,0,0]
3626 ; AVX512F-NEXT: vpand %ymm1, %ymm0, %ymm0
3627 ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
3628 ; AVX512F-NEXT: vmovaps 32(%rdx), %ymm1
3629 ; AVX512F-NEXT: vmovaps %ymm1, 32(%rcx)
3630 ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
3631 ; AVX512F-NEXT: vzeroupper
3632 ; AVX512F-NEXT: retq
3634 ; AVX512BW-LABEL: vec384_v48i8_to_v1i384_factor48:
3635 ; AVX512BW: # %bb.0:
3636 ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
3637 ; AVX512BW-NEXT: vpaddb (%rsi), %zmm0, %zmm0
3638 ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = [255,0,0,0]
3639 ; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
3640 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
3641 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
3642 ; AVX512BW-NEXT: vzeroupper
3643 ; AVX512BW-NEXT: retq
3644 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
3645 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
3646 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
3647 %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <48 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47>
3648 %zextd.vec = shufflevector <48 x i8> %in.vec.trunc, <48 x i8> zeroinitializer, <48 x i32> <i32 0, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95>
3649 %out.bytevec.padded = shufflevector <48 x i8> %zextd.vec, <48 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
3650 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
3651 %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
3652 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
3653 ret void
3654 }
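; Zero-extend by factor 2 at i16 granularity: each of the low 12 i16 elements becomes the low half of an i32 lane.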
3656 define void @vec384_v24i16_to_v12i32_factor2(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
3657 ; SSE2-LABEL: vec384_v24i16_to_v12i32_factor2:
3658 ; SSE2: # %bb.0:
3659 ; SSE2-NEXT: movdqa (%rdi), %xmm0
3660 ; SSE2-NEXT: movdqa 16(%rdi), %xmm1
3661 ; SSE2-NEXT: paddb (%rsi), %xmm0
3662 ; SSE2-NEXT: paddb 16(%rsi), %xmm1
3663 ; SSE2-NEXT: pxor %xmm2, %xmm2
3664 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
3665 ; SSE2-NEXT: movdqa %xmm0, %xmm3
3666 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
3667 ; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
3668 ; SSE2-NEXT: paddb 16(%rdx), %xmm0
3669 ; SSE2-NEXT: paddb (%rdx), %xmm3
3670 ; SSE2-NEXT: paddb 32(%rdx), %xmm1
3671 ; SSE2-NEXT: movdqa %xmm1, 32(%rcx)
3672 ; SSE2-NEXT: movdqa %xmm3, (%rcx)
3673 ; SSE2-NEXT: movdqa %xmm0, 16(%rcx)
3674 ; SSE2-NEXT: retq
3676 ; SSE42-LABEL: vec384_v24i16_to_v12i32_factor2:
3677 ; SSE42: # %bb.0:
3678 ; SSE42-NEXT: movdqa (%rdi), %xmm0
3679 ; SSE42-NEXT: movdqa 16(%rdi), %xmm1
3680 ; SSE42-NEXT: paddb (%rsi), %xmm0
3681 ; SSE42-NEXT: paddb 16(%rsi), %xmm1
3682 ; SSE42-NEXT: pxor %xmm2, %xmm2
3683 ; SSE42-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
3684 ; SSE42-NEXT: pmovzxwd {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
3685 ; SSE42-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
3686 ; SSE42-NEXT: paddb 16(%rdx), %xmm0
3687 ; SSE42-NEXT: paddb (%rdx), %xmm3
3688 ; SSE42-NEXT: paddb 32(%rdx), %xmm1
3689 ; SSE42-NEXT: movdqa %xmm1, 32(%rcx)
3690 ; SSE42-NEXT: movdqa %xmm3, (%rcx)
3691 ; SSE42-NEXT: movdqa %xmm0, 16(%rcx)
3692 ; SSE42-NEXT: retq
3694 ; AVX-LABEL: vec384_v24i16_to_v12i32_factor2:
3695 ; AVX: # %bb.0:
3696 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
3697 ; AVX-NEXT: vmovdqa 16(%rdi), %xmm1
3698 ; AVX-NEXT: vpaddb 16(%rsi), %xmm1, %xmm1
3699 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
3700 ; AVX-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
3701 ; AVX-NEXT: vpxor %xmm3, %xmm3, %xmm3
3702 ; AVX-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
3703 ; AVX-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
3704 ; AVX-NEXT: vpaddb 32(%rdx), %xmm1, %xmm1
3705 ; AVX-NEXT: vpaddb 16(%rdx), %xmm0, %xmm0
3706 ; AVX-NEXT: vpaddb (%rdx), %xmm2, %xmm2
3707 ; AVX-NEXT: vmovdqa %xmm2, (%rcx)
3708 ; AVX-NEXT: vmovdqa %xmm0, 16(%rcx)
3709 ; AVX-NEXT: vmovdqa %xmm1, 32(%rcx)
3710 ; AVX-NEXT: retq
3712 ; AVX2-LABEL: vec384_v24i16_to_v12i32_factor2:
3713 ; AVX2: # %bb.0:
3714 ; AVX2-NEXT: vmovdqa (%rdi), %ymm0
3715 ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0
3716 ; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
3717 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
3718 ; AVX2-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
3719 ; AVX2-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
3720 ; AVX2-NEXT: vpaddb (%rdx), %ymm1, %ymm1
3721 ; AVX2-NEXT: vmovdqa %ymm1, (%rcx)
3722 ; AVX2-NEXT: vmovdqa %ymm0, 32(%rcx)
3723 ; AVX2-NEXT: vzeroupper
3724 ; AVX2-NEXT: retq
3726 ; AVX512F-LABEL: vec384_v24i16_to_v12i32_factor2:
3727 ; AVX512F: # %bb.0:
3728 ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
3729 ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0
3730 ; AVX512F-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
3731 ; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm0
3732 ; AVX512F-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
3733 ; AVX512F-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
3734 ; AVX512F-NEXT: vpaddb (%rdx), %ymm1, %ymm1
3735 ; AVX512F-NEXT: vmovdqa %ymm1, (%rcx)
3736 ; AVX512F-NEXT: vmovdqa %ymm0, 32(%rcx)
3737 ; AVX512F-NEXT: vzeroupper
3738 ; AVX512F-NEXT: retq
3740 ; AVX512BW-LABEL: vec384_v24i16_to_v12i32_factor2:
3741 ; AVX512BW: # %bb.0:
3742 ; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
3743 ; AVX512BW-NEXT: vpaddb (%rsi), %ymm0, %ymm0
3744 ; AVX512BW-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
3745 ; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm0
3746 ; AVX512BW-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
3747 ; AVX512BW-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0
3748 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
3749 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
3750 ; AVX512BW-NEXT: vzeroupper
3751 ; AVX512BW-NEXT: retq
3752 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
3753 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
3754 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
3755 %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <48 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47>
3756 %in.vec.cast = bitcast <48 x i8> %in.vec.trunc to <24 x i16>
3757 %zextd.vec = shufflevector <24 x i16> %in.vec.cast, <24 x i16> zeroinitializer, <24 x i32> <i32 0, i32 25, i32 1, i32 27, i32 2, i32 29, i32 3, i32 31, i32 4, i32 33, i32 5, i32 35, i32 6, i32 37, i32 7, i32 39, i32 8, i32 41, i32 9, i32 43, i32 10, i32 45, i32 11, i32 47>
3758 %out.bytevec = bitcast <24 x i16> %zextd.vec to <48 x i8>
3759 %out.bytevec.padded = shufflevector <48 x i8> %out.bytevec, <48 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
3760 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
3761 %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
3762 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
3763 ret void
3764 }
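; Zero-extend by factor 3 at i16 granularity: i16 element k is placed at i16 position 3*k within eight 48-bit lanes.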
3766 define void @vec384_v24i16_to_v8i48_factor3(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
3767 ; SSE2-LABEL: vec384_v24i16_to_v8i48_factor3:
3768 ; SSE2: # %bb.0:
3769 ; SSE2-NEXT: movdqa (%rdi), %xmm0
3770 ; SSE2-NEXT: paddb (%rsi), %xmm0
3771 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,3,3,3]
3772 ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
3773 ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,0,1,1]
3774 ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
3775 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,2]
3776 ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
3777 ; SSE2-NEXT: paddb 16(%rdx), %xmm0
3778 ; SSE2-NEXT: paddb (%rdx), %xmm2
3779 ; SSE2-NEXT: paddb 32(%rdx), %xmm1
3780 ; SSE2-NEXT: movdqa %xmm1, 32(%rcx)
3781 ; SSE2-NEXT: movdqa %xmm2, (%rcx)
3782 ; SSE2-NEXT: movdqa %xmm0, 16(%rcx)
3783 ; SSE2-NEXT: retq
3785 ; SSE42-LABEL: vec384_v24i16_to_v8i48_factor3:
3786 ; SSE42: # %bb.0:
3787 ; SSE42-NEXT: movdqa (%rdi), %xmm0
3788 ; SSE42-NEXT: paddb (%rsi), %xmm0
3789 ; SSE42-NEXT: pxor %xmm1, %xmm1
3790 ; SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm0[3,3,3,3]
3791 ; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm1[0,1],xmm2[2],xmm1[3,4],xmm2[5],xmm1[6,7]
3792 ; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,0,1,1]
3793 ; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0],xmm1[1,2],xmm3[3],xmm1[4,5],xmm3[6],xmm1[7]
3794 ; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,2]
3795 ; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3],xmm0[4],xmm1[5,6],xmm0[7]
3796 ; SSE42-NEXT: paddb 16(%rdx), %xmm0
3797 ; SSE42-NEXT: paddb (%rdx), %xmm3
3798 ; SSE42-NEXT: paddb 32(%rdx), %xmm2
3799 ; SSE42-NEXT: movdqa %xmm2, 32(%rcx)
3800 ; SSE42-NEXT: movdqa %xmm3, (%rcx)
3801 ; SSE42-NEXT: movdqa %xmm0, 16(%rcx)
3802 ; SSE42-NEXT: retq
3804 ; AVX-LABEL: vec384_v24i16_to_v8i48_factor3:
3805 ; AVX: # %bb.0:
3806 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
3807 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
3808 ; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,0,1,1]
3809 ; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
3810 ; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2],xmm1[3],xmm2[4,5],xmm1[6],xmm2[7]
3811 ; AVX-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,2,2]
3812 ; AVX-NEXT: vpblendw {{.*#+}} xmm3 = xmm2[0],xmm3[1],xmm2[2,3],xmm3[4],xmm2[5,6],xmm3[7]
3813 ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
3814 ; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3,4],xmm0[5],xmm2[6,7]
3815 ; AVX-NEXT: vpaddb 32(%rdx), %xmm0, %xmm0
3816 ; AVX-NEXT: vpaddb 16(%rdx), %xmm3, %xmm2
3817 ; AVX-NEXT: vpaddb (%rdx), %xmm1, %xmm1
3818 ; AVX-NEXT: vmovdqa %xmm1, (%rcx)
3819 ; AVX-NEXT: vmovdqa %xmm2, 16(%rcx)
3820 ; AVX-NEXT: vmovdqa %xmm0, 32(%rcx)
3821 ; AVX-NEXT: retq
3823 ; AVX2-SLOW-LABEL: vec384_v24i16_to_v8i48_factor3:
3824 ; AVX2-SLOW: # %bb.0:
3825 ; AVX2-SLOW-NEXT: vmovdqa (%rdi), %ymm0
3826 ; AVX2-SLOW-NEXT: vpaddb (%rsi), %ymm0, %ymm0
3827 ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm1 = [0,0,u,1,1,u,2,2]
3828 ; AVX2-SLOW-NEXT: vpermd %ymm0, %ymm1, %ymm1
3829 ; AVX2-SLOW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
3830 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
3831 ; AVX2-SLOW-NEXT: vpxor %xmm2, %xmm2, %xmm2
3832 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3,4],xmm0[5],xmm2[6,7]
3833 ; AVX2-SLOW-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
3834 ; AVX2-SLOW-NEXT: vpaddb (%rdx), %ymm1, %ymm1
3835 ; AVX2-SLOW-NEXT: vmovdqa %ymm1, (%rcx)
3836 ; AVX2-SLOW-NEXT: vmovdqa %ymm0, 32(%rcx)
3837 ; AVX2-SLOW-NEXT: vzeroupper
3838 ; AVX2-SLOW-NEXT: retq
3840 ; AVX2-FAST-PERLANE-LABEL: vec384_v24i16_to_v8i48_factor3:
3841 ; AVX2-FAST-PERLANE: # %bb.0:
3842 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %ymm0
3843 ; AVX2-FAST-PERLANE-NEXT: vpaddb (%rsi), %ymm0, %ymm0
3844 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm1 = [0,0,u,1,1,u,2,2]
3845 ; AVX2-FAST-PERLANE-NEXT: vpermd %ymm0, %ymm1, %ymm1
3846 ; AVX2-FAST-PERLANE-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
3847 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[12,13],zero,zero,zero,zero,xmm0[14,15],zero,zero,zero,zero
3848 ; AVX2-FAST-PERLANE-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
3849 ; AVX2-FAST-PERLANE-NEXT: vpaddb (%rdx), %ymm1, %ymm1
3850 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm1, (%rcx)
3851 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm0, 32(%rcx)
3852 ; AVX2-FAST-PERLANE-NEXT: vzeroupper
3853 ; AVX2-FAST-PERLANE-NEXT: retq
3855 ; AVX2-FAST-LABEL: vec384_v24i16_to_v8i48_factor3:
3856 ; AVX2-FAST: # %bb.0:
3857 ; AVX2-FAST-NEXT: vmovdqa (%rdi), %ymm0
3858 ; AVX2-FAST-NEXT: vpaddb (%rsi), %ymm0, %ymm0
3859 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm1 = [0,0,u,1,1,u,2,2]
3860 ; AVX2-FAST-NEXT: vpermd %ymm0, %ymm1, %ymm1
3861 ; AVX2-FAST-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
3862 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[12,13],zero,zero,zero,zero,xmm0[14,15],zero,zero,zero,zero
3863 ; AVX2-FAST-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
3864 ; AVX2-FAST-NEXT: vpaddb (%rdx), %ymm1, %ymm1
3865 ; AVX2-FAST-NEXT: vmovdqa %ymm1, (%rcx)
3866 ; AVX2-FAST-NEXT: vmovdqa %ymm0, 32(%rcx)
3867 ; AVX2-FAST-NEXT: vzeroupper
3868 ; AVX2-FAST-NEXT: retq
3870 ; AVX512F-SLOW-LABEL: vec384_v24i16_to_v8i48_factor3:
3871 ; AVX512F-SLOW: # %bb.0:
3872 ; AVX512F-SLOW-NEXT: vmovdqa (%rdi), %ymm0
3873 ; AVX512F-SLOW-NEXT: vpaddb (%rsi), %ymm0, %ymm0
3874 ; AVX512F-SLOW-NEXT: vmovdqa {{.*#+}} ymm1 = [0,0,u,1,1,u,2,2]
3875 ; AVX512F-SLOW-NEXT: vpermd %ymm0, %ymm1, %ymm1
3876 ; AVX512F-SLOW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
3877 ; AVX512F-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
3878 ; AVX512F-SLOW-NEXT: vpxor %xmm2, %xmm2, %xmm2
3879 ; AVX512F-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3,4],xmm0[5],xmm2[6,7]
3880 ; AVX512F-SLOW-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
3881 ; AVX512F-SLOW-NEXT: vpaddb (%rdx), %ymm1, %ymm1
3882 ; AVX512F-SLOW-NEXT: vmovdqa %ymm1, (%rcx)
3883 ; AVX512F-SLOW-NEXT: vmovdqa %ymm0, 32(%rcx)
3884 ; AVX512F-SLOW-NEXT: vzeroupper
3885 ; AVX512F-SLOW-NEXT: retq
3887 ; AVX512F-FAST-LABEL: vec384_v24i16_to_v8i48_factor3:
3888 ; AVX512F-FAST: # %bb.0:
3889 ; AVX512F-FAST-NEXT: vmovdqa (%rdi), %ymm0
3890 ; AVX512F-FAST-NEXT: vpaddb (%rsi), %ymm0, %ymm0
3891 ; AVX512F-FAST-NEXT: vmovdqa {{.*#+}} ymm1 = [0,0,u,1,1,u,2,2]
3892 ; AVX512F-FAST-NEXT: vpermd %ymm0, %ymm1, %ymm1
3893 ; AVX512F-FAST-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
3894 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[12,13],zero,zero,zero,zero,xmm0[14,15],zero,zero,zero,zero
3895 ; AVX512F-FAST-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
3896 ; AVX512F-FAST-NEXT: vpaddb (%rdx), %ymm1, %ymm1
3897 ; AVX512F-FAST-NEXT: vmovdqa %ymm1, (%rcx)
3898 ; AVX512F-FAST-NEXT: vmovdqa %ymm0, 32(%rcx)
3899 ; AVX512F-FAST-NEXT: vzeroupper
3900 ; AVX512F-FAST-NEXT: retq
3902 ; AVX512BW-SLOW-LABEL: vec384_v24i16_to_v8i48_factor3:
3903 ; AVX512BW-SLOW: # %bb.0:
3904 ; AVX512BW-SLOW-NEXT: vmovdqa (%rdi), %ymm0
3905 ; AVX512BW-SLOW-NEXT: vpaddb (%rsi), %ymm0, %ymm0
3906 ; AVX512BW-SLOW-NEXT: vmovdqa {{.*#+}} ymm1 = [16,1,2,17,4,5,18,7,8,19,10,11,20,13,14,21]
3907 ; AVX512BW-SLOW-NEXT: vpxor %xmm2, %xmm2, %xmm2
3908 ; AVX512BW-SLOW-NEXT: vpermt2w %ymm0, %ymm1, %ymm2
3909 ; AVX512BW-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
3910 ; AVX512BW-SLOW-NEXT: vpxor %xmm1, %xmm1, %xmm1
3911 ; AVX512BW-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3,4],xmm0[5],xmm1[6,7]
3912 ; AVX512BW-SLOW-NEXT: vinserti32x4 $2, %xmm0, %zmm2, %zmm0
3913 ; AVX512BW-SLOW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
3914 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm0, (%rcx)
3915 ; AVX512BW-SLOW-NEXT: vzeroupper
3916 ; AVX512BW-SLOW-NEXT: retq
3918 ; AVX512BW-FAST-LABEL: vec384_v24i16_to_v8i48_factor3:
3919 ; AVX512BW-FAST: # %bb.0:
3920 ; AVX512BW-FAST-NEXT: vmovdqa (%rdi), %ymm0
3921 ; AVX512BW-FAST-NEXT: vpaddb (%rsi), %ymm0, %ymm0
3922 ; AVX512BW-FAST-NEXT: vmovdqa {{.*#+}} ymm1 = [16,1,2,17,4,5,18,7,8,19,10,11,20,13,14,21]
3923 ; AVX512BW-FAST-NEXT: vpxor %xmm2, %xmm2, %xmm2
3924 ; AVX512BW-FAST-NEXT: vpermt2w %ymm0, %ymm1, %ymm2
3925 ; AVX512BW-FAST-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[12,13],zero,zero,zero,zero,xmm0[14,15],zero,zero,zero,zero
3926 ; AVX512BW-FAST-NEXT: vinserti32x4 $2, %xmm0, %zmm2, %zmm0
3927 ; AVX512BW-FAST-NEXT: vpaddb (%rdx), %zmm0, %zmm0
3928 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm0, (%rcx)
3929 ; AVX512BW-FAST-NEXT: vzeroupper
3930 ; AVX512BW-FAST-NEXT: retq
3931 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
3932 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
3933 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
3934 %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <48 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47>
3935 %in.vec.cast = bitcast <48 x i8> %in.vec.trunc to <24 x i16>
3936 %zextd.vec = shufflevector <24 x i16> %in.vec.cast, <24 x i16> zeroinitializer, <24 x i32> <i32 0, i32 25, i32 26, i32 1, i32 28, i32 29, i32 2, i32 31, i32 32, i32 3, i32 34, i32 35, i32 4, i32 37, i32 38, i32 5, i32 40, i32 41, i32 6, i32 43, i32 44, i32 7, i32 46, i32 47>
3937 %out.bytevec = bitcast <24 x i16> %zextd.vec to <48 x i8>
3938 %out.bytevec.padded = shufflevector <48 x i8> %out.bytevec, <48 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
3939 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
3940 %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
3941 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
3942 ret void
3943 }
3945 define void @vec384_v24i16_to_v6i64_factor4(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
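; The low 6 words of the 24 x i16 input are spread to every 4th i16 lane (a v6i16 -> v6i64 zero-extension); all other output words are zero before the 64-byte output bias is added.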
3946 ; SSE2-LABEL: vec384_v24i16_to_v6i64_factor4:
3947 ; SSE2: # %bb.0:
3948 ; SSE2-NEXT: movdqa (%rdi), %xmm0
3949 ; SSE2-NEXT: paddb (%rsi), %xmm0
3950 ; SSE2-NEXT: pxor %xmm1, %xmm1
3951 ; SSE2-NEXT: movdqa %xmm0, %xmm2
3952 ; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
3953 ; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
3954 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
3955 ; SSE2-NEXT: movdqa %xmm0, %xmm3
3956 ; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
3957 ; SSE2-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
3958 ; SSE2-NEXT: paddb 16(%rdx), %xmm0
3959 ; SSE2-NEXT: paddb (%rdx), %xmm3
3960 ; SSE2-NEXT: paddb 32(%rdx), %xmm2
3961 ; SSE2-NEXT: movdqa %xmm2, 32(%rcx)
3962 ; SSE2-NEXT: movdqa %xmm3, (%rcx)
3963 ; SSE2-NEXT: movdqa %xmm0, 16(%rcx)
3964 ; SSE2-NEXT: retq
3966 ; SSE42-LABEL: vec384_v24i16_to_v6i64_factor4:
3967 ; SSE42: # %bb.0:
3968 ; SSE42-NEXT: movdqa (%rdi), %xmm0
3969 ; SSE42-NEXT: paddb (%rsi), %xmm0
3970 ; SSE42-NEXT: pmovzxwq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
3971 ; SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
3972 ; SSE42-NEXT: pmovzxwq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
3973 ; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
3974 ; SSE42-NEXT: pmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
3975 ; SSE42-NEXT: paddb 16(%rdx), %xmm0
3976 ; SSE42-NEXT: paddb 32(%rdx), %xmm2
3977 ; SSE42-NEXT: paddb (%rdx), %xmm1
3978 ; SSE42-NEXT: movdqa %xmm1, (%rcx)
3979 ; SSE42-NEXT: movdqa %xmm2, 32(%rcx)
3980 ; SSE42-NEXT: movdqa %xmm0, 16(%rcx)
3981 ; SSE42-NEXT: retq
3983 ; AVX-LABEL: vec384_v24i16_to_v6i64_factor4:
3984 ; AVX: # %bb.0:
3985 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
3986 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
3987 ; AVX-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
3988 ; AVX-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,1,1]
3989 ; AVX-NEXT: vpmovzxwq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
3990 ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
3991 ; AVX-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
3992 ; AVX-NEXT: vpaddb 32(%rdx), %xmm0, %xmm0
3993 ; AVX-NEXT: vpaddb 16(%rdx), %xmm2, %xmm2
3994 ; AVX-NEXT: vpaddb (%rdx), %xmm1, %xmm1
3995 ; AVX-NEXT: vmovdqa %xmm1, (%rcx)
3996 ; AVX-NEXT: vmovdqa %xmm2, 16(%rcx)
3997 ; AVX-NEXT: vmovdqa %xmm0, 32(%rcx)
3998 ; AVX-NEXT: retq
4000 ; AVX2-SLOW-LABEL: vec384_v24i16_to_v6i64_factor4:
4001 ; AVX2-SLOW: # %bb.0:
4002 ; AVX2-SLOW-NEXT: vmovdqa (%rdi), %xmm0
4003 ; AVX2-SLOW-NEXT: vpaddb (%rsi), %xmm0, %xmm0
4004 ; AVX2-SLOW-NEXT: vpmovzxwq {{.*#+}} ymm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
4005 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
4006 ; AVX2-SLOW-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
4007 ; AVX2-SLOW-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
4008 ; AVX2-SLOW-NEXT: vpaddb (%rdx), %ymm1, %ymm1
4009 ; AVX2-SLOW-NEXT: vmovdqa %ymm1, (%rcx)
4010 ; AVX2-SLOW-NEXT: vmovdqa %ymm0, 32(%rcx)
4011 ; AVX2-SLOW-NEXT: vzeroupper
4012 ; AVX2-SLOW-NEXT: retq
4014 ; AVX2-FAST-PERLANE-LABEL: vec384_v24i16_to_v6i64_factor4:
4015 ; AVX2-FAST-PERLANE: # %bb.0:
4016 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %xmm0
4017 ; AVX2-FAST-PERLANE-NEXT: vpaddb (%rsi), %xmm0, %xmm0
4018 ; AVX2-FAST-PERLANE-NEXT: vpmovzxwq {{.*#+}} ymm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
4019 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[8,9],zero,zero,zero,zero,zero,zero,xmm0[10,11],zero,zero,zero,zero,zero,zero
4020 ; AVX2-FAST-PERLANE-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
4021 ; AVX2-FAST-PERLANE-NEXT: vpaddb (%rdx), %ymm1, %ymm1
4022 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm1, (%rcx)
4023 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm0, 32(%rcx)
4024 ; AVX2-FAST-PERLANE-NEXT: vzeroupper
4025 ; AVX2-FAST-PERLANE-NEXT: retq
4027 ; AVX2-FAST-LABEL: vec384_v24i16_to_v6i64_factor4:
4028 ; AVX2-FAST: # %bb.0:
4029 ; AVX2-FAST-NEXT: vmovdqa (%rdi), %xmm0
4030 ; AVX2-FAST-NEXT: vpaddb (%rsi), %xmm0, %xmm0
4031 ; AVX2-FAST-NEXT: vpmovzxwq {{.*#+}} ymm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
4032 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[8,9],zero,zero,zero,zero,zero,zero,xmm0[10,11],zero,zero,zero,zero,zero,zero
4033 ; AVX2-FAST-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
4034 ; AVX2-FAST-NEXT: vpaddb (%rdx), %ymm1, %ymm1
4035 ; AVX2-FAST-NEXT: vmovdqa %ymm1, (%rcx)
4036 ; AVX2-FAST-NEXT: vmovdqa %ymm0, 32(%rcx)
4037 ; AVX2-FAST-NEXT: vzeroupper
4038 ; AVX2-FAST-NEXT: retq
4040 ; AVX512F-LABEL: vec384_v24i16_to_v6i64_factor4:
4041 ; AVX512F: # %bb.0:
4042 ; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
4043 ; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0
4044 ; AVX512F-NEXT: vpmovzxwq {{.*#+}} ymm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
4045 ; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[8,9],zero,zero,zero,zero,zero,zero,xmm0[10,11],zero,zero,zero,zero,zero,zero
4046 ; AVX512F-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
4047 ; AVX512F-NEXT: vpaddb (%rdx), %ymm1, %ymm1
4048 ; AVX512F-NEXT: vmovdqa %ymm1, (%rcx)
4049 ; AVX512F-NEXT: vmovdqa %ymm0, 32(%rcx)
4050 ; AVX512F-NEXT: vzeroupper
4051 ; AVX512F-NEXT: retq
4053 ; AVX512BW-LABEL: vec384_v24i16_to_v6i64_factor4:
4054 ; AVX512BW: # %bb.0:
4055 ; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
4056 ; AVX512BW-NEXT: vpaddb (%rsi), %xmm0, %xmm0
4057 ; AVX512BW-NEXT: vpmovzxwq {{.*#+}} ymm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
4058 ; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[8,9],zero,zero,zero,zero,zero,zero,xmm0[10,11],zero,zero,zero,zero,zero,zero
4059 ; AVX512BW-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0
4060 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
4061 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
4062 ; AVX512BW-NEXT: vzeroupper
4063 ; AVX512BW-NEXT: retq
4064 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
4065 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
4066 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
4067 %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <48 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47>
4068 %in.vec.cast = bitcast <48 x i8> %in.vec.trunc to <24 x i16>
4069 %zextd.vec = shufflevector <24 x i16> %in.vec.cast, <24 x i16> zeroinitializer, <24 x i32> <i32 0, i32 25, i32 26, i32 27, i32 1, i32 29, i32 30, i32 31, i32 2, i32 33, i32 34, i32 35, i32 3, i32 37, i32 38, i32 39, i32 4, i32 41, i32 42, i32 43, i32 5, i32 45, i32 46, i32 47>
4070 %out.bytevec = bitcast <24 x i16> %zextd.vec to <48 x i8>
4071 %out.bytevec.padded = shufflevector <48 x i8> %out.bytevec, <48 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
4072 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
4073 %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
4074 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
4075 ret void
4076 }
4078 define void @vec384_v24i16_to_v4i96_factor6(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
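; Same pattern with factor 6: words 0-3 of the input land in output elements 0, 6, 12 and 18 (i96-sized zero-extended lanes); every other word is zero.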
4079 ; SSE2-LABEL: vec384_v24i16_to_v4i96_factor6:
4080 ; SSE2: # %bb.0:
4081 ; SSE2-NEXT: movdqa (%rdi), %xmm0
4082 ; SSE2-NEXT: paddb (%rsi), %xmm0
4083 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,0,1]
4084 ; SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
4085 ; SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,xmm1[0,1,2,3,4,5,6,7,8,9,10,11]
4086 ; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm0[0,1,1,3,4,5,6,7]
4087 ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,2,1]
4088 ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
4089 ; SSE2-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5]
4090 ; SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
4091 ; SSE2-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
4092 ; SSE2-NEXT: paddb 16(%rdx), %xmm0
4093 ; SSE2-NEXT: paddb (%rdx), %xmm2
4094 ; SSE2-NEXT: paddb 32(%rdx), %xmm1
4095 ; SSE2-NEXT: movdqa %xmm1, 32(%rcx)
4096 ; SSE2-NEXT: movdqa %xmm2, (%rcx)
4097 ; SSE2-NEXT: movdqa %xmm0, 16(%rcx)
4098 ; SSE2-NEXT: retq
4100 ; SSE42-LABEL: vec384_v24i16_to_v4i96_factor6:
4101 ; SSE42: # %bb.0:
4102 ; SSE42-NEXT: movdqa (%rdi), %xmm0
4103 ; SSE42-NEXT: paddb (%rsi), %xmm0
4104 ; SSE42-NEXT: pxor %xmm1, %xmm1
4105 ; SSE42-NEXT: movdqa %xmm0, %xmm2
4106 ; SSE42-NEXT: pshufb {{.*#+}} xmm2 = xmm2[0,1],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm2[2,3],zero,zero
4107 ; SSE42-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero
4108 ; SSE42-NEXT: psrld $16, %xmm0
4109 ; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3,4,5,6,7]
4110 ; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm1[0,1,2,3],xmm3[4],xmm1[5,6,7]
4111 ; SSE42-NEXT: paddb 16(%rdx), %xmm3
4112 ; SSE42-NEXT: paddb 32(%rdx), %xmm0
4113 ; SSE42-NEXT: paddb (%rdx), %xmm2
4114 ; SSE42-NEXT: movdqa %xmm2, (%rcx)
4115 ; SSE42-NEXT: movdqa %xmm0, 32(%rcx)
4116 ; SSE42-NEXT: movdqa %xmm3, 16(%rcx)
4117 ; SSE42-NEXT: retq
4119 ; AVX-LABEL: vec384_v24i16_to_v4i96_factor6:
4120 ; AVX: # %bb.0:
4121 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
4122 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
4123 ; AVX-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,1],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[2,3],zero,zero
4124 ; AVX-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
4125 ; AVX-NEXT: vpxor %xmm3, %xmm3, %xmm3
4126 ; AVX-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4],xmm3[5,6,7]
4127 ; AVX-NEXT: vpsrld $16, %xmm0, %xmm0
4128 ; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm3[0,1],xmm0[2],xmm3[3,4,5,6,7]
4129 ; AVX-NEXT: vpaddb 32(%rdx), %xmm0, %xmm0
4130 ; AVX-NEXT: vpaddb 16(%rdx), %xmm2, %xmm2
4131 ; AVX-NEXT: vpaddb (%rdx), %xmm1, %xmm1
4132 ; AVX-NEXT: vmovdqa %xmm1, (%rcx)
4133 ; AVX-NEXT: vmovdqa %xmm2, 16(%rcx)
4134 ; AVX-NEXT: vmovdqa %xmm0, 32(%rcx)
4135 ; AVX-NEXT: retq
4137 ; AVX2-SLOW-LABEL: vec384_v24i16_to_v4i96_factor6:
4138 ; AVX2-SLOW: # %bb.0:
4139 ; AVX2-SLOW-NEXT: vmovdqa (%rdi), %xmm0
4140 ; AVX2-SLOW-NEXT: vpaddb (%rsi), %xmm0, %xmm0
4141 ; AVX2-SLOW-NEXT: vpsrld $16, %xmm0, %xmm1
4142 ; AVX2-SLOW-NEXT: vpxor %xmm2, %xmm2, %xmm2
4143 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2],xmm2[3,4,5,6,7]
4144 ; AVX2-SLOW-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
4145 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,2,1]
4146 ; AVX2-SLOW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
4147 ; AVX2-SLOW-NEXT: vpaddb (%rdx), %ymm0, %ymm0
4148 ; AVX2-SLOW-NEXT: vpaddb 32(%rdx), %ymm1, %ymm1
4149 ; AVX2-SLOW-NEXT: vmovdqa %ymm1, 32(%rcx)
4150 ; AVX2-SLOW-NEXT: vmovdqa %ymm0, (%rcx)
4151 ; AVX2-SLOW-NEXT: vzeroupper
4152 ; AVX2-SLOW-NEXT: retq
4154 ; AVX2-FAST-PERLANE-LABEL: vec384_v24i16_to_v4i96_factor6:
4155 ; AVX2-FAST-PERLANE: # %bb.0:
4156 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %xmm0
4157 ; AVX2-FAST-PERLANE-NEXT: vpaddb (%rsi), %xmm0, %xmm0
4158 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,zero,zero,xmm0[6,7],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
4159 ; AVX2-FAST-PERLANE-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
4160 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,2,1]
4161 ; AVX2-FAST-PERLANE-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
4162 ; AVX2-FAST-PERLANE-NEXT: vpaddb (%rdx), %ymm0, %ymm0
4163 ; AVX2-FAST-PERLANE-NEXT: vpaddb 32(%rdx), %ymm1, %ymm1
4164 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm1, 32(%rcx)
4165 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm0, (%rcx)
4166 ; AVX2-FAST-PERLANE-NEXT: vzeroupper
4167 ; AVX2-FAST-PERLANE-NEXT: retq
4169 ; AVX2-FAST-LABEL: vec384_v24i16_to_v4i96_factor6:
4170 ; AVX2-FAST: # %bb.0:
4171 ; AVX2-FAST-NEXT: vmovdqa (%rdi), %xmm0
4172 ; AVX2-FAST-NEXT: vpaddb (%rsi), %xmm0, %xmm0
4173 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,zero,zero,xmm0[6,7],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
4174 ; AVX2-FAST-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
4175 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,2,1]
4176 ; AVX2-FAST-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
4177 ; AVX2-FAST-NEXT: vpaddb (%rdx), %ymm0, %ymm0
4178 ; AVX2-FAST-NEXT: vpaddb 32(%rdx), %ymm1, %ymm1
4179 ; AVX2-FAST-NEXT: vmovdqa %ymm1, 32(%rcx)
4180 ; AVX2-FAST-NEXT: vmovdqa %ymm0, (%rcx)
4181 ; AVX2-FAST-NEXT: vzeroupper
4182 ; AVX2-FAST-NEXT: retq
4184 ; AVX512F-SLOW-LABEL: vec384_v24i16_to_v4i96_factor6:
4185 ; AVX512F-SLOW: # %bb.0:
4186 ; AVX512F-SLOW-NEXT: vmovdqa (%rdi), %xmm0
4187 ; AVX512F-SLOW-NEXT: vpaddb (%rsi), %xmm0, %xmm0
4188 ; AVX512F-SLOW-NEXT: vpsrld $16, %xmm0, %xmm1
4189 ; AVX512F-SLOW-NEXT: vpxor %xmm2, %xmm2, %xmm2
4190 ; AVX512F-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2],xmm2[3,4,5,6,7]
4191 ; AVX512F-SLOW-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
4192 ; AVX512F-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,2,1]
4193 ; AVX512F-SLOW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
4194 ; AVX512F-SLOW-NEXT: vpaddb (%rdx), %ymm0, %ymm0
4195 ; AVX512F-SLOW-NEXT: vpaddb 32(%rdx), %ymm1, %ymm1
4196 ; AVX512F-SLOW-NEXT: vmovdqa %ymm1, 32(%rcx)
4197 ; AVX512F-SLOW-NEXT: vmovdqa %ymm0, (%rcx)
4198 ; AVX512F-SLOW-NEXT: vzeroupper
4199 ; AVX512F-SLOW-NEXT: retq
4201 ; AVX512F-FAST-LABEL: vec384_v24i16_to_v4i96_factor6:
4202 ; AVX512F-FAST: # %bb.0:
4203 ; AVX512F-FAST-NEXT: vmovdqa (%rdi), %xmm0
4204 ; AVX512F-FAST-NEXT: vpaddb (%rsi), %xmm0, %xmm0
4205 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,zero,zero,xmm0[6,7],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
4206 ; AVX512F-FAST-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
4207 ; AVX512F-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,2,1]
4208 ; AVX512F-FAST-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
4209 ; AVX512F-FAST-NEXT: vpaddb (%rdx), %ymm0, %ymm0
4210 ; AVX512F-FAST-NEXT: vpaddb 32(%rdx), %ymm1, %ymm1
4211 ; AVX512F-FAST-NEXT: vmovdqa %ymm1, 32(%rcx)
4212 ; AVX512F-FAST-NEXT: vmovdqa %ymm0, (%rcx)
4213 ; AVX512F-FAST-NEXT: vzeroupper
4214 ; AVX512F-FAST-NEXT: retq
4216 ; AVX512BW-SLOW-LABEL: vec384_v24i16_to_v4i96_factor6:
4217 ; AVX512BW-SLOW: # %bb.0:
4218 ; AVX512BW-SLOW-NEXT: vmovdqa (%rdi), %ymm0
4219 ; AVX512BW-SLOW-NEXT: vpaddb (%rsi), %ymm0, %ymm0
4220 ; AVX512BW-SLOW-NEXT: vmovdqa {{.*#+}} ymm1 = [16,1,2,3,4,5,17,7,8,9,10,11,18,13,14,15]
4221 ; AVX512BW-SLOW-NEXT: vpxor %xmm2, %xmm2, %xmm2
4222 ; AVX512BW-SLOW-NEXT: vpermt2w %ymm0, %ymm1, %ymm2
4223 ; AVX512BW-SLOW-NEXT: vpsrld $16, %xmm0, %xmm0
4224 ; AVX512BW-SLOW-NEXT: vpxor %xmm1, %xmm1, %xmm1
4225 ; AVX512BW-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3,4,5,6,7]
4226 ; AVX512BW-SLOW-NEXT: vinserti32x4 $2, %xmm0, %zmm2, %zmm0
4227 ; AVX512BW-SLOW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
4228 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm0, (%rcx)
4229 ; AVX512BW-SLOW-NEXT: vzeroupper
4230 ; AVX512BW-SLOW-NEXT: retq
4232 ; AVX512BW-FAST-LABEL: vec384_v24i16_to_v4i96_factor6:
4233 ; AVX512BW-FAST: # %bb.0:
4234 ; AVX512BW-FAST-NEXT: vmovdqa (%rdi), %ymm0
4235 ; AVX512BW-FAST-NEXT: vpaddb (%rsi), %ymm0, %ymm0
4236 ; AVX512BW-FAST-NEXT: vmovdqa {{.*#+}} ymm1 = [16,1,2,3,4,5,17,7,8,9,10,11,18,13,14,15]
4237 ; AVX512BW-FAST-NEXT: vpxor %xmm2, %xmm2, %xmm2
4238 ; AVX512BW-FAST-NEXT: vpermt2w %ymm0, %ymm1, %ymm2
4239 ; AVX512BW-FAST-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[6,7],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
4240 ; AVX512BW-FAST-NEXT: vinserti32x4 $2, %xmm0, %zmm2, %zmm0
4241 ; AVX512BW-FAST-NEXT: vpaddb (%rdx), %zmm0, %zmm0
4242 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm0, (%rcx)
4243 ; AVX512BW-FAST-NEXT: vzeroupper
4244 ; AVX512BW-FAST-NEXT: retq
4245 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
4246 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
4247 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
4248 %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <48 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47>
4249 %in.vec.cast = bitcast <48 x i8> %in.vec.trunc to <24 x i16>
4250 %zextd.vec = shufflevector <24 x i16> %in.vec.cast, <24 x i16> zeroinitializer, <24 x i32> <i32 0, i32 25, i32 26, i32 27, i32 28, i32 29, i32 1, i32 31, i32 32, i32 33, i32 34, i32 35, i32 2, i32 37, i32 38, i32 39, i32 40, i32 41, i32 3, i32 43, i32 44, i32 45, i32 46, i32 47>
4251 %out.bytevec = bitcast <24 x i16> %zextd.vec to <48 x i8>
4252 %out.bytevec.padded = shufflevector <48 x i8> %out.bytevec, <48 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
4253 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
4254 %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
4255 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
4256 ret void
4257 }
4259 define void @vec384_v24i16_to_v3i128_factor8(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
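; Factor 8: words 0-2 of the input land in output elements 0, 8 and 16, i.e. one nonzero word per 128-bit lane of the 48-byte result.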
4260 ; SSE2-LABEL: vec384_v24i16_to_v3i128_factor8:
4261 ; SSE2: # %bb.0:
4262 ; SSE2-NEXT: movdqa (%rdi), %xmm0
4263 ; SSE2-NEXT: paddb (%rsi), %xmm0
4264 ; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [65535,0,0,0]
4265 ; SSE2-NEXT: pand %xmm0, %xmm1
4266 ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,0,0,0]
4267 ; SSE2-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5]
4268 ; SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
4269 ; SSE2-NEXT: psrldq {{.*#+}} xmm2 = xmm2[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
4270 ; SSE2-NEXT: paddb 16(%rdx), %xmm2
4271 ; SSE2-NEXT: paddb 32(%rdx), %xmm0
4272 ; SSE2-NEXT: paddb (%rdx), %xmm1
4273 ; SSE2-NEXT: movdqa %xmm1, (%rcx)
4274 ; SSE2-NEXT: movdqa %xmm0, 32(%rcx)
4275 ; SSE2-NEXT: movdqa %xmm2, 16(%rcx)
4276 ; SSE2-NEXT: retq
4278 ; SSE42-LABEL: vec384_v24i16_to_v3i128_factor8:
4279 ; SSE42: # %bb.0:
4280 ; SSE42-NEXT: movdqa (%rdi), %xmm0
4281 ; SSE42-NEXT: paddb (%rsi), %xmm0
4282 ; SSE42-NEXT: pxor %xmm1, %xmm1
4283 ; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5,6,7]
4284 ; SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,0,0,0]
4285 ; SSE42-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5]
4286 ; SSE42-NEXT: psrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
4287 ; SSE42-NEXT: psrldq {{.*#+}} xmm2 = xmm2[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
4288 ; SSE42-NEXT: paddb 16(%rdx), %xmm2
4289 ; SSE42-NEXT: paddb 32(%rdx), %xmm0
4290 ; SSE42-NEXT: paddb (%rdx), %xmm1
4291 ; SSE42-NEXT: movdqa %xmm1, (%rcx)
4292 ; SSE42-NEXT: movdqa %xmm0, 32(%rcx)
4293 ; SSE42-NEXT: movdqa %xmm2, 16(%rcx)
4294 ; SSE42-NEXT: retq
4296 ; AVX-LABEL: vec384_v24i16_to_v3i128_factor8:
4297 ; AVX: # %bb.0:
4298 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
4299 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
4300 ; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
4301 ; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5,6,7]
4302 ; AVX-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[0,0,0,0]
4303 ; AVX-NEXT: vpsrldq {{.*#+}} xmm2 = xmm2[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
4304 ; AVX-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5]
4305 ; AVX-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
4306 ; AVX-NEXT: vpaddb 32(%rdx), %xmm0, %xmm0
4307 ; AVX-NEXT: vpaddb 16(%rdx), %xmm2, %xmm2
4308 ; AVX-NEXT: vpaddb (%rdx), %xmm1, %xmm1
4309 ; AVX-NEXT: vmovdqa %xmm1, (%rcx)
4310 ; AVX-NEXT: vmovdqa %xmm2, 16(%rcx)
4311 ; AVX-NEXT: vmovdqa %xmm0, 32(%rcx)
4312 ; AVX-NEXT: retq
4314 ; AVX2-SLOW-LABEL: vec384_v24i16_to_v3i128_factor8:
4315 ; AVX2-SLOW: # %bb.0:
4316 ; AVX2-SLOW-NEXT: vpxor %xmm0, %xmm0, %xmm0
4317 ; AVX2-SLOW-NEXT: vmovdqa (%rdi), %xmm1
4318 ; AVX2-SLOW-NEXT: vpaddb (%rsi), %xmm1, %xmm1
4319 ; AVX2-SLOW-NEXT: vpslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5]
4320 ; AVX2-SLOW-NEXT: vpsrldq {{.*#+}} xmm2 = xmm2[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
4321 ; AVX2-SLOW-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
4322 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,1,3]
4323 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7],ymm1[8],ymm0[9,10,11,12,13,14,15]
4324 ; AVX2-SLOW-NEXT: vpaddb (%rdx), %ymm0, %ymm0
4325 ; AVX2-SLOW-NEXT: vpaddb 32(%rdx), %ymm2, %ymm1
4326 ; AVX2-SLOW-NEXT: vmovdqa %ymm1, 32(%rcx)
4327 ; AVX2-SLOW-NEXT: vmovdqa %ymm0, (%rcx)
4328 ; AVX2-SLOW-NEXT: vzeroupper
4329 ; AVX2-SLOW-NEXT: retq
4331 ; AVX2-FAST-PERLANE-LABEL: vec384_v24i16_to_v3i128_factor8:
4332 ; AVX2-FAST-PERLANE: # %bb.0:
4333 ; AVX2-FAST-PERLANE-NEXT: vpxor %xmm0, %xmm0, %xmm0
4334 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %xmm1
4335 ; AVX2-FAST-PERLANE-NEXT: vpaddb (%rsi), %xmm1, %xmm1
4336 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm2 = xmm1[4,5],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
4337 ; AVX2-FAST-PERLANE-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
4338 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,1,3]
4339 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7],ymm1[8],ymm0[9,10,11,12,13,14,15]
4340 ; AVX2-FAST-PERLANE-NEXT: vpaddb (%rdx), %ymm0, %ymm0
4341 ; AVX2-FAST-PERLANE-NEXT: vpaddb 32(%rdx), %ymm2, %ymm1
4342 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm1, 32(%rcx)
4343 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm0, (%rcx)
4344 ; AVX2-FAST-PERLANE-NEXT: vzeroupper
4345 ; AVX2-FAST-PERLANE-NEXT: retq
4347 ; AVX2-FAST-LABEL: vec384_v24i16_to_v3i128_factor8:
4348 ; AVX2-FAST: # %bb.0:
4349 ; AVX2-FAST-NEXT: vpxor %xmm0, %xmm0, %xmm0
4350 ; AVX2-FAST-NEXT: vmovdqa (%rdi), %xmm1
4351 ; AVX2-FAST-NEXT: vpaddb (%rsi), %xmm1, %xmm1
4352 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm2 = xmm1[4,5],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
4353 ; AVX2-FAST-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
4354 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,1,3]
4355 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7],ymm1[8],ymm0[9,10,11,12,13,14,15]
4356 ; AVX2-FAST-NEXT: vpaddb (%rdx), %ymm0, %ymm0
4357 ; AVX2-FAST-NEXT: vpaddb 32(%rdx), %ymm2, %ymm1
4358 ; AVX2-FAST-NEXT: vmovdqa %ymm1, 32(%rcx)
4359 ; AVX2-FAST-NEXT: vmovdqa %ymm0, (%rcx)
4360 ; AVX2-FAST-NEXT: vzeroupper
4361 ; AVX2-FAST-NEXT: retq
4363 ; AVX512F-SLOW-LABEL: vec384_v24i16_to_v3i128_factor8:
4364 ; AVX512F-SLOW: # %bb.0:
4365 ; AVX512F-SLOW-NEXT: vmovdqa (%rdi), %xmm0
4366 ; AVX512F-SLOW-NEXT: vpaddb (%rsi), %xmm0, %xmm0
4367 ; AVX512F-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
4368 ; AVX512F-SLOW-NEXT: vpxor %xmm2, %xmm2, %xmm2
4369 ; AVX512F-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3,4,5,6,7]
4370 ; AVX512F-SLOW-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
4371 ; AVX512F-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,1,3]
4372 ; AVX512F-SLOW-NEXT: vpxor %xmm2, %xmm2, %xmm2
4373 ; AVX512F-SLOW-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1,2,3,4,5,6,7],ymm0[8],ymm2[9,10,11,12,13,14,15]
4374 ; AVX512F-SLOW-NEXT: vpaddb (%rdx), %ymm0, %ymm0
4375 ; AVX512F-SLOW-NEXT: vpaddb 32(%rdx), %ymm1, %ymm1
4376 ; AVX512F-SLOW-NEXT: vmovdqa %ymm1, 32(%rcx)
4377 ; AVX512F-SLOW-NEXT: vmovdqa %ymm0, (%rcx)
4378 ; AVX512F-SLOW-NEXT: vzeroupper
4379 ; AVX512F-SLOW-NEXT: retq
4381 ; AVX512F-FAST-LABEL: vec384_v24i16_to_v3i128_factor8:
4382 ; AVX512F-FAST: # %bb.0:
4383 ; AVX512F-FAST-NEXT: vmovdqa (%rdi), %xmm0
4384 ; AVX512F-FAST-NEXT: vpaddb (%rsi), %xmm0, %xmm0
4385 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[4,5],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
4386 ; AVX512F-FAST-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
4387 ; AVX512F-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,1,3]
4388 ; AVX512F-FAST-NEXT: vpxor %xmm2, %xmm2, %xmm2
4389 ; AVX512F-FAST-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1,2,3,4,5,6,7],ymm0[8],ymm2[9,10,11,12,13,14,15]
4390 ; AVX512F-FAST-NEXT: vpaddb (%rdx), %ymm0, %ymm0
4391 ; AVX512F-FAST-NEXT: vpaddb 32(%rdx), %ymm1, %ymm1
4392 ; AVX512F-FAST-NEXT: vmovdqa %ymm1, 32(%rcx)
4393 ; AVX512F-FAST-NEXT: vmovdqa %ymm0, (%rcx)
4394 ; AVX512F-FAST-NEXT: vzeroupper
4395 ; AVX512F-FAST-NEXT: retq
4397 ; AVX512BW-SLOW-LABEL: vec384_v24i16_to_v3i128_factor8:
4398 ; AVX512BW-SLOW: # %bb.0:
4399 ; AVX512BW-SLOW-NEXT: vmovdqa (%rdi), %ymm0
4400 ; AVX512BW-SLOW-NEXT: vpaddb (%rsi), %ymm0, %ymm0
4401 ; AVX512BW-SLOW-NEXT: vmovdqa {{.*#+}} ymm1 = [16,1,2,3,4,5,6,7,17,9,10,11,12,13,14,15]
4402 ; AVX512BW-SLOW-NEXT: vpxor %xmm2, %xmm2, %xmm2
4403 ; AVX512BW-SLOW-NEXT: vpermt2w %ymm0, %ymm1, %ymm2
4404 ; AVX512BW-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
4405 ; AVX512BW-SLOW-NEXT: vpxor %xmm1, %xmm1, %xmm1
4406 ; AVX512BW-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3,4,5,6,7]
4407 ; AVX512BW-SLOW-NEXT: vinserti32x4 $2, %xmm0, %zmm2, %zmm0
4408 ; AVX512BW-SLOW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
4409 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm0, (%rcx)
4410 ; AVX512BW-SLOW-NEXT: vzeroupper
4411 ; AVX512BW-SLOW-NEXT: retq
4413 ; AVX512BW-FAST-LABEL: vec384_v24i16_to_v3i128_factor8:
4414 ; AVX512BW-FAST: # %bb.0:
4415 ; AVX512BW-FAST-NEXT: vmovdqa (%rdi), %ymm0
4416 ; AVX512BW-FAST-NEXT: vpaddb (%rsi), %ymm0, %ymm0
4417 ; AVX512BW-FAST-NEXT: vmovdqa {{.*#+}} ymm1 = [16,1,2,3,4,5,6,7,17,9,10,11,12,13,14,15]
4418 ; AVX512BW-FAST-NEXT: vpxor %xmm2, %xmm2, %xmm2
4419 ; AVX512BW-FAST-NEXT: vpermt2w %ymm0, %ymm1, %ymm2
4420 ; AVX512BW-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4,5],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
4421 ; AVX512BW-FAST-NEXT: vinserti32x4 $2, %xmm0, %zmm2, %zmm0
4422 ; AVX512BW-FAST-NEXT: vpaddb (%rdx), %zmm0, %zmm0
4423 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm0, (%rcx)
4424 ; AVX512BW-FAST-NEXT: vzeroupper
4425 ; AVX512BW-FAST-NEXT: retq
4426 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
4427 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
4428 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
4429 %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <48 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47>
4430 %in.vec.cast = bitcast <48 x i8> %in.vec.trunc to <24 x i16>
4431 %zextd.vec = shufflevector <24 x i16> %in.vec.cast, <24 x i16> zeroinitializer, <24 x i32> <i32 0, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 1, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 2, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47>
4432 %out.bytevec = bitcast <24 x i16> %zextd.vec to <48 x i8>
4433 %out.bytevec.padded = shufflevector <48 x i8> %out.bytevec, <48 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
4434 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
4435 %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
4436 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
4437 ret void
4438 }
4440 define void @vec384_v24i16_to_v2i192_factor12(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
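; Factor 12: words 0 and 1 of the input land in output elements 0 and 12 (one nonzero word per 192-bit half); everything else is zero.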
4441 ; SSE2-LABEL: vec384_v24i16_to_v2i192_factor12:
4442 ; SSE2: # %bb.0:
4443 ; SSE2-NEXT: movdqa (%rdi), %xmm0
4444 ; SSE2-NEXT: paddb (%rsi), %xmm0
4445 ; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [65535,0,0,0]
4446 ; SSE2-NEXT: pand %xmm0, %xmm1
4447 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
4448 ; SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
4449 ; SSE2-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
4450 ; SSE2-NEXT: movaps 32(%rdx), %xmm2
4451 ; SSE2-NEXT: paddb 16(%rdx), %xmm0
4452 ; SSE2-NEXT: paddb (%rdx), %xmm1
4453 ; SSE2-NEXT: movaps %xmm2, 32(%rcx)
4454 ; SSE2-NEXT: movdqa %xmm1, (%rcx)
4455 ; SSE2-NEXT: movdqa %xmm0, 16(%rcx)
4456 ; SSE2-NEXT: retq
4458 ; SSE42-LABEL: vec384_v24i16_to_v2i192_factor12:
4459 ; SSE42: # %bb.0:
4460 ; SSE42-NEXT: movdqa (%rdi), %xmm0
4461 ; SSE42-NEXT: paddb (%rsi), %xmm0
4462 ; SSE42-NEXT: pxor %xmm1, %xmm1
4463 ; SSE42-NEXT: pxor %xmm2, %xmm2
4464 ; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3,4,5,6,7]
4465 ; SSE42-NEXT: pmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
4466 ; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4],xmm1[5,6,7]
4467 ; SSE42-NEXT: movaps 32(%rdx), %xmm1
4468 ; SSE42-NEXT: paddb 16(%rdx), %xmm0
4469 ; SSE42-NEXT: paddb (%rdx), %xmm2
4470 ; SSE42-NEXT: movaps %xmm1, 32(%rcx)
4471 ; SSE42-NEXT: movdqa %xmm2, (%rcx)
4472 ; SSE42-NEXT: movdqa %xmm0, 16(%rcx)
4473 ; SSE42-NEXT: retq
4475 ; AVX-LABEL: vec384_v24i16_to_v2i192_factor12:
4476 ; AVX: # %bb.0:
4477 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
4478 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
4479 ; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
4480 ; AVX-NEXT: vpblendw {{.*#+}} xmm2 = xmm0[0],xmm1[1,2,3,4,5,6,7]
4481 ; AVX-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
4482 ; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4],xmm1[5,6,7]
4483 ; AVX-NEXT: vmovaps 32(%rdx), %ymm1
4484 ; AVX-NEXT: vpaddb 16(%rdx), %xmm0, %xmm0
4485 ; AVX-NEXT: vpaddb (%rdx), %xmm2, %xmm2
4486 ; AVX-NEXT: vmovaps %ymm1, 32(%rcx)
4487 ; AVX-NEXT: vmovdqa %xmm2, (%rcx)
4488 ; AVX-NEXT: vmovdqa %xmm0, 16(%rcx)
4489 ; AVX-NEXT: vzeroupper
4490 ; AVX-NEXT: retq
4492 ; AVX2-LABEL: vec384_v24i16_to_v2i192_factor12:
4493 ; AVX2: # %bb.0:
4494 ; AVX2-NEXT: vmovdqa (%rdi), %xmm0
4495 ; AVX2-NEXT: vpaddb (%rsi), %xmm0, %xmm0
4496 ; AVX2-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
4497 ; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,2,1]
4498 ; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
4499 ; AVX2-NEXT: vmovaps 32(%rdx), %ymm1
4500 ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0
4501 ; AVX2-NEXT: vmovaps %ymm1, 32(%rcx)
4502 ; AVX2-NEXT: vmovdqa %ymm0, (%rcx)
4503 ; AVX2-NEXT: vzeroupper
4504 ; AVX2-NEXT: retq
4506 ; AVX512F-LABEL: vec384_v24i16_to_v2i192_factor12:
4507 ; AVX512F: # %bb.0:
4508 ; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
4509 ; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0
4510 ; AVX512F-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
4511 ; AVX512F-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,2,1]
4512 ; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
4513 ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
4514 ; AVX512F-NEXT: vmovaps 32(%rdx), %ymm1
4515 ; AVX512F-NEXT: vmovaps %ymm1, 32(%rcx)
4516 ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
4517 ; AVX512F-NEXT: vzeroupper
4518 ; AVX512F-NEXT: retq
4520 ; AVX512BW-LABEL: vec384_v24i16_to_v2i192_factor12:
4521 ; AVX512BW: # %bb.0:
4522 ; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
4523 ; AVX512BW-NEXT: vpaddb (%rsi), %ymm0, %ymm0
4524 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [16,1,2,3,4,5,6,7,8,9,10,11,17,13,14,15]
4525 ; AVX512BW-NEXT: vpxor %xmm2, %xmm2, %xmm2
4526 ; AVX512BW-NEXT: vpermt2w %ymm0, %ymm1, %ymm2
4527 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm2, %zmm0
4528 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
4529 ; AVX512BW-NEXT: vzeroupper
4530 ; AVX512BW-NEXT: retq
4531 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
4532 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
4533 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
4534 %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <48 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47>
4535 %in.vec.cast = bitcast <48 x i8> %in.vec.trunc to <24 x i16>
4536 %zextd.vec = shufflevector <24 x i16> %in.vec.cast, <24 x i16> zeroinitializer, <24 x i32> <i32 0, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 1, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47>
4537 %out.bytevec = bitcast <24 x i16> %zextd.vec to <48 x i8>
4538 %out.bytevec.padded = shufflevector <48 x i8> %out.bytevec, <48 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
4539 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
4540 %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
4541 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
4542 ret void
4543 }
4545 define void @vec384_v24i16_to_v1i384_factor24(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
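; Factor 24: only word 0 of the input survives; the remaining 46 bytes of the 48-byte result are zero before the output bias is added.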
4546 ; SSE2-LABEL: vec384_v24i16_to_v1i384_factor24:
4547 ; SSE2: # %bb.0:
4548 ; SSE2-NEXT: movdqa (%rdi), %xmm0
4549 ; SSE2-NEXT: paddb (%rsi), %xmm0
4550 ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
4551 ; SSE2-NEXT: movaps 16(%rdx), %xmm1
4552 ; SSE2-NEXT: movaps 32(%rdx), %xmm2
4553 ; SSE2-NEXT: paddb (%rdx), %xmm0
4554 ; SSE2-NEXT: movaps %xmm1, 16(%rcx)
4555 ; SSE2-NEXT: movaps %xmm2, 32(%rcx)
4556 ; SSE2-NEXT: movdqa %xmm0, (%rcx)
4557 ; SSE2-NEXT: retq
4559 ; SSE42-LABEL: vec384_v24i16_to_v1i384_factor24:
4560 ; SSE42: # %bb.0:
4561 ; SSE42-NEXT: movdqa (%rdi), %xmm0
4562 ; SSE42-NEXT: paddb (%rsi), %xmm0
4563 ; SSE42-NEXT: pxor %xmm1, %xmm1
4564 ; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5,6,7]
4565 ; SSE42-NEXT: movaps 16(%rdx), %xmm0
4566 ; SSE42-NEXT: movaps 32(%rdx), %xmm2
4567 ; SSE42-NEXT: paddb (%rdx), %xmm1
4568 ; SSE42-NEXT: movaps %xmm0, 16(%rcx)
4569 ; SSE42-NEXT: movaps %xmm2, 32(%rcx)
4570 ; SSE42-NEXT: movdqa %xmm1, (%rcx)
4571 ; SSE42-NEXT: retq
4573 ; AVX-LABEL: vec384_v24i16_to_v1i384_factor24:
4574 ; AVX: # %bb.0:
4575 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
4576 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
4577 ; AVX-NEXT: vmovaps 32(%rdx), %ymm1
4578 ; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
4579 ; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3,4,5,6,7]
4580 ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0
4581 ; AVX-NEXT: vmovaps 16(%rdx), %xmm2
4582 ; AVX-NEXT: vmovaps %xmm2, 16(%rcx)
4583 ; AVX-NEXT: vmovaps %ymm1, 32(%rcx)
4584 ; AVX-NEXT: vmovdqa %xmm0, (%rcx)
4585 ; AVX-NEXT: vzeroupper
4586 ; AVX-NEXT: retq
4588 ; AVX2-LABEL: vec384_v24i16_to_v1i384_factor24:
4589 ; AVX2: # %bb.0:
4590 ; AVX2-NEXT: vmovdqa (%rdi), %ymm0
4591 ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0
4592 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [65535,0,0,0]
4593 ; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
4594 ; AVX2-NEXT: vmovaps 32(%rdx), %ymm1
4595 ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0
4596 ; AVX2-NEXT: vmovaps %ymm1, 32(%rcx)
4597 ; AVX2-NEXT: vmovdqa %ymm0, (%rcx)
4598 ; AVX2-NEXT: vzeroupper
4599 ; AVX2-NEXT: retq
4601 ; AVX512F-LABEL: vec384_v24i16_to_v1i384_factor24:
4602 ; AVX512F: # %bb.0:
4603 ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
4604 ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0
4605 ; AVX512F-NEXT: vmovdqa {{.*#+}} xmm1 = [65535,0,0,0]
4606 ; AVX512F-NEXT: vpand %ymm1, %ymm0, %ymm0
4607 ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
4608 ; AVX512F-NEXT: vmovaps 32(%rdx), %ymm1
4609 ; AVX512F-NEXT: vmovaps %ymm1, 32(%rcx)
4610 ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
4611 ; AVX512F-NEXT: vzeroupper
4612 ; AVX512F-NEXT: retq
4614 ; AVX512BW-LABEL: vec384_v24i16_to_v1i384_factor24:
4615 ; AVX512BW: # %bb.0:
4616 ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
4617 ; AVX512BW-NEXT: vpaddb (%rsi), %zmm0, %zmm0
4618 ; AVX512BW-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
4619 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
4620 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
4621 ; AVX512BW-NEXT: vzeroupper
4622 ; AVX512BW-NEXT: retq
4623 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
4624 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
4625 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
4626 %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <48 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47>
4627 %in.vec.cast = bitcast <48 x i8> %in.vec.trunc to <24 x i16>
4628 %zextd.vec = shufflevector <24 x i16> %in.vec.cast, <24 x i16> zeroinitializer, <24 x i32> <i32 0, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47>
4629 %out.bytevec = bitcast <24 x i16> %zextd.vec to <48 x i8>
4630 %out.bytevec.padded = shufflevector <48 x i8> %out.bytevec, <48 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
4631 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
4632 %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
4633 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
4634 ret void
4635 }
4637 define void @vec384_v12i32_to_v6i64_factor2(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
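; Dword variant: the low 6 dwords of the 12 x i32 input are zero-extended to i64, so dword i lands in output element 2*i and the odd dwords are zero.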
4638 ; SSE2-LABEL: vec384_v12i32_to_v6i64_factor2:
4639 ; SSE2: # %bb.0:
4640 ; SSE2-NEXT: movdqa (%rdi), %xmm0
4641 ; SSE2-NEXT: movdqa 16(%rdi), %xmm1
4642 ; SSE2-NEXT: paddb (%rsi), %xmm0
4643 ; SSE2-NEXT: paddb 16(%rsi), %xmm1
4644 ; SSE2-NEXT: pxor %xmm2, %xmm2
4645 ; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
4646 ; SSE2-NEXT: movdqa %xmm0, %xmm3
4647 ; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
4648 ; SSE2-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
4649 ; SSE2-NEXT: paddb 16(%rdx), %xmm0
4650 ; SSE2-NEXT: paddb (%rdx), %xmm3
4651 ; SSE2-NEXT: paddb 32(%rdx), %xmm1
4652 ; SSE2-NEXT: movdqa %xmm1, 32(%rcx)
4653 ; SSE2-NEXT: movdqa %xmm3, (%rcx)
4654 ; SSE2-NEXT: movdqa %xmm0, 16(%rcx)
4655 ; SSE2-NEXT: retq
4657 ; SSE42-LABEL: vec384_v12i32_to_v6i64_factor2:
4658 ; SSE42: # %bb.0:
4659 ; SSE42-NEXT: movdqa (%rdi), %xmm0
4660 ; SSE42-NEXT: movdqa 16(%rdi), %xmm1
4661 ; SSE42-NEXT: paddb (%rsi), %xmm0
4662 ; SSE42-NEXT: paddb 16(%rsi), %xmm1
4663 ; SSE42-NEXT: pxor %xmm2, %xmm2
4664 ; SSE42-NEXT: pmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
4665 ; SSE42-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero
4666 ; SSE42-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
4667 ; SSE42-NEXT: paddb 16(%rdx), %xmm0
4668 ; SSE42-NEXT: paddb (%rdx), %xmm3
4669 ; SSE42-NEXT: paddb 32(%rdx), %xmm1
4670 ; SSE42-NEXT: movdqa %xmm1, 32(%rcx)
4671 ; SSE42-NEXT: movdqa %xmm3, (%rcx)
4672 ; SSE42-NEXT: movdqa %xmm0, 16(%rcx)
4673 ; SSE42-NEXT: retq
4675 ; AVX-LABEL: vec384_v12i32_to_v6i64_factor2:
4676 ; AVX: # %bb.0:
4677 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
4678 ; AVX-NEXT: vmovdqa 16(%rdi), %xmm1
4679 ; AVX-NEXT: vpaddb 16(%rsi), %xmm1, %xmm1
4680 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
4681 ; AVX-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
4682 ; AVX-NEXT: vpxor %xmm3, %xmm3, %xmm3
4683 ; AVX-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
4684 ; AVX-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
4685 ; AVX-NEXT: vpaddb 32(%rdx), %xmm1, %xmm1
4686 ; AVX-NEXT: vpaddb 16(%rdx), %xmm0, %xmm0
4687 ; AVX-NEXT: vpaddb (%rdx), %xmm2, %xmm2
4688 ; AVX-NEXT: vmovdqa %xmm2, (%rcx)
4689 ; AVX-NEXT: vmovdqa %xmm0, 16(%rcx)
4690 ; AVX-NEXT: vmovdqa %xmm1, 32(%rcx)
4691 ; AVX-NEXT: retq
4693 ; AVX2-LABEL: vec384_v12i32_to_v6i64_factor2:
4694 ; AVX2: # %bb.0:
4695 ; AVX2-NEXT: vmovdqa (%rdi), %ymm0
4696 ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0
4697 ; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
4698 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
4699 ; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
4700 ; AVX2-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
4701 ; AVX2-NEXT: vpaddb (%rdx), %ymm1, %ymm1
4702 ; AVX2-NEXT: vmovdqa %ymm1, (%rcx)
4703 ; AVX2-NEXT: vmovdqa %ymm0, 32(%rcx)
4704 ; AVX2-NEXT: vzeroupper
4705 ; AVX2-NEXT: retq
4707 ; AVX512F-LABEL: vec384_v12i32_to_v6i64_factor2:
4708 ; AVX512F: # %bb.0:
4709 ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
4710 ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0
4711 ; AVX512F-NEXT: vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
4712 ; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm1
4713 ; AVX512F-NEXT: vpaddb 32(%rdx), %ymm1, %ymm1
4714 ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
4715 ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
4716 ; AVX512F-NEXT: vmovdqa %ymm1, 32(%rcx)
4717 ; AVX512F-NEXT: vzeroupper
4718 ; AVX512F-NEXT: retq
4720 ; AVX512BW-LABEL: vec384_v12i32_to_v6i64_factor2:
4721 ; AVX512BW: # %bb.0:
4722 ; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
4723 ; AVX512BW-NEXT: vpaddb (%rsi), %ymm0, %ymm0
4724 ; AVX512BW-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
4725 ; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm0
4726 ; AVX512BW-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
4727 ; AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
4728 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
4729 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
4730 ; AVX512BW-NEXT: vzeroupper
4731 ; AVX512BW-NEXT: retq
4732 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
4733 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
4734 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
4735 %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <48 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47>
4736 %in.vec.cast = bitcast <48 x i8> %in.vec.trunc to <12 x i32>
4737 %zextd.vec = shufflevector <12 x i32> %in.vec.cast, <12 x i32> zeroinitializer, <12 x i32> <i32 0, i32 13, i32 1, i32 15, i32 2, i32 17, i32 3, i32 19, i32 4, i32 21, i32 5, i32 23>
4738 %out.bytevec = bitcast <12 x i32> %zextd.vec to <48 x i8>
4739 %out.bytevec.padded = shufflevector <48 x i8> %out.bytevec, <48 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
4740 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
4741 %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
4742 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
4743 ret void
4744 }
4746 define void @vec384_v12i32_to_v4i96_factor3(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
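; Factor 3: dwords 0-3 of the input land in output elements 0, 3, 6 and 9 (i96-sized zero-extended lanes); the remaining dwords are zero.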
4747 ; SSE2-LABEL: vec384_v12i32_to_v4i96_factor3:
4748 ; SSE2: # %bb.0:
4749 ; SSE2-NEXT: movdqa (%rdi), %xmm0
4750 ; SSE2-NEXT: paddb (%rsi), %xmm0
4751 ; SSE2-NEXT: xorps %xmm1, %xmm1
4752 ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [0,0,4294967295,0]
4753 ; SSE2-NEXT: pand %xmm0, %xmm2
4754 ; SSE2-NEXT: movdqa %xmm0, %xmm3
4755 ; SSE2-NEXT: psrldq {{.*#+}} xmm3 = xmm3[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
4756 ; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm1[2,3]
4757 ; SSE2-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
4758 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,0,1,3]
4759 ; SSE2-NEXT: paddb (%rdx), %xmm0
4760 ; SSE2-NEXT: paddb 32(%rdx), %xmm3
4761 ; SSE2-NEXT: paddb 16(%rdx), %xmm2
4762 ; SSE2-NEXT: movdqa %xmm2, 16(%rcx)
4763 ; SSE2-NEXT: movdqa %xmm3, 32(%rcx)
4764 ; SSE2-NEXT: movdqa %xmm0, (%rcx)
4765 ; SSE2-NEXT: retq
4767 ; SSE42-LABEL: vec384_v12i32_to_v4i96_factor3:
4768 ; SSE42: # %bb.0:
4769 ; SSE42-NEXT: movdqa (%rdi), %xmm0
4770 ; SSE42-NEXT: paddb (%rsi), %xmm0
4771 ; SSE42-NEXT: pxor %xmm1, %xmm1
4772 ; SSE42-NEXT: pxor %xmm2, %xmm2
4773 ; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm0[4,5],xmm2[6,7]
4774 ; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
4775 ; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm1[0,1],xmm3[2,3],xmm1[4,5,6,7]
4776 ; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
4777 ; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5],xmm0[6,7]
4778 ; SSE42-NEXT: paddb (%rdx), %xmm0
4779 ; SSE42-NEXT: paddb 32(%rdx), %xmm3
4780 ; SSE42-NEXT: paddb 16(%rdx), %xmm2
4781 ; SSE42-NEXT: movdqa %xmm2, 16(%rcx)
4782 ; SSE42-NEXT: movdqa %xmm3, 32(%rcx)
4783 ; SSE42-NEXT: movdqa %xmm0, (%rcx)
4784 ; SSE42-NEXT: retq
4786 ; AVX-LABEL: vec384_v12i32_to_v4i96_factor3:
4787 ; AVX: # %bb.0:
4788 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
4789 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
4790 ; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
4791 ; AVX-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[0,1,0,1]
4792 ; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm2
4793 ; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1,2],ymm2[3],ymm1[4,5],ymm2[6],ymm1[7]
4794 ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
4795 ; AVX-NEXT: vxorps %xmm2, %xmm2, %xmm2
4796 ; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2,3]
4797 ; AVX-NEXT: vextractf128 $1, %ymm1, %xmm2
4798 ; AVX-NEXT: vpaddb 16(%rdx), %xmm2, %xmm2
4799 ; AVX-NEXT: vpaddb (%rdx), %xmm1, %xmm1
4800 ; AVX-NEXT: vpaddb 32(%rdx), %xmm0, %xmm0
4801 ; AVX-NEXT: vmovdqa %xmm0, 32(%rcx)
4802 ; AVX-NEXT: vmovdqa %xmm1, (%rcx)
4803 ; AVX-NEXT: vmovdqa %xmm2, 16(%rcx)
4804 ; AVX-NEXT: vzeroupper
4805 ; AVX-NEXT: retq
4807 ; AVX2-SLOW-LABEL: vec384_v12i32_to_v4i96_factor3:
4808 ; AVX2-SLOW: # %bb.0:
4809 ; AVX2-SLOW-NEXT: vmovdqa (%rdi), %ymm0
4810 ; AVX2-SLOW-NEXT: vpaddb (%rsi), %ymm0, %ymm0
4811 ; AVX2-SLOW-NEXT: vpxor %xmm1, %xmm1, %xmm1
4812 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm0[0,0,2,1]
4813 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1,2],ymm2[3],ymm1[4,5],ymm2[6],ymm1[7]
4814 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
4815 ; AVX2-SLOW-NEXT: vpxor %xmm2, %xmm2, %xmm2
4816 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2,3]
4817 ; AVX2-SLOW-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
4818 ; AVX2-SLOW-NEXT: vpaddb (%rdx), %ymm1, %ymm1
4819 ; AVX2-SLOW-NEXT: vmovdqa %ymm1, (%rcx)
4820 ; AVX2-SLOW-NEXT: vmovdqa %ymm0, 32(%rcx)
4821 ; AVX2-SLOW-NEXT: vzeroupper
4822 ; AVX2-SLOW-NEXT: retq
4824 ; AVX2-FAST-PERLANE-LABEL: vec384_v12i32_to_v4i96_factor3:
4825 ; AVX2-FAST-PERLANE: # %bb.0:
4826 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %ymm0
4827 ; AVX2-FAST-PERLANE-NEXT: vpaddb (%rsi), %ymm0, %ymm0
4828 ; AVX2-FAST-PERLANE-NEXT: vpxor %xmm1, %xmm1, %xmm1
4829 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm2 = ymm0[0,0,2,1]
4830 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1,2],ymm2[3],ymm1[4,5],ymm2[6],ymm1[7]
4831 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero
4832 ; AVX2-FAST-PERLANE-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
4833 ; AVX2-FAST-PERLANE-NEXT: vpaddb (%rdx), %ymm1, %ymm1
4834 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm1, (%rcx)
4835 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm0, 32(%rcx)
4836 ; AVX2-FAST-PERLANE-NEXT: vzeroupper
4837 ; AVX2-FAST-PERLANE-NEXT: retq
4839 ; AVX2-FAST-LABEL: vec384_v12i32_to_v4i96_factor3:
4840 ; AVX2-FAST: # %bb.0:
4841 ; AVX2-FAST-NEXT: vmovdqa (%rdi), %ymm0
4842 ; AVX2-FAST-NEXT: vpaddb (%rsi), %ymm0, %ymm0
4843 ; AVX2-FAST-NEXT: vpxor %xmm1, %xmm1, %xmm1
4844 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm2 = ymm0[0,0,2,1]
4845 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1,2],ymm2[3],ymm1[4,5],ymm2[6],ymm1[7]
4846 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero
4847 ; AVX2-FAST-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
4848 ; AVX2-FAST-NEXT: vpaddb (%rdx), %ymm1, %ymm1
4849 ; AVX2-FAST-NEXT: vmovdqa %ymm1, (%rcx)
4850 ; AVX2-FAST-NEXT: vmovdqa %ymm0, 32(%rcx)
4851 ; AVX2-FAST-NEXT: vzeroupper
4852 ; AVX2-FAST-NEXT: retq
4854 ; AVX512F-LABEL: vec384_v12i32_to_v4i96_factor3:
4855 ; AVX512F: # %bb.0:
4856 ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
4857 ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0
4858 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm1 = [16,1,2,17,4,5,18,7,8,19,10,11,u,u,u,u]
4859 ; AVX512F-NEXT: vpxor %xmm2, %xmm2, %xmm2
4860 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm1, %zmm2
4861 ; AVX512F-NEXT: vextracti64x4 $1, %zmm2, %ymm0
4862 ; AVX512F-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
4863 ; AVX512F-NEXT: vpaddb (%rdx), %ymm2, %ymm1
4864 ; AVX512F-NEXT: vmovdqa %ymm1, (%rcx)
4865 ; AVX512F-NEXT: vmovdqa %ymm0, 32(%rcx)
4866 ; AVX512F-NEXT: vzeroupper
4867 ; AVX512F-NEXT: retq
4869 ; AVX512BW-SLOW-LABEL: vec384_v12i32_to_v4i96_factor3:
4870 ; AVX512BW-SLOW: # %bb.0:
4871 ; AVX512BW-SLOW-NEXT: vmovdqa (%rdi), %ymm0
4872 ; AVX512BW-SLOW-NEXT: vpaddb (%rsi), %ymm0, %ymm0
4873 ; AVX512BW-SLOW-NEXT: movb $73, %al
4874 ; AVX512BW-SLOW-NEXT: kmovd %eax, %k1
4875 ; AVX512BW-SLOW-NEXT: vpexpandd %ymm0, %ymm1 {%k1} {z}
4876 ; AVX512BW-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
4877 ; AVX512BW-SLOW-NEXT: vpxor %xmm2, %xmm2, %xmm2
4878 ; AVX512BW-SLOW-NEXT: vpblendd {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2,3]
4879 ; AVX512BW-SLOW-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0
4880 ; AVX512BW-SLOW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
4881 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm0, (%rcx)
4882 ; AVX512BW-SLOW-NEXT: vzeroupper
4883 ; AVX512BW-SLOW-NEXT: retq
4885 ; AVX512BW-FAST-LABEL: vec384_v12i32_to_v4i96_factor3:
4886 ; AVX512BW-FAST: # %bb.0:
4887 ; AVX512BW-FAST-NEXT: vmovdqa (%rdi), %ymm0
4888 ; AVX512BW-FAST-NEXT: vpaddb (%rsi), %ymm0, %ymm0
4889 ; AVX512BW-FAST-NEXT: movb $73, %al
4890 ; AVX512BW-FAST-NEXT: kmovd %eax, %k1
4891 ; AVX512BW-FAST-NEXT: vpexpandd %ymm0, %ymm1 {%k1} {z}
4892 ; AVX512BW-FAST-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero
4893 ; AVX512BW-FAST-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0
4894 ; AVX512BW-FAST-NEXT: vpaddb (%rdx), %zmm0, %zmm0
4895 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm0, (%rcx)
4896 ; AVX512BW-FAST-NEXT: vzeroupper
4897 ; AVX512BW-FAST-NEXT: retq
4898 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
4899 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
4900 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
4901 %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <48 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47>
4902 %in.vec.cast = bitcast <48 x i8> %in.vec.trunc to <12 x i32>
4903 %zextd.vec = shufflevector <12 x i32> %in.vec.cast, <12 x i32> zeroinitializer, <12 x i32> <i32 0, i32 13, i32 14, i32 1, i32 16, i32 17, i32 2, i32 19, i32 20, i32 3, i32 22, i32 23>
4904 %out.bytevec = bitcast <12 x i32> %zextd.vec to <48 x i8>
4905 %out.bytevec.padded = shufflevector <48 x i8> %out.bytevec, <48 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
4906 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
4907 %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
ret void
}
4912 define void @vec384_v12i32_to_v3i128_factor4(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
4913 ; SSE2-LABEL: vec384_v12i32_to_v3i128_factor4:
4915 ; SSE2-NEXT: movdqa (%rdi), %xmm0
4916 ; SSE2-NEXT: paddb (%rsi), %xmm0
4917 ; SSE2-NEXT: xorps %xmm1, %xmm1
4918 ; SSE2-NEXT: xorps %xmm2, %xmm2
4919 ; SSE2-NEXT: movss {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3]
4920 ; SSE2-NEXT: movdqa %xmm0, %xmm3
4921 ; SSE2-NEXT: psrldq {{.*#+}} xmm3 = xmm3[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero
4922 ; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm1[2,3]
4923 ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[1,0]
4924 ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[2,3]
4925 ; SSE2-NEXT: paddb 16(%rdx), %xmm0
4926 ; SSE2-NEXT: paddb 32(%rdx), %xmm3
4927 ; SSE2-NEXT: paddb (%rdx), %xmm2
4928 ; SSE2-NEXT: movdqa %xmm2, (%rcx)
4929 ; SSE2-NEXT: movdqa %xmm3, 32(%rcx)
; SSE2-NEXT: movdqa %xmm0, 16(%rcx)
; SSE2-NEXT: retq
;
4933 ; SSE42-LABEL: vec384_v12i32_to_v3i128_factor4:
4935 ; SSE42-NEXT: movdqa (%rdi), %xmm0
4936 ; SSE42-NEXT: paddb (%rsi), %xmm0
4937 ; SSE42-NEXT: pxor %xmm1, %xmm1
4938 ; SSE42-NEXT: pxor %xmm2, %xmm2
4939 ; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1],xmm2[2,3,4,5,6,7]
4940 ; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
4941 ; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm1[2,3,4,5,6,7]
4942 ; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
4943 ; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
4944 ; SSE42-NEXT: paddb 16(%rdx), %xmm0
4945 ; SSE42-NEXT: paddb 32(%rdx), %xmm3
4946 ; SSE42-NEXT: paddb (%rdx), %xmm2
4947 ; SSE42-NEXT: movdqa %xmm2, (%rcx)
4948 ; SSE42-NEXT: movdqa %xmm3, 32(%rcx)
4949 ; SSE42-NEXT: movdqa %xmm0, 16(%rcx)
4952 ; AVX-LABEL: vec384_v12i32_to_v3i128_factor4:
4954 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
4955 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
4956 ; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
4957 ; AVX-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
4958 ; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
4959 ; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1,2,3],ymm2[4],ymm1[5,6,7]
4960 ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
4961 ; AVX-NEXT: vxorps %xmm2, %xmm2, %xmm2
4962 ; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3]
4963 ; AVX-NEXT: vextractf128 $1, %ymm1, %xmm2
4964 ; AVX-NEXT: vpaddb 16(%rdx), %xmm2, %xmm2
4965 ; AVX-NEXT: vpaddb (%rdx), %xmm1, %xmm1
4966 ; AVX-NEXT: vpaddb 32(%rdx), %xmm0, %xmm0
4967 ; AVX-NEXT: vmovdqa %xmm0, 32(%rcx)
4968 ; AVX-NEXT: vmovdqa %xmm1, (%rcx)
4969 ; AVX-NEXT: vmovdqa %xmm2, 16(%rcx)
4970 ; AVX-NEXT: vzeroupper
4973 ; AVX2-SLOW-LABEL: vec384_v12i32_to_v3i128_factor4:
4974 ; AVX2-SLOW: # %bb.0:
4975 ; AVX2-SLOW-NEXT: vpxor %xmm0, %xmm0, %xmm0
4976 ; AVX2-SLOW-NEXT: vmovdqa (%rdi), %xmm1
4977 ; AVX2-SLOW-NEXT: vpaddb (%rsi), %xmm1, %xmm1
4978 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
4979 ; AVX2-SLOW-NEXT: vpxor %xmm3, %xmm3, %xmm3
4980 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3,4,5,6,7]
4981 ; AVX2-SLOW-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
4982 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,1,3]
4983 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3],ymm1[4],ymm0[5,6,7]
4984 ; AVX2-SLOW-NEXT: vpaddb (%rdx), %ymm0, %ymm0
4985 ; AVX2-SLOW-NEXT: vpaddb 32(%rdx), %ymm2, %ymm1
4986 ; AVX2-SLOW-NEXT: vmovdqa %ymm1, 32(%rcx)
4987 ; AVX2-SLOW-NEXT: vmovdqa %ymm0, (%rcx)
4988 ; AVX2-SLOW-NEXT: vzeroupper
4989 ; AVX2-SLOW-NEXT: retq
4991 ; AVX2-FAST-PERLANE-LABEL: vec384_v12i32_to_v3i128_factor4:
4992 ; AVX2-FAST-PERLANE: # %bb.0:
4993 ; AVX2-FAST-PERLANE-NEXT: vpxor %xmm0, %xmm0, %xmm0
4994 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %xmm1
4995 ; AVX2-FAST-PERLANE-NEXT: vpaddb (%rsi), %xmm1, %xmm1
4996 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm2 = xmm1[8,9,10,11],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
4997 ; AVX2-FAST-PERLANE-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
4998 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,1,3]
4999 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3],ymm1[4],ymm0[5,6,7]
5000 ; AVX2-FAST-PERLANE-NEXT: vpaddb (%rdx), %ymm0, %ymm0
5001 ; AVX2-FAST-PERLANE-NEXT: vpaddb 32(%rdx), %ymm2, %ymm1
5002 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm1, 32(%rcx)
5003 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm0, (%rcx)
5004 ; AVX2-FAST-PERLANE-NEXT: vzeroupper
5005 ; AVX2-FAST-PERLANE-NEXT: retq
5007 ; AVX2-FAST-LABEL: vec384_v12i32_to_v3i128_factor4:
5008 ; AVX2-FAST: # %bb.0:
5009 ; AVX2-FAST-NEXT: vmovdqa (%rdi), %ymm0
5010 ; AVX2-FAST-NEXT: vpaddb (%rsi), %ymm0, %ymm0
5011 ; AVX2-FAST-NEXT: vpxor %xmm1, %xmm1, %xmm1
5012 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm2 = [0,u,u,u,1,u,u,u]
5013 ; AVX2-FAST-NEXT: vpermd %ymm0, %ymm2, %ymm2
5014 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1,2,3],ymm2[4],ymm1[5,6,7]
5015 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[8,9,10,11],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
5016 ; AVX2-FAST-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
5017 ; AVX2-FAST-NEXT: vpaddb (%rdx), %ymm1, %ymm1
5018 ; AVX2-FAST-NEXT: vmovdqa %ymm1, (%rcx)
5019 ; AVX2-FAST-NEXT: vmovdqa %ymm0, 32(%rcx)
5020 ; AVX2-FAST-NEXT: vzeroupper
5021 ; AVX2-FAST-NEXT: retq
5023 ; AVX512F-LABEL: vec384_v12i32_to_v3i128_factor4:
5025 ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
5026 ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0
5027 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm1 = [16,1,2,3,17,5,6,7,18,9,10,11,u,u,u,u]
5028 ; AVX512F-NEXT: vpxor %xmm2, %xmm2, %xmm2
5029 ; AVX512F-NEXT: vpermt2d %zmm0, %zmm1, %zmm2
5030 ; AVX512F-NEXT: vextracti64x4 $1, %zmm2, %ymm0
5031 ; AVX512F-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
5032 ; AVX512F-NEXT: vpaddb (%rdx), %ymm2, %ymm1
5033 ; AVX512F-NEXT: vmovdqa %ymm1, (%rcx)
5034 ; AVX512F-NEXT: vmovdqa %ymm0, 32(%rcx)
5035 ; AVX512F-NEXT: vzeroupper
5036 ; AVX512F-NEXT: retq
5038 ; AVX512BW-SLOW-LABEL: vec384_v12i32_to_v3i128_factor4:
5039 ; AVX512BW-SLOW: # %bb.0:
5040 ; AVX512BW-SLOW-NEXT: vmovdqa (%rdi), %ymm0
5041 ; AVX512BW-SLOW-NEXT: vpaddb (%rsi), %ymm0, %ymm0
5042 ; AVX512BW-SLOW-NEXT: movb $17, %al
5043 ; AVX512BW-SLOW-NEXT: kmovd %eax, %k1
5044 ; AVX512BW-SLOW-NEXT: vpexpandd %ymm0, %ymm1 {%k1} {z}
5045 ; AVX512BW-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
5046 ; AVX512BW-SLOW-NEXT: vpxor %xmm2, %xmm2, %xmm2
5047 ; AVX512BW-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3,4,5,6,7]
5048 ; AVX512BW-SLOW-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0
5049 ; AVX512BW-SLOW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
5050 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm0, (%rcx)
5051 ; AVX512BW-SLOW-NEXT: vzeroupper
5052 ; AVX512BW-SLOW-NEXT: retq
5054 ; AVX512BW-FAST-LABEL: vec384_v12i32_to_v3i128_factor4:
5055 ; AVX512BW-FAST: # %bb.0:
5056 ; AVX512BW-FAST-NEXT: vmovdqa (%rdi), %ymm0
5057 ; AVX512BW-FAST-NEXT: vpaddb (%rsi), %ymm0, %ymm0
5058 ; AVX512BW-FAST-NEXT: movb $17, %al
5059 ; AVX512BW-FAST-NEXT: kmovd %eax, %k1
5060 ; AVX512BW-FAST-NEXT: vpexpandd %ymm0, %ymm1 {%k1} {z}
5061 ; AVX512BW-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[8,9,10,11],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
5062 ; AVX512BW-FAST-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0
5063 ; AVX512BW-FAST-NEXT: vpaddb (%rdx), %zmm0, %zmm0
5064 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm0, (%rcx)
5065 ; AVX512BW-FAST-NEXT: vzeroupper
5066 ; AVX512BW-FAST-NEXT: retq
5067 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
5068 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
5069 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
5070 %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <48 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47>
5071 %in.vec.cast = bitcast <48 x i8> %in.vec.trunc to <12 x i32>
5072 %zextd.vec = shufflevector <12 x i32> %in.vec.cast, <12 x i32> zeroinitializer, <12 x i32> <i32 0, i32 13, i32 14, i32 15, i32 1, i32 17, i32 18, i32 19, i32 2, i32 21, i32 22, i32 23>
5073 %out.bytevec = bitcast <12 x i32> %zextd.vec to <48 x i8>
5074 %out.bytevec.padded = shufflevector <48 x i8> %out.bytevec, <48 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
5075 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
5076 %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
ret void
}
5081 define void @vec384_v12i32_to_v2i192_factor6(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
5082 ; SSE2-LABEL: vec384_v12i32_to_v2i192_factor6:
5084 ; SSE2-NEXT: movdqa (%rdi), %xmm0
5085 ; SSE2-NEXT: paddb (%rsi), %xmm0
5086 ; SSE2-NEXT: xorps %xmm1, %xmm1
5087 ; SSE2-NEXT: xorps %xmm2, %xmm2
5088 ; SSE2-NEXT: movss {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3]
5089 ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[3,0]
5090 ; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[0,2]
5091 ; SSE2-NEXT: movaps 32(%rdx), %xmm0
5092 ; SSE2-NEXT: paddb 16(%rdx), %xmm1
5093 ; SSE2-NEXT: paddb (%rdx), %xmm2
5094 ; SSE2-NEXT: movaps %xmm0, 32(%rcx)
5095 ; SSE2-NEXT: movdqa %xmm2, (%rcx)
5096 ; SSE2-NEXT: movdqa %xmm1, 16(%rcx)
5099 ; SSE42-LABEL: vec384_v12i32_to_v2i192_factor6:
5101 ; SSE42-NEXT: movdqa (%rdi), %xmm0
5102 ; SSE42-NEXT: paddb (%rsi), %xmm0
5103 ; SSE42-NEXT: pxor %xmm1, %xmm1
5104 ; SSE42-NEXT: pxor %xmm2, %xmm2
5105 ; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1],xmm2[2,3,4,5,6,7]
5106 ; SSE42-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
5107 ; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5],xmm1[6,7]
5108 ; SSE42-NEXT: movaps 32(%rdx), %xmm1
5109 ; SSE42-NEXT: paddb 16(%rdx), %xmm0
5110 ; SSE42-NEXT: paddb (%rdx), %xmm2
5111 ; SSE42-NEXT: movaps %xmm1, 32(%rcx)
5112 ; SSE42-NEXT: movdqa %xmm2, (%rcx)
5113 ; SSE42-NEXT: movdqa %xmm0, 16(%rcx)
5116 ; AVX-LABEL: vec384_v12i32_to_v2i192_factor6:
5118 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
5119 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
5120 ; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
5121 ; AVX-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
5122 ; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
5123 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5],ymm0[6],ymm1[7]
5124 ; AVX-NEXT: vmovaps 32(%rdx), %ymm1
5125 ; AVX-NEXT: vextractf128 $1, %ymm0, %xmm2
5126 ; AVX-NEXT: vpaddb 16(%rdx), %xmm2, %xmm2
5127 ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0
5128 ; AVX-NEXT: vmovaps %ymm1, 32(%rcx)
5129 ; AVX-NEXT: vmovdqa %xmm0, (%rcx)
5130 ; AVX-NEXT: vmovdqa %xmm2, 16(%rcx)
5131 ; AVX-NEXT: vzeroupper
5134 ; AVX2-SLOW-LABEL: vec384_v12i32_to_v2i192_factor6:
5135 ; AVX2-SLOW: # %bb.0:
5136 ; AVX2-SLOW-NEXT: vpxor %xmm0, %xmm0, %xmm0
5137 ; AVX2-SLOW-NEXT: vmovdqa (%rdi), %xmm1
5138 ; AVX2-SLOW-NEXT: vpaddb (%rsi), %xmm1, %xmm1
5139 ; AVX2-SLOW-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
5140 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,2,1]
5141 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5],ymm1[6],ymm0[7]
5142 ; AVX2-SLOW-NEXT: vmovaps 32(%rdx), %ymm1
5143 ; AVX2-SLOW-NEXT: vpaddb (%rdx), %ymm0, %ymm0
5144 ; AVX2-SLOW-NEXT: vmovaps %ymm1, 32(%rcx)
5145 ; AVX2-SLOW-NEXT: vmovdqa %ymm0, (%rcx)
5146 ; AVX2-SLOW-NEXT: vzeroupper
5147 ; AVX2-SLOW-NEXT: retq
5149 ; AVX2-FAST-PERLANE-LABEL: vec384_v12i32_to_v2i192_factor6:
5150 ; AVX2-FAST-PERLANE: # %bb.0:
5151 ; AVX2-FAST-PERLANE-NEXT: vpxor %xmm0, %xmm0, %xmm0
5152 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %xmm1
5153 ; AVX2-FAST-PERLANE-NEXT: vpaddb (%rsi), %xmm1, %xmm1
5154 ; AVX2-FAST-PERLANE-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
5155 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,2,1]
5156 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5],ymm1[6],ymm0[7]
5157 ; AVX2-FAST-PERLANE-NEXT: vmovaps 32(%rdx), %ymm1
5158 ; AVX2-FAST-PERLANE-NEXT: vpaddb (%rdx), %ymm0, %ymm0
5159 ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm1, 32(%rcx)
5160 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm0, (%rcx)
5161 ; AVX2-FAST-PERLANE-NEXT: vzeroupper
5162 ; AVX2-FAST-PERLANE-NEXT: retq
5164 ; AVX2-FAST-LABEL: vec384_v12i32_to_v2i192_factor6:
5165 ; AVX2-FAST: # %bb.0:
5166 ; AVX2-FAST-NEXT: vmovdqa (%rdi), %ymm0
5167 ; AVX2-FAST-NEXT: vpaddb (%rsi), %ymm0, %ymm0
5168 ; AVX2-FAST-NEXT: vpxor %xmm1, %xmm1, %xmm1
5169 ; AVX2-FAST-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [0,0,1,0,0,0,1,0]
5170 ; AVX2-FAST-NEXT: # ymm2 = mem[0,1,0,1]
5171 ; AVX2-FAST-NEXT: vpermd %ymm0, %ymm2, %ymm0
5172 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5],ymm0[6],ymm1[7]
5173 ; AVX2-FAST-NEXT: vmovaps 32(%rdx), %ymm1
5174 ; AVX2-FAST-NEXT: vpaddb (%rdx), %ymm0, %ymm0
5175 ; AVX2-FAST-NEXT: vmovaps %ymm1, 32(%rcx)
5176 ; AVX2-FAST-NEXT: vmovdqa %ymm0, (%rcx)
5177 ; AVX2-FAST-NEXT: vzeroupper
5178 ; AVX2-FAST-NEXT: retq
5180 ; AVX512F-LABEL: vec384_v12i32_to_v2i192_factor6:
5182 ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
5183 ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0
5184 ; AVX512F-NEXT: movb $65, %al
5185 ; AVX512F-NEXT: kmovw %eax, %k1
5186 ; AVX512F-NEXT: vpexpandd %ymm0, %ymm0 {%k1} {z}
5187 ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
5188 ; AVX512F-NEXT: vmovaps 32(%rdx), %ymm1
5189 ; AVX512F-NEXT: vmovaps %ymm1, 32(%rcx)
5190 ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
5191 ; AVX512F-NEXT: vzeroupper
5192 ; AVX512F-NEXT: retq
5194 ; AVX512BW-LABEL: vec384_v12i32_to_v2i192_factor6:
5195 ; AVX512BW: # %bb.0:
5196 ; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
5197 ; AVX512BW-NEXT: vpaddb (%rsi), %ymm0, %ymm0
5198 ; AVX512BW-NEXT: movb $65, %al
5199 ; AVX512BW-NEXT: kmovd %eax, %k1
5200 ; AVX512BW-NEXT: vpexpandd %ymm0, %ymm0 {%k1} {z}
5201 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
5202 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
5203 ; AVX512BW-NEXT: vzeroupper
5204 ; AVX512BW-NEXT: retq
5205 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
5206 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
5207 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
5208 %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <48 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47>
5209 %in.vec.cast = bitcast <48 x i8> %in.vec.trunc to <12 x i32>
5210 %zextd.vec = shufflevector <12 x i32> %in.vec.cast, <12 x i32> zeroinitializer, <12 x i32> <i32 0, i32 13, i32 14, i32 15, i32 16, i32 17, i32 1, i32 19, i32 20, i32 21, i32 22, i32 23>
5211 %out.bytevec = bitcast <12 x i32> %zextd.vec to <48 x i8>
5212 %out.bytevec.padded = shufflevector <48 x i8> %out.bytevec, <48 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
5213 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
5214 %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
ret void
}
5219 define void @vec384_v12i32_to_v1i384_factor12(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
5220 ; SSE2-LABEL: vec384_v12i32_to_v1i384_factor12:
5222 ; SSE2-NEXT: movdqa (%rdi), %xmm0
5223 ; SSE2-NEXT: paddb (%rsi), %xmm0
5224 ; SSE2-NEXT: xorps %xmm1, %xmm1
5225 ; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
5226 ; SSE2-NEXT: movaps 16(%rdx), %xmm0
5227 ; SSE2-NEXT: movaps 32(%rdx), %xmm2
5228 ; SSE2-NEXT: paddb (%rdx), %xmm1
5229 ; SSE2-NEXT: movaps %xmm0, 16(%rcx)
5230 ; SSE2-NEXT: movaps %xmm2, 32(%rcx)
5231 ; SSE2-NEXT: movdqa %xmm1, (%rcx)
5234 ; SSE42-LABEL: vec384_v12i32_to_v1i384_factor12:
5236 ; SSE42-NEXT: movdqa (%rdi), %xmm0
5237 ; SSE42-NEXT: paddb (%rsi), %xmm0
5238 ; SSE42-NEXT: pxor %xmm1, %xmm1
5239 ; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3,4,5,6,7]
5240 ; SSE42-NEXT: movaps 16(%rdx), %xmm0
5241 ; SSE42-NEXT: movaps 32(%rdx), %xmm2
5242 ; SSE42-NEXT: paddb (%rdx), %xmm1
5243 ; SSE42-NEXT: movaps %xmm0, 16(%rcx)
5244 ; SSE42-NEXT: movaps %xmm2, 32(%rcx)
5245 ; SSE42-NEXT: movdqa %xmm1, (%rcx)
5248 ; AVX-LABEL: vec384_v12i32_to_v1i384_factor12:
5250 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
5251 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
5252 ; AVX-NEXT: vmovaps 32(%rdx), %ymm1
5253 ; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
5254 ; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3,4,5,6,7]
5255 ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0
5256 ; AVX-NEXT: vmovaps 16(%rdx), %xmm2
5257 ; AVX-NEXT: vmovaps %xmm2, 16(%rcx)
5258 ; AVX-NEXT: vmovaps %ymm1, 32(%rcx)
5259 ; AVX-NEXT: vmovdqa %xmm0, (%rcx)
5260 ; AVX-NEXT: vzeroupper
5263 ; AVX2-LABEL: vec384_v12i32_to_v1i384_factor12:
5265 ; AVX2-NEXT: vmovdqa (%rdi), %ymm0
5266 ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0
5267 ; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
5268 ; AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
5269 ; AVX2-NEXT: vmovaps 32(%rdx), %ymm1
5270 ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0
5271 ; AVX2-NEXT: vmovaps %ymm1, 32(%rcx)
5272 ; AVX2-NEXT: vmovdqa %ymm0, (%rcx)
5273 ; AVX2-NEXT: vzeroupper
5276 ; AVX512F-LABEL: vec384_v12i32_to_v1i384_factor12:
5278 ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
5279 ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0
5280 ; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
5281 ; AVX512F-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
5282 ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
5283 ; AVX512F-NEXT: vmovaps 32(%rdx), %ymm1
5284 ; AVX512F-NEXT: vmovaps %ymm1, 32(%rcx)
5285 ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
5286 ; AVX512F-NEXT: vzeroupper
5287 ; AVX512F-NEXT: retq
5289 ; AVX512BW-LABEL: vec384_v12i32_to_v1i384_factor12:
5290 ; AVX512BW: # %bb.0:
5291 ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
5292 ; AVX512BW-NEXT: vpaddb (%rsi), %zmm0, %zmm0
5293 ; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
5294 ; AVX512BW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
5295 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
5296 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
5297 ; AVX512BW-NEXT: vzeroupper
5298 ; AVX512BW-NEXT: retq
5299 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
5300 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
5301 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
5302 %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <48 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47>
5303 %in.vec.cast = bitcast <48 x i8> %in.vec.trunc to <12 x i32>
5304 %zextd.vec = shufflevector <12 x i32> %in.vec.cast, <12 x i32> zeroinitializer, <12 x i32> <i32 0, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
5305 %out.bytevec = bitcast <12 x i32> %zextd.vec to <48 x i8>
5306 %out.bytevec.padded = shufflevector <48 x i8> %out.bytevec, <48 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
5307 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
5308 %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
ret void
}
5313 define void @vec384_v6i64_to_v3i128_factor2(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
5314 ; SSE-LABEL: vec384_v6i64_to_v3i128_factor2:
5316 ; SSE-NEXT: movdqa (%rdi), %xmm0
5317 ; SSE-NEXT: movdqa 16(%rdi), %xmm1
5318 ; SSE-NEXT: paddb (%rsi), %xmm0
5319 ; SSE-NEXT: paddb 16(%rsi), %xmm1
5320 ; SSE-NEXT: movq {{.*#+}} xmm1 = xmm1[0],zero
5321 ; SSE-NEXT: movq {{.*#+}} xmm2 = xmm0[0],zero
5322 ; SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero
5323 ; SSE-NEXT: paddb 16(%rdx), %xmm0
5324 ; SSE-NEXT: paddb (%rdx), %xmm2
5325 ; SSE-NEXT: paddb 32(%rdx), %xmm1
5326 ; SSE-NEXT: movdqa %xmm1, 32(%rcx)
5327 ; SSE-NEXT: movdqa %xmm2, (%rcx)
5328 ; SSE-NEXT: movdqa %xmm0, 16(%rcx)
5331 ; AVX-LABEL: vec384_v6i64_to_v3i128_factor2:
5333 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
5334 ; AVX-NEXT: vmovdqa 16(%rdi), %xmm1
5335 ; AVX-NEXT: vpaddb 16(%rsi), %xmm1, %xmm1
5336 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
5337 ; AVX-NEXT: vxorpd %xmm2, %xmm2, %xmm2
5338 ; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
5339 ; AVX-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[3],ymm2[3]
5340 ; AVX-NEXT: vmovq {{.*#+}} xmm1 = xmm1[0],zero
5341 ; AVX-NEXT: vextractf128 $1, %ymm0, %xmm2
5342 ; AVX-NEXT: vpaddb 16(%rdx), %xmm2, %xmm2
5343 ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0
5344 ; AVX-NEXT: vpaddb 32(%rdx), %xmm1, %xmm1
5345 ; AVX-NEXT: vmovdqa %xmm1, 32(%rcx)
5346 ; AVX-NEXT: vmovdqa %xmm0, (%rcx)
5347 ; AVX-NEXT: vmovdqa %xmm2, 16(%rcx)
5348 ; AVX-NEXT: vzeroupper
5351 ; AVX2-LABEL: vec384_v6i64_to_v3i128_factor2:
5353 ; AVX2-NEXT: vmovdqa (%rdi), %ymm0
5354 ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0
5355 ; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
5356 ; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm0[0,1,1,3]
5357 ; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3],ymm2[4,5],ymm1[6,7]
5358 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
5359 ; AVX2-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
5360 ; AVX2-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
5361 ; AVX2-NEXT: vpaddb (%rdx), %ymm1, %ymm1
5362 ; AVX2-NEXT: vmovdqa %ymm1, (%rcx)
5363 ; AVX2-NEXT: vmovdqa %ymm0, 32(%rcx)
5364 ; AVX2-NEXT: vzeroupper
5367 ; AVX512F-LABEL: vec384_v6i64_to_v3i128_factor2:
5369 ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
5370 ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0
5371 ; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
5372 ; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,9,1,11,2,13,u,u]
5373 ; AVX512F-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
5374 ; AVX512F-NEXT: vextracti64x4 $1, %zmm2, %ymm0
5375 ; AVX512F-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
5376 ; AVX512F-NEXT: vpaddb (%rdx), %ymm2, %ymm1
5377 ; AVX512F-NEXT: vmovdqa %ymm1, (%rcx)
5378 ; AVX512F-NEXT: vmovdqa %ymm0, 32(%rcx)
5379 ; AVX512F-NEXT: vzeroupper
5380 ; AVX512F-NEXT: retq
5382 ; AVX512BW-LABEL: vec384_v6i64_to_v3i128_factor2:
5383 ; AVX512BW: # %bb.0:
5384 ; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
5385 ; AVX512BW-NEXT: vpaddb (%rsi), %ymm0, %ymm0
5386 ; AVX512BW-NEXT: movb $5, %al
5387 ; AVX512BW-NEXT: kmovd %eax, %k1
5388 ; AVX512BW-NEXT: vpexpandq %ymm0, %ymm1 {%k1} {z}
5389 ; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm0
5390 ; AVX512BW-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
5391 ; AVX512BW-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0
5392 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
5393 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
5394 ; AVX512BW-NEXT: vzeroupper
5395 ; AVX512BW-NEXT: retq
5396 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
5397 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
5398 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
5399 %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <48 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47>
5400 %in.vec.cast = bitcast <48 x i8> %in.vec.trunc to <6 x i64>
5401 %zextd.vec = shufflevector <6 x i64> %in.vec.cast, <6 x i64> zeroinitializer, <6 x i32> <i32 0, i32 7, i32 1, i32 9, i32 2, i32 11>
5402 %out.bytevec = bitcast <6 x i64> %zextd.vec to <48 x i8>
5403 %out.bytevec.padded = shufflevector <48 x i8> %out.bytevec, <48 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
5404 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
5405 %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
ret void
}
5410 define void @vec384_v6i64_to_v2i192_factor3(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
5411 ; SSE2-LABEL: vec384_v6i64_to_v2i192_factor3:
5413 ; SSE2-NEXT: movdqa (%rdi), %xmm0
5414 ; SSE2-NEXT: paddb (%rsi), %xmm0
5415 ; SSE2-NEXT: pxor %xmm1, %xmm1
5416 ; SSE2-NEXT: punpckhqdq {{.*#+}} xmm1 = xmm1[1],xmm0[1]
5417 ; SSE2-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
5418 ; SSE2-NEXT: movaps 32(%rdx), %xmm2
5419 ; SSE2-NEXT: paddb (%rdx), %xmm0
5420 ; SSE2-NEXT: paddb 16(%rdx), %xmm1
5421 ; SSE2-NEXT: movaps %xmm2, 32(%rcx)
5422 ; SSE2-NEXT: movdqa %xmm1, 16(%rcx)
5423 ; SSE2-NEXT: movdqa %xmm0, (%rcx)
5426 ; SSE42-LABEL: vec384_v6i64_to_v2i192_factor3:
5428 ; SSE42-NEXT: movdqa (%rdi), %xmm0
5429 ; SSE42-NEXT: paddb (%rsi), %xmm0
5430 ; SSE42-NEXT: pxor %xmm1, %xmm1
5431 ; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm0[4,5,6,7]
5432 ; SSE42-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
5433 ; SSE42-NEXT: movaps 32(%rdx), %xmm2
5434 ; SSE42-NEXT: paddb (%rdx), %xmm0
5435 ; SSE42-NEXT: paddb 16(%rdx), %xmm1
5436 ; SSE42-NEXT: movaps %xmm2, 32(%rcx)
5437 ; SSE42-NEXT: movdqa %xmm1, 16(%rcx)
5438 ; SSE42-NEXT: movdqa %xmm0, (%rcx)
5441 ; AVX-LABEL: vec384_v6i64_to_v2i192_factor3:
5443 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
5444 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
5445 ; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = zero,zero,ymm0[0,1]
5446 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
5447 ; AVX-NEXT: vmovaps 32(%rdx), %ymm1
5448 ; AVX-NEXT: vextractf128 $1, %ymm0, %xmm2
5449 ; AVX-NEXT: vpaddb 16(%rdx), %xmm2, %xmm2
5450 ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0
5451 ; AVX-NEXT: vmovaps %ymm1, 32(%rcx)
5452 ; AVX-NEXT: vmovdqa %xmm0, (%rcx)
5453 ; AVX-NEXT: vmovdqa %xmm2, 16(%rcx)
5454 ; AVX-NEXT: vzeroupper
5457 ; AVX2-LABEL: vec384_v6i64_to_v2i192_factor3:
5459 ; AVX2-NEXT: vmovdqa (%rdi), %ymm0
5460 ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0
5461 ; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
5462 ; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,2,1]
5463 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5],ymm0[6,7]
5464 ; AVX2-NEXT: vmovaps 32(%rdx), %ymm1
5465 ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0
5466 ; AVX2-NEXT: vmovaps %ymm1, 32(%rcx)
5467 ; AVX2-NEXT: vmovdqa %ymm0, (%rcx)
5468 ; AVX2-NEXT: vzeroupper
5471 ; AVX512F-LABEL: vec384_v6i64_to_v2i192_factor3:
5473 ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
5474 ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0
5475 ; AVX512F-NEXT: movb $9, %al
5476 ; AVX512F-NEXT: kmovw %eax, %k1
5477 ; AVX512F-NEXT: vpexpandq %ymm0, %ymm0 {%k1} {z}
5478 ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
5479 ; AVX512F-NEXT: vmovaps 32(%rdx), %ymm1
5480 ; AVX512F-NEXT: vmovaps %ymm1, 32(%rcx)
5481 ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
5482 ; AVX512F-NEXT: vzeroupper
5483 ; AVX512F-NEXT: retq
5485 ; AVX512BW-LABEL: vec384_v6i64_to_v2i192_factor3:
5486 ; AVX512BW: # %bb.0:
5487 ; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
5488 ; AVX512BW-NEXT: vpaddb (%rsi), %ymm0, %ymm0
5489 ; AVX512BW-NEXT: movb $9, %al
5490 ; AVX512BW-NEXT: kmovd %eax, %k1
5491 ; AVX512BW-NEXT: vpexpandq %ymm0, %ymm0 {%k1} {z}
5492 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
5493 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
5494 ; AVX512BW-NEXT: vzeroupper
5495 ; AVX512BW-NEXT: retq
5496 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
5497 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
5498 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
5499 %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <48 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47>
5500 %in.vec.cast = bitcast <48 x i8> %in.vec.trunc to <6 x i64>
5501 %zextd.vec = shufflevector <6 x i64> %in.vec.cast, <6 x i64> zeroinitializer, <6 x i32> <i32 0, i32 7, i32 8, i32 1, i32 10, i32 11>
5502 %out.bytevec = bitcast <6 x i64> %zextd.vec to <48 x i8>
5503 %out.bytevec.padded = shufflevector <48 x i8> %out.bytevec, <48 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
5504 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
5505 %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
ret void
}
5510 define void @vec384_v6i64_to_v1i384_factor6(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
5511 ; SSE-LABEL: vec384_v6i64_to_v1i384_factor6:
5513 ; SSE-NEXT: movdqa (%rdi), %xmm0
5514 ; SSE-NEXT: paddb (%rsi), %xmm0
5515 ; SSE-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
5516 ; SSE-NEXT: movaps 16(%rdx), %xmm1
5517 ; SSE-NEXT: movaps 32(%rdx), %xmm2
5518 ; SSE-NEXT: paddb (%rdx), %xmm0
5519 ; SSE-NEXT: movaps %xmm1, 16(%rcx)
5520 ; SSE-NEXT: movaps %xmm2, 32(%rcx)
5521 ; SSE-NEXT: movdqa %xmm0, (%rcx)
5524 ; AVX-LABEL: vec384_v6i64_to_v1i384_factor6:
5526 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
5527 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
5528 ; AVX-NEXT: vmovaps 32(%rdx), %ymm1
5529 ; AVX-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
5530 ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0
5531 ; AVX-NEXT: vmovaps 16(%rdx), %xmm2
5532 ; AVX-NEXT: vmovaps %xmm2, 16(%rcx)
5533 ; AVX-NEXT: vmovaps %ymm1, 32(%rcx)
5534 ; AVX-NEXT: vmovdqa %xmm0, (%rcx)
5535 ; AVX-NEXT: vzeroupper
5538 ; AVX2-LABEL: vec384_v6i64_to_v1i384_factor6:
5540 ; AVX2-NEXT: vmovdqa (%rdi), %ymm0
5541 ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0
5542 ; AVX2-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
5543 ; AVX2-NEXT: vmovaps 32(%rdx), %ymm1
5544 ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0
5545 ; AVX2-NEXT: vmovaps %ymm1, 32(%rcx)
5546 ; AVX2-NEXT: vmovdqa %ymm0, (%rcx)
5547 ; AVX2-NEXT: vzeroupper
5550 ; AVX512F-LABEL: vec384_v6i64_to_v1i384_factor6:
5552 ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
5553 ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0
5554 ; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
5555 ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
5556 ; AVX512F-NEXT: vmovaps 32(%rdx), %ymm1
5557 ; AVX512F-NEXT: vmovaps %ymm1, 32(%rcx)
5558 ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
5559 ; AVX512F-NEXT: vzeroupper
5560 ; AVX512F-NEXT: retq
5562 ; AVX512BW-LABEL: vec384_v6i64_to_v1i384_factor6:
5563 ; AVX512BW: # %bb.0:
5564 ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
5565 ; AVX512BW-NEXT: vpaddb (%rsi), %zmm0, %zmm0
5566 ; AVX512BW-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
5567 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
5568 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
5569 ; AVX512BW-NEXT: vzeroupper
5570 ; AVX512BW-NEXT: retq
5571 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
5572 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
5573 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
5574 %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <48 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47>
5575 %in.vec.cast = bitcast <48 x i8> %in.vec.trunc to <6 x i64>
5576 %zextd.vec = shufflevector <6 x i64> %in.vec.cast, <6 x i64> zeroinitializer, <6 x i32> <i32 0, i32 7, i32 8, i32 9, i32 10, i32 11>
5577 %out.bytevec = bitcast <6 x i64> %zextd.vec to <48 x i8>
5578 %out.bytevec.padded = shufflevector <48 x i8> %out.bytevec, <48 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
5579 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
5580 %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
ret void
}
5585 define void @vec384_v3i128_to_v1i384_factor3(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
5586 ; SSE-LABEL: vec384_v3i128_to_v1i384_factor3:
5588 ; SSE-NEXT: movdqa (%rdi), %xmm0
5589 ; SSE-NEXT: paddb (%rsi), %xmm0
5590 ; SSE-NEXT: movaps 16(%rdx), %xmm1
5591 ; SSE-NEXT: movaps 32(%rdx), %xmm2
5592 ; SSE-NEXT: paddb (%rdx), %xmm0
5593 ; SSE-NEXT: movaps %xmm1, 16(%rcx)
5594 ; SSE-NEXT: movaps %xmm2, 32(%rcx)
5595 ; SSE-NEXT: movdqa %xmm0, (%rcx)
5598 ; AVX-LABEL: vec384_v3i128_to_v1i384_factor3:
5600 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
5601 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
5602 ; AVX-NEXT: vmovaps 32(%rdx), %ymm1
5603 ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0
5604 ; AVX-NEXT: vmovaps 16(%rdx), %xmm2
5605 ; AVX-NEXT: vmovaps %xmm2, 16(%rcx)
5606 ; AVX-NEXT: vmovaps %ymm1, 32(%rcx)
5607 ; AVX-NEXT: vmovdqa %xmm0, (%rcx)
5608 ; AVX-NEXT: vzeroupper
5611 ; AVX2-LABEL: vec384_v3i128_to_v1i384_factor3:
5613 ; AVX2-NEXT: vmovdqa (%rdi), %xmm0
5614 ; AVX2-NEXT: vpaddb (%rsi), %xmm0, %xmm0
5615 ; AVX2-NEXT: vmovaps 32(%rdx), %ymm1
5616 ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0
5617 ; AVX2-NEXT: vmovaps %ymm1, 32(%rcx)
5618 ; AVX2-NEXT: vmovdqa %ymm0, (%rcx)
5619 ; AVX2-NEXT: vzeroupper
5622 ; AVX512F-LABEL: vec384_v3i128_to_v1i384_factor3:
5624 ; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
5625 ; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0
5626 ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
5627 ; AVX512F-NEXT: vmovaps 32(%rdx), %ymm1
5628 ; AVX512F-NEXT: vmovaps %ymm1, 32(%rcx)
5629 ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
5630 ; AVX512F-NEXT: vzeroupper
5631 ; AVX512F-NEXT: retq
5633 ; AVX512BW-LABEL: vec384_v3i128_to_v1i384_factor3:
5634 ; AVX512BW: # %bb.0:
5635 ; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
5636 ; AVX512BW-NEXT: vpaddb (%rsi), %xmm0, %xmm0
5637 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
5638 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
5639 ; AVX512BW-NEXT: vzeroupper
5640 ; AVX512BW-NEXT: retq
5641 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
5642 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
5643 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
5644 %in.vec.trunc = shufflevector <64 x i8> %in.vec, <64 x i8> poison, <48 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47>
5645 %in.vec.cast = bitcast <48 x i8> %in.vec.trunc to <3 x i128>
5646 %zextd.vec = shufflevector <3 x i128> %in.vec.cast, <3 x i128> zeroinitializer, <3 x i32> <i32 0, i32 4, i32 5>
5647 %out.bytevec = bitcast <3 x i128> %zextd.vec to <48 x i8>
5648 %out.bytevec.padded = shufflevector <48 x i8> %out.bytevec, <48 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
5649 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
5650 %out.vec = add <64 x i8> %out.bytevec.padded, %out.vec.bias
store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
ret void
}
5655 define void @vec512_v64i8_to_v32i16_factor2(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
5656 ; SSE2-LABEL: vec512_v64i8_to_v32i16_factor2:
5658 ; SSE2-NEXT: movdqa (%rdi), %xmm0
5659 ; SSE2-NEXT: movdqa 16(%rdi), %xmm1
5660 ; SSE2-NEXT: paddb (%rsi), %xmm0
5661 ; SSE2-NEXT: paddb 16(%rsi), %xmm1
5662 ; SSE2-NEXT: pxor %xmm2, %xmm2
5663 ; SSE2-NEXT: movdqa %xmm1, %xmm3
5664 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
5665 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
5666 ; SSE2-NEXT: movdqa %xmm0, %xmm4
5667 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3],xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
5668 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
5669 ; SSE2-NEXT: paddb 16(%rdx), %xmm0
5670 ; SSE2-NEXT: paddb (%rdx), %xmm4
5671 ; SSE2-NEXT: paddb 48(%rdx), %xmm1
5672 ; SSE2-NEXT: paddb 32(%rdx), %xmm3
5673 ; SSE2-NEXT: movdqa %xmm3, 32(%rcx)
5674 ; SSE2-NEXT: movdqa %xmm1, 48(%rcx)
5675 ; SSE2-NEXT: movdqa %xmm4, (%rcx)
5676 ; SSE2-NEXT: movdqa %xmm0, 16(%rcx)
5679 ; SSE42-LABEL: vec512_v64i8_to_v32i16_factor2:
5681 ; SSE42-NEXT: movdqa (%rdi), %xmm0
5682 ; SSE42-NEXT: movdqa 16(%rdi), %xmm1
5683 ; SSE42-NEXT: paddb (%rsi), %xmm0
5684 ; SSE42-NEXT: paddb 16(%rsi), %xmm1
5685 ; SSE42-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
5686 ; SSE42-NEXT: pxor %xmm3, %xmm3
5687 ; SSE42-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm3[8],xmm1[9],xmm3[9],xmm1[10],xmm3[10],xmm1[11],xmm3[11],xmm1[12],xmm3[12],xmm1[13],xmm3[13],xmm1[14],xmm3[14],xmm1[15],xmm3[15]
5688 ; SSE42-NEXT: pmovzxbw {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
5689 ; SSE42-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm3[8],xmm0[9],xmm3[9],xmm0[10],xmm3[10],xmm0[11],xmm3[11],xmm0[12],xmm3[12],xmm0[13],xmm3[13],xmm0[14],xmm3[14],xmm0[15],xmm3[15]
5690 ; SSE42-NEXT: paddb 16(%rdx), %xmm0
5691 ; SSE42-NEXT: paddb (%rdx), %xmm4
5692 ; SSE42-NEXT: paddb 48(%rdx), %xmm1
5693 ; SSE42-NEXT: paddb 32(%rdx), %xmm2
5694 ; SSE42-NEXT: movdqa %xmm2, 32(%rcx)
5695 ; SSE42-NEXT: movdqa %xmm1, 48(%rcx)
5696 ; SSE42-NEXT: movdqa %xmm4, (%rcx)
5697 ; SSE42-NEXT: movdqa %xmm0, 16(%rcx)
5700 ; AVX-LABEL: vec512_v64i8_to_v32i16_factor2:
5702 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
5703 ; AVX-NEXT: vmovdqa 16(%rdi), %xmm1
5704 ; AVX-NEXT: vpaddb 16(%rsi), %xmm1, %xmm1
5705 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
5706 ; AVX-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
5707 ; AVX-NEXT: vpxor %xmm3, %xmm3, %xmm3
5708 ; AVX-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm3[8],xmm0[9],xmm3[9],xmm0[10],xmm3[10],xmm0[11],xmm3[11],xmm0[12],xmm3[12],xmm0[13],xmm3[13],xmm0[14],xmm3[14],xmm0[15],xmm3[15]
5709 ; AVX-NEXT: vpmovzxbw {{.*#+}} xmm4 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
5710 ; AVX-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm3[8],xmm1[9],xmm3[9],xmm1[10],xmm3[10],xmm1[11],xmm3[11],xmm1[12],xmm3[12],xmm1[13],xmm3[13],xmm1[14],xmm3[14],xmm1[15],xmm3[15]
5711 ; AVX-NEXT: vpaddb 48(%rdx), %xmm1, %xmm1
5712 ; AVX-NEXT: vpaddb 32(%rdx), %xmm4, %xmm3
5713 ; AVX-NEXT: vpaddb 16(%rdx), %xmm0, %xmm0
5714 ; AVX-NEXT: vpaddb (%rdx), %xmm2, %xmm2
5715 ; AVX-NEXT: vmovdqa %xmm2, (%rcx)
5716 ; AVX-NEXT: vmovdqa %xmm0, 16(%rcx)
5717 ; AVX-NEXT: vmovdqa %xmm3, 32(%rcx)
5718 ; AVX-NEXT: vmovdqa %xmm1, 48(%rcx)
5721 ; AVX2-LABEL: vec512_v64i8_to_v32i16_factor2:
5723 ; AVX2-NEXT: vmovdqa (%rdi), %ymm0
5724 ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0
5725 ; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
5726 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
5727 ; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
5728 ; AVX2-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
5729 ; AVX2-NEXT: vpaddb (%rdx), %ymm1, %ymm1
5730 ; AVX2-NEXT: vmovdqa %ymm1, (%rcx)
5731 ; AVX2-NEXT: vmovdqa %ymm0, 32(%rcx)
5732 ; AVX2-NEXT: vzeroupper
5735 ; AVX512F-LABEL: vec512_v64i8_to_v32i16_factor2:
5737 ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
5738 ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0
5739 ; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
5740 ; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm0
5741 ; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
5742 ; AVX512F-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
5743 ; AVX512F-NEXT: vpaddb (%rdx), %ymm1, %ymm1
5744 ; AVX512F-NEXT: vmovdqa %ymm1, (%rcx)
5745 ; AVX512F-NEXT: vmovdqa %ymm0, 32(%rcx)
5746 ; AVX512F-NEXT: vzeroupper
5747 ; AVX512F-NEXT: retq
5749 ; AVX512BW-LABEL: vec512_v64i8_to_v32i16_factor2:
5750 ; AVX512BW: # %bb.0:
5751 ; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
5752 ; AVX512BW-NEXT: vpaddb (%rsi), %ymm0, %ymm0
5753 ; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
5754 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
5755 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
5756 ; AVX512BW-NEXT: vzeroupper
5757 ; AVX512BW-NEXT: retq
5758 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
5759 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
5760 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
5761 %zextd.vec = shufflevector <64 x i8> %in.vec, <64 x i8> zeroinitializer, <64 x i32> <i32 0, i32 65, i32 1, i32 67, i32 2, i32 69, i32 3, i32 71, i32 4, i32 73, i32 5, i32 75, i32 6, i32 77, i32 7, i32 79, i32 8, i32 81, i32 9, i32 83, i32 10, i32 85, i32 11, i32 87, i32 12, i32 89, i32 13, i32 91, i32 14, i32 93, i32 15, i32 95, i32 16, i32 97, i32 17, i32 99, i32 18, i32 101, i32 19, i32 103, i32 20, i32 105, i32 21, i32 107, i32 22, i32 109, i32 23, i32 111, i32 24, i32 113, i32 25, i32 115, i32 26, i32 117, i32 27, i32 119, i32 28, i32 121, i32 29, i32 123, i32 30, i32 125, i32 31, i32 127>
5762 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
5763 %out.vec = add <64 x i8> %zextd.vec, %out.vec.bias
store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
ret void
}
5768 define void @vec512_v64i8_to_v16i32_factor4(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
5769 ; SSE2-LABEL: vec512_v64i8_to_v16i32_factor4:
5771 ; SSE2-NEXT: movdqa (%rdi), %xmm0
5772 ; SSE2-NEXT: paddb (%rsi), %xmm0
5773 ; SSE2-NEXT: pxor %xmm1, %xmm1
5774 ; SSE2-NEXT: movdqa %xmm0, %xmm2
5775 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
5776 ; SSE2-NEXT: movdqa %xmm2, %xmm3
5777 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
5778 ; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
5779 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
5780 ; SSE2-NEXT: movdqa %xmm0, %xmm4
5781 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
5782 ; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
5783 ; SSE2-NEXT: paddb 16(%rdx), %xmm0
5784 ; SSE2-NEXT: paddb (%rdx), %xmm4
5785 ; SSE2-NEXT: paddb 48(%rdx), %xmm2
5786 ; SSE2-NEXT: paddb 32(%rdx), %xmm3
5787 ; SSE2-NEXT: movdqa %xmm3, 32(%rcx)
5788 ; SSE2-NEXT: movdqa %xmm2, 48(%rcx)
5789 ; SSE2-NEXT: movdqa %xmm4, (%rcx)
5790 ; SSE2-NEXT: movdqa %xmm0, 16(%rcx)
5793 ; SSE42-LABEL: vec512_v64i8_to_v16i32_factor4:
5795 ; SSE42-NEXT: movdqa (%rdi), %xmm0
5796 ; SSE42-NEXT: paddb (%rsi), %xmm0
5797 ; SSE42-NEXT: pmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
5798 ; SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
5799 ; SSE42-NEXT: pmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
5800 ; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
5801 ; SSE42-NEXT: pmovzxbd {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
5802 ; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
5803 ; SSE42-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
5804 ; SSE42-NEXT: paddb 16(%rdx), %xmm0
5805 ; SSE42-NEXT: paddb 48(%rdx), %xmm3
5806 ; SSE42-NEXT: paddb 32(%rdx), %xmm2
5807 ; SSE42-NEXT: paddb (%rdx), %xmm1
5808 ; SSE42-NEXT: movdqa %xmm1, (%rcx)
5809 ; SSE42-NEXT: movdqa %xmm2, 32(%rcx)
5810 ; SSE42-NEXT: movdqa %xmm3, 48(%rcx)
5811 ; SSE42-NEXT: movdqa %xmm0, 16(%rcx)
5814 ; AVX-LABEL: vec512_v64i8_to_v16i32_factor4:
5816 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
5817 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
5818 ; AVX-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
5819 ; AVX-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,1,1]
5820 ; AVX-NEXT: vpmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
5821 ; AVX-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
5822 ; AVX-NEXT: vpmovzxbd {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
5823 ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
5824 ; AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
5825 ; AVX-NEXT: vpaddb 48(%rdx), %xmm0, %xmm0
5826 ; AVX-NEXT: vpaddb 32(%rdx), %xmm3, %xmm3
5827 ; AVX-NEXT: vpaddb 16(%rdx), %xmm2, %xmm2
5828 ; AVX-NEXT: vpaddb (%rdx), %xmm1, %xmm1
5829 ; AVX-NEXT: vmovdqa %xmm1, (%rcx)
5830 ; AVX-NEXT: vmovdqa %xmm2, 16(%rcx)
5831 ; AVX-NEXT: vmovdqa %xmm3, 32(%rcx)
5832 ; AVX-NEXT: vmovdqa %xmm0, 48(%rcx)
5835 ; AVX2-LABEL: vec512_v64i8_to_v16i32_factor4:
5837 ; AVX2-NEXT: vmovdqa (%rdi), %xmm0
5838 ; AVX2-NEXT: vpaddb (%rsi), %xmm0, %xmm0
5839 ; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
5840 ; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
5841 ; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
5842 ; AVX2-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
5843 ; AVX2-NEXT: vpaddb (%rdx), %ymm1, %ymm1
5844 ; AVX2-NEXT: vmovdqa %ymm1, (%rcx)
5845 ; AVX2-NEXT: vmovdqa %ymm0, 32(%rcx)
5846 ; AVX2-NEXT: vzeroupper
5849 ; AVX512F-LABEL: vec512_v64i8_to_v16i32_factor4:
5851 ; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
5852 ; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0
5853 ; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
5854 ; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm1
5855 ; AVX512F-NEXT: vpaddb 32(%rdx), %ymm1, %ymm1
5856 ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
5857 ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
5858 ; AVX512F-NEXT: vmovdqa %ymm1, 32(%rcx)
5859 ; AVX512F-NEXT: vzeroupper
5860 ; AVX512F-NEXT: retq
5862 ; AVX512BW-LABEL: vec512_v64i8_to_v16i32_factor4:
5863 ; AVX512BW: # %bb.0:
5864 ; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
5865 ; AVX512BW-NEXT: vpaddb (%rsi), %xmm0, %xmm0
5866 ; AVX512BW-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
5867 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
5868 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
5869 ; AVX512BW-NEXT: vzeroupper
5870 ; AVX512BW-NEXT: retq
5871 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
5872 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
5873 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
5874 %zextd.vec = shufflevector <64 x i8> %in.vec, <64 x i8> zeroinitializer, <64 x i32> <i32 0, i32 65, i32 66, i32 67, i32 1, i32 69, i32 70, i32 71, i32 2, i32 73, i32 74, i32 75, i32 3, i32 77, i32 78, i32 79, i32 4, i32 81, i32 82, i32 83, i32 5, i32 85, i32 86, i32 87, i32 6, i32 89, i32 90, i32 91, i32 7, i32 93, i32 94, i32 95, i32 8, i32 97, i32 98, i32 99, i32 9, i32 101, i32 102, i32 103, i32 10, i32 105, i32 106, i32 107, i32 11, i32 109, i32 110, i32 111, i32 12, i32 113, i32 114, i32 115, i32 13, i32 117, i32 118, i32 119, i32 14, i32 121, i32 122, i32 123, i32 15, i32 125, i32 126, i32 127>
5875 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
5876 %out.vec = add <64 x i8> %zextd.vec, %out.vec.bias
store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
ret void
}
5881 define void @vec512_v64i8_to_v8i64_factor8(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
5882 ; SSE2-LABEL: vec512_v64i8_to_v8i64_factor8:
5884 ; SSE2-NEXT: movdqa (%rdi), %xmm0
5885 ; SSE2-NEXT: paddb (%rsi), %xmm0
5886 ; SSE2-NEXT: pxor %xmm1, %xmm1
5887 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
5888 ; SSE2-NEXT: movdqa %xmm0, %xmm2
5889 ; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
5890 ; SSE2-NEXT: movdqa %xmm2, %xmm3
5891 ; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
5892 ; SSE2-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
5893 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
5894 ; SSE2-NEXT: movdqa %xmm0, %xmm4
5895 ; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
5896 ; SSE2-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
5897 ; SSE2-NEXT: paddb 16(%rdx), %xmm0
5898 ; SSE2-NEXT: paddb (%rdx), %xmm4
5899 ; SSE2-NEXT: paddb 48(%rdx), %xmm2
5900 ; SSE2-NEXT: paddb 32(%rdx), %xmm3
5901 ; SSE2-NEXT: movdqa %xmm3, 32(%rcx)
5902 ; SSE2-NEXT: movdqa %xmm2, 48(%rcx)
5903 ; SSE2-NEXT: movdqa %xmm4, (%rcx)
5904 ; SSE2-NEXT: movdqa %xmm0, 16(%rcx)
; SSE2-NEXT: retq
;
5907 ; SSE42-LABEL: vec512_v64i8_to_v8i64_factor8:
; SSE42: # %bb.0:
5909 ; SSE42-NEXT: movdqa (%rdi), %xmm0
5910 ; SSE42-NEXT: paddb (%rsi), %xmm0
5911 ; SSE42-NEXT: pmovzxbq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
5912 ; SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,1,1]
5913 ; SSE42-NEXT: pmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
5914 ; SSE42-NEXT: movdqa %xmm0, %xmm3
5915 ; SSE42-NEXT: psrlq $48, %xmm3
5916 ; SSE42-NEXT: pmovzxbq {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,zero,zero,zero,zero,xmm3[1],zero,zero,zero,zero,zero,zero,zero
5917 ; SSE42-NEXT: psrld $16, %xmm0
5918 ; SSE42-NEXT: pmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
5919 ; SSE42-NEXT: paddb 16(%rdx), %xmm0
5920 ; SSE42-NEXT: paddb 48(%rdx), %xmm3
5921 ; SSE42-NEXT: paddb 32(%rdx), %xmm2
5922 ; SSE42-NEXT: paddb (%rdx), %xmm1
5923 ; SSE42-NEXT: movdqa %xmm1, (%rcx)
5924 ; SSE42-NEXT: movdqa %xmm2, 32(%rcx)
5925 ; SSE42-NEXT: movdqa %xmm3, 48(%rcx)
5926 ; SSE42-NEXT: movdqa %xmm0, 16(%rcx)
; SSE42-NEXT: retq
;
5929 ; AVX-LABEL: vec512_v64i8_to_v8i64_factor8:
; AVX: # %bb.0:
5931 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
5932 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
5933 ; AVX-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
5934 ; AVX-NEXT: vpsrld $16, %xmm0, %xmm2
5935 ; AVX-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
5936 ; AVX-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,1,1]
5937 ; AVX-NEXT: vpmovzxbq {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,zero,zero,zero,zero,xmm3[1],zero,zero,zero,zero,zero,zero,zero
5938 ; AVX-NEXT: vpsrlq $48, %xmm0, %xmm0
5939 ; AVX-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
5940 ; AVX-NEXT: vpaddb 48(%rdx), %xmm0, %xmm0
5941 ; AVX-NEXT: vpaddb 32(%rdx), %xmm3, %xmm3
5942 ; AVX-NEXT: vpaddb 16(%rdx), %xmm2, %xmm2
5943 ; AVX-NEXT: vpaddb (%rdx), %xmm1, %xmm1
5944 ; AVX-NEXT: vmovdqa %xmm1, (%rcx)
5945 ; AVX-NEXT: vmovdqa %xmm2, 16(%rcx)
5946 ; AVX-NEXT: vmovdqa %xmm3, 32(%rcx)
5947 ; AVX-NEXT: vmovdqa %xmm0, 48(%rcx)
; AVX-NEXT: retq
;
5950 ; AVX2-LABEL: vec512_v64i8_to_v8i64_factor8:
; AVX2: # %bb.0:
5952 ; AVX2-NEXT: vmovdqa (%rdi), %xmm0
5953 ; AVX2-NEXT: vpaddb (%rsi), %xmm0, %xmm0
5954 ; AVX2-NEXT: vpmovzxbq {{.*#+}} ymm1 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
5955 ; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
5956 ; AVX2-NEXT: vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
5957 ; AVX2-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
5958 ; AVX2-NEXT: vpaddb (%rdx), %ymm1, %ymm1
5959 ; AVX2-NEXT: vmovdqa %ymm1, (%rcx)
5960 ; AVX2-NEXT: vmovdqa %ymm0, 32(%rcx)
5961 ; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
5964 ; AVX512F-LABEL: vec512_v64i8_to_v8i64_factor8:
; AVX512F: # %bb.0:
5966 ; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
5967 ; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0
5968 ; AVX512F-NEXT: vpmovzxbq {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero,xmm0[4],zero,zero,zero,zero,zero,zero,zero,xmm0[5],zero,zero,zero,zero,zero,zero,zero,xmm0[6],zero,zero,zero,zero,zero,zero,zero,xmm0[7],zero,zero,zero,zero,zero,zero,zero
5969 ; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm1
5970 ; AVX512F-NEXT: vpaddb 32(%rdx), %ymm1, %ymm1
5971 ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
5972 ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
5973 ; AVX512F-NEXT: vmovdqa %ymm1, 32(%rcx)
5974 ; AVX512F-NEXT: vzeroupper
5975 ; AVX512F-NEXT: retq
;
5977 ; AVX512BW-LABEL: vec512_v64i8_to_v8i64_factor8:
5978 ; AVX512BW: # %bb.0:
5979 ; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
5980 ; AVX512BW-NEXT: vpaddb (%rsi), %xmm0, %xmm0
5981 ; AVX512BW-NEXT: vpmovzxbq {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero,xmm0[4],zero,zero,zero,zero,zero,zero,zero,xmm0[5],zero,zero,zero,zero,zero,zero,zero,xmm0[6],zero,zero,zero,zero,zero,zero,zero,xmm0[7],zero,zero,zero,zero,zero,zero,zero
5982 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
5983 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
5984 ; AVX512BW-NEXT: vzeroupper
5985 ; AVX512BW-NEXT: retq
5986 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
5987 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
5988 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
5989 %zextd.vec = shufflevector <64 x i8> %in.vec, <64 x i8> zeroinitializer, <64 x i32> <i32 0, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 1, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 2, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 3, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 4, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 5, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 6, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 7, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127>
5990 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
5991 %out.vec = add <64 x i8> %zextd.vec, %out.vec.bias
5992 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
  ret void
}
5996 define void @vec512_v64i8_to_v4i128_factor16(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
5997 ; SSE-LABEL: vec512_v64i8_to_v4i128_factor16:
; SSE: # %bb.0:
5999 ; SSE-NEXT: movdqa (%rdi), %xmm0
6000 ; SSE-NEXT: paddb (%rsi), %xmm0
6001 ; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,0,0,0]
6002 ; SSE-NEXT: pand %xmm0, %xmm1
6003 ; SSE-NEXT: movdqa %xmm0, %xmm2
6004 ; SSE-NEXT: pslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,1,2]
6005 ; SSE-NEXT: psrldq {{.*#+}} xmm2 = xmm2[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
6006 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,0,0,0]
6007 ; SSE-NEXT: psrldq {{.*#+}} xmm3 = xmm3[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
6008 ; SSE-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1]
6009 ; SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
6010 ; SSE-NEXT: paddb 16(%rdx), %xmm0
6011 ; SSE-NEXT: paddb 48(%rdx), %xmm3
6012 ; SSE-NEXT: paddb 32(%rdx), %xmm2
6013 ; SSE-NEXT: paddb (%rdx), %xmm1
6014 ; SSE-NEXT: movdqa %xmm1, (%rcx)
6015 ; SSE-NEXT: movdqa %xmm2, 32(%rcx)
6016 ; SSE-NEXT: movdqa %xmm3, 48(%rcx)
6017 ; SSE-NEXT: movdqa %xmm0, 16(%rcx)
; SSE-NEXT: retq
;
6020 ; AVX-LABEL: vec512_v64i8_to_v4i128_factor16:
; AVX: # %bb.0:
6022 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
6023 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
6024 ; AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
6025 ; AVX-NEXT: vpslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1]
6026 ; AVX-NEXT: vpsrldq {{.*#+}} xmm2 = xmm2[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
6027 ; AVX-NEXT: vpslldq {{.*#+}} xmm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2]
6028 ; AVX-NEXT: vpsrldq {{.*#+}} xmm3 = xmm3[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
6029 ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
6030 ; AVX-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
6031 ; AVX-NEXT: vpaddb 48(%rdx), %xmm0, %xmm0
6032 ; AVX-NEXT: vpaddb 32(%rdx), %xmm3, %xmm3
6033 ; AVX-NEXT: vpaddb 16(%rdx), %xmm2, %xmm2
6034 ; AVX-NEXT: vpaddb (%rdx), %xmm1, %xmm1
6035 ; AVX-NEXT: vmovdqa %xmm1, (%rcx)
6036 ; AVX-NEXT: vmovdqa %xmm2, 16(%rcx)
6037 ; AVX-NEXT: vmovdqa %xmm3, 32(%rcx)
6038 ; AVX-NEXT: vmovdqa %xmm0, 48(%rcx)
; AVX-NEXT: retq
;
6041 ; AVX2-SLOW-LABEL: vec512_v64i8_to_v4i128_factor16:
6042 ; AVX2-SLOW: # %bb.0:
6043 ; AVX2-SLOW-NEXT: vmovdqa (%rdi), %xmm0
6044 ; AVX2-SLOW-NEXT: vpaddb (%rsi), %xmm0, %xmm0
6045 ; AVX2-SLOW-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
6046 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,1,3]
6047 ; AVX2-SLOW-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
6048 ; AVX2-SLOW-NEXT: # ymm2 = mem[0,1,0,1]
6049 ; AVX2-SLOW-NEXT: vpand %ymm2, %ymm1, %ymm1
6050 ; AVX2-SLOW-NEXT: vpsrld $16, %xmm0, %xmm0
6051 ; AVX2-SLOW-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
6052 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,1,3]
6053 ; AVX2-SLOW-NEXT: vpand %ymm2, %ymm0, %ymm0
6054 ; AVX2-SLOW-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
6055 ; AVX2-SLOW-NEXT: vpaddb (%rdx), %ymm1, %ymm1
6056 ; AVX2-SLOW-NEXT: vmovdqa %ymm1, (%rcx)
6057 ; AVX2-SLOW-NEXT: vmovdqa %ymm0, 32(%rcx)
6058 ; AVX2-SLOW-NEXT: vzeroupper
6059 ; AVX2-SLOW-NEXT: retq
;
6061 ; AVX2-FAST-PERLANE-LABEL: vec512_v64i8_to_v4i128_factor16:
6062 ; AVX2-FAST-PERLANE: # %bb.0:
6063 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %xmm0
6064 ; AVX2-FAST-PERLANE-NEXT: vpaddb (%rsi), %xmm0, %xmm0
6065 ; AVX2-FAST-PERLANE-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
6066 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,1,3]
6067 ; AVX2-FAST-PERLANE-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
6068 ; AVX2-FAST-PERLANE-NEXT: # ymm2 = mem[0,1,0,1]
6069 ; AVX2-FAST-PERLANE-NEXT: vpand %ymm2, %ymm1, %ymm1
6070 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2,u,u,u,u,u,u,u,3,u,u,u,u,u,u,u]
6071 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,1,3]
6072 ; AVX2-FAST-PERLANE-NEXT: vpand %ymm2, %ymm0, %ymm0
6073 ; AVX2-FAST-PERLANE-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
6074 ; AVX2-FAST-PERLANE-NEXT: vpaddb (%rdx), %ymm1, %ymm1
6075 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm1, (%rcx)
6076 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm0, 32(%rcx)
6077 ; AVX2-FAST-PERLANE-NEXT: vzeroupper
6078 ; AVX2-FAST-PERLANE-NEXT: retq
;
6080 ; AVX2-FAST-LABEL: vec512_v64i8_to_v4i128_factor16:
6081 ; AVX2-FAST: # %bb.0:
6082 ; AVX2-FAST-NEXT: vmovdqa (%rdi), %xmm0
6083 ; AVX2-FAST-NEXT: vpaddb (%rsi), %xmm0, %xmm0
6084 ; AVX2-FAST-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
6085 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,1,3]
6086 ; AVX2-FAST-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
6087 ; AVX2-FAST-NEXT: # ymm2 = mem[0,1,0,1]
6088 ; AVX2-FAST-NEXT: vpand %ymm2, %ymm1, %ymm1
6089 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2,u,u,u,u,u,u,u,3,u,u,u,u,u,u,u]
6090 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,1,3]
6091 ; AVX2-FAST-NEXT: vpand %ymm2, %ymm0, %ymm0
6092 ; AVX2-FAST-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
6093 ; AVX2-FAST-NEXT: vpaddb (%rdx), %ymm1, %ymm1
6094 ; AVX2-FAST-NEXT: vmovdqa %ymm1, (%rcx)
6095 ; AVX2-FAST-NEXT: vmovdqa %ymm0, 32(%rcx)
6096 ; AVX2-FAST-NEXT: vzeroupper
6097 ; AVX2-FAST-NEXT: retq
;
6099 ; AVX512F-SLOW-LABEL: vec512_v64i8_to_v4i128_factor16:
6100 ; AVX512F-SLOW: # %bb.0:
6101 ; AVX512F-SLOW-NEXT: vmovdqa (%rdi), %xmm0
6102 ; AVX512F-SLOW-NEXT: vpaddb (%rsi), %xmm0, %xmm0
6103 ; AVX512F-SLOW-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
6104 ; AVX512F-SLOW-NEXT: vpsrld $16, %xmm0, %xmm0
6105 ; AVX512F-SLOW-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
6106 ; AVX512F-SLOW-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0
6107 ; AVX512F-SLOW-NEXT: vpermq {{.*#+}} zmm0 = zmm0[0,1,1,3,4,5,5,7]
6108 ; AVX512F-SLOW-NEXT: vbroadcasti32x4 {{.*#+}} zmm1 = [255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
6109 ; AVX512F-SLOW-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
6110 ; AVX512F-SLOW-NEXT: vpandq %zmm1, %zmm0, %zmm0
6111 ; AVX512F-SLOW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
6112 ; AVX512F-SLOW-NEXT: vpaddb 32(%rdx), %ymm1, %ymm1
6113 ; AVX512F-SLOW-NEXT: vpaddb (%rdx), %ymm0, %ymm0
6114 ; AVX512F-SLOW-NEXT: vmovdqa %ymm0, (%rcx)
6115 ; AVX512F-SLOW-NEXT: vmovdqa %ymm1, 32(%rcx)
6116 ; AVX512F-SLOW-NEXT: vzeroupper
6117 ; AVX512F-SLOW-NEXT: retq
;
6119 ; AVX512F-FAST-LABEL: vec512_v64i8_to_v4i128_factor16:
6120 ; AVX512F-FAST: # %bb.0:
6121 ; AVX512F-FAST-NEXT: vmovdqa (%rdi), %xmm0
6122 ; AVX512F-FAST-NEXT: vpaddb (%rsi), %xmm0, %xmm0
6123 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[2,u,u,u,u,u,u,u,3,u,u,u,u,u,u,u]
6124 ; AVX512F-FAST-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
6125 ; AVX512F-FAST-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
6126 ; AVX512F-FAST-NEXT: vpermq {{.*#+}} zmm0 = zmm0[0,1,1,3,4,5,5,7]
6127 ; AVX512F-FAST-NEXT: vbroadcasti32x4 {{.*#+}} zmm1 = [255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
6128 ; AVX512F-FAST-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
6129 ; AVX512F-FAST-NEXT: vpandq %zmm1, %zmm0, %zmm0
6130 ; AVX512F-FAST-NEXT: vextracti64x4 $1, %zmm0, %ymm1
6131 ; AVX512F-FAST-NEXT: vpaddb 32(%rdx), %ymm1, %ymm1
6132 ; AVX512F-FAST-NEXT: vpaddb (%rdx), %ymm0, %ymm0
6133 ; AVX512F-FAST-NEXT: vmovdqa %ymm0, (%rcx)
6134 ; AVX512F-FAST-NEXT: vmovdqa %ymm1, 32(%rcx)
6135 ; AVX512F-FAST-NEXT: vzeroupper
6136 ; AVX512F-FAST-NEXT: retq
;
6138 ; AVX512BW-SLOW-LABEL: vec512_v64i8_to_v4i128_factor16:
6139 ; AVX512BW-SLOW: # %bb.0:
6140 ; AVX512BW-SLOW-NEXT: vmovdqa (%rdi), %xmm0
6141 ; AVX512BW-SLOW-NEXT: vpaddb (%rsi), %xmm0, %xmm0
6142 ; AVX512BW-SLOW-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
6143 ; AVX512BW-SLOW-NEXT: vpsrld $16, %xmm0, %xmm0
6144 ; AVX512BW-SLOW-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
6145 ; AVX512BW-SLOW-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm0
6146 ; AVX512BW-SLOW-NEXT: vpermq {{.*#+}} zmm0 = zmm0[0,1,1,3,4,5,5,7]
6147 ; AVX512BW-SLOW-NEXT: vbroadcasti32x4 {{.*#+}} zmm1 = [255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
6148 ; AVX512BW-SLOW-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
6149 ; AVX512BW-SLOW-NEXT: vpandq %zmm1, %zmm0, %zmm0
6150 ; AVX512BW-SLOW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
6151 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm0, (%rcx)
6152 ; AVX512BW-SLOW-NEXT: vzeroupper
6153 ; AVX512BW-SLOW-NEXT: retq
;
6155 ; AVX512BW-FAST-LABEL: vec512_v64i8_to_v4i128_factor16:
6156 ; AVX512BW-FAST: # %bb.0:
6157 ; AVX512BW-FAST-NEXT: vmovdqa (%rdi), %xmm0
6158 ; AVX512BW-FAST-NEXT: vpaddb (%rsi), %xmm0, %xmm0
6159 ; AVX512BW-FAST-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[2,u,u,u,u,u,u,u,3,u,u,u,u,u,u,u]
6160 ; AVX512BW-FAST-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
6161 ; AVX512BW-FAST-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
6162 ; AVX512BW-FAST-NEXT: vpermq {{.*#+}} zmm0 = zmm0[0,1,1,3,4,5,5,7]
6163 ; AVX512BW-FAST-NEXT: vbroadcasti32x4 {{.*#+}} zmm1 = [255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
6164 ; AVX512BW-FAST-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
6165 ; AVX512BW-FAST-NEXT: vpandq %zmm1, %zmm0, %zmm0
6166 ; AVX512BW-FAST-NEXT: vpaddb (%rdx), %zmm0, %zmm0
6167 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm0, (%rcx)
6168 ; AVX512BW-FAST-NEXT: vzeroupper
6169 ; AVX512BW-FAST-NEXT: retq
6170 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
6171 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
6172 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
6173 %zextd.vec = shufflevector <64 x i8> %in.vec, <64 x i8> zeroinitializer, <64 x i32> <i32 0, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 1, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 2, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 3, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127>
6174 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
6175 %out.vec = add <64 x i8> %zextd.vec, %out.vec.bias
6176 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
  ret void
}
6180 define void @vec512_v64i8_to_v2i256_factor32(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
6181 ; SSE-LABEL: vec512_v64i8_to_v2i256_factor32:
; SSE: # %bb.0:
6183 ; SSE-NEXT: movdqa (%rdi), %xmm0
6184 ; SSE-NEXT: paddb (%rsi), %xmm0
6185 ; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,0,0,0]
6186 ; SSE-NEXT: pand %xmm0, %xmm1
6187 ; SSE-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1]
6188 ; SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
6189 ; SSE-NEXT: movaps 16(%rdx), %xmm2
6190 ; SSE-NEXT: movaps 48(%rdx), %xmm3
6191 ; SSE-NEXT: paddb 32(%rdx), %xmm0
6192 ; SSE-NEXT: paddb (%rdx), %xmm1
6193 ; SSE-NEXT: movaps %xmm3, 48(%rcx)
6194 ; SSE-NEXT: movaps %xmm2, 16(%rcx)
6195 ; SSE-NEXT: movdqa %xmm1, (%rcx)
6196 ; SSE-NEXT: movdqa %xmm0, 32(%rcx)
; SSE-NEXT: retq
;
6199 ; AVX-LABEL: vec512_v64i8_to_v2i256_factor32:
; AVX: # %bb.0:
6201 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
6202 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
6203 ; AVX-NEXT: vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1]
6204 ; AVX-NEXT: vpsrldq {{.*#+}} xmm1 = xmm1[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
6205 ; AVX-NEXT: vpaddb 32(%rdx), %xmm1, %xmm1
6206 ; AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
6207 ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0
6208 ; AVX-NEXT: vmovaps 16(%rdx), %xmm2
6209 ; AVX-NEXT: vmovaps 48(%rdx), %xmm3
6210 ; AVX-NEXT: vmovaps %xmm2, 16(%rcx)
6211 ; AVX-NEXT: vmovaps %xmm3, 48(%rcx)
6212 ; AVX-NEXT: vmovdqa %xmm0, (%rcx)
6213 ; AVX-NEXT: vmovdqa %xmm1, 32(%rcx)
; AVX-NEXT: retq
;
6216 ; AVX2-LABEL: vec512_v64i8_to_v2i256_factor32:
; AVX2: # %bb.0:
6218 ; AVX2-NEXT: vmovdqa (%rdi), %ymm0
6219 ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0
6220 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [255,0,0,0]
6221 ; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm1
6222 ; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[1],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
6223 ; AVX2-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
6224 ; AVX2-NEXT: vpaddb (%rdx), %ymm1, %ymm1
6225 ; AVX2-NEXT: vmovdqa %ymm1, (%rcx)
6226 ; AVX2-NEXT: vmovdqa %ymm0, 32(%rcx)
6227 ; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
6230 ; AVX512F-LABEL: vec512_v64i8_to_v2i256_factor32:
; AVX512F: # %bb.0:
6232 ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
6233 ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0
6234 ; AVX512F-NEXT: vmovdqa {{.*#+}} xmm1 = [255,0,0,0]
6235 ; AVX512F-NEXT: vpand %ymm1, %ymm0, %ymm1
6236 ; AVX512F-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[1],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
6237 ; AVX512F-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
6238 ; AVX512F-NEXT: vpaddb (%rdx), %ymm1, %ymm1
6239 ; AVX512F-NEXT: vmovdqa %ymm1, (%rcx)
6240 ; AVX512F-NEXT: vmovdqa %ymm0, 32(%rcx)
6241 ; AVX512F-NEXT: vzeroupper
6242 ; AVX512F-NEXT: retq
;
6244 ; AVX512BW-LABEL: vec512_v64i8_to_v2i256_factor32:
6245 ; AVX512BW: # %bb.0:
6246 ; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
6247 ; AVX512BW-NEXT: vpaddb (%rsi), %ymm0, %ymm0
6248 ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = [255,0,0,0]
6249 ; AVX512BW-NEXT: vpand %ymm1, %ymm0, %ymm1
6250 ; AVX512BW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[1],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
6251 ; AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
6252 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
6253 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
6254 ; AVX512BW-NEXT: vzeroupper
6255 ; AVX512BW-NEXT: retq
6256 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
6257 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
6258 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
6259 %zextd.vec = shufflevector <64 x i8> %in.vec, <64 x i8> zeroinitializer, <64 x i32> <i32 0, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 1, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127>
6260 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
6261 %out.vec = add <64 x i8> %zextd.vec, %out.vec.bias
6262 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
  ret void
}
6266 define void @vec512_v64i8_to_v1i512_factor64(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
6267 ; SSE-LABEL: vec512_v64i8_to_v1i512_factor64:
; SSE: # %bb.0:
6269 ; SSE-NEXT: movdqa (%rdi), %xmm0
6270 ; SSE-NEXT: paddb (%rsi), %xmm0
6271 ; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
6272 ; SSE-NEXT: movaps 16(%rdx), %xmm1
6273 ; SSE-NEXT: movaps 32(%rdx), %xmm2
6274 ; SSE-NEXT: movaps 48(%rdx), %xmm3
6275 ; SSE-NEXT: paddb (%rdx), %xmm0
6276 ; SSE-NEXT: movaps %xmm2, 32(%rcx)
6277 ; SSE-NEXT: movaps %xmm3, 48(%rcx)
6278 ; SSE-NEXT: movaps %xmm1, 16(%rcx)
6279 ; SSE-NEXT: movdqa %xmm0, (%rcx)
; SSE-NEXT: retq
;
6282 ; AVX-LABEL: vec512_v64i8_to_v1i512_factor64:
; AVX: # %bb.0:
6284 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
6285 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
6286 ; AVX-NEXT: vmovaps 32(%rdx), %ymm1
6287 ; AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
6288 ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0
6289 ; AVX-NEXT: vmovaps 16(%rdx), %xmm2
6290 ; AVX-NEXT: vmovaps %xmm2, 16(%rcx)
6291 ; AVX-NEXT: vmovaps %ymm1, 32(%rcx)
6292 ; AVX-NEXT: vmovdqa %xmm0, (%rcx)
6293 ; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
6296 ; AVX2-LABEL: vec512_v64i8_to_v1i512_factor64:
; AVX2: # %bb.0:
6298 ; AVX2-NEXT: vmovdqa (%rdi), %ymm0
6299 ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0
6300 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [255,0,0,0]
6301 ; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
6302 ; AVX2-NEXT: vmovaps 32(%rdx), %ymm1
6303 ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0
6304 ; AVX2-NEXT: vmovaps %ymm1, 32(%rcx)
6305 ; AVX2-NEXT: vmovdqa %ymm0, (%rcx)
6306 ; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
6309 ; AVX512F-LABEL: vec512_v64i8_to_v1i512_factor64:
; AVX512F: # %bb.0:
6311 ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
6312 ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0
6313 ; AVX512F-NEXT: vmovdqa {{.*#+}} xmm1 = [255,0,0,0]
6314 ; AVX512F-NEXT: vpand %ymm1, %ymm0, %ymm0
6315 ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
6316 ; AVX512F-NEXT: vmovaps 32(%rdx), %ymm1
6317 ; AVX512F-NEXT: vmovaps %ymm1, 32(%rcx)
6318 ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
6319 ; AVX512F-NEXT: vzeroupper
6320 ; AVX512F-NEXT: retq
;
6322 ; AVX512BW-LABEL: vec512_v64i8_to_v1i512_factor64:
6323 ; AVX512BW: # %bb.0:
6324 ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
6325 ; AVX512BW-NEXT: vpaddb (%rsi), %zmm0, %zmm0
6326 ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = [255,0,0,0]
6327 ; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
6328 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
6329 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
6330 ; AVX512BW-NEXT: vzeroupper
6331 ; AVX512BW-NEXT: retq
6332 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
6333 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
6334 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
6335 %zextd.vec = shufflevector <64 x i8> %in.vec, <64 x i8> zeroinitializer, <64 x i32> <i32 0, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127>
6336 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
6337 %out.vec = add <64 x i8> %zextd.vec, %out.vec.bias
6338 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
  ret void
}
6342 define void @vec512_v32i16_to_v16i32_factor2(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
6343 ; SSE2-LABEL: vec512_v32i16_to_v16i32_factor2:
; SSE2: # %bb.0:
6345 ; SSE2-NEXT: movdqa (%rdi), %xmm0
6346 ; SSE2-NEXT: movdqa 16(%rdi), %xmm1
6347 ; SSE2-NEXT: paddb (%rsi), %xmm0
6348 ; SSE2-NEXT: paddb 16(%rsi), %xmm1
6349 ; SSE2-NEXT: pxor %xmm2, %xmm2
6350 ; SSE2-NEXT: movdqa %xmm1, %xmm3
6351 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
6352 ; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
6353 ; SSE2-NEXT: movdqa %xmm0, %xmm4
6354 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
6355 ; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
6356 ; SSE2-NEXT: paddb 16(%rdx), %xmm0
6357 ; SSE2-NEXT: paddb (%rdx), %xmm4
6358 ; SSE2-NEXT: paddb 48(%rdx), %xmm1
6359 ; SSE2-NEXT: paddb 32(%rdx), %xmm3
6360 ; SSE2-NEXT: movdqa %xmm3, 32(%rcx)
6361 ; SSE2-NEXT: movdqa %xmm1, 48(%rcx)
6362 ; SSE2-NEXT: movdqa %xmm4, (%rcx)
6363 ; SSE2-NEXT: movdqa %xmm0, 16(%rcx)
; SSE2-NEXT: retq
;
6366 ; SSE42-LABEL: vec512_v32i16_to_v16i32_factor2:
; SSE42: # %bb.0:
6368 ; SSE42-NEXT: movdqa (%rdi), %xmm0
6369 ; SSE42-NEXT: movdqa 16(%rdi), %xmm1
6370 ; SSE42-NEXT: paddb (%rsi), %xmm0
6371 ; SSE42-NEXT: paddb 16(%rsi), %xmm1
6372 ; SSE42-NEXT: pxor %xmm2, %xmm2
6373 ; SSE42-NEXT: pmovzxwd {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
6374 ; SSE42-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
6375 ; SSE42-NEXT: pmovzxwd {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
6376 ; SSE42-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
6377 ; SSE42-NEXT: paddb 16(%rdx), %xmm0
6378 ; SSE42-NEXT: paddb (%rdx), %xmm4
6379 ; SSE42-NEXT: paddb 48(%rdx), %xmm1
6380 ; SSE42-NEXT: paddb 32(%rdx), %xmm3
6381 ; SSE42-NEXT: movdqa %xmm3, 32(%rcx)
6382 ; SSE42-NEXT: movdqa %xmm1, 48(%rcx)
6383 ; SSE42-NEXT: movdqa %xmm4, (%rcx)
6384 ; SSE42-NEXT: movdqa %xmm0, 16(%rcx)
; SSE42-NEXT: retq
;
6387 ; AVX-LABEL: vec512_v32i16_to_v16i32_factor2:
; AVX: # %bb.0:
6389 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
6390 ; AVX-NEXT: vmovdqa 16(%rdi), %xmm1
6391 ; AVX-NEXT: vpaddb 16(%rsi), %xmm1, %xmm1
6392 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
6393 ; AVX-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
6394 ; AVX-NEXT: vpxor %xmm3, %xmm3, %xmm3
6395 ; AVX-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
6396 ; AVX-NEXT: vpmovzxwd {{.*#+}} xmm4 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
6397 ; AVX-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
6398 ; AVX-NEXT: vpaddb 48(%rdx), %xmm1, %xmm1
6399 ; AVX-NEXT: vpaddb 32(%rdx), %xmm4, %xmm3
6400 ; AVX-NEXT: vpaddb 16(%rdx), %xmm0, %xmm0
6401 ; AVX-NEXT: vpaddb (%rdx), %xmm2, %xmm2
6402 ; AVX-NEXT: vmovdqa %xmm2, (%rcx)
6403 ; AVX-NEXT: vmovdqa %xmm0, 16(%rcx)
6404 ; AVX-NEXT: vmovdqa %xmm3, 32(%rcx)
6405 ; AVX-NEXT: vmovdqa %xmm1, 48(%rcx)
; AVX-NEXT: retq
;
6408 ; AVX2-LABEL: vec512_v32i16_to_v16i32_factor2:
; AVX2: # %bb.0:
6410 ; AVX2-NEXT: vmovdqa (%rdi), %ymm0
6411 ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0
6412 ; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
6413 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
6414 ; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
6415 ; AVX2-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
6416 ; AVX2-NEXT: vpaddb (%rdx), %ymm1, %ymm1
6417 ; AVX2-NEXT: vmovdqa %ymm1, (%rcx)
6418 ; AVX2-NEXT: vmovdqa %ymm0, 32(%rcx)
6419 ; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
6422 ; AVX512F-LABEL: vec512_v32i16_to_v16i32_factor2:
; AVX512F: # %bb.0:
6424 ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
6425 ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0
6426 ; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
6427 ; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm1
6428 ; AVX512F-NEXT: vpaddb 32(%rdx), %ymm1, %ymm1
6429 ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
6430 ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
6431 ; AVX512F-NEXT: vmovdqa %ymm1, 32(%rcx)
6432 ; AVX512F-NEXT: vzeroupper
6433 ; AVX512F-NEXT: retq
;
6435 ; AVX512BW-LABEL: vec512_v32i16_to_v16i32_factor2:
6436 ; AVX512BW: # %bb.0:
6437 ; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
6438 ; AVX512BW-NEXT: vpaddb (%rsi), %ymm0, %ymm0
6439 ; AVX512BW-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
6440 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
6441 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
6442 ; AVX512BW-NEXT: vzeroupper
6443 ; AVX512BW-NEXT: retq
6444 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
6445 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
6446 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
6447 %in.vec.cast = bitcast <64 x i8> %in.vec to <32 x i16>
6448 %zextd.vec = shufflevector <32 x i16> %in.vec.cast, <32 x i16> zeroinitializer, <32 x i32> <i32 0, i32 33, i32 1, i32 35, i32 2, i32 37, i32 3, i32 39, i32 4, i32 41, i32 5, i32 43, i32 6, i32 45, i32 7, i32 47, i32 8, i32 49, i32 9, i32 51, i32 10, i32 53, i32 11, i32 55, i32 12, i32 57, i32 13, i32 59, i32 14, i32 61, i32 15, i32 63>
6449 %out.bytevec = bitcast <32 x i16> %zextd.vec to <64 x i8>
6450 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
6451 %out.vec = add <64 x i8> %out.bytevec, %out.vec.bias
6452 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
  ret void
}
6456 define void @vec512_v32i16_to_v8i64_factor4(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
6457 ; SSE2-LABEL: vec512_v32i16_to_v8i64_factor4:
; SSE2: # %bb.0:
6459 ; SSE2-NEXT: movdqa (%rdi), %xmm0
6460 ; SSE2-NEXT: paddb (%rsi), %xmm0
6461 ; SSE2-NEXT: pxor %xmm1, %xmm1
6462 ; SSE2-NEXT: movdqa %xmm0, %xmm2
6463 ; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
6464 ; SSE2-NEXT: movdqa %xmm2, %xmm3
6465 ; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
6466 ; SSE2-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
6467 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
6468 ; SSE2-NEXT: movdqa %xmm0, %xmm4
6469 ; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
6470 ; SSE2-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
6471 ; SSE2-NEXT: paddb 16(%rdx), %xmm0
6472 ; SSE2-NEXT: paddb (%rdx), %xmm4
6473 ; SSE2-NEXT: paddb 48(%rdx), %xmm2
6474 ; SSE2-NEXT: paddb 32(%rdx), %xmm3
6475 ; SSE2-NEXT: movdqa %xmm3, 32(%rcx)
6476 ; SSE2-NEXT: movdqa %xmm2, 48(%rcx)
6477 ; SSE2-NEXT: movdqa %xmm4, (%rcx)
6478 ; SSE2-NEXT: movdqa %xmm0, 16(%rcx)
; SSE2-NEXT: retq
;
6481 ; SSE42-LABEL: vec512_v32i16_to_v8i64_factor4:
; SSE42: # %bb.0:
6483 ; SSE42-NEXT: movdqa (%rdi), %xmm0
6484 ; SSE42-NEXT: paddb (%rsi), %xmm0
6485 ; SSE42-NEXT: pmovzxwq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
6486 ; SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
6487 ; SSE42-NEXT: pmovzxwq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
6488 ; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
6489 ; SSE42-NEXT: pmovzxwq {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero
6490 ; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
6491 ; SSE42-NEXT: pmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
6492 ; SSE42-NEXT: paddb 16(%rdx), %xmm0
6493 ; SSE42-NEXT: paddb 48(%rdx), %xmm3
6494 ; SSE42-NEXT: paddb 32(%rdx), %xmm2
6495 ; SSE42-NEXT: paddb (%rdx), %xmm1
6496 ; SSE42-NEXT: movdqa %xmm1, (%rcx)
6497 ; SSE42-NEXT: movdqa %xmm2, 32(%rcx)
6498 ; SSE42-NEXT: movdqa %xmm3, 48(%rcx)
6499 ; SSE42-NEXT: movdqa %xmm0, 16(%rcx)
; SSE42-NEXT: retq
;
6502 ; AVX-LABEL: vec512_v32i16_to_v8i64_factor4:
; AVX: # %bb.0:
6504 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
6505 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
6506 ; AVX-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
6507 ; AVX-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,1,1]
6508 ; AVX-NEXT: vpmovzxwq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
6509 ; AVX-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
6510 ; AVX-NEXT: vpmovzxwq {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero
6511 ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
6512 ; AVX-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
6513 ; AVX-NEXT: vpaddb 48(%rdx), %xmm0, %xmm0
6514 ; AVX-NEXT: vpaddb 32(%rdx), %xmm3, %xmm3
6515 ; AVX-NEXT: vpaddb 16(%rdx), %xmm2, %xmm2
6516 ; AVX-NEXT: vpaddb (%rdx), %xmm1, %xmm1
6517 ; AVX-NEXT: vmovdqa %xmm1, (%rcx)
6518 ; AVX-NEXT: vmovdqa %xmm2, 16(%rcx)
6519 ; AVX-NEXT: vmovdqa %xmm3, 32(%rcx)
6520 ; AVX-NEXT: vmovdqa %xmm0, 48(%rcx)
; AVX-NEXT: retq
;
6523 ; AVX2-LABEL: vec512_v32i16_to_v8i64_factor4:
; AVX2: # %bb.0:
6525 ; AVX2-NEXT: vmovdqa (%rdi), %ymm0
6526 ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0
6527 ; AVX2-NEXT: vpmovzxwq {{.*#+}} ymm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
6528 ; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
6529 ; AVX2-NEXT: vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
6530 ; AVX2-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
6531 ; AVX2-NEXT: vpaddb (%rdx), %ymm1, %ymm1
6532 ; AVX2-NEXT: vmovdqa %ymm1, (%rcx)
6533 ; AVX2-NEXT: vmovdqa %ymm0, 32(%rcx)
6534 ; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
6537 ; AVX512F-LABEL: vec512_v32i16_to_v8i64_factor4:
; AVX512F: # %bb.0:
6539 ; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
6540 ; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0
6541 ; AVX512F-NEXT: vpmovzxwq {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
6542 ; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm1
6543 ; AVX512F-NEXT: vpaddb 32(%rdx), %ymm1, %ymm1
6544 ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
6545 ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
6546 ; AVX512F-NEXT: vmovdqa %ymm1, 32(%rcx)
6547 ; AVX512F-NEXT: vzeroupper
6548 ; AVX512F-NEXT: retq
;
6550 ; AVX512BW-LABEL: vec512_v32i16_to_v8i64_factor4:
6551 ; AVX512BW: # %bb.0:
6552 ; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
6553 ; AVX512BW-NEXT: vpaddb (%rsi), %xmm0, %xmm0
6554 ; AVX512BW-NEXT: vpmovzxwq {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
6555 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
6556 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
6557 ; AVX512BW-NEXT: vzeroupper
6558 ; AVX512BW-NEXT: retq
6559 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
6560 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
6561 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
6562 %in.vec.cast = bitcast <64 x i8> %in.vec to <32 x i16>
6563 %zextd.vec = shufflevector <32 x i16> %in.vec.cast, <32 x i16> zeroinitializer, <32 x i32> <i32 0, i32 33, i32 34, i32 35, i32 1, i32 37, i32 38, i32 39, i32 2, i32 41, i32 42, i32 43, i32 3, i32 45, i32 46, i32 47, i32 4, i32 49, i32 50, i32 51, i32 5, i32 53, i32 54, i32 55, i32 6, i32 57, i32 58, i32 59, i32 7, i32 61, i32 62, i32 63>
6564 %out.bytevec = bitcast <32 x i16> %zextd.vec to <64 x i8>
6565 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
6566 %out.vec = add <64 x i8> %out.bytevec, %out.vec.bias
6567 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
  ret void
}
6571 define void @vec512_v32i16_to_v4i128_factor8(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
6572 ; SSE2-LABEL: vec512_v32i16_to_v4i128_factor8:
; SSE2: # %bb.0:
6574 ; SSE2-NEXT: movdqa (%rdi), %xmm0
6575 ; SSE2-NEXT: paddb (%rsi), %xmm0
6576 ; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [65535,0,0,0]
6577 ; SSE2-NEXT: pand %xmm0, %xmm1
6578 ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,1,0,1]
6579 ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,0,0,0]
6580 ; SSE2-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5]
6581 ; SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
6582 ; SSE2-NEXT: psrldq {{.*#+}} xmm2 = xmm2[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
6583 ; SSE2-NEXT: psrldq {{.*#+}} xmm3 = xmm3[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
6584 ; SSE2-NEXT: paddb 16(%rdx), %xmm3
6585 ; SSE2-NEXT: paddb 48(%rdx), %xmm2
6586 ; SSE2-NEXT: paddb 32(%rdx), %xmm0
6587 ; SSE2-NEXT: paddb (%rdx), %xmm1
6588 ; SSE2-NEXT: movdqa %xmm1, (%rcx)
6589 ; SSE2-NEXT: movdqa %xmm0, 32(%rcx)
6590 ; SSE2-NEXT: movdqa %xmm2, 48(%rcx)
6591 ; SSE2-NEXT: movdqa %xmm3, 16(%rcx)
; SSE2-NEXT: retq
;
6594 ; SSE42-LABEL: vec512_v32i16_to_v4i128_factor8:
; SSE42: # %bb.0:
6596 ; SSE42-NEXT: movdqa (%rdi), %xmm0
6597 ; SSE42-NEXT: paddb (%rsi), %xmm0
6598 ; SSE42-NEXT: pxor %xmm1, %xmm1
6599 ; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5,6,7]
6600 ; SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,1,0,1]
6601 ; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,0,0,0]
6602 ; SSE42-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5]
6603 ; SSE42-NEXT: psrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
6604 ; SSE42-NEXT: psrldq {{.*#+}} xmm2 = xmm2[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
6605 ; SSE42-NEXT: psrldq {{.*#+}} xmm3 = xmm3[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
6606 ; SSE42-NEXT: paddb 16(%rdx), %xmm3
6607 ; SSE42-NEXT: paddb 48(%rdx), %xmm2
6608 ; SSE42-NEXT: paddb 32(%rdx), %xmm0
6609 ; SSE42-NEXT: paddb (%rdx), %xmm1
6610 ; SSE42-NEXT: movdqa %xmm1, (%rcx)
6611 ; SSE42-NEXT: movdqa %xmm0, 32(%rcx)
6612 ; SSE42-NEXT: movdqa %xmm2, 48(%rcx)
6613 ; SSE42-NEXT: movdqa %xmm3, 16(%rcx)
; SSE42-NEXT: retq
;
6616 ; AVX-LABEL: vec512_v32i16_to_v4i128_factor8:
; AVX: # %bb.0:
6618 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
6619 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
6620 ; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
6621 ; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5,6,7]
6622 ; AVX-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[0,0,0,0]
6623 ; AVX-NEXT: vpsrldq {{.*#+}} xmm2 = xmm2[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
6624 ; AVX-NEXT: vpslldq {{.*#+}} xmm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5]
6625 ; AVX-NEXT: vpsrldq {{.*#+}} xmm3 = xmm3[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
6626 ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
6627 ; AVX-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
6628 ; AVX-NEXT: vpaddb 48(%rdx), %xmm0, %xmm0
6629 ; AVX-NEXT: vpaddb 32(%rdx), %xmm3, %xmm3
6630 ; AVX-NEXT: vpaddb 16(%rdx), %xmm2, %xmm2
6631 ; AVX-NEXT: vpaddb (%rdx), %xmm1, %xmm1
6632 ; AVX-NEXT: vmovdqa %xmm1, (%rcx)
6633 ; AVX-NEXT: vmovdqa %xmm2, 16(%rcx)
6634 ; AVX-NEXT: vmovdqa %xmm3, 32(%rcx)
6635 ; AVX-NEXT: vmovdqa %xmm0, 48(%rcx)
; AVX-NEXT: retq
;
6638 ; AVX2-SLOW-LABEL: vec512_v32i16_to_v4i128_factor8:
6639 ; AVX2-SLOW: # %bb.0:
6640 ; AVX2-SLOW-NEXT: vpxor %xmm0, %xmm0, %xmm0
6641 ; AVX2-SLOW-NEXT: vmovdqa (%rdi), %xmm1
6642 ; AVX2-SLOW-NEXT: vpaddb (%rsi), %xmm1, %xmm1
6643 ; AVX2-SLOW-NEXT: vpmovzxwq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
6644 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,1,3]
6645 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0],ymm0[1,2,3,4,5,6,7],ymm2[8],ymm0[9,10,11,12,13,14,15]
6646 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
6647 ; AVX2-SLOW-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
6648 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,1,3]
6649 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7],ymm1[8],ymm0[9,10,11,12,13,14,15]
6650 ; AVX2-SLOW-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
6651 ; AVX2-SLOW-NEXT: vpaddb (%rdx), %ymm2, %ymm1
6652 ; AVX2-SLOW-NEXT: vmovdqa %ymm1, (%rcx)
6653 ; AVX2-SLOW-NEXT: vmovdqa %ymm0, 32(%rcx)
6654 ; AVX2-SLOW-NEXT: vzeroupper
6655 ; AVX2-SLOW-NEXT: retq
;
6657 ; AVX2-FAST-PERLANE-LABEL: vec512_v32i16_to_v4i128_factor8:
6658 ; AVX2-FAST-PERLANE: # %bb.0:
6659 ; AVX2-FAST-PERLANE-NEXT: vpxor %xmm0, %xmm0, %xmm0
6660 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %xmm1
6661 ; AVX2-FAST-PERLANE-NEXT: vpaddb (%rsi), %xmm1, %xmm1
6662 ; AVX2-FAST-PERLANE-NEXT: vpmovzxwq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
6663 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,1,3]
6664 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0],ymm0[1,2,3,4,5,6,7],ymm2[8],ymm0[9,10,11,12,13,14,15]
6665 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[4,5,u,u,u,u,u,u,6,7,u,u,u,u,u,u]
6666 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,1,3]
6667 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7],ymm1[8],ymm0[9,10,11,12,13,14,15]
6668 ; AVX2-FAST-PERLANE-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
6669 ; AVX2-FAST-PERLANE-NEXT: vpaddb (%rdx), %ymm2, %ymm1
6670 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm1, (%rcx)
6671 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm0, 32(%rcx)
6672 ; AVX2-FAST-PERLANE-NEXT: vzeroupper
6673 ; AVX2-FAST-PERLANE-NEXT: retq
;
6675 ; AVX2-FAST-LABEL: vec512_v32i16_to_v4i128_factor8:
6676 ; AVX2-FAST: # %bb.0:
6677 ; AVX2-FAST-NEXT: vpxor %xmm0, %xmm0, %xmm0
6678 ; AVX2-FAST-NEXT: vmovdqa (%rdi), %xmm1
6679 ; AVX2-FAST-NEXT: vpaddb (%rsi), %xmm1, %xmm1
6680 ; AVX2-FAST-NEXT: vpmovzxwq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
6681 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,1,3]
6682 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0],ymm0[1,2,3,4,5,6,7],ymm2[8],ymm0[9,10,11,12,13,14,15]
6683 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[4,5,u,u,u,u,u,u,6,7,u,u,u,u,u,u]
6684 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,1,3]
6685 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7],ymm1[8],ymm0[9,10,11,12,13,14,15]
6686 ; AVX2-FAST-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
6687 ; AVX2-FAST-NEXT: vpaddb (%rdx), %ymm2, %ymm1
6688 ; AVX2-FAST-NEXT: vmovdqa %ymm1, (%rcx)
6689 ; AVX2-FAST-NEXT: vmovdqa %ymm0, 32(%rcx)
6690 ; AVX2-FAST-NEXT: vzeroupper
6691 ; AVX2-FAST-NEXT: retq
;
6693 ; AVX512F-SLOW-LABEL: vec512_v32i16_to_v4i128_factor8:
6694 ; AVX512F-SLOW: # %bb.0:
6695 ; AVX512F-SLOW-NEXT: vmovdqa (%rdi), %xmm0
6696 ; AVX512F-SLOW-NEXT: vpaddb (%rsi), %xmm0, %xmm0
6697 ; AVX512F-SLOW-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
6698 ; AVX512F-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,1,3]
6699 ; AVX512F-SLOW-NEXT: vpxor %xmm2, %xmm2, %xmm2
6700 ; AVX512F-SLOW-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3,4,5,6,7],ymm1[8],ymm2[9,10,11,12,13,14,15]
6701 ; AVX512F-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
6702 ; AVX512F-SLOW-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
6703 ; AVX512F-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,1,3]
6704 ; AVX512F-SLOW-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1,2,3,4,5,6,7],ymm0[8],ymm2[9,10,11,12,13,14,15]
6705 ; AVX512F-SLOW-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
6706 ; AVX512F-SLOW-NEXT: vpaddb (%rdx), %ymm1, %ymm1
6707 ; AVX512F-SLOW-NEXT: vmovdqa %ymm1, (%rcx)
6708 ; AVX512F-SLOW-NEXT: vmovdqa %ymm0, 32(%rcx)
6709 ; AVX512F-SLOW-NEXT: vzeroupper
6710 ; AVX512F-SLOW-NEXT: retq
;
6712 ; AVX512F-FAST-LABEL: vec512_v32i16_to_v4i128_factor8:
6713 ; AVX512F-FAST: # %bb.0:
6714 ; AVX512F-FAST-NEXT: vmovdqa (%rdi), %xmm0
6715 ; AVX512F-FAST-NEXT: vpaddb (%rsi), %xmm0, %xmm0
6716 ; AVX512F-FAST-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
6717 ; AVX512F-FAST-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,1,3]
6718 ; AVX512F-FAST-NEXT: vpxor %xmm2, %xmm2, %xmm2
6719 ; AVX512F-FAST-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3,4,5,6,7],ymm1[8],ymm2[9,10,11,12,13,14,15]
6720 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4,5,u,u,u,u,u,u,6,7,u,u,u,u,u,u]
6721 ; AVX512F-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,1,3]
6722 ; AVX512F-FAST-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1,2,3,4,5,6,7],ymm0[8],ymm2[9,10,11,12,13,14,15]
6723 ; AVX512F-FAST-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
6724 ; AVX512F-FAST-NEXT: vpaddb (%rdx), %ymm1, %ymm1
6725 ; AVX512F-FAST-NEXT: vmovdqa %ymm1, (%rcx)
6726 ; AVX512F-FAST-NEXT: vmovdqa %ymm0, 32(%rcx)
6727 ; AVX512F-FAST-NEXT: vzeroupper
6728 ; AVX512F-FAST-NEXT: retq
;
6730 ; AVX512BW-LABEL: vec512_v32i16_to_v4i128_factor8:
6731 ; AVX512BW: # %bb.0:
6732 ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
6733 ; AVX512BW-NEXT: vpaddb (%rsi), %zmm0, %zmm0
6734 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [32,1,2,3,4,5,6,7,33,9,10,11,12,13,14,15,34,17,18,19,20,21,22,23,35,25,26,27,28,29,30,31]
6735 ; AVX512BW-NEXT: vpxor %xmm2, %xmm2, %xmm2
6736 ; AVX512BW-NEXT: vpermt2w %zmm0, %zmm1, %zmm2
6737 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm2, %zmm0
6738 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
6739 ; AVX512BW-NEXT: vzeroupper
6740 ; AVX512BW-NEXT: retq
6741 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
6742 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
6743 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
6744 %in.vec.cast = bitcast <64 x i8> %in.vec to <32 x i16>
6745 %zextd.vec = shufflevector <32 x i16> %in.vec.cast, <32 x i16> zeroinitializer, <32 x i32> <i32 0, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 1, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 2, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 3, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
6746 %out.bytevec = bitcast <32 x i16> %zextd.vec to <64 x i8>
6747 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
6748 %out.vec = add <64 x i8> %out.bytevec, %out.vec.bias
6749 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
  ret void
}
6753 define void @vec512_v32i16_to_v2i256_factor16(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
6754 ; SSE2-LABEL: vec512_v32i16_to_v2i256_factor16:
; SSE2: # %bb.0:
6756 ; SSE2-NEXT: movdqa (%rdi), %xmm0
6757 ; SSE2-NEXT: paddb (%rsi), %xmm0
6758 ; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [65535,0,0,0]
6759 ; SSE2-NEXT: pand %xmm0, %xmm1
6760 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
6761 ; SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
6762 ; SSE2-NEXT: movaps 16(%rdx), %xmm2
6763 ; SSE2-NEXT: movaps 48(%rdx), %xmm3
6764 ; SSE2-NEXT: paddb 32(%rdx), %xmm0
6765 ; SSE2-NEXT: paddb (%rdx), %xmm1
6766 ; SSE2-NEXT: movaps %xmm3, 48(%rcx)
6767 ; SSE2-NEXT: movaps %xmm2, 16(%rcx)
6768 ; SSE2-NEXT: movdqa %xmm1, (%rcx)
6769 ; SSE2-NEXT: movdqa %xmm0, 32(%rcx)
; SSE2-NEXT: retq
;
6772 ; SSE42-LABEL: vec512_v32i16_to_v2i256_factor16:
; SSE42: # %bb.0:
6774 ; SSE42-NEXT: movdqa (%rdi), %xmm0
6775 ; SSE42-NEXT: paddb (%rsi), %xmm0
6776 ; SSE42-NEXT: pxor %xmm1, %xmm1
6777 ; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5,6,7]
6778 ; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
6779 ; SSE42-NEXT: psrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
6780 ; SSE42-NEXT: movaps 16(%rdx), %xmm2
6781 ; SSE42-NEXT: movaps 48(%rdx), %xmm3
6782 ; SSE42-NEXT: paddb 32(%rdx), %xmm0
6783 ; SSE42-NEXT: paddb (%rdx), %xmm1
6784 ; SSE42-NEXT: movaps %xmm3, 48(%rcx)
6785 ; SSE42-NEXT: movaps %xmm2, 16(%rcx)
6786 ; SSE42-NEXT: movdqa %xmm1, (%rcx)
6787 ; SSE42-NEXT: movdqa %xmm0, 32(%rcx)
; SSE42-NEXT: retq
;
6790 ; AVX-LABEL: vec512_v32i16_to_v2i256_factor16:
; AVX: # %bb.0:
6792 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
6793 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
6794 ; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,0,0,0]
6795 ; AVX-NEXT: vpsrldq {{.*#+}} xmm1 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
6796 ; AVX-NEXT: vpaddb 32(%rdx), %xmm1, %xmm1
6797 ; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
6798 ; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3,4,5,6,7]
6799 ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0
6800 ; AVX-NEXT: vmovaps 16(%rdx), %xmm2
6801 ; AVX-NEXT: vmovaps 48(%rdx), %xmm3
6802 ; AVX-NEXT: vmovaps %xmm2, 16(%rcx)
6803 ; AVX-NEXT: vmovaps %xmm3, 48(%rcx)
6804 ; AVX-NEXT: vmovdqa %xmm0, (%rcx)
6805 ; AVX-NEXT: vmovdqa %xmm1, 32(%rcx)
; AVX-NEXT: retq
;
6808 ; AVX2-LABEL: vec512_v32i16_to_v2i256_factor16:
; AVX2: # %bb.0:
6810 ; AVX2-NEXT: vmovdqa (%rdi), %ymm0
6811 ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0
6812 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [65535,0,0,0]
6813 ; AVX2-NEXT: vpand %ymm0, %ymm1, %ymm1
6814 ; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[2,3],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
6815 ; AVX2-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
6816 ; AVX2-NEXT: vpaddb (%rdx), %ymm1, %ymm1
6817 ; AVX2-NEXT: vmovdqa %ymm1, (%rcx)
6818 ; AVX2-NEXT: vmovdqa %ymm0, 32(%rcx)
6819 ; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
6822 ; AVX512F-LABEL: vec512_v32i16_to_v2i256_factor16:
; AVX512F: # %bb.0:
6824 ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
6825 ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0
6826 ; AVX512F-NEXT: vmovdqa {{.*#+}} xmm1 = [65535,0,0,0]
6827 ; AVX512F-NEXT: vpand %ymm0, %ymm1, %ymm1
6828 ; AVX512F-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[2,3],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
6829 ; AVX512F-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
6830 ; AVX512F-NEXT: vpaddb (%rdx), %ymm1, %ymm1
6831 ; AVX512F-NEXT: vmovdqa %ymm1, (%rcx)
6832 ; AVX512F-NEXT: vmovdqa %ymm0, 32(%rcx)
6833 ; AVX512F-NEXT: vzeroupper
6834 ; AVX512F-NEXT: retq
;
6836 ; AVX512BW-LABEL: vec512_v32i16_to_v2i256_factor16:
6837 ; AVX512BW: # %bb.0:
6838 ; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
6839 ; AVX512BW-NEXT: vpaddb (%rsi), %ymm0, %ymm0
6840 ; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = [65535,0,0,0]
6841 ; AVX512BW-NEXT: vpand %ymm0, %ymm1, %ymm1
6842 ; AVX512BW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[2,3],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
6843 ; AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
6844 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
6845 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
6846 ; AVX512BW-NEXT: vzeroupper
6847 ; AVX512BW-NEXT: retq
6848 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
6849 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
6850 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
6851 %in.vec.cast = bitcast <64 x i8> %in.vec to <32 x i16>
6852 %zextd.vec = shufflevector <32 x i16> %in.vec.cast, <32 x i16> zeroinitializer, <32 x i32> <i32 0, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 1, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
6853 %out.bytevec = bitcast <32 x i16> %zextd.vec to <64 x i8>
6854 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
6855 %out.vec = add <64 x i8> %out.bytevec, %out.vec.bias
6856 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
  ret void
}
6860 define void @vec512_v32i16_to_v1i512_factor32(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
6861 ; SSE2-LABEL: vec512_v32i16_to_v1i512_factor32:
; SSE2: # %bb.0:
6863 ; SSE2-NEXT: movdqa (%rdi), %xmm0
6864 ; SSE2-NEXT: paddb (%rsi), %xmm0
6865 ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
6866 ; SSE2-NEXT: movaps 16(%rdx), %xmm1
6867 ; SSE2-NEXT: movaps 32(%rdx), %xmm2
6868 ; SSE2-NEXT: movaps 48(%rdx), %xmm3
6869 ; SSE2-NEXT: paddb (%rdx), %xmm0
6870 ; SSE2-NEXT: movaps %xmm2, 32(%rcx)
6871 ; SSE2-NEXT: movaps %xmm3, 48(%rcx)
6872 ; SSE2-NEXT: movaps %xmm1, 16(%rcx)
6873 ; SSE2-NEXT: movdqa %xmm0, (%rcx)
; SSE2-NEXT: retq
;
6876 ; SSE42-LABEL: vec512_v32i16_to_v1i512_factor32:
; SSE42: # %bb.0:
6878 ; SSE42-NEXT: movdqa (%rdi), %xmm0
6879 ; SSE42-NEXT: paddb (%rsi), %xmm0
6880 ; SSE42-NEXT: pxor %xmm1, %xmm1
6881 ; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5,6,7]
6882 ; SSE42-NEXT: movaps 16(%rdx), %xmm0
6883 ; SSE42-NEXT: movaps 32(%rdx), %xmm2
6884 ; SSE42-NEXT: movaps 48(%rdx), %xmm3
6885 ; SSE42-NEXT: paddb (%rdx), %xmm1
6886 ; SSE42-NEXT: movaps %xmm2, 32(%rcx)
6887 ; SSE42-NEXT: movaps %xmm3, 48(%rcx)
6888 ; SSE42-NEXT: movaps %xmm0, 16(%rcx)
6889 ; SSE42-NEXT: movdqa %xmm1, (%rcx)
; SSE42-NEXT: retq
;
6892 ; AVX-LABEL: vec512_v32i16_to_v1i512_factor32:
; AVX: # %bb.0:
6894 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
6895 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
6896 ; AVX-NEXT: vmovaps 32(%rdx), %ymm1
6897 ; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
6898 ; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3,4,5,6,7]
6899 ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0
6900 ; AVX-NEXT: vmovaps 16(%rdx), %xmm2
6901 ; AVX-NEXT: vmovaps %xmm2, 16(%rcx)
6902 ; AVX-NEXT: vmovaps %ymm1, 32(%rcx)
6903 ; AVX-NEXT: vmovdqa %xmm0, (%rcx)
6904 ; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
6907 ; AVX2-LABEL: vec512_v32i16_to_v1i512_factor32:
; AVX2: # %bb.0:
6909 ; AVX2-NEXT: vmovdqa (%rdi), %ymm0
6910 ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0
6911 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [65535,0,0,0]
6912 ; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
6913 ; AVX2-NEXT: vmovaps 32(%rdx), %ymm1
6914 ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0
6915 ; AVX2-NEXT: vmovaps %ymm1, 32(%rcx)
6916 ; AVX2-NEXT: vmovdqa %ymm0, (%rcx)
6917 ; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
6920 ; AVX512F-LABEL: vec512_v32i16_to_v1i512_factor32:
; AVX512F: # %bb.0:
6922 ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
6923 ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0
6924 ; AVX512F-NEXT: vmovdqa {{.*#+}} xmm1 = [65535,0,0,0]
6925 ; AVX512F-NEXT: vpand %ymm1, %ymm0, %ymm0
6926 ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
6927 ; AVX512F-NEXT: vmovaps 32(%rdx), %ymm1
6928 ; AVX512F-NEXT: vmovaps %ymm1, 32(%rcx)
6929 ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
6930 ; AVX512F-NEXT: vzeroupper
6931 ; AVX512F-NEXT: retq
;
6933 ; AVX512BW-LABEL: vec512_v32i16_to_v1i512_factor32:
6934 ; AVX512BW: # %bb.0:
6935 ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
6936 ; AVX512BW-NEXT: vpaddb (%rsi), %zmm0, %zmm0
6937 ; AVX512BW-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
6938 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
6939 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
6940 ; AVX512BW-NEXT: vzeroupper
6941 ; AVX512BW-NEXT: retq
6942 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
6943 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
6944 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
6945 %in.vec.cast = bitcast <64 x i8> %in.vec to <32 x i16>
6946 %zextd.vec = shufflevector <32 x i16> %in.vec.cast, <32 x i16> zeroinitializer, <32 x i32> <i32 0, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
6947 %out.bytevec = bitcast <32 x i16> %zextd.vec to <64 x i8>
6948 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
6949 %out.vec = add <64 x i8> %out.bytevec, %out.vec.bias
6950 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
6951 ret void
6952 }
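; Zero-extends each of the low eight i32 elements to i64 by interleaving the dwords with zero.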
6954 define void @vec512_v16i32_to_v8i64_factor2(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
6955 ; SSE2-LABEL: vec512_v16i32_to_v8i64_factor2:
6957 ; SSE2-NEXT: movdqa (%rdi), %xmm0
6958 ; SSE2-NEXT: movdqa 16(%rdi), %xmm1
6959 ; SSE2-NEXT: paddb (%rsi), %xmm0
6960 ; SSE2-NEXT: paddb 16(%rsi), %xmm1
6961 ; SSE2-NEXT: pxor %xmm2, %xmm2
6962 ; SSE2-NEXT: movdqa %xmm1, %xmm3
6963 ; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
6964 ; SSE2-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
6965 ; SSE2-NEXT: movdqa %xmm0, %xmm4
6966 ; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
6967 ; SSE2-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
6968 ; SSE2-NEXT: paddb 16(%rdx), %xmm0
6969 ; SSE2-NEXT: paddb (%rdx), %xmm4
6970 ; SSE2-NEXT: paddb 48(%rdx), %xmm1
6971 ; SSE2-NEXT: paddb 32(%rdx), %xmm3
6972 ; SSE2-NEXT: movdqa %xmm3, 32(%rcx)
6973 ; SSE2-NEXT: movdqa %xmm1, 48(%rcx)
6974 ; SSE2-NEXT: movdqa %xmm4, (%rcx)
6975 ; SSE2-NEXT: movdqa %xmm0, 16(%rcx)
6978 ; SSE42-LABEL: vec512_v16i32_to_v8i64_factor2:
6980 ; SSE42-NEXT: movdqa (%rdi), %xmm0
6981 ; SSE42-NEXT: movdqa 16(%rdi), %xmm1
6982 ; SSE42-NEXT: paddb (%rsi), %xmm0
6983 ; SSE42-NEXT: paddb 16(%rsi), %xmm1
6984 ; SSE42-NEXT: pxor %xmm2, %xmm2
6985 ; SSE42-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero
6986 ; SSE42-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
6987 ; SSE42-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero
6988 ; SSE42-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
6989 ; SSE42-NEXT: paddb 16(%rdx), %xmm0
6990 ; SSE42-NEXT: paddb (%rdx), %xmm4
6991 ; SSE42-NEXT: paddb 48(%rdx), %xmm1
6992 ; SSE42-NEXT: paddb 32(%rdx), %xmm3
6993 ; SSE42-NEXT: movdqa %xmm3, 32(%rcx)
6994 ; SSE42-NEXT: movdqa %xmm1, 48(%rcx)
6995 ; SSE42-NEXT: movdqa %xmm4, (%rcx)
6996 ; SSE42-NEXT: movdqa %xmm0, 16(%rcx)
6999 ; AVX-LABEL: vec512_v16i32_to_v8i64_factor2:
7001 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
7002 ; AVX-NEXT: vmovdqa 16(%rdi), %xmm1
7003 ; AVX-NEXT: vpaddb 16(%rsi), %xmm1, %xmm1
7004 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
7005 ; AVX-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
7006 ; AVX-NEXT: vpxor %xmm3, %xmm3, %xmm3
7007 ; AVX-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
7008 ; AVX-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm1[0],zero,xmm1[1],zero
7009 ; AVX-NEXT: vpunpckhdq {{.*#+}} xmm1 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
7010 ; AVX-NEXT: vpaddb 48(%rdx), %xmm1, %xmm1
7011 ; AVX-NEXT: vpaddb 32(%rdx), %xmm4, %xmm3
7012 ; AVX-NEXT: vpaddb 16(%rdx), %xmm0, %xmm0
7013 ; AVX-NEXT: vpaddb (%rdx), %xmm2, %xmm2
7014 ; AVX-NEXT: vmovdqa %xmm2, (%rcx)
7015 ; AVX-NEXT: vmovdqa %xmm0, 16(%rcx)
7016 ; AVX-NEXT: vmovdqa %xmm3, 32(%rcx)
7017 ; AVX-NEXT: vmovdqa %xmm1, 48(%rcx)
7020 ; AVX2-LABEL: vec512_v16i32_to_v8i64_factor2:
7022 ; AVX2-NEXT: vmovdqa (%rdi), %ymm0
7023 ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0
7024 ; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
7025 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
7026 ; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
7027 ; AVX2-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
7028 ; AVX2-NEXT: vpaddb (%rdx), %ymm1, %ymm1
7029 ; AVX2-NEXT: vmovdqa %ymm1, (%rcx)
7030 ; AVX2-NEXT: vmovdqa %ymm0, 32(%rcx)
7031 ; AVX2-NEXT: vzeroupper
7034 ; AVX512F-LABEL: vec512_v16i32_to_v8i64_factor2:
7036 ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
7037 ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0
7038 ; AVX512F-NEXT: vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
7039 ; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm1
7040 ; AVX512F-NEXT: vpaddb 32(%rdx), %ymm1, %ymm1
7041 ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
7042 ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
7043 ; AVX512F-NEXT: vmovdqa %ymm1, 32(%rcx)
7044 ; AVX512F-NEXT: vzeroupper
7045 ; AVX512F-NEXT: retq
7047 ; AVX512BW-LABEL: vec512_v16i32_to_v8i64_factor2:
7048 ; AVX512BW: # %bb.0:
7049 ; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
7050 ; AVX512BW-NEXT: vpaddb (%rsi), %ymm0, %ymm0
7051 ; AVX512BW-NEXT: vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
7052 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
7053 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
7054 ; AVX512BW-NEXT: vzeroupper
7055 ; AVX512BW-NEXT: retq
7056 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
7057 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
7058 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
7059 %in.vec.cast = bitcast <64 x i8> %in.vec to <16 x i32>
7060 %zextd.vec = shufflevector <16 x i32> %in.vec.cast, <16 x i32> zeroinitializer, <16 x i32> <i32 0, i32 17, i32 1, i32 19, i32 2, i32 21, i32 3, i32 23, i32 4, i32 25, i32 5, i32 27, i32 6, i32 29, i32 7, i32 31>
7061 %out.bytevec = bitcast <16 x i32> %zextd.vec to <64 x i8>
7062 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
7063 %out.vec = add <64 x i8> %out.bytevec, %out.vec.bias
7064 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
7065 ret void
7066 }
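; Spreads the low four i32 elements so each becomes the low dword of a 128-bit chunk, with all remaining dwords zeroed.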
7068 define void @vec512_v16i32_to_v4i128_factor4(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
7069 ; SSE2-LABEL: vec512_v16i32_to_v4i128_factor4:
7071 ; SSE2-NEXT: movdqa (%rdi), %xmm0
7072 ; SSE2-NEXT: paddb (%rsi), %xmm0
7073 ; SSE2-NEXT: xorps %xmm1, %xmm1
7074 ; SSE2-NEXT: movdqa %xmm0, %xmm2
7075 ; SSE2-NEXT: psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
7076 ; SSE2-NEXT: xorps %xmm3, %xmm3
7077 ; SSE2-NEXT: movss {{.*#+}} xmm3 = xmm0[0],xmm3[1,2,3]
7078 ; SSE2-NEXT: movdqa %xmm0, %xmm4
7079 ; SSE2-NEXT: psrldq {{.*#+}} xmm4 = xmm4[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero
7080 ; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm1[2,3]
7081 ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[1,0]
7082 ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[2,3]
7083 ; SSE2-NEXT: paddb 16(%rdx), %xmm0
7084 ; SSE2-NEXT: paddb 32(%rdx), %xmm4
7085 ; SSE2-NEXT: paddb (%rdx), %xmm3
7086 ; SSE2-NEXT: paddb 48(%rdx), %xmm2
7087 ; SSE2-NEXT: movdqa %xmm2, 48(%rcx)
7088 ; SSE2-NEXT: movdqa %xmm3, (%rcx)
7089 ; SSE2-NEXT: movdqa %xmm4, 32(%rcx)
7090 ; SSE2-NEXT: movdqa %xmm0, 16(%rcx)
7093 ; SSE42-LABEL: vec512_v16i32_to_v4i128_factor4:
7095 ; SSE42-NEXT: movdqa (%rdi), %xmm0
7096 ; SSE42-NEXT: paddb (%rsi), %xmm0
7097 ; SSE42-NEXT: pxor %xmm1, %xmm1
7098 ; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3,4,5,6,7]
7099 ; SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
7100 ; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,1,1]
7101 ; SSE42-NEXT: psrldq {{.*#+}} xmm0 = xmm0[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
7102 ; SSE42-NEXT: pxor %xmm4, %xmm4
7103 ; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3,4,5,6,7]
7104 ; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2,3,4,5,6,7]
7105 ; SSE42-NEXT: paddb 16(%rdx), %xmm3
7106 ; SSE42-NEXT: paddb 32(%rdx), %xmm2
7107 ; SSE42-NEXT: paddb (%rdx), %xmm1
7108 ; SSE42-NEXT: paddb 48(%rdx), %xmm0
7109 ; SSE42-NEXT: movdqa %xmm0, 48(%rcx)
7110 ; SSE42-NEXT: movdqa %xmm1, (%rcx)
7111 ; SSE42-NEXT: movdqa %xmm2, 32(%rcx)
7112 ; SSE42-NEXT: movdqa %xmm3, 16(%rcx)
7115 ; AVX-LABEL: vec512_v16i32_to_v4i128_factor4:
7117 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
7118 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
7119 ; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
7120 ; AVX-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
7121 ; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
7122 ; AVX-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm1[1,2,3],ymm2[4],ymm1[5,6,7]
7123 ; AVX-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
7124 ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
7125 ; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
7126 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
7127 ; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
7128 ; AVX-NEXT: vpaddb 48(%rdx), %xmm1, %xmm1
7129 ; AVX-NEXT: vpaddb 32(%rdx), %xmm0, %xmm0
7130 ; AVX-NEXT: vextractf128 $1, %ymm2, %xmm3
7131 ; AVX-NEXT: vpaddb 16(%rdx), %xmm3, %xmm3
7132 ; AVX-NEXT: vpaddb (%rdx), %xmm2, %xmm2
7133 ; AVX-NEXT: vmovdqa %xmm2, (%rcx)
7134 ; AVX-NEXT: vmovdqa %xmm3, 16(%rcx)
7135 ; AVX-NEXT: vmovdqa %xmm0, 32(%rcx)
7136 ; AVX-NEXT: vmovdqa %xmm1, 48(%rcx)
7137 ; AVX-NEXT: vzeroupper
7140 ; AVX2-SLOW-LABEL: vec512_v16i32_to_v4i128_factor4:
7141 ; AVX2-SLOW: # %bb.0:
7142 ; AVX2-SLOW-NEXT: vpxor %xmm0, %xmm0, %xmm0
7143 ; AVX2-SLOW-NEXT: vmovdqa (%rdi), %xmm1
7144 ; AVX2-SLOW-NEXT: vpaddb (%rsi), %xmm1, %xmm1
7145 ; AVX2-SLOW-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
7146 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,1,3]
7147 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0],ymm0[1,2,3],ymm2[4],ymm0[5,6,7]
7148 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
7149 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,1,3]
7150 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3],ymm1[4],ymm0[5,6,7]
7151 ; AVX2-SLOW-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
7152 ; AVX2-SLOW-NEXT: vpaddb (%rdx), %ymm2, %ymm1
7153 ; AVX2-SLOW-NEXT: vmovdqa %ymm1, (%rcx)
7154 ; AVX2-SLOW-NEXT: vmovdqa %ymm0, 32(%rcx)
7155 ; AVX2-SLOW-NEXT: vzeroupper
7156 ; AVX2-SLOW-NEXT: retq
7158 ; AVX2-FAST-PERLANE-LABEL: vec512_v16i32_to_v4i128_factor4:
7159 ; AVX2-FAST-PERLANE: # %bb.0:
7160 ; AVX2-FAST-PERLANE-NEXT: vpxor %xmm0, %xmm0, %xmm0
7161 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %xmm1
7162 ; AVX2-FAST-PERLANE-NEXT: vpaddb (%rsi), %xmm1, %xmm1
7163 ; AVX2-FAST-PERLANE-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
7164 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,1,3]
7165 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0],ymm0[1,2,3],ymm2[4],ymm0[5,6,7]
7166 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
7167 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,1,3]
7168 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3],ymm1[4],ymm0[5,6,7]
7169 ; AVX2-FAST-PERLANE-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
7170 ; AVX2-FAST-PERLANE-NEXT: vpaddb (%rdx), %ymm2, %ymm1
7171 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm1, (%rcx)
7172 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm0, 32(%rcx)
7173 ; AVX2-FAST-PERLANE-NEXT: vzeroupper
7174 ; AVX2-FAST-PERLANE-NEXT: retq
7176 ; AVX2-FAST-LABEL: vec512_v16i32_to_v4i128_factor4:
7177 ; AVX2-FAST: # %bb.0:
7178 ; AVX2-FAST-NEXT: vmovdqa (%rdi), %ymm0
7179 ; AVX2-FAST-NEXT: vpaddb (%rsi), %ymm0, %ymm0
7180 ; AVX2-FAST-NEXT: vpxor %xmm1, %xmm1, %xmm1
7181 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm2 = [0,u,u,u,1,u,u,u]
7182 ; AVX2-FAST-NEXT: vpermd %ymm0, %ymm2, %ymm2
7183 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0],ymm1[1,2,3],ymm2[4],ymm1[5,6,7]
7184 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm3 = [2,u,u,u,3,u,u,u]
7185 ; AVX2-FAST-NEXT: vpermd %ymm0, %ymm3, %ymm0
7186 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
7187 ; AVX2-FAST-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
7188 ; AVX2-FAST-NEXT: vpaddb (%rdx), %ymm2, %ymm1
7189 ; AVX2-FAST-NEXT: vmovdqa %ymm1, (%rcx)
7190 ; AVX2-FAST-NEXT: vmovdqa %ymm0, 32(%rcx)
7191 ; AVX2-FAST-NEXT: vzeroupper
7192 ; AVX2-FAST-NEXT: retq
7194 ; AVX512F-LABEL: vec512_v16i32_to_v4i128_factor4:
7196 ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
7197 ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0
7198 ; AVX512F-NEXT: movw $4369, %ax # imm = 0x1111
7199 ; AVX512F-NEXT: kmovw %eax, %k1
7200 ; AVX512F-NEXT: vpexpandd %zmm0, %zmm0 {%k1} {z}
7201 ; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm1
7202 ; AVX512F-NEXT: vpaddb 32(%rdx), %ymm1, %ymm1
7203 ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
7204 ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
7205 ; AVX512F-NEXT: vmovdqa %ymm1, 32(%rcx)
7206 ; AVX512F-NEXT: vzeroupper
7207 ; AVX512F-NEXT: retq
7209 ; AVX512BW-LABEL: vec512_v16i32_to_v4i128_factor4:
7210 ; AVX512BW: # %bb.0:
7211 ; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
7212 ; AVX512BW-NEXT: vpaddb (%rsi), %ymm0, %ymm0
7213 ; AVX512BW-NEXT: movb $17, %al
7214 ; AVX512BW-NEXT: kmovd %eax, %k1
7215 ; AVX512BW-NEXT: vpexpandd %ymm0, %ymm1 {%k1} {z}
7216 ; AVX512BW-NEXT: vpxor %xmm2, %xmm2, %xmm2
7217 ; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm3 = [2,9,10,11,3,13,14,15]
7218 ; AVX512BW-NEXT: vpermi2d %ymm2, %ymm0, %ymm3
7219 ; AVX512BW-NEXT: vinserti64x4 $1, %ymm3, %zmm1, %zmm0
7220 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
7221 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
7222 ; AVX512BW-NEXT: vzeroupper
7223 ; AVX512BW-NEXT: retq
7224 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
7225 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
7226 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
7227 %in.vec.cast = bitcast <64 x i8> %in.vec to <16 x i32>
7228 %zextd.vec = shufflevector <16 x i32> %in.vec.cast, <16 x i32> zeroinitializer, <16 x i32> <i32 0, i32 17, i32 18, i32 19, i32 1, i32 21, i32 22, i32 23, i32 2, i32 25, i32 26, i32 27, i32 3, i32 29, i32 30, i32 31>
7229 %out.bytevec = bitcast <16 x i32> %zextd.vec to <64 x i8>
7230 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
7231 %out.vec = add <64 x i8> %out.bytevec, %out.vec.bias
7232 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
7233 ret void
7234 }
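; Keeps i32 elements 0 and 1 as the low dwords of the two 256-bit halves; every other dword is zeroed.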
7236 define void @vec512_v16i32_to_v2i256_factor8(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
7237 ; SSE2-LABEL: vec512_v16i32_to_v2i256_factor8:
7239 ; SSE2-NEXT: movdqa (%rdi), %xmm0
7240 ; SSE2-NEXT: paddb (%rsi), %xmm0
7241 ; SSE2-NEXT: xorps %xmm1, %xmm1
7242 ; SSE2-NEXT: xorps %xmm2, %xmm2
7243 ; SSE2-NEXT: movss {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3]
7244 ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[1,0]
7245 ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[2,3]
7246 ; SSE2-NEXT: movaps 16(%rdx), %xmm1
7247 ; SSE2-NEXT: movaps 48(%rdx), %xmm3
7248 ; SSE2-NEXT: paddb 32(%rdx), %xmm0
7249 ; SSE2-NEXT: paddb (%rdx), %xmm2
7250 ; SSE2-NEXT: movaps %xmm3, 48(%rcx)
7251 ; SSE2-NEXT: movaps %xmm1, 16(%rcx)
7252 ; SSE2-NEXT: movdqa %xmm2, (%rcx)
7253 ; SSE2-NEXT: movdqa %xmm0, 32(%rcx)
7256 ; SSE42-LABEL: vec512_v16i32_to_v2i256_factor8:
7258 ; SSE42-NEXT: movdqa (%rdi), %xmm0
7259 ; SSE42-NEXT: paddb (%rsi), %xmm0
7260 ; SSE42-NEXT: pxor %xmm1, %xmm1
7261 ; SSE42-NEXT: pxor %xmm2, %xmm2
7262 ; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1],xmm2[2,3,4,5,6,7]
7263 ; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
7264 ; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
7265 ; SSE42-NEXT: movaps 16(%rdx), %xmm1
7266 ; SSE42-NEXT: movaps 48(%rdx), %xmm3
7267 ; SSE42-NEXT: paddb 32(%rdx), %xmm0
7268 ; SSE42-NEXT: paddb (%rdx), %xmm2
7269 ; SSE42-NEXT: movaps %xmm3, 48(%rcx)
7270 ; SSE42-NEXT: movaps %xmm1, 16(%rcx)
7271 ; SSE42-NEXT: movdqa %xmm2, (%rcx)
7272 ; SSE42-NEXT: movdqa %xmm0, 32(%rcx)
7275 ; AVX-LABEL: vec512_v16i32_to_v2i256_factor8:
7277 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
7278 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
7279 ; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
7280 ; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
7281 ; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3,4,5,6,7]
7282 ; AVX-NEXT: vpaddb 32(%rdx), %xmm1, %xmm1
7283 ; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3,4,5,6,7]
7284 ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0
7285 ; AVX-NEXT: vmovaps 16(%rdx), %xmm2
7286 ; AVX-NEXT: vmovaps 48(%rdx), %xmm3
7287 ; AVX-NEXT: vmovaps %xmm2, 16(%rcx)
7288 ; AVX-NEXT: vmovaps %xmm3, 48(%rcx)
7289 ; AVX-NEXT: vmovdqa %xmm0, (%rcx)
7290 ; AVX-NEXT: vmovdqa %xmm1, 32(%rcx)
7293 ; AVX2-SLOW-LABEL: vec512_v16i32_to_v2i256_factor8:
7294 ; AVX2-SLOW: # %bb.0:
7295 ; AVX2-SLOW-NEXT: vmovdqa (%rdi), %ymm0
7296 ; AVX2-SLOW-NEXT: vpaddb (%rsi), %ymm0, %ymm0
7297 ; AVX2-SLOW-NEXT: vpxor %xmm1, %xmm1, %xmm1
7298 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm2 = xmm0[0,1],xmm1[2,3,4,5,6,7]
7299 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
7300 ; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
7301 ; AVX2-SLOW-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
7302 ; AVX2-SLOW-NEXT: vpaddb (%rdx), %ymm2, %ymm1
7303 ; AVX2-SLOW-NEXT: vmovdqa %ymm1, (%rcx)
7304 ; AVX2-SLOW-NEXT: vmovdqa %ymm0, 32(%rcx)
7305 ; AVX2-SLOW-NEXT: vzeroupper
7306 ; AVX2-SLOW-NEXT: retq
7308 ; AVX2-FAST-PERLANE-LABEL: vec512_v16i32_to_v2i256_factor8:
7309 ; AVX2-FAST-PERLANE: # %bb.0:
7310 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %ymm0
7311 ; AVX2-FAST-PERLANE-NEXT: vpaddb (%rsi), %ymm0, %ymm0
7312 ; AVX2-FAST-PERLANE-NEXT: vpxor %xmm1, %xmm1, %xmm1
7313 ; AVX2-FAST-PERLANE-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3,4,5,6,7]
7314 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4,5,6,7],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
7315 ; AVX2-FAST-PERLANE-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
7316 ; AVX2-FAST-PERLANE-NEXT: vpaddb (%rdx), %ymm1, %ymm1
7317 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm1, (%rcx)
7318 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm0, 32(%rcx)
7319 ; AVX2-FAST-PERLANE-NEXT: vzeroupper
7320 ; AVX2-FAST-PERLANE-NEXT: retq
7322 ; AVX2-FAST-LABEL: vec512_v16i32_to_v2i256_factor8:
7323 ; AVX2-FAST: # %bb.0:
7324 ; AVX2-FAST-NEXT: vmovdqa (%rdi), %ymm0
7325 ; AVX2-FAST-NEXT: vpaddb (%rsi), %ymm0, %ymm0
7326 ; AVX2-FAST-NEXT: vpxor %xmm1, %xmm1, %xmm1
7327 ; AVX2-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3,4,5,6,7]
7328 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4,5,6,7],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
7329 ; AVX2-FAST-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
7330 ; AVX2-FAST-NEXT: vpaddb (%rdx), %ymm1, %ymm1
7331 ; AVX2-FAST-NEXT: vmovdqa %ymm1, (%rcx)
7332 ; AVX2-FAST-NEXT: vmovdqa %ymm0, 32(%rcx)
7333 ; AVX2-FAST-NEXT: vzeroupper
7334 ; AVX2-FAST-NEXT: retq
7336 ; AVX512F-LABEL: vec512_v16i32_to_v2i256_factor8:
7338 ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
7339 ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0
7340 ; AVX512F-NEXT: movw $257, %ax # imm = 0x101
7341 ; AVX512F-NEXT: kmovw %eax, %k1
7342 ; AVX512F-NEXT: vpexpandd %zmm0, %zmm0 {%k1} {z}
7343 ; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm1
7344 ; AVX512F-NEXT: vpaddb 32(%rdx), %ymm1, %ymm1
7345 ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
7346 ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
7347 ; AVX512F-NEXT: vmovdqa %ymm1, 32(%rcx)
7348 ; AVX512F-NEXT: vzeroupper
7349 ; AVX512F-NEXT: retq
7351 ; AVX512BW-SLOW-LABEL: vec512_v16i32_to_v2i256_factor8:
7352 ; AVX512BW-SLOW: # %bb.0:
7353 ; AVX512BW-SLOW-NEXT: vmovdqa (%rdi), %ymm0
7354 ; AVX512BW-SLOW-NEXT: vpaddb (%rsi), %ymm0, %ymm0
7355 ; AVX512BW-SLOW-NEXT: vpxor %xmm1, %xmm1, %xmm1
7356 ; AVX512BW-SLOW-NEXT: vpblendw {{.*#+}} xmm2 = xmm0[0,1],xmm1[2,3,4,5,6,7]
7357 ; AVX512BW-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
7358 ; AVX512BW-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
7359 ; AVX512BW-SLOW-NEXT: vinserti64x4 $1, %ymm0, %zmm2, %zmm0
7360 ; AVX512BW-SLOW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
7361 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm0, (%rcx)
7362 ; AVX512BW-SLOW-NEXT: vzeroupper
7363 ; AVX512BW-SLOW-NEXT: retq
7365 ; AVX512BW-FAST-LABEL: vec512_v16i32_to_v2i256_factor8:
7366 ; AVX512BW-FAST: # %bb.0:
7367 ; AVX512BW-FAST-NEXT: vmovdqa (%rdi), %ymm0
7368 ; AVX512BW-FAST-NEXT: vpaddb (%rsi), %ymm0, %ymm0
7369 ; AVX512BW-FAST-NEXT: vpxor %xmm1, %xmm1, %xmm1
7370 ; AVX512BW-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3,4,5,6,7]
7371 ; AVX512BW-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4,5,6,7],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
7372 ; AVX512BW-FAST-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
7373 ; AVX512BW-FAST-NEXT: vpaddb (%rdx), %zmm0, %zmm0
7374 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm0, (%rcx)
7375 ; AVX512BW-FAST-NEXT: vzeroupper
7376 ; AVX512BW-FAST-NEXT: retq
7377 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
7378 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
7379 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
7380 %in.vec.cast = bitcast <64 x i8> %in.vec to <16 x i32>
7381 %zextd.vec = shufflevector <16 x i32> %in.vec.cast, <16 x i32> zeroinitializer, <16 x i32> <i32 0, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 1, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
7382 %out.bytevec = bitcast <16 x i32> %zextd.vec to <64 x i8>
7383 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
7384 %out.vec = add <64 x i8> %out.bytevec, %out.vec.bias
7385 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
7386 ret void
7387 }
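; Keeps only i32 element 0 and zeroes the other fifteen dwords, i.e. the low dword is zero-extended across the full 512 bits.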
7389 define void @vec512_v16i32_to_v1i512_factor16(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
7390 ; SSE2-LABEL: vec512_v16i32_to_v1i512_factor16:
7392 ; SSE2-NEXT: movdqa (%rdi), %xmm0
7393 ; SSE2-NEXT: paddb (%rsi), %xmm0
7394 ; SSE2-NEXT: xorps %xmm1, %xmm1
7395 ; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
7396 ; SSE2-NEXT: movaps 16(%rdx), %xmm0
7397 ; SSE2-NEXT: movaps 32(%rdx), %xmm2
7398 ; SSE2-NEXT: movaps 48(%rdx), %xmm3
7399 ; SSE2-NEXT: paddb (%rdx), %xmm1
7400 ; SSE2-NEXT: movaps %xmm2, 32(%rcx)
7401 ; SSE2-NEXT: movaps %xmm3, 48(%rcx)
7402 ; SSE2-NEXT: movaps %xmm0, 16(%rcx)
7403 ; SSE2-NEXT: movdqa %xmm1, (%rcx)
7406 ; SSE42-LABEL: vec512_v16i32_to_v1i512_factor16:
7408 ; SSE42-NEXT: movdqa (%rdi), %xmm0
7409 ; SSE42-NEXT: paddb (%rsi), %xmm0
7410 ; SSE42-NEXT: pxor %xmm1, %xmm1
7411 ; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3,4,5,6,7]
7412 ; SSE42-NEXT: movaps 16(%rdx), %xmm0
7413 ; SSE42-NEXT: movaps 32(%rdx), %xmm2
7414 ; SSE42-NEXT: movaps 48(%rdx), %xmm3
7415 ; SSE42-NEXT: paddb (%rdx), %xmm1
7416 ; SSE42-NEXT: movaps %xmm2, 32(%rcx)
7417 ; SSE42-NEXT: movaps %xmm3, 48(%rcx)
7418 ; SSE42-NEXT: movaps %xmm0, 16(%rcx)
7419 ; SSE42-NEXT: movdqa %xmm1, (%rcx)
7422 ; AVX-LABEL: vec512_v16i32_to_v1i512_factor16:
7424 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
7425 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
7426 ; AVX-NEXT: vmovaps 32(%rdx), %ymm1
7427 ; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
7428 ; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3,4,5,6,7]
7429 ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0
7430 ; AVX-NEXT: vmovaps 16(%rdx), %xmm2
7431 ; AVX-NEXT: vmovaps %xmm2, 16(%rcx)
7432 ; AVX-NEXT: vmovaps %ymm1, 32(%rcx)
7433 ; AVX-NEXT: vmovdqa %xmm0, (%rcx)
7434 ; AVX-NEXT: vzeroupper
7437 ; AVX2-LABEL: vec512_v16i32_to_v1i512_factor16:
7439 ; AVX2-NEXT: vmovdqa (%rdi), %ymm0
7440 ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0
7441 ; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
7442 ; AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
7443 ; AVX2-NEXT: vmovaps 32(%rdx), %ymm1
7444 ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0
7445 ; AVX2-NEXT: vmovaps %ymm1, 32(%rcx)
7446 ; AVX2-NEXT: vmovdqa %ymm0, (%rcx)
7447 ; AVX2-NEXT: vzeroupper
7450 ; AVX512F-LABEL: vec512_v16i32_to_v1i512_factor16:
7452 ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
7453 ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0
7454 ; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
7455 ; AVX512F-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
7456 ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
7457 ; AVX512F-NEXT: vmovaps 32(%rdx), %ymm1
7458 ; AVX512F-NEXT: vmovaps %ymm1, 32(%rcx)
7459 ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
7460 ; AVX512F-NEXT: vzeroupper
7461 ; AVX512F-NEXT: retq
7463 ; AVX512BW-LABEL: vec512_v16i32_to_v1i512_factor16:
7464 ; AVX512BW: # %bb.0:
7465 ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
7466 ; AVX512BW-NEXT: vpaddb (%rsi), %zmm0, %zmm0
7467 ; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
7468 ; AVX512BW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
7469 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
7470 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
7471 ; AVX512BW-NEXT: vzeroupper
7472 ; AVX512BW-NEXT: retq
7473 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
7474 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
7475 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
7476 %in.vec.cast = bitcast <64 x i8> %in.vec to <16 x i32>
7477 %zextd.vec = shufflevector <16 x i32> %in.vec.cast, <16 x i32> zeroinitializer, <16 x i32> <i32 0, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
7478 %out.bytevec = bitcast <16 x i32> %zextd.vec to <64 x i8>
7479 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
7480 %out.vec = add <64 x i8> %out.bytevec, %out.vec.bias
7481 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
7482 ret void
7483 }
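; Zero-extends each of the low four i64 elements to i128 by interleaving the qwords with zero.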
7485 define void @vec512_v8i64_to_v4i128_factor2(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
7486 ; SSE-LABEL: vec512_v8i64_to_v4i128_factor2:
7488 ; SSE-NEXT: movdqa (%rdi), %xmm0
7489 ; SSE-NEXT: movdqa 16(%rdi), %xmm1
7490 ; SSE-NEXT: paddb (%rsi), %xmm0
7491 ; SSE-NEXT: paddb 16(%rsi), %xmm1
7492 ; SSE-NEXT: movq {{.*#+}} xmm2 = xmm1[0],zero
7493 ; SSE-NEXT: psrldq {{.*#+}} xmm1 = xmm1[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero
7494 ; SSE-NEXT: movq {{.*#+}} xmm3 = xmm0[0],zero
7495 ; SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero
7496 ; SSE-NEXT: paddb 16(%rdx), %xmm0
7497 ; SSE-NEXT: paddb (%rdx), %xmm3
7498 ; SSE-NEXT: paddb 48(%rdx), %xmm1
7499 ; SSE-NEXT: paddb 32(%rdx), %xmm2
7500 ; SSE-NEXT: movdqa %xmm2, 32(%rcx)
7501 ; SSE-NEXT: movdqa %xmm1, 48(%rcx)
7502 ; SSE-NEXT: movdqa %xmm3, (%rcx)
7503 ; SSE-NEXT: movdqa %xmm0, 16(%rcx)
7506 ; AVX-LABEL: vec512_v8i64_to_v4i128_factor2:
7508 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
7509 ; AVX-NEXT: vmovdqa 16(%rdi), %xmm1
7510 ; AVX-NEXT: vpaddb 16(%rsi), %xmm1, %xmm1
7511 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
7512 ; AVX-NEXT: vxorpd %xmm2, %xmm2, %xmm2
7513 ; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
7514 ; AVX-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[3],ymm2[3]
7515 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
7516 ; AVX-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[3],ymm2[3]
7517 ; AVX-NEXT: vextractf128 $1, %ymm1, %xmm2
7518 ; AVX-NEXT: vpaddb 48(%rdx), %xmm2, %xmm2
7519 ; AVX-NEXT: vpaddb 32(%rdx), %xmm1, %xmm1
7520 ; AVX-NEXT: vextractf128 $1, %ymm0, %xmm3
7521 ; AVX-NEXT: vpaddb 16(%rdx), %xmm3, %xmm3
7522 ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0
7523 ; AVX-NEXT: vmovdqa %xmm0, (%rcx)
7524 ; AVX-NEXT: vmovdqa %xmm3, 16(%rcx)
7525 ; AVX-NEXT: vmovdqa %xmm1, 32(%rcx)
7526 ; AVX-NEXT: vmovdqa %xmm2, 48(%rcx)
7527 ; AVX-NEXT: vzeroupper
7530 ; AVX2-LABEL: vec512_v8i64_to_v4i128_factor2:
7532 ; AVX2-NEXT: vmovdqa (%rdi), %ymm0
7533 ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0
7534 ; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
7535 ; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm0[0,1,1,3]
7536 ; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm1[2,3],ymm2[4,5],ymm1[6,7]
7537 ; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,3,3]
7538 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
7539 ; AVX2-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
7540 ; AVX2-NEXT: vpaddb (%rdx), %ymm2, %ymm1
7541 ; AVX2-NEXT: vmovdqa %ymm1, (%rcx)
7542 ; AVX2-NEXT: vmovdqa %ymm0, 32(%rcx)
7543 ; AVX2-NEXT: vzeroupper
7546 ; AVX512F-LABEL: vec512_v8i64_to_v4i128_factor2:
7548 ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
7549 ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0
7550 ; AVX512F-NEXT: movb $85, %al
7551 ; AVX512F-NEXT: kmovw %eax, %k1
7552 ; AVX512F-NEXT: vpexpandq %zmm0, %zmm0 {%k1} {z}
7553 ; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm1
7554 ; AVX512F-NEXT: vpaddb 32(%rdx), %ymm1, %ymm1
7555 ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
7556 ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
7557 ; AVX512F-NEXT: vmovdqa %ymm1, 32(%rcx)
7558 ; AVX512F-NEXT: vzeroupper
7559 ; AVX512F-NEXT: retq
7561 ; AVX512BW-SLOW-LABEL: vec512_v8i64_to_v4i128_factor2:
7562 ; AVX512BW-SLOW: # %bb.0:
7563 ; AVX512BW-SLOW-NEXT: vmovdqa (%rdi), %ymm0
7564 ; AVX512BW-SLOW-NEXT: vpaddb (%rsi), %ymm0, %ymm0
7565 ; AVX512BW-SLOW-NEXT: movb $5, %al
7566 ; AVX512BW-SLOW-NEXT: kmovd %eax, %k1
7567 ; AVX512BW-SLOW-NEXT: vpexpandq %ymm0, %ymm1 {%k1} {z}
7568 ; AVX512BW-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,3,3]
7569 ; AVX512BW-SLOW-NEXT: vpxor %xmm2, %xmm2, %xmm2
7570 ; AVX512BW-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3],ymm0[4,5],ymm2[6,7]
7571 ; AVX512BW-SLOW-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
7572 ; AVX512BW-SLOW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
7573 ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm0, (%rcx)
7574 ; AVX512BW-SLOW-NEXT: vzeroupper
7575 ; AVX512BW-SLOW-NEXT: retq
7577 ; AVX512BW-FAST-LABEL: vec512_v8i64_to_v4i128_factor2:
7578 ; AVX512BW-FAST: # %bb.0:
7579 ; AVX512BW-FAST-NEXT: vmovdqa (%rdi), %ymm0
7580 ; AVX512BW-FAST-NEXT: vpaddb (%rsi), %ymm0, %ymm0
7581 ; AVX512BW-FAST-NEXT: movb $5, %al
7582 ; AVX512BW-FAST-NEXT: kmovd %eax, %k1
7583 ; AVX512BW-FAST-NEXT: vpexpandq %ymm0, %ymm1 {%k1} {z}
7584 ; AVX512BW-FAST-NEXT: vpxor %xmm2, %xmm2, %xmm2
7585 ; AVX512BW-FAST-NEXT: vmovdqa {{.*#+}} ymm3 = [2,5,3,7]
7586 ; AVX512BW-FAST-NEXT: vpermi2q %ymm2, %ymm0, %ymm3
7587 ; AVX512BW-FAST-NEXT: vinserti64x4 $1, %ymm3, %zmm1, %zmm0
7588 ; AVX512BW-FAST-NEXT: vpaddb (%rdx), %zmm0, %zmm0
7589 ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm0, (%rcx)
7590 ; AVX512BW-FAST-NEXT: vzeroupper
7591 ; AVX512BW-FAST-NEXT: retq
7592 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
7593 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
7594 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
7595 %in.vec.cast = bitcast <64 x i8> %in.vec to <8 x i64>
7596 %zextd.vec = shufflevector <8 x i64> %in.vec.cast, <8 x i64> zeroinitializer, <8 x i32> <i32 0, i32 9, i32 1, i32 11, i32 2, i32 13, i32 3, i32 15>
7597 %out.bytevec = bitcast <8 x i64> %zextd.vec to <64 x i8>
7598 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
7599 %out.vec = add <64 x i8> %out.bytevec, %out.vec.bias
7600 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
7601 ret void
7602 }
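; Keeps i64 elements 0 and 1 as the low qwords of the two 256-bit halves; the remaining qwords are zeroed.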
7604 define void @vec512_v8i64_to_v2i256_factor4(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
7605 ; SSE-LABEL: vec512_v8i64_to_v2i256_factor4:
7607 ; SSE-NEXT: movdqa (%rdi), %xmm0
7608 ; SSE-NEXT: paddb (%rsi), %xmm0
7609 ; SSE-NEXT: movq {{.*#+}} xmm1 = xmm0[0],zero
7610 ; SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero
7611 ; SSE-NEXT: movaps 16(%rdx), %xmm2
7612 ; SSE-NEXT: movaps 48(%rdx), %xmm3
7613 ; SSE-NEXT: paddb (%rdx), %xmm1
7614 ; SSE-NEXT: paddb 32(%rdx), %xmm0
7615 ; SSE-NEXT: movaps %xmm3, 48(%rcx)
7616 ; SSE-NEXT: movaps %xmm2, 16(%rcx)
7617 ; SSE-NEXT: movdqa %xmm0, 32(%rcx)
7618 ; SSE-NEXT: movdqa %xmm1, (%rcx)
7621 ; AVX-LABEL: vec512_v8i64_to_v2i256_factor4:
7623 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
7624 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
7625 ; AVX-NEXT: vpsrldq {{.*#+}} xmm1 = xmm0[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero
7626 ; AVX-NEXT: vpaddb 32(%rdx), %xmm1, %xmm1
7627 ; AVX-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
7628 ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0
7629 ; AVX-NEXT: vmovaps 16(%rdx), %xmm2
7630 ; AVX-NEXT: vmovaps 48(%rdx), %xmm3
7631 ; AVX-NEXT: vmovaps %xmm2, 16(%rcx)
7632 ; AVX-NEXT: vmovaps %xmm3, 48(%rcx)
7633 ; AVX-NEXT: vmovdqa %xmm0, (%rcx)
7634 ; AVX-NEXT: vmovdqa %xmm1, 32(%rcx)
7637 ; AVX2-LABEL: vec512_v8i64_to_v2i256_factor4:
7639 ; AVX2-NEXT: vmovdqa (%rdi), %ymm0
7640 ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0
7641 ; AVX2-NEXT: vmovq {{.*#+}} xmm1 = xmm0[0],zero
7642 ; AVX2-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero
7643 ; AVX2-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
7644 ; AVX2-NEXT: vpaddb (%rdx), %ymm1, %ymm1
7645 ; AVX2-NEXT: vmovdqa %ymm1, (%rcx)
7646 ; AVX2-NEXT: vmovdqa %ymm0, 32(%rcx)
7647 ; AVX2-NEXT: vzeroupper
7650 ; AVX512F-LABEL: vec512_v8i64_to_v2i256_factor4:
7652 ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
7653 ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0
7654 ; AVX512F-NEXT: movb $17, %al
7655 ; AVX512F-NEXT: kmovw %eax, %k1
7656 ; AVX512F-NEXT: vpexpandq %zmm0, %zmm0 {%k1} {z}
7657 ; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm1
7658 ; AVX512F-NEXT: vpaddb 32(%rdx), %ymm1, %ymm1
7659 ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
7660 ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
7661 ; AVX512F-NEXT: vmovdqa %ymm1, 32(%rcx)
7662 ; AVX512F-NEXT: vzeroupper
7663 ; AVX512F-NEXT: retq
7665 ; AVX512BW-LABEL: vec512_v8i64_to_v2i256_factor4:
7666 ; AVX512BW: # %bb.0:
7667 ; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
7668 ; AVX512BW-NEXT: vpaddb (%rsi), %ymm0, %ymm0
7669 ; AVX512BW-NEXT: vmovq {{.*#+}} xmm1 = xmm0[0],zero
7670 ; AVX512BW-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero
7671 ; AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
7672 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
7673 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
7674 ; AVX512BW-NEXT: vzeroupper
7675 ; AVX512BW-NEXT: retq
7676 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
7677 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
7678 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
7679 %in.vec.cast = bitcast <64 x i8> %in.vec to <8 x i64>
7680 %zextd.vec = shufflevector <8 x i64> %in.vec.cast, <8 x i64> zeroinitializer, <8 x i32> <i32 0, i32 9, i32 10, i32 11, i32 1, i32 13, i32 14, i32 15>
7681 %out.bytevec = bitcast <8 x i64> %zextd.vec to <64 x i8>
7682 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
7683 %out.vec = add <64 x i8> %out.bytevec, %out.vec.bias
7684 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
7685 ret void
7686 }
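; Keeps only i64 element 0; the upper seven qwords are zeroed.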
7688 define void @vec512_v8i64_to_v1i512_factor8(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
7689 ; SSE-LABEL: vec512_v8i64_to_v1i512_factor8:
7691 ; SSE-NEXT: movdqa (%rdi), %xmm0
7692 ; SSE-NEXT: paddb (%rsi), %xmm0
7693 ; SSE-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
7694 ; SSE-NEXT: movaps 16(%rdx), %xmm1
7695 ; SSE-NEXT: movaps 32(%rdx), %xmm2
7696 ; SSE-NEXT: movaps 48(%rdx), %xmm3
7697 ; SSE-NEXT: paddb (%rdx), %xmm0
7698 ; SSE-NEXT: movaps %xmm2, 32(%rcx)
7699 ; SSE-NEXT: movaps %xmm3, 48(%rcx)
7700 ; SSE-NEXT: movaps %xmm1, 16(%rcx)
7701 ; SSE-NEXT: movdqa %xmm0, (%rcx)
7704 ; AVX-LABEL: vec512_v8i64_to_v1i512_factor8:
7706 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
7707 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
7708 ; AVX-NEXT: vmovaps 32(%rdx), %ymm1
7709 ; AVX-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
7710 ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0
7711 ; AVX-NEXT: vmovaps 16(%rdx), %xmm2
7712 ; AVX-NEXT: vmovaps %xmm2, 16(%rcx)
7713 ; AVX-NEXT: vmovaps %ymm1, 32(%rcx)
7714 ; AVX-NEXT: vmovdqa %xmm0, (%rcx)
7715 ; AVX-NEXT: vzeroupper
7718 ; AVX2-LABEL: vec512_v8i64_to_v1i512_factor8:
7720 ; AVX2-NEXT: vmovdqa (%rdi), %ymm0
7721 ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0
7722 ; AVX2-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
7723 ; AVX2-NEXT: vmovaps 32(%rdx), %ymm1
7724 ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0
7725 ; AVX2-NEXT: vmovaps %ymm1, 32(%rcx)
7726 ; AVX2-NEXT: vmovdqa %ymm0, (%rcx)
7727 ; AVX2-NEXT: vzeroupper
7730 ; AVX512F-LABEL: vec512_v8i64_to_v1i512_factor8:
7732 ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
7733 ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0
7734 ; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
7735 ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
7736 ; AVX512F-NEXT: vmovaps 32(%rdx), %ymm1
7737 ; AVX512F-NEXT: vmovaps %ymm1, 32(%rcx)
7738 ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
7739 ; AVX512F-NEXT: vzeroupper
7740 ; AVX512F-NEXT: retq
7742 ; AVX512BW-LABEL: vec512_v8i64_to_v1i512_factor8:
7743 ; AVX512BW: # %bb.0:
7744 ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
7745 ; AVX512BW-NEXT: vpaddb (%rsi), %zmm0, %zmm0
7746 ; AVX512BW-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
7747 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
7748 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
7749 ; AVX512BW-NEXT: vzeroupper
7750 ; AVX512BW-NEXT: retq
7751 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
7752 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
7753 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
7754 %in.vec.cast = bitcast <64 x i8> %in.vec to <8 x i64>
7755 %zextd.vec = shufflevector <8 x i64> %in.vec.cast, <8 x i64> zeroinitializer, <8 x i32> <i32 0, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
7756 %out.bytevec = bitcast <8 x i64> %zextd.vec to <64 x i8>
7757 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
7758 %out.vec = add <64 x i8> %out.bytevec, %out.vec.bias
7759 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
7760 ret void
7761 }
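; Zero-extends each of the low two i128 elements to i256 (the odd 128-bit chunks become zero).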
7763 define void @vec512_v4i128_to_v2i256_factor2(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
7764 ; SSE-LABEL: vec512_v4i128_to_v2i256_factor2:
7766 ; SSE-NEXT: movdqa (%rdi), %xmm0
7767 ; SSE-NEXT: movdqa 16(%rdi), %xmm1
7768 ; SSE-NEXT: paddb 16(%rsi), %xmm1
7769 ; SSE-NEXT: paddb (%rsi), %xmm0
7770 ; SSE-NEXT: movaps 16(%rdx), %xmm2
7771 ; SSE-NEXT: movaps 48(%rdx), %xmm3
7772 ; SSE-NEXT: paddb (%rdx), %xmm0
7773 ; SSE-NEXT: paddb 32(%rdx), %xmm1
7774 ; SSE-NEXT: movaps %xmm3, 48(%rcx)
7775 ; SSE-NEXT: movaps %xmm2, 16(%rcx)
7776 ; SSE-NEXT: movdqa %xmm1, 32(%rcx)
7777 ; SSE-NEXT: movdqa %xmm0, (%rcx)
7780 ; AVX-LABEL: vec512_v4i128_to_v2i256_factor2:
7782 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
7783 ; AVX-NEXT: vmovdqa 16(%rdi), %xmm1
7784 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
7785 ; AVX-NEXT: vpaddb 16(%rsi), %xmm1, %xmm1
7786 ; AVX-NEXT: vpaddb 32(%rdx), %xmm1, %xmm1
7787 ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0
7788 ; AVX-NEXT: vmovaps 16(%rdx), %xmm2
7789 ; AVX-NEXT: vmovaps 48(%rdx), %xmm3
7790 ; AVX-NEXT: vmovaps %xmm3, 48(%rcx)
7791 ; AVX-NEXT: vmovaps %xmm2, 16(%rcx)
7792 ; AVX-NEXT: vmovdqa %xmm0, (%rcx)
7793 ; AVX-NEXT: vmovdqa %xmm1, 32(%rcx)
7796 ; AVX2-LABEL: vec512_v4i128_to_v2i256_factor2:
7798 ; AVX2-NEXT: vmovdqa (%rdi), %ymm0
7799 ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0
7800 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
7801 ; AVX2-NEXT: vmovdqa %xmm0, %xmm0
7802 ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0
7803 ; AVX2-NEXT: vpaddb 32(%rdx), %ymm1, %ymm1
7804 ; AVX2-NEXT: vmovdqa %ymm1, 32(%rcx)
7805 ; AVX2-NEXT: vmovdqa %ymm0, (%rcx)
7806 ; AVX2-NEXT: vzeroupper
7809 ; AVX512F-LABEL: vec512_v4i128_to_v2i256_factor2:
7811 ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
7812 ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0
7813 ; AVX512F-NEXT: movb $51, %al
7814 ; AVX512F-NEXT: kmovw %eax, %k1
7815 ; AVX512F-NEXT: vpexpandq %zmm0, %zmm0 {%k1} {z}
7816 ; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm1
7817 ; AVX512F-NEXT: vpaddb 32(%rdx), %ymm1, %ymm1
7818 ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
7819 ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
7820 ; AVX512F-NEXT: vmovdqa %ymm1, 32(%rcx)
7821 ; AVX512F-NEXT: vzeroupper
7822 ; AVX512F-NEXT: retq
7824 ; AVX512BW-LABEL: vec512_v4i128_to_v2i256_factor2:
7825 ; AVX512BW: # %bb.0:
7826 ; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
7827 ; AVX512BW-NEXT: vpaddb (%rsi), %ymm0, %ymm0
7828 ; AVX512BW-NEXT: vmovdqa %xmm0, %xmm1
7829 ; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm0
7830 ; AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
7831 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
7832 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
7833 ; AVX512BW-NEXT: vzeroupper
7834 ; AVX512BW-NEXT: retq
7835 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
7836 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
7837 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
7838 %in.vec.cast = bitcast <64 x i8> %in.vec to <4 x i128>
7839 %zextd.vec = shufflevector <4 x i128> %in.vec.cast, <4 x i128> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 1, i32 7>
7840 %out.bytevec = bitcast <4 x i128> %zextd.vec to <64 x i8>
7841 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
7842 %out.vec = add <64 x i8> %out.bytevec, %out.vec.bias
7843 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
7844 ret void
7845 }
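; Keeps only the low i128 element; the upper 384 bits are zeroed.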
7847 define void @vec512_v4i128_to_v1i512_factor4(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
7848 ; SSE-LABEL: vec512_v4i128_to_v1i512_factor4:
7850 ; SSE-NEXT: movdqa (%rdi), %xmm0
7851 ; SSE-NEXT: paddb (%rsi), %xmm0
7852 ; SSE-NEXT: movaps 16(%rdx), %xmm1
7853 ; SSE-NEXT: movaps 32(%rdx), %xmm2
7854 ; SSE-NEXT: movaps 48(%rdx), %xmm3
7855 ; SSE-NEXT: paddb (%rdx), %xmm0
7856 ; SSE-NEXT: movaps %xmm2, 32(%rcx)
7857 ; SSE-NEXT: movaps %xmm3, 48(%rcx)
7858 ; SSE-NEXT: movaps %xmm1, 16(%rcx)
7859 ; SSE-NEXT: movdqa %xmm0, (%rcx)
7862 ; AVX-LABEL: vec512_v4i128_to_v1i512_factor4:
7864 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
7865 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
7866 ; AVX-NEXT: vmovaps 32(%rdx), %ymm1
7867 ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0
7868 ; AVX-NEXT: vmovaps 16(%rdx), %xmm2
7869 ; AVX-NEXT: vmovaps %xmm2, 16(%rcx)
7870 ; AVX-NEXT: vmovaps %ymm1, 32(%rcx)
7871 ; AVX-NEXT: vmovdqa %xmm0, (%rcx)
7872 ; AVX-NEXT: vzeroupper
7875 ; AVX2-LABEL: vec512_v4i128_to_v1i512_factor4:
7877 ; AVX2-NEXT: vmovdqa (%rdi), %xmm0
7878 ; AVX2-NEXT: vpaddb (%rsi), %xmm0, %xmm0
7879 ; AVX2-NEXT: vmovaps 32(%rdx), %ymm1
7880 ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0
7881 ; AVX2-NEXT: vmovaps %ymm1, 32(%rcx)
7882 ; AVX2-NEXT: vmovdqa %ymm0, (%rcx)
7883 ; AVX2-NEXT: vzeroupper
7886 ; AVX512F-LABEL: vec512_v4i128_to_v1i512_factor4:
7888 ; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
7889 ; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0
7890 ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
7891 ; AVX512F-NEXT: vmovaps 32(%rdx), %ymm1
7892 ; AVX512F-NEXT: vmovaps %ymm1, 32(%rcx)
7893 ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
7894 ; AVX512F-NEXT: vzeroupper
7895 ; AVX512F-NEXT: retq
7897 ; AVX512BW-LABEL: vec512_v4i128_to_v1i512_factor4:
7898 ; AVX512BW: # %bb.0:
7899 ; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0
7900 ; AVX512BW-NEXT: vpaddb (%rsi), %xmm0, %xmm0
7901 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
7902 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
7903 ; AVX512BW-NEXT: vzeroupper
7904 ; AVX512BW-NEXT: retq
7905 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
7906 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
7907 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
7908 %in.vec.cast = bitcast <64 x i8> %in.vec to <4 x i128>
7909 %zextd.vec = shufflevector <4 x i128> %in.vec.cast, <4 x i128> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
7910 %out.bytevec = bitcast <4 x i128> %zextd.vec to <64 x i8>
7911 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
7912 %out.vec = add <64 x i8> %out.bytevec, %out.vec.bias
7913 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
7914 ret void
7915 }
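; Keeps only the low i256 element; the upper 256 bits are zeroed.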
7917 define void @vec512_v2i256_to_v1i512_factor2(ptr %in.vec.base.ptr, ptr %in.vec.bias.ptr, ptr %out.vec.bias.ptr, ptr %out.vec.ptr) nounwind {
7918 ; SSE-LABEL: vec512_v2i256_to_v1i512_factor2:
7920 ; SSE-NEXT: movdqa (%rdi), %xmm0
7921 ; SSE-NEXT: movdqa 16(%rdi), %xmm1
7922 ; SSE-NEXT: paddb (%rsi), %xmm0
7923 ; SSE-NEXT: paddb 16(%rsi), %xmm1
7924 ; SSE-NEXT: movaps 32(%rdx), %xmm2
7925 ; SSE-NEXT: movaps 48(%rdx), %xmm3
7926 ; SSE-NEXT: paddb 16(%rdx), %xmm1
7927 ; SSE-NEXT: paddb (%rdx), %xmm0
7928 ; SSE-NEXT: movaps %xmm2, 32(%rcx)
7929 ; SSE-NEXT: movaps %xmm3, 48(%rcx)
7930 ; SSE-NEXT: movdqa %xmm0, (%rcx)
7931 ; SSE-NEXT: movdqa %xmm1, 16(%rcx)
7934 ; AVX-LABEL: vec512_v2i256_to_v1i512_factor2:
7936 ; AVX-NEXT: vmovdqa (%rdi), %xmm0
7937 ; AVX-NEXT: vmovdqa 16(%rdi), %xmm1
7938 ; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
7939 ; AVX-NEXT: vpaddb 16(%rsi), %xmm1, %xmm1
7940 ; AVX-NEXT: vmovaps 32(%rdx), %ymm2
7941 ; AVX-NEXT: vpaddb 16(%rdx), %xmm1, %xmm1
7942 ; AVX-NEXT: vpaddb (%rdx), %xmm0, %xmm0
7943 ; AVX-NEXT: vmovaps %ymm2, 32(%rcx)
7944 ; AVX-NEXT: vmovdqa %xmm0, (%rcx)
7945 ; AVX-NEXT: vmovdqa %xmm1, 16(%rcx)
7946 ; AVX-NEXT: vzeroupper
7949 ; AVX2-LABEL: vec512_v2i256_to_v1i512_factor2:
7951 ; AVX2-NEXT: vmovdqa (%rdi), %ymm0
7952 ; AVX2-NEXT: vpaddb (%rsi), %ymm0, %ymm0
7953 ; AVX2-NEXT: vmovaps 32(%rdx), %ymm1
7954 ; AVX2-NEXT: vpaddb (%rdx), %ymm0, %ymm0
7955 ; AVX2-NEXT: vmovaps %ymm1, 32(%rcx)
7956 ; AVX2-NEXT: vmovdqa %ymm0, (%rcx)
7957 ; AVX2-NEXT: vzeroupper
7960 ; AVX512F-LABEL: vec512_v2i256_to_v1i512_factor2:
7962 ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
7963 ; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0
7964 ; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
7965 ; AVX512F-NEXT: vmovaps 32(%rdx), %ymm1
7966 ; AVX512F-NEXT: vmovaps %ymm1, 32(%rcx)
7967 ; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
7968 ; AVX512F-NEXT: vzeroupper
7969 ; AVX512F-NEXT: retq
7971 ; AVX512BW-LABEL: vec512_v2i256_to_v1i512_factor2:
7972 ; AVX512BW: # %bb.0:
7973 ; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0
7974 ; AVX512BW-NEXT: vpaddb (%rsi), %ymm0, %ymm0
7975 ; AVX512BW-NEXT: vpaddb (%rdx), %zmm0, %zmm0
7976 ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rcx)
7977 ; AVX512BW-NEXT: vzeroupper
7978 ; AVX512BW-NEXT: retq
7979 %in.vec.base = load <64 x i8>, ptr %in.vec.base.ptr, align 64
7980 %in.vec.bias = load <64 x i8>, ptr %in.vec.bias.ptr, align 64
7981 %in.vec = add <64 x i8> %in.vec.base, %in.vec.bias
7982 %in.vec.cast = bitcast <64 x i8> %in.vec to <2 x i256>
7983 %zextd.vec = shufflevector <2 x i256> %in.vec.cast, <2 x i256> zeroinitializer, <2 x i32> <i32 0, i32 3>
7984 %out.bytevec = bitcast <2 x i256> %zextd.vec to <64 x i8>
7985 %out.vec.bias = load <64 x i8>, ptr %out.vec.bias.ptr, align 64
7986 %out.vec = add <64 x i8> %out.bytevec, %out.vec.bias
7987 store <64 x i8> %out.vec, ptr %out.vec.ptr, align 64
7988 ret void
7989 }
7990 ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: