; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop | FileCheck %s --check-prefixes=XOP,XOP-FALLBACK
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefixes=XOP,XOPAVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefixes=XOP,XOPAVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX512F
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefixes=AVX512VL,AVX512VL-FALLBACK
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefix=AVX512BW-FALLBACK
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX512VL,AVX512VLBW

; These test cases are inspired by C++2a std::midpoint().
; See https://bugs.llvm.org/show_bug.cgi?id=40965
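;
; Every test below exercises the same overflow-safe midpoint pattern. As a
; minimal scalar sketch (a hypothetical helper, not one of the checked
; functions), the vector IR in each test body is the <N x iM> expansion of:
;
;   define i32 @scalar_midpoint(i32 %a1, i32 %a2) {
;     %t3  = icmp sgt i32 %a1, %a2             ; which input is larger?
;     %t4  = select i1 %t3, i32 -1, i32 1      ; sign of the step
;     %t5  = select i1 %t3, i32 %a2, i32 %a1   ; min(a1, a2)
;     %t6  = select i1 %t3, i32 %a1, i32 %a2   ; max(a1, a2)
;     %t7  = sub i32 %t6, %t5                  ; non-negative difference
;     %t8  = lshr i32 %t7, 1                   ; halve without overflow
;     %t9  = mul nsw i32 %t8, %t4              ; re-apply the sign
;     %a10 = add nsw i32 %t9, %a1              ; a1 +/- (|a1 - a2| / 2)
;     ret i32 %a10
;   }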
; Using 128-bit vector regs.

; ---------------------------------------------------------------------------- ;
; 32-bit width. 128 / 32 = 4 elts.
; ---------------------------------------------------------------------------- ;

; Values come from regs
define <4 x i32> @vec128_i32_signed_reg_reg(<4 x i32> %a1, <4 x i32> %a2) nounwind {
; SSE2-LABEL: vec128_i32_signed_reg_reg:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [1,1,1,1]
; SSE2-NEXT: por %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: psubd %xmm1, %xmm4
; SSE2-NEXT: psubd %xmm0, %xmm1
; SSE2-NEXT: pand %xmm2, %xmm4
; SSE2-NEXT: pandn %xmm1, %xmm2
; SSE2-NEXT: por %xmm4, %xmm2
; SSE2-NEXT: psrld $1, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm3, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE2-NEXT: paddd %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: vec128_i32_signed_reg_reg:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: pcmpgtd %xmm1, %xmm2
; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE41-NEXT: movdqa %xmm0, %xmm3
; SSE41-NEXT: pminsd %xmm1, %xmm3
; SSE41-NEXT: pmaxsd %xmm0, %xmm1
; SSE41-NEXT: psubd %xmm3, %xmm1
; SSE41-NEXT: psrld $1, %xmm1
; SSE41-NEXT: pmulld %xmm1, %xmm2
; SSE41-NEXT: paddd %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: vec128_i32_signed_reg_reg:
; AVX1: # %bb.0:
; AVX1-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX1-NEXT: vpminsd %xmm1, %xmm0, %xmm3
; AVX1-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vpsubd %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX1-NEXT: vpmulld %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: vec128_i32_signed_reg_reg:
; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm2
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; AVX2-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpminsd %xmm1, %xmm0, %xmm3
; AVX2-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; AVX2-NEXT: vpsubd %xmm3, %xmm1, %xmm1
; AVX2-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX2-NEXT: vpmulld %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX2-NEXT: retq
;
; XOP-FALLBACK-LABEL: vec128_i32_signed_reg_reg:
; XOP-FALLBACK: # %bb.0:
; XOP-FALLBACK-NEXT: vpcomgtd %xmm1, %xmm0, %xmm2
; XOP-FALLBACK-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; XOP-FALLBACK-NEXT: vpminsd %xmm1, %xmm0, %xmm3
; XOP-FALLBACK-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; XOP-FALLBACK-NEXT: vpsubd %xmm3, %xmm1, %xmm1
; XOP-FALLBACK-NEXT: vpsrld $1, %xmm1, %xmm1
; XOP-FALLBACK-NEXT: vpmacsdd %xmm0, %xmm2, %xmm1, %xmm0
; XOP-FALLBACK-NEXT: retq
;
; XOPAVX1-LABEL: vec128_i32_signed_reg_reg:
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpcomgtd %xmm1, %xmm0, %xmm2
; XOPAVX1-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; XOPAVX1-NEXT: vpminsd %xmm1, %xmm0, %xmm3
; XOPAVX1-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; XOPAVX1-NEXT: vpsubd %xmm3, %xmm1, %xmm1
; XOPAVX1-NEXT: vpsrld $1, %xmm1, %xmm1
; XOPAVX1-NEXT: vpmacsdd %xmm0, %xmm2, %xmm1, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: vec128_i32_signed_reg_reg:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpcomgtd %xmm1, %xmm0, %xmm2
; XOPAVX2-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; XOPAVX2-NEXT: vpor %xmm3, %xmm2, %xmm2
; XOPAVX2-NEXT: vpminsd %xmm1, %xmm0, %xmm3
; XOPAVX2-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; XOPAVX2-NEXT: vpsubd %xmm3, %xmm1, %xmm1
; XOPAVX2-NEXT: vpsrld $1, %xmm1, %xmm1
; XOPAVX2-NEXT: vpmacsdd %xmm0, %xmm2, %xmm1, %xmm0
; XOPAVX2-NEXT: retq
;
; AVX512F-LABEL: vec128_i32_signed_reg_reg:
; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vpcmpgtd %zmm1, %zmm0, %k1
; AVX512F-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; AVX512F-NEXT: vmovdqa32 %zmm2, %zmm3 {%k1}
; AVX512F-NEXT: vpminsd %xmm1, %xmm0, %xmm2
; AVX512F-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; AVX512F-NEXT: vpsubd %xmm2, %xmm1, %xmm1
; AVX512F-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX512F-NEXT: vpmulld %xmm3, %xmm1, %xmm1
; AVX512F-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: vec128_i32_signed_reg_reg:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpcmpgtd %xmm1, %xmm0, %k1
; AVX512VL-NEXT: vpminsd %xmm1, %xmm0, %xmm2
; AVX512VL-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; AVX512VL-NEXT: vpsubd %xmm2, %xmm1, %xmm1
; AVX512VL-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512VL-NEXT: vpsubd %xmm1, %xmm2, %xmm1 {%k1}
; AVX512VL-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i32_signed_reg_reg:
; AVX512BW-FALLBACK: # %bb.0:
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-FALLBACK-NEXT: vpcmpgtd %zmm1, %zmm0, %k1
; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; AVX512BW-FALLBACK-NEXT: vmovdqa32 %zmm2, %zmm3 {%k1}
; AVX512BW-FALLBACK-NEXT: vpminsd %xmm1, %xmm0, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsubd %xmm2, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpmulld %xmm3, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX512BW-FALLBACK-NEXT: vzeroupper
; AVX512BW-FALLBACK-NEXT: retq
  %t3 = icmp sgt <4 x i32> %a1, %a2 ; signed
  %t4 = select <4 x i1> %t3, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
  %t5 = select <4 x i1> %t3, <4 x i32> %a2, <4 x i32> %a1
  %t6 = select <4 x i1> %t3, <4 x i32> %a1, <4 x i32> %a2
  %t7 = sub <4 x i32> %t6, %t5
  %t8 = lshr <4 x i32> %t7, <i32 1, i32 1, i32 1, i32 1>
  %t9 = mul nsw <4 x i32> %t8, %t4 ; signed
  %a10 = add nsw <4 x i32> %t9, %a1 ; signed
  ret <4 x i32> %a10
}
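;
; Note on the plain SSE2 lowerings in these i32 tests: SSE2 has neither
; pminsd/pmaxsd nor pmulld (both are SSE4.1), so the max-min difference is
; blended from the two subtraction orders with pcmpgtd + pand/pandn/por,
; and the 32-bit multiply is assembled from pmuludq plus pshufd shuffles.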
define <4 x i32> @vec128_i32_unsigned_reg_reg(<4 x i32> %a1, <4 x i32> %a2) nounwind {
; SSE2-LABEL: vec128_i32_unsigned_reg_reg:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
; SSE2-NEXT: pxor %xmm0, %xmm2
; SSE2-NEXT: pcmpgtd %xmm3, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [1,1,1,1]
; SSE2-NEXT: por %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: psubd %xmm1, %xmm4
; SSE2-NEXT: psubd %xmm0, %xmm1
; SSE2-NEXT: pand %xmm2, %xmm4
; SSE2-NEXT: pandn %xmm1, %xmm2
; SSE2-NEXT: por %xmm4, %xmm2
; SSE2-NEXT: psrld $1, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm3, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE2-NEXT: paddd %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: vec128_i32_unsigned_reg_reg:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: pminud %xmm1, %xmm2
; SSE41-NEXT: movdqa %xmm0, %xmm3
; SSE41-NEXT: pcmpeqd %xmm2, %xmm3
; SSE41-NEXT: pcmpeqd %xmm4, %xmm4
; SSE41-NEXT: pxor %xmm3, %xmm4
; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4
; SSE41-NEXT: pmaxud %xmm0, %xmm1
; SSE41-NEXT: psubd %xmm2, %xmm1
; SSE41-NEXT: psrld $1, %xmm1
; SSE41-NEXT: pmulld %xmm1, %xmm4
; SSE41-NEXT: paddd %xmm4, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: vec128_i32_unsigned_reg_reg:
; AVX1: # %bb.0:
; AVX1-NEXT: vpminud %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm3
; AVX1-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
; AVX1-NEXT: vpxor %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3
; AVX1-NEXT: vpmaxud %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vpsubd %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX1-NEXT: vpmulld %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: vec128_i32_unsigned_reg_reg:
; AVX2: # %bb.0:
; AVX2-NEXT: vpminud %xmm1, %xmm0, %xmm2
; AVX2-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm3
; AVX2-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
; AVX2-NEXT: vpxor %xmm4, %xmm3, %xmm3
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm4 = [1,1,1,1]
; AVX2-NEXT: vpor %xmm4, %xmm3, %xmm3
; AVX2-NEXT: vpmaxud %xmm1, %xmm0, %xmm1
; AVX2-NEXT: vpsubd %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX2-NEXT: vpmulld %xmm3, %xmm1, %xmm1
; AVX2-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX2-NEXT: retq
;
; XOP-FALLBACK-LABEL: vec128_i32_unsigned_reg_reg:
; XOP-FALLBACK: # %bb.0:
; XOP-FALLBACK-NEXT: vpcomgtud %xmm1, %xmm0, %xmm2
; XOP-FALLBACK-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; XOP-FALLBACK-NEXT: vpminud %xmm1, %xmm0, %xmm3
; XOP-FALLBACK-NEXT: vpmaxud %xmm1, %xmm0, %xmm1
; XOP-FALLBACK-NEXT: vpsubd %xmm3, %xmm1, %xmm1
; XOP-FALLBACK-NEXT: vpsrld $1, %xmm1, %xmm1
; XOP-FALLBACK-NEXT: vpmacsdd %xmm0, %xmm2, %xmm1, %xmm0
; XOP-FALLBACK-NEXT: retq
;
; XOPAVX1-LABEL: vec128_i32_unsigned_reg_reg:
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpcomgtud %xmm1, %xmm0, %xmm2
; XOPAVX1-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; XOPAVX1-NEXT: vpminud %xmm1, %xmm0, %xmm3
; XOPAVX1-NEXT: vpmaxud %xmm1, %xmm0, %xmm1
; XOPAVX1-NEXT: vpsubd %xmm3, %xmm1, %xmm1
; XOPAVX1-NEXT: vpsrld $1, %xmm1, %xmm1
; XOPAVX1-NEXT: vpmacsdd %xmm0, %xmm2, %xmm1, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: vec128_i32_unsigned_reg_reg:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpcomgtud %xmm1, %xmm0, %xmm2
; XOPAVX2-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; XOPAVX2-NEXT: vpor %xmm3, %xmm2, %xmm2
; XOPAVX2-NEXT: vpminud %xmm1, %xmm0, %xmm3
; XOPAVX2-NEXT: vpmaxud %xmm1, %xmm0, %xmm1
; XOPAVX2-NEXT: vpsubd %xmm3, %xmm1, %xmm1
; XOPAVX2-NEXT: vpsrld $1, %xmm1, %xmm1
; XOPAVX2-NEXT: vpmacsdd %xmm0, %xmm2, %xmm1, %xmm0
; XOPAVX2-NEXT: retq
;
; AVX512F-LABEL: vec128_i32_unsigned_reg_reg:
; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vpcmpnleud %zmm1, %zmm0, %k1
; AVX512F-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; AVX512F-NEXT: vmovdqa32 %zmm2, %zmm3 {%k1}
; AVX512F-NEXT: vpminud %xmm1, %xmm0, %xmm2
; AVX512F-NEXT: vpmaxud %xmm1, %xmm0, %xmm1
; AVX512F-NEXT: vpsubd %xmm2, %xmm1, %xmm1
; AVX512F-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX512F-NEXT: vpmulld %xmm3, %xmm1, %xmm1
; AVX512F-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: vec128_i32_unsigned_reg_reg:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpcmpnleud %xmm1, %xmm0, %k1
; AVX512VL-NEXT: vpminud %xmm1, %xmm0, %xmm2
; AVX512VL-NEXT: vpmaxud %xmm1, %xmm0, %xmm1
; AVX512VL-NEXT: vpsubd %xmm2, %xmm1, %xmm1
; AVX512VL-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512VL-NEXT: vpsubd %xmm1, %xmm2, %xmm1 {%k1}
; AVX512VL-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i32_unsigned_reg_reg:
; AVX512BW-FALLBACK: # %bb.0:
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-FALLBACK-NEXT: vpcmpnleud %zmm1, %zmm0, %k1
; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; AVX512BW-FALLBACK-NEXT: vmovdqa32 %zmm2, %zmm3 {%k1}
; AVX512BW-FALLBACK-NEXT: vpminud %xmm1, %xmm0, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmaxud %xmm1, %xmm0, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsubd %xmm2, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpmulld %xmm3, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX512BW-FALLBACK-NEXT: vzeroupper
; AVX512BW-FALLBACK-NEXT: retq
  %t3 = icmp ugt <4 x i32> %a1, %a2
  %t4 = select <4 x i1> %t3, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
  %t5 = select <4 x i1> %t3, <4 x i32> %a2, <4 x i32> %a1
  %t6 = select <4 x i1> %t3, <4 x i32> %a1, <4 x i32> %a2
  %t7 = sub <4 x i32> %t6, %t5
  %t8 = lshr <4 x i32> %t7, <i32 1, i32 1, i32 1, i32 1>
  %t9 = mul <4 x i32> %t8, %t4
  %a10 = add <4 x i32> %t9, %a1
  ret <4 x i32> %a10
}
; Values are loaded. Only check signed case.

define <4 x i32> @vec128_i32_signed_mem_reg(ptr %a1_addr, <4 x i32> %a2) nounwind {
; SSE2-LABEL: vec128_i32_signed_mem_reg:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: pcmpgtd %xmm0, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [1,1,1,1]
; SSE2-NEXT: por %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: psubd %xmm0, %xmm4
; SSE2-NEXT: psubd %xmm1, %xmm0
; SSE2-NEXT: pand %xmm2, %xmm4
; SSE2-NEXT: pandn %xmm0, %xmm2
; SSE2-NEXT: por %xmm4, %xmm2
; SSE2-NEXT: psrld $1, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm3, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm4, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT: paddd %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: vec128_i32_signed_mem_reg:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa (%rdi), %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm2
; SSE41-NEXT: pcmpgtd %xmm0, %xmm2
; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE41-NEXT: movdqa %xmm1, %xmm3
; SSE41-NEXT: pminsd %xmm0, %xmm3
; SSE41-NEXT: pmaxsd %xmm1, %xmm0
; SSE41-NEXT: psubd %xmm3, %xmm0
; SSE41-NEXT: psrld $1, %xmm0
; SSE41-NEXT: pmulld %xmm2, %xmm0
; SSE41-NEXT: paddd %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: vec128_i32_signed_mem_reg:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa (%rdi), %xmm1
; AVX1-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm2
; AVX1-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX1-NEXT: vpminsd %xmm0, %xmm1, %xmm3
; AVX1-NEXT: vpmaxsd %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vpsubd %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
; AVX1-NEXT: vpmulld %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: vec128_i32_signed_mem_reg:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %xmm1
; AVX2-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm2
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; AVX2-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpminsd %xmm0, %xmm1, %xmm3
; AVX2-NEXT: vpmaxsd %xmm0, %xmm1, %xmm0
; AVX2-NEXT: vpsubd %xmm3, %xmm0, %xmm0
; AVX2-NEXT: vpsrld $1, %xmm0, %xmm0
; AVX2-NEXT: vpmulld %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; XOP-FALLBACK-LABEL: vec128_i32_signed_mem_reg:
; XOP-FALLBACK: # %bb.0:
; XOP-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; XOP-FALLBACK-NEXT: vpcomgtd %xmm0, %xmm1, %xmm2
; XOP-FALLBACK-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; XOP-FALLBACK-NEXT: vpminsd %xmm0, %xmm1, %xmm3
; XOP-FALLBACK-NEXT: vpmaxsd %xmm0, %xmm1, %xmm0
; XOP-FALLBACK-NEXT: vpsubd %xmm3, %xmm0, %xmm0
; XOP-FALLBACK-NEXT: vpsrld $1, %xmm0, %xmm0
; XOP-FALLBACK-NEXT: vpmacsdd %xmm1, %xmm2, %xmm0, %xmm0
; XOP-FALLBACK-NEXT: retq
;
; XOPAVX1-LABEL: vec128_i32_signed_mem_reg:
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vmovdqa (%rdi), %xmm1
; XOPAVX1-NEXT: vpcomgtd %xmm0, %xmm1, %xmm2
; XOPAVX1-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; XOPAVX1-NEXT: vpminsd %xmm0, %xmm1, %xmm3
; XOPAVX1-NEXT: vpmaxsd %xmm0, %xmm1, %xmm0
; XOPAVX1-NEXT: vpsubd %xmm3, %xmm0, %xmm0
; XOPAVX1-NEXT: vpsrld $1, %xmm0, %xmm0
; XOPAVX1-NEXT: vpmacsdd %xmm1, %xmm2, %xmm0, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: vec128_i32_signed_mem_reg:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vmovdqa (%rdi), %xmm1
; XOPAVX2-NEXT: vpcomgtd %xmm0, %xmm1, %xmm2
; XOPAVX2-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; XOPAVX2-NEXT: vpor %xmm3, %xmm2, %xmm2
; XOPAVX2-NEXT: vpminsd %xmm0, %xmm1, %xmm3
; XOPAVX2-NEXT: vpmaxsd %xmm0, %xmm1, %xmm0
; XOPAVX2-NEXT: vpsubd %xmm3, %xmm0, %xmm0
; XOPAVX2-NEXT: vpsrld $1, %xmm0, %xmm0
; XOPAVX2-NEXT: vpmacsdd %xmm1, %xmm2, %xmm0, %xmm0
; XOPAVX2-NEXT: retq
;
; AVX512F-LABEL: vec128_i32_signed_mem_reg:
; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vmovdqa (%rdi), %xmm1
; AVX512F-NEXT: vpcmpgtd %zmm0, %zmm1, %k1
; AVX512F-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; AVX512F-NEXT: vmovdqa32 %zmm2, %zmm3 {%k1}
; AVX512F-NEXT: vpminsd %xmm0, %xmm1, %xmm2
; AVX512F-NEXT: vpmaxsd %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: vpsubd %xmm2, %xmm0, %xmm0
; AVX512F-NEXT: vpsrld $1, %xmm0, %xmm0
; AVX512F-NEXT: vpmulld %xmm3, %xmm0, %xmm0
; AVX512F-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: vec128_i32_signed_mem_reg:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm1
; AVX512VL-NEXT: vpcmpgtd %xmm0, %xmm1, %k1
; AVX512VL-NEXT: vpminsd %xmm0, %xmm1, %xmm2
; AVX512VL-NEXT: vpmaxsd %xmm0, %xmm1, %xmm0
; AVX512VL-NEXT: vpsubd %xmm2, %xmm0, %xmm0
; AVX512VL-NEXT: vpsrld $1, %xmm0, %xmm0
; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512VL-NEXT: vpsubd %xmm0, %xmm2, %xmm0 {%k1}
; AVX512VL-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i32_signed_mem_reg:
; AVX512BW-FALLBACK: # %bb.0:
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX512BW-FALLBACK-NEXT: vpcmpgtd %zmm0, %zmm1, %k1
; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; AVX512BW-FALLBACK-NEXT: vmovdqa32 %zmm2, %zmm3 {%k1}
; AVX512BW-FALLBACK-NEXT: vpminsd %xmm0, %xmm1, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmaxsd %xmm0, %xmm1, %xmm0
; AVX512BW-FALLBACK-NEXT: vpsubd %xmm2, %xmm0, %xmm0
; AVX512BW-FALLBACK-NEXT: vpsrld $1, %xmm0, %xmm0
; AVX512BW-FALLBACK-NEXT: vpmulld %xmm3, %xmm0, %xmm0
; AVX512BW-FALLBACK-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX512BW-FALLBACK-NEXT: vzeroupper
; AVX512BW-FALLBACK-NEXT: retq
  %a1 = load <4 x i32>, ptr %a1_addr
  %t3 = icmp sgt <4 x i32> %a1, %a2 ; signed
  %t4 = select <4 x i1> %t3, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
  %t5 = select <4 x i1> %t3, <4 x i32> %a2, <4 x i32> %a1
  %t6 = select <4 x i1> %t3, <4 x i32> %a1, <4 x i32> %a2
  %t7 = sub <4 x i32> %t6, %t5
  %t8 = lshr <4 x i32> %t7, <i32 1, i32 1, i32 1, i32 1>
  %t9 = mul nsw <4 x i32> %t8, %t4 ; signed
  %a10 = add nsw <4 x i32> %t9, %a1 ; signed
  ret <4 x i32> %a10
}
define <4 x i32> @vec128_i32_signed_reg_mem(<4 x i32> %a1, ptr %a2_addr) nounwind {
; SSE2-LABEL: vec128_i32_signed_reg_mem:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [1,1,1,1]
; SSE2-NEXT: por %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: psubd %xmm1, %xmm4
; SSE2-NEXT: psubd %xmm0, %xmm1
; SSE2-NEXT: pand %xmm2, %xmm4
; SSE2-NEXT: pandn %xmm1, %xmm2
; SSE2-NEXT: por %xmm4, %xmm2
; SSE2-NEXT: psrld $1, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm3, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE2-NEXT: paddd %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: vec128_i32_signed_reg_mem:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa (%rdi), %xmm1
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: pcmpgtd %xmm1, %xmm2
; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE41-NEXT: movdqa %xmm0, %xmm3
; SSE41-NEXT: pminsd %xmm1, %xmm3
; SSE41-NEXT: pmaxsd %xmm0, %xmm1
; SSE41-NEXT: psubd %xmm3, %xmm1
; SSE41-NEXT: psrld $1, %xmm1
; SSE41-NEXT: pmulld %xmm2, %xmm1
; SSE41-NEXT: paddd %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: vec128_i32_signed_reg_mem:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa (%rdi), %xmm1
; AVX1-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX1-NEXT: vpminsd %xmm1, %xmm0, %xmm3
; AVX1-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vpsubd %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX1-NEXT: vpmulld %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: vec128_i32_signed_reg_mem:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %xmm1
; AVX2-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm2
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; AVX2-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpminsd %xmm1, %xmm0, %xmm3
; AVX2-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; AVX2-NEXT: vpsubd %xmm3, %xmm1, %xmm1
; AVX2-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX2-NEXT: vpmulld %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX2-NEXT: retq
;
; XOP-FALLBACK-LABEL: vec128_i32_signed_reg_mem:
; XOP-FALLBACK: # %bb.0:
; XOP-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; XOP-FALLBACK-NEXT: vpcomgtd %xmm1, %xmm0, %xmm2
; XOP-FALLBACK-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; XOP-FALLBACK-NEXT: vpminsd %xmm1, %xmm0, %xmm3
; XOP-FALLBACK-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; XOP-FALLBACK-NEXT: vpsubd %xmm3, %xmm1, %xmm1
; XOP-FALLBACK-NEXT: vpsrld $1, %xmm1, %xmm1
; XOP-FALLBACK-NEXT: vpmacsdd %xmm0, %xmm2, %xmm1, %xmm0
; XOP-FALLBACK-NEXT: retq
;
; XOPAVX1-LABEL: vec128_i32_signed_reg_mem:
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vmovdqa (%rdi), %xmm1
; XOPAVX1-NEXT: vpcomgtd %xmm1, %xmm0, %xmm2
; XOPAVX1-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; XOPAVX1-NEXT: vpminsd %xmm1, %xmm0, %xmm3
; XOPAVX1-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; XOPAVX1-NEXT: vpsubd %xmm3, %xmm1, %xmm1
; XOPAVX1-NEXT: vpsrld $1, %xmm1, %xmm1
; XOPAVX1-NEXT: vpmacsdd %xmm0, %xmm2, %xmm1, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: vec128_i32_signed_reg_mem:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vmovdqa (%rdi), %xmm1
; XOPAVX2-NEXT: vpcomgtd %xmm1, %xmm0, %xmm2
; XOPAVX2-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; XOPAVX2-NEXT: vpor %xmm3, %xmm2, %xmm2
; XOPAVX2-NEXT: vpminsd %xmm1, %xmm0, %xmm3
; XOPAVX2-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; XOPAVX2-NEXT: vpsubd %xmm3, %xmm1, %xmm1
; XOPAVX2-NEXT: vpsrld $1, %xmm1, %xmm1
; XOPAVX2-NEXT: vpmacsdd %xmm0, %xmm2, %xmm1, %xmm0
; XOPAVX2-NEXT: retq
;
; AVX512F-LABEL: vec128_i32_signed_reg_mem:
; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vmovdqa (%rdi), %xmm1
; AVX512F-NEXT: vpcmpgtd %zmm1, %zmm0, %k1
; AVX512F-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; AVX512F-NEXT: vmovdqa32 %zmm2, %zmm3 {%k1}
; AVX512F-NEXT: vpminsd %xmm1, %xmm0, %xmm2
; AVX512F-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; AVX512F-NEXT: vpsubd %xmm2, %xmm1, %xmm1
; AVX512F-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX512F-NEXT: vpmulld %xmm3, %xmm1, %xmm1
; AVX512F-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: vec128_i32_signed_reg_mem:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm1
; AVX512VL-NEXT: vpcmpgtd %xmm1, %xmm0, %k1
; AVX512VL-NEXT: vpminsd %xmm1, %xmm0, %xmm2
; AVX512VL-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; AVX512VL-NEXT: vpsubd %xmm2, %xmm1, %xmm1
; AVX512VL-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512VL-NEXT: vpsubd %xmm1, %xmm2, %xmm1 {%k1}
; AVX512VL-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i32_signed_reg_mem:
; AVX512BW-FALLBACK: # %bb.0:
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX512BW-FALLBACK-NEXT: vpcmpgtd %zmm1, %zmm0, %k1
; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; AVX512BW-FALLBACK-NEXT: vmovdqa32 %zmm2, %zmm3 {%k1}
; AVX512BW-FALLBACK-NEXT: vpminsd %xmm1, %xmm0, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsubd %xmm2, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpmulld %xmm3, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX512BW-FALLBACK-NEXT: vzeroupper
; AVX512BW-FALLBACK-NEXT: retq
  %a2 = load <4 x i32>, ptr %a2_addr
  %t3 = icmp sgt <4 x i32> %a1, %a2 ; signed
  %t4 = select <4 x i1> %t3, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
  %t5 = select <4 x i1> %t3, <4 x i32> %a2, <4 x i32> %a1
  %t6 = select <4 x i1> %t3, <4 x i32> %a1, <4 x i32> %a2
  %t7 = sub <4 x i32> %t6, %t5
  %t8 = lshr <4 x i32> %t7, <i32 1, i32 1, i32 1, i32 1>
  %t9 = mul nsw <4 x i32> %t8, %t4 ; signed
  %a10 = add nsw <4 x i32> %t9, %a1 ; signed
  ret <4 x i32> %a10
}
define <4 x i32> @vec128_i32_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind {
; SSE2-LABEL: vec128_i32_signed_mem_mem:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm1
; SSE2-NEXT: movdqa (%rsi), %xmm0
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: pcmpgtd %xmm0, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [1,1,1,1]
; SSE2-NEXT: por %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: psubd %xmm0, %xmm4
; SSE2-NEXT: psubd %xmm1, %xmm0
; SSE2-NEXT: pand %xmm2, %xmm4
; SSE2-NEXT: pandn %xmm0, %xmm2
; SSE2-NEXT: por %xmm4, %xmm2
; SSE2-NEXT: psrld $1, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm3, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm4, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT: paddd %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: vec128_i32_signed_mem_mem:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa (%rdi), %xmm1
; SSE41-NEXT: movdqa (%rsi), %xmm0
; SSE41-NEXT: movdqa %xmm1, %xmm2
; SSE41-NEXT: pcmpgtd %xmm0, %xmm2
; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE41-NEXT: movdqa %xmm1, %xmm3
; SSE41-NEXT: pminsd %xmm0, %xmm3
; SSE41-NEXT: pmaxsd %xmm1, %xmm0
; SSE41-NEXT: psubd %xmm3, %xmm0
; SSE41-NEXT: psrld $1, %xmm0
; SSE41-NEXT: pmulld %xmm2, %xmm0
; SSE41-NEXT: paddd %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: vec128_i32_signed_mem_mem:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa (%rdi), %xmm0
; AVX1-NEXT: vmovdqa (%rsi), %xmm1
; AVX1-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX1-NEXT: vpminsd %xmm1, %xmm0, %xmm3
; AVX1-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vpsubd %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX1-NEXT: vpmulld %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: vec128_i32_signed_mem_mem:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %xmm0
; AVX2-NEXT: vmovdqa (%rsi), %xmm1
; AVX2-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm2
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; AVX2-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpminsd %xmm1, %xmm0, %xmm3
; AVX2-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; AVX2-NEXT: vpsubd %xmm3, %xmm1, %xmm1
; AVX2-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX2-NEXT: vpmulld %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX2-NEXT: retq
;
; XOP-FALLBACK-LABEL: vec128_i32_signed_mem_mem:
; XOP-FALLBACK: # %bb.0:
; XOP-FALLBACK-NEXT: vmovdqa (%rdi), %xmm0
; XOP-FALLBACK-NEXT: vmovdqa (%rsi), %xmm1
; XOP-FALLBACK-NEXT: vpcomgtd %xmm1, %xmm0, %xmm2
; XOP-FALLBACK-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; XOP-FALLBACK-NEXT: vpminsd %xmm1, %xmm0, %xmm3
; XOP-FALLBACK-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; XOP-FALLBACK-NEXT: vpsubd %xmm3, %xmm1, %xmm1
; XOP-FALLBACK-NEXT: vpsrld $1, %xmm1, %xmm1
; XOP-FALLBACK-NEXT: vpmacsdd %xmm0, %xmm2, %xmm1, %xmm0
; XOP-FALLBACK-NEXT: retq
;
; XOPAVX1-LABEL: vec128_i32_signed_mem_mem:
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vmovdqa (%rdi), %xmm0
; XOPAVX1-NEXT: vmovdqa (%rsi), %xmm1
; XOPAVX1-NEXT: vpcomgtd %xmm1, %xmm0, %xmm2
; XOPAVX1-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; XOPAVX1-NEXT: vpminsd %xmm1, %xmm0, %xmm3
; XOPAVX1-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; XOPAVX1-NEXT: vpsubd %xmm3, %xmm1, %xmm1
; XOPAVX1-NEXT: vpsrld $1, %xmm1, %xmm1
; XOPAVX1-NEXT: vpmacsdd %xmm0, %xmm2, %xmm1, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: vec128_i32_signed_mem_mem:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vmovdqa (%rdi), %xmm0
; XOPAVX2-NEXT: vmovdqa (%rsi), %xmm1
; XOPAVX2-NEXT: vpcomgtd %xmm1, %xmm0, %xmm2
; XOPAVX2-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; XOPAVX2-NEXT: vpor %xmm3, %xmm2, %xmm2
; XOPAVX2-NEXT: vpminsd %xmm1, %xmm0, %xmm3
; XOPAVX2-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; XOPAVX2-NEXT: vpsubd %xmm3, %xmm1, %xmm1
; XOPAVX2-NEXT: vpsrld $1, %xmm1, %xmm1
; XOPAVX2-NEXT: vpmacsdd %xmm0, %xmm2, %xmm1, %xmm0
; XOPAVX2-NEXT: retq
;
; AVX512F-LABEL: vec128_i32_signed_mem_mem:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vmovdqa (%rsi), %xmm1
; AVX512F-NEXT: vpcmpgtd %zmm1, %zmm0, %k1
; AVX512F-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; AVX512F-NEXT: vmovdqa32 %zmm2, %zmm3 {%k1}
; AVX512F-NEXT: vpminsd %xmm1, %xmm0, %xmm2
; AVX512F-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; AVX512F-NEXT: vpsubd %xmm2, %xmm1, %xmm1
; AVX512F-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX512F-NEXT: vpmulld %xmm3, %xmm1, %xmm1
; AVX512F-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: vec128_i32_signed_mem_mem:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512VL-NEXT: vmovdqa (%rsi), %xmm1
; AVX512VL-NEXT: vpcmpgtd %xmm1, %xmm0, %k1
; AVX512VL-NEXT: vpminsd %xmm1, %xmm0, %xmm2
; AVX512VL-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; AVX512VL-NEXT: vpsubd %xmm2, %xmm1, %xmm1
; AVX512VL-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512VL-NEXT: vpsubd %xmm1, %xmm2, %xmm1 {%k1}
; AVX512VL-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i32_signed_mem_mem:
; AVX512BW-FALLBACK: # %bb.0:
; AVX512BW-FALLBACK-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-FALLBACK-NEXT: vmovdqa (%rsi), %xmm1
; AVX512BW-FALLBACK-NEXT: vpcmpgtd %zmm1, %zmm0, %k1
; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; AVX512BW-FALLBACK-NEXT: vmovdqa32 %zmm2, %zmm3 {%k1}
; AVX512BW-FALLBACK-NEXT: vpminsd %xmm1, %xmm0, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsubd %xmm2, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpmulld %xmm3, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX512BW-FALLBACK-NEXT: vzeroupper
; AVX512BW-FALLBACK-NEXT: retq
  %a1 = load <4 x i32>, ptr %a1_addr
  %a2 = load <4 x i32>, ptr %a2_addr
  %t3 = icmp sgt <4 x i32> %a1, %a2 ; signed
  %t4 = select <4 x i1> %t3, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
  %t5 = select <4 x i1> %t3, <4 x i32> %a2, <4 x i32> %a1
  %t6 = select <4 x i1> %t3, <4 x i32> %a1, <4 x i32> %a2
  %t7 = sub <4 x i32> %t6, %t5
  %t8 = lshr <4 x i32> %t7, <i32 1, i32 1, i32 1, i32 1>
  %t9 = mul nsw <4 x i32> %t8, %t4 ; signed
  %a10 = add nsw <4 x i32> %t9, %a1 ; signed
  ret <4 x i32> %a10
}
; ---------------------------------------------------------------------------- ;
; 64-bit width. 128 / 64 = 2 elts.
; ---------------------------------------------------------------------------- ;

; Values come from regs
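;
; Note for the i64 cases below: x86 has no full 64x64->64-bit vector multiply
; until AVX-512DQ (vpmullq), so the step multiply is split into 32-bit halves
; with pmuludq, using a*b = lo(a)*lo(b) + ((lo(a)*hi(b) + hi(a)*lo(b)) << 32);
; the psrlq $33 extracts hi32 of the already-halved difference in one shift.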
define <2 x i64> @vec128_i64_signed_reg_reg(<2 x i64> %a1, <2 x i64> %a2) nounwind {
; SSE2-LABEL: vec128_i64_signed_reg_reg:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
; SSE2-NEXT: pxor %xmm0, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm4
; SSE2-NEXT: pcmpgtd %xmm3, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm3, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE2-NEXT: pand %xmm5, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
; SSE2-NEXT: por %xmm2, %xmm3
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [1,1]
; SSE2-NEXT: por %xmm3, %xmm2
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: pandn %xmm0, %xmm4
; SSE2-NEXT: movdqa %xmm3, %xmm5
; SSE2-NEXT: pandn %xmm1, %xmm5
; SSE2-NEXT: pand %xmm3, %xmm1
; SSE2-NEXT: por %xmm4, %xmm1
; SSE2-NEXT: pand %xmm0, %xmm3
; SSE2-NEXT: por %xmm5, %xmm3
; SSE2-NEXT: psubq %xmm1, %xmm3
; SSE2-NEXT: movdqa %xmm3, %xmm1
; SSE2-NEXT: psrlq $1, %xmm1
; SSE2-NEXT: psrlq $33, %xmm3
; SSE2-NEXT: pmuludq %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm2, %xmm4
; SSE2-NEXT: psrlq $32, %xmm4
; SSE2-NEXT: pmuludq %xmm1, %xmm4
; SSE2-NEXT: paddq %xmm3, %xmm4
; SSE2-NEXT: psllq $32, %xmm4
; SSE2-NEXT: pmuludq %xmm2, %xmm1
; SSE2-NEXT: paddq %xmm1, %xmm0
; SSE2-NEXT: paddq %xmm4, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: vec128_i64_signed_reg_reg:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648]
; SSE41-NEXT: movdqa %xmm1, %xmm3
; SSE41-NEXT: pxor %xmm0, %xmm3
; SSE41-NEXT: pxor %xmm2, %xmm0
; SSE41-NEXT: movdqa %xmm0, %xmm4
; SSE41-NEXT: pcmpgtd %xmm3, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
; SSE41-NEXT: pcmpeqd %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; SSE41-NEXT: pand %xmm5, %xmm3
; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm4[1,1,3,3]
; SSE41-NEXT: por %xmm3, %xmm6
; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm6
; SSE41-NEXT: pand %xmm5, %xmm0
; SSE41-NEXT: por %xmm4, %xmm0
; SSE41-NEXT: movdqa %xmm2, %xmm3
; SSE41-NEXT: psubq %xmm1, %xmm3
; SSE41-NEXT: psubq %xmm2, %xmm1
; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm1
; SSE41-NEXT: movapd %xmm1, %xmm0
; SSE41-NEXT: psrlq $1, %xmm0
; SSE41-NEXT: psrlq $33, %xmm1
; SSE41-NEXT: pmuludq %xmm6, %xmm1
; SSE41-NEXT: movdqa %xmm6, %xmm3
; SSE41-NEXT: psrlq $32, %xmm3
; SSE41-NEXT: pmuludq %xmm0, %xmm3
; SSE41-NEXT: paddq %xmm1, %xmm3
; SSE41-NEXT: psllq $32, %xmm3
; SSE41-NEXT: pmuludq %xmm6, %xmm0
; SSE41-NEXT: paddq %xmm2, %xmm0
; SSE41-NEXT: paddq %xmm3, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: vec128_i64_signed_reg_reg:
; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; AVX-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
; AVX-NEXT: vpsubq %xmm1, %xmm0, %xmm4
; AVX-NEXT: vpsubq %xmm0, %xmm1, %xmm1
; AVX-NEXT: vblendvpd %xmm2, %xmm4, %xmm1, %xmm1
; AVX-NEXT: vpsrlq $1, %xmm1, %xmm2
; AVX-NEXT: vpsrlq $33, %xmm1, %xmm1
; AVX-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; AVX-NEXT: vpsrlq $32, %xmm3, %xmm4
; AVX-NEXT: vpmuludq %xmm4, %xmm2, %xmm4
; AVX-NEXT: vpaddq %xmm1, %xmm4, %xmm1
; AVX-NEXT: vpsllq $32, %xmm1, %xmm1
; AVX-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: vec128_i64_signed_reg_reg:
; XOP: # %bb.0:
; XOP-NEXT: vpcomgtq %xmm1, %xmm0, %xmm2
; XOP-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
; XOP-NEXT: vpsubq %xmm1, %xmm0, %xmm4
; XOP-NEXT: vpsubq %xmm0, %xmm1, %xmm1
; XOP-NEXT: vblendvpd %xmm2, %xmm4, %xmm1, %xmm1
; XOP-NEXT: vpsrlq $1, %xmm1, %xmm2
; XOP-NEXT: vpsrlq $33, %xmm1, %xmm1
; XOP-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; XOP-NEXT: vpsrlq $32, %xmm3, %xmm4
; XOP-NEXT: vpmuludq %xmm4, %xmm2, %xmm4
; XOP-NEXT: vpaddq %xmm1, %xmm4, %xmm1
; XOP-NEXT: vpsllq $32, %xmm1, %xmm1
; XOP-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
; XOP-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; XOP-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512F-LABEL: vec128_i64_signed_reg_reg:
; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vpcmpgtq %zmm1, %zmm0, %k1
; AVX512F-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vpbroadcastq {{.*#+}} xmm3 = [1,1]
; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1}
; AVX512F-NEXT: vpminsq %zmm1, %zmm0, %zmm2
; AVX512F-NEXT: vpmaxsq %zmm1, %zmm0, %zmm1
; AVX512F-NEXT: vpsubq %xmm2, %xmm1, %xmm1
; AVX512F-NEXT: vpsrlq $1, %xmm1, %xmm2
; AVX512F-NEXT: vpsrlq $33, %xmm1, %xmm1
; AVX512F-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; AVX512F-NEXT: vpsrlq $32, %xmm3, %xmm4
; AVX512F-NEXT: vpmuludq %xmm4, %xmm2, %xmm4
; AVX512F-NEXT: vpaddq %xmm1, %xmm4, %xmm1
; AVX512F-NEXT: vpsllq $32, %xmm1, %xmm1
; AVX512F-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
; AVX512F-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX512F-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: vec128_i64_signed_reg_reg:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpcmpgtq %xmm1, %xmm0, %k1
; AVX512VL-NEXT: vpminsq %xmm1, %xmm0, %xmm2
; AVX512VL-NEXT: vpmaxsq %xmm1, %xmm0, %xmm1
; AVX512VL-NEXT: vpsubq %xmm2, %xmm1, %xmm1
; AVX512VL-NEXT: vpsrlq $1, %xmm1, %xmm1
; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512VL-NEXT: vpsubq %xmm1, %xmm2, %xmm1 {%k1}
; AVX512VL-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i64_signed_reg_reg:
; AVX512BW-FALLBACK: # %bb.0:
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-FALLBACK-NEXT: vpcmpgtq %zmm1, %zmm0, %k1
; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vpbroadcastq {{.*#+}} xmm3 = [1,1]
; AVX512BW-FALLBACK-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1}
; AVX512BW-FALLBACK-NEXT: vpminsq %zmm1, %zmm0, %zmm2
; AVX512BW-FALLBACK-NEXT: vpmaxsq %zmm1, %zmm0, %zmm1
; AVX512BW-FALLBACK-NEXT: vpsubq %xmm2, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsrlq $1, %xmm1, %xmm2
; AVX512BW-FALLBACK-NEXT: vpsrlq $33, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsrlq $32, %xmm3, %xmm4
; AVX512BW-FALLBACK-NEXT: vpmuludq %xmm4, %xmm2, %xmm4
; AVX512BW-FALLBACK-NEXT: vpaddq %xmm1, %xmm4, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsllq $32, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX512BW-FALLBACK-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX512BW-FALLBACK-NEXT: vzeroupper
; AVX512BW-FALLBACK-NEXT: retq
  %t3 = icmp sgt <2 x i64> %a1, %a2 ; signed
  %t4 = select <2 x i1> %t3, <2 x i64> <i64 -1, i64 -1>, <2 x i64> <i64 1, i64 1>
  %t5 = select <2 x i1> %t3, <2 x i64> %a2, <2 x i64> %a1
  %t6 = select <2 x i1> %t3, <2 x i64> %a1, <2 x i64> %a2
  %t7 = sub <2 x i64> %t6, %t5
  %t8 = lshr <2 x i64> %t7, <i64 1, i64 1>
  %t9 = mul nsw <2 x i64> %t8, %t4 ; signed
  %a10 = add nsw <2 x i64> %t9, %a1 ; signed
  ret <2 x i64> %a10
}
define <2 x i64> @vec128_i64_unsigned_reg_reg(<2 x i64> %a1, <2 x i64> %a2) nounwind {
; SSE2-LABEL: vec128_i64_unsigned_reg_reg:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [9223372039002259456,9223372039002259456]
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
; SSE2-NEXT: pxor %xmm0, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm4
; SSE2-NEXT: pcmpgtd %xmm3, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm3, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE2-NEXT: pand %xmm5, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
; SSE2-NEXT: por %xmm2, %xmm3
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [1,1]
; SSE2-NEXT: por %xmm3, %xmm2
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: pandn %xmm0, %xmm4
; SSE2-NEXT: movdqa %xmm3, %xmm5
; SSE2-NEXT: pandn %xmm1, %xmm5
; SSE2-NEXT: pand %xmm3, %xmm1
; SSE2-NEXT: por %xmm4, %xmm1
; SSE2-NEXT: pand %xmm0, %xmm3
; SSE2-NEXT: por %xmm5, %xmm3
; SSE2-NEXT: psubq %xmm1, %xmm3
; SSE2-NEXT: movdqa %xmm3, %xmm1
; SSE2-NEXT: psrlq $1, %xmm1
; SSE2-NEXT: psrlq $33, %xmm3
; SSE2-NEXT: pmuludq %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm2, %xmm4
; SSE2-NEXT: psrlq $32, %xmm4
; SSE2-NEXT: pmuludq %xmm1, %xmm4
; SSE2-NEXT: paddq %xmm3, %xmm4
; SSE2-NEXT: psllq $32, %xmm4
; SSE2-NEXT: pmuludq %xmm2, %xmm1
; SSE2-NEXT: paddq %xmm1, %xmm0
; SSE2-NEXT: paddq %xmm4, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: vec128_i64_unsigned_reg_reg:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [9223372039002259456,9223372039002259456]
; SSE41-NEXT: movdqa %xmm1, %xmm3
; SSE41-NEXT: pxor %xmm0, %xmm3
; SSE41-NEXT: pxor %xmm2, %xmm0
; SSE41-NEXT: movdqa %xmm0, %xmm4
; SSE41-NEXT: pcmpgtd %xmm3, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
; SSE41-NEXT: pcmpeqd %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; SSE41-NEXT: pand %xmm5, %xmm3
; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm4[1,1,3,3]
; SSE41-NEXT: por %xmm3, %xmm6
; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm6
; SSE41-NEXT: pand %xmm5, %xmm0
; SSE41-NEXT: por %xmm4, %xmm0
; SSE41-NEXT: movdqa %xmm2, %xmm3
; SSE41-NEXT: psubq %xmm1, %xmm3
; SSE41-NEXT: psubq %xmm2, %xmm1
; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm1
; SSE41-NEXT: movapd %xmm1, %xmm0
; SSE41-NEXT: psrlq $1, %xmm0
; SSE41-NEXT: psrlq $33, %xmm1
; SSE41-NEXT: pmuludq %xmm6, %xmm1
; SSE41-NEXT: movdqa %xmm6, %xmm3
; SSE41-NEXT: psrlq $32, %xmm3
; SSE41-NEXT: pmuludq %xmm0, %xmm3
; SSE41-NEXT: paddq %xmm1, %xmm3
; SSE41-NEXT: psllq $32, %xmm3
; SSE41-NEXT: pmuludq %xmm6, %xmm0
; SSE41-NEXT: paddq %xmm2, %xmm0
; SSE41-NEXT: paddq %xmm3, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: vec128_i64_unsigned_reg_reg:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovddup {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT: # xmm2 = mem[0,0]
; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm3
; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm2
; AVX1-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm4
; AVX1-NEXT: vpsubq %xmm0, %xmm1, %xmm1
; AVX1-NEXT: vblendvpd %xmm2, %xmm4, %xmm1, %xmm1
; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm2
; AVX1-NEXT: vpsrlq $33, %xmm1, %xmm1
; AVX1-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpsrlq $32, %xmm3, %xmm4
; AVX1-NEXT: vpmuludq %xmm4, %xmm2, %xmm4
; AVX1-NEXT: vpaddq %xmm1, %xmm4, %xmm1
; AVX1-NEXT: vpsllq $32, %xmm1, %xmm1
; AVX1-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: vec128_i64_unsigned_reg_reg:
; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastq {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm3
; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm2
; AVX2-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
; AVX2-NEXT: vpsubq %xmm1, %xmm0, %xmm4
; AVX2-NEXT: vpsubq %xmm0, %xmm1, %xmm1
; AVX2-NEXT: vblendvpd %xmm2, %xmm4, %xmm1, %xmm1
; AVX2-NEXT: vpsrlq $1, %xmm1, %xmm2
; AVX2-NEXT: vpsrlq $33, %xmm1, %xmm1
; AVX2-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; AVX2-NEXT: vpsrlq $32, %xmm3, %xmm4
; AVX2-NEXT: vpmuludq %xmm4, %xmm2, %xmm4
; AVX2-NEXT: vpaddq %xmm1, %xmm4, %xmm1
; AVX2-NEXT: vpsllq $32, %xmm1, %xmm1
; AVX2-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; XOP-LABEL: vec128_i64_unsigned_reg_reg:
; XOP: # %bb.0:
; XOP-NEXT: vpcomgtuq %xmm1, %xmm0, %xmm2
; XOP-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
; XOP-NEXT: vpsubq %xmm1, %xmm0, %xmm4
; XOP-NEXT: vpsubq %xmm0, %xmm1, %xmm1
; XOP-NEXT: vblendvpd %xmm2, %xmm4, %xmm1, %xmm1
; XOP-NEXT: vpsrlq $1, %xmm1, %xmm2
; XOP-NEXT: vpsrlq $33, %xmm1, %xmm1
; XOP-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; XOP-NEXT: vpsrlq $32, %xmm3, %xmm4
; XOP-NEXT: vpmuludq %xmm4, %xmm2, %xmm4
; XOP-NEXT: vpaddq %xmm1, %xmm4, %xmm1
; XOP-NEXT: vpsllq $32, %xmm1, %xmm1
; XOP-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
; XOP-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; XOP-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512F-LABEL: vec128_i64_unsigned_reg_reg:
; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vpcmpnleuq %zmm1, %zmm0, %k1
; AVX512F-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vpbroadcastq {{.*#+}} xmm3 = [1,1]
; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1}
; AVX512F-NEXT: vpminuq %zmm1, %zmm0, %zmm2
; AVX512F-NEXT: vpmaxuq %zmm1, %zmm0, %zmm1
; AVX512F-NEXT: vpsubq %xmm2, %xmm1, %xmm1
; AVX512F-NEXT: vpsrlq $1, %xmm1, %xmm2
; AVX512F-NEXT: vpsrlq $33, %xmm1, %xmm1
; AVX512F-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; AVX512F-NEXT: vpsrlq $32, %xmm3, %xmm4
; AVX512F-NEXT: vpmuludq %xmm4, %xmm2, %xmm4
; AVX512F-NEXT: vpaddq %xmm1, %xmm4, %xmm1
; AVX512F-NEXT: vpsllq $32, %xmm1, %xmm1
; AVX512F-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
; AVX512F-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX512F-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: vec128_i64_unsigned_reg_reg:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpcmpnleuq %xmm1, %xmm0, %k1
; AVX512VL-NEXT: vpminuq %xmm1, %xmm0, %xmm2
; AVX512VL-NEXT: vpmaxuq %xmm1, %xmm0, %xmm1
; AVX512VL-NEXT: vpsubq %xmm2, %xmm1, %xmm1
; AVX512VL-NEXT: vpsrlq $1, %xmm1, %xmm1
; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512VL-NEXT: vpsubq %xmm1, %xmm2, %xmm1 {%k1}
; AVX512VL-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i64_unsigned_reg_reg:
; AVX512BW-FALLBACK: # %bb.0:
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-FALLBACK-NEXT: vpcmpnleuq %zmm1, %zmm0, %k1
; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vpbroadcastq {{.*#+}} xmm3 = [1,1]
; AVX512BW-FALLBACK-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1}
; AVX512BW-FALLBACK-NEXT: vpminuq %zmm1, %zmm0, %zmm2
; AVX512BW-FALLBACK-NEXT: vpmaxuq %zmm1, %zmm0, %zmm1
; AVX512BW-FALLBACK-NEXT: vpsubq %xmm2, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsrlq $1, %xmm1, %xmm2
; AVX512BW-FALLBACK-NEXT: vpsrlq $33, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsrlq $32, %xmm3, %xmm4
; AVX512BW-FALLBACK-NEXT: vpmuludq %xmm4, %xmm2, %xmm4
; AVX512BW-FALLBACK-NEXT: vpaddq %xmm1, %xmm4, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsllq $32, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX512BW-FALLBACK-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX512BW-FALLBACK-NEXT: vzeroupper
; AVX512BW-FALLBACK-NEXT: retq
  %t3 = icmp ugt <2 x i64> %a1, %a2
  %t4 = select <2 x i1> %t3, <2 x i64> <i64 -1, i64 -1>, <2 x i64> <i64 1, i64 1>
  %t5 = select <2 x i1> %t3, <2 x i64> %a2, <2 x i64> %a1
  %t6 = select <2 x i1> %t3, <2 x i64> %a1, <2 x i64> %a2
  %t7 = sub <2 x i64> %t6, %t5
  %t8 = lshr <2 x i64> %t7, <i64 1, i64 1>
  %t9 = mul <2 x i64> %t8, %t4
  %a10 = add <2 x i64> %t9, %a1
  ret <2 x i64> %a10
}
; Values are loaded. Only check signed case.

define <2 x i64> @vec128_i64_signed_mem_reg(ptr %a1_addr, <2 x i64> %a2) nounwind {
; SSE2-LABEL: vec128_i64_signed_mem_reg:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm1
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
; SSE2-NEXT: pxor %xmm1, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm4
; SSE2-NEXT: pcmpgtd %xmm3, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm3, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE2-NEXT: pand %xmm5, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
; SSE2-NEXT: por %xmm2, %xmm3
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [1,1]
; SSE2-NEXT: por %xmm3, %xmm2
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: pandn %xmm1, %xmm4
; SSE2-NEXT: movdqa %xmm3, %xmm5
; SSE2-NEXT: pandn %xmm0, %xmm5
; SSE2-NEXT: pand %xmm3, %xmm0
; SSE2-NEXT: por %xmm4, %xmm0
; SSE2-NEXT: pand %xmm1, %xmm3
; SSE2-NEXT: por %xmm5, %xmm3
; SSE2-NEXT: psubq %xmm0, %xmm3
; SSE2-NEXT: movdqa %xmm3, %xmm0
; SSE2-NEXT: psrlq $1, %xmm0
; SSE2-NEXT: psrlq $33, %xmm3
; SSE2-NEXT: pmuludq %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm2, %xmm4
; SSE2-NEXT: psrlq $32, %xmm4
; SSE2-NEXT: pmuludq %xmm0, %xmm4
; SSE2-NEXT: paddq %xmm3, %xmm4
; SSE2-NEXT: psllq $32, %xmm4
; SSE2-NEXT: pmuludq %xmm2, %xmm0
; SSE2-NEXT: paddq %xmm1, %xmm0
; SSE2-NEXT: paddq %xmm4, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: vec128_i64_signed_mem_reg:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: movdqa (%rdi), %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648]
; SSE41-NEXT: movdqa %xmm1, %xmm3
; SSE41-NEXT: pxor %xmm0, %xmm3
; SSE41-NEXT: pxor %xmm2, %xmm0
; SSE41-NEXT: movdqa %xmm0, %xmm4
; SSE41-NEXT: pcmpgtd %xmm3, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
; SSE41-NEXT: pcmpeqd %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; SSE41-NEXT: pand %xmm5, %xmm3
; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm4[1,1,3,3]
; SSE41-NEXT: por %xmm3, %xmm6
; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm6
; SSE41-NEXT: pand %xmm5, %xmm0
; SSE41-NEXT: por %xmm4, %xmm0
; SSE41-NEXT: movdqa %xmm2, %xmm3
; SSE41-NEXT: psubq %xmm1, %xmm3
; SSE41-NEXT: psubq %xmm2, %xmm1
; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm1
; SSE41-NEXT: movapd %xmm1, %xmm0
; SSE41-NEXT: psrlq $1, %xmm0
; SSE41-NEXT: psrlq $33, %xmm1
; SSE41-NEXT: pmuludq %xmm6, %xmm1
; SSE41-NEXT: movdqa %xmm6, %xmm3
; SSE41-NEXT: psrlq $32, %xmm3
; SSE41-NEXT: pmuludq %xmm0, %xmm3
; SSE41-NEXT: paddq %xmm1, %xmm3
; SSE41-NEXT: psllq $32, %xmm3
; SSE41-NEXT: pmuludq %xmm6, %xmm0
; SSE41-NEXT: paddq %xmm2, %xmm0
; SSE41-NEXT: paddq %xmm3, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: vec128_i64_signed_mem_reg:
; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm1
; AVX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm2
; AVX-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
; AVX-NEXT: vpsubq %xmm0, %xmm1, %xmm4
; AVX-NEXT: vpsubq %xmm1, %xmm0, %xmm0
; AVX-NEXT: vblendvpd %xmm2, %xmm4, %xmm0, %xmm0
; AVX-NEXT: vpsrlq $1, %xmm0, %xmm2
; AVX-NEXT: vpsrlq $33, %xmm0, %xmm0
; AVX-NEXT: vpmuludq %xmm3, %xmm0, %xmm0
; AVX-NEXT: vpsrlq $32, %xmm3, %xmm4
; AVX-NEXT: vpmuludq %xmm4, %xmm2, %xmm4
; AVX-NEXT: vpaddq %xmm0, %xmm4, %xmm0
; AVX-NEXT: vpsllq $32, %xmm0, %xmm0
; AVX-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpaddq %xmm1, %xmm2, %xmm1
; AVX-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: vec128_i64_signed_mem_reg:
; XOP: # %bb.0:
; XOP-NEXT: vmovdqa (%rdi), %xmm1
; XOP-NEXT: vpcomgtq %xmm0, %xmm1, %xmm2
; XOP-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
; XOP-NEXT: vpsubq %xmm0, %xmm1, %xmm4
; XOP-NEXT: vpsubq %xmm1, %xmm0, %xmm0
; XOP-NEXT: vblendvpd %xmm2, %xmm4, %xmm0, %xmm0
; XOP-NEXT: vpsrlq $1, %xmm0, %xmm2
; XOP-NEXT: vpsrlq $33, %xmm0, %xmm0
; XOP-NEXT: vpmuludq %xmm3, %xmm0, %xmm0
; XOP-NEXT: vpsrlq $32, %xmm3, %xmm4
; XOP-NEXT: vpmuludq %xmm4, %xmm2, %xmm4
; XOP-NEXT: vpaddq %xmm0, %xmm4, %xmm0
; XOP-NEXT: vpsllq $32, %xmm0, %xmm0
; XOP-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
; XOP-NEXT: vpaddq %xmm1, %xmm2, %xmm1
; XOP-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; XOP-NEXT: retq
;
; AVX512F-LABEL: vec128_i64_signed_mem_reg:
; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vmovdqa (%rdi), %xmm1
; AVX512F-NEXT: vpcmpgtq %zmm0, %zmm1, %k1
; AVX512F-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vpbroadcastq {{.*#+}} xmm3 = [1,1]
; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1}
; AVX512F-NEXT: vpminsq %zmm0, %zmm1, %zmm2
; AVX512F-NEXT: vpmaxsq %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: vpsubq %xmm2, %xmm0, %xmm0
; AVX512F-NEXT: vpsrlq $1, %xmm0, %xmm2
; AVX512F-NEXT: vpsrlq $33, %xmm0, %xmm0
; AVX512F-NEXT: vpmuludq %xmm3, %xmm0, %xmm0
; AVX512F-NEXT: vpsrlq $32, %xmm3, %xmm4
; AVX512F-NEXT: vpmuludq %xmm4, %xmm2, %xmm4
; AVX512F-NEXT: vpaddq %xmm0, %xmm4, %xmm0
; AVX512F-NEXT: vpsllq $32, %xmm0, %xmm0
; AVX512F-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
; AVX512F-NEXT: vpaddq %xmm1, %xmm2, %xmm1
; AVX512F-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: vec128_i64_signed_mem_reg:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm1
; AVX512VL-NEXT: vpcmpgtq %xmm0, %xmm1, %k1
; AVX512VL-NEXT: vpminsq %xmm0, %xmm1, %xmm2
; AVX512VL-NEXT: vpmaxsq %xmm0, %xmm1, %xmm0
; AVX512VL-NEXT: vpsubq %xmm2, %xmm0, %xmm0
; AVX512VL-NEXT: vpsrlq $1, %xmm0, %xmm0
; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512VL-NEXT: vpsubq %xmm0, %xmm2, %xmm0 {%k1}
; AVX512VL-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i64_signed_mem_reg:
; AVX512BW-FALLBACK: # %bb.0:
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX512BW-FALLBACK-NEXT: vpcmpgtq %zmm0, %zmm1, %k1
; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vpbroadcastq {{.*#+}} xmm3 = [1,1]
; AVX512BW-FALLBACK-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1}
; AVX512BW-FALLBACK-NEXT: vpminsq %zmm0, %zmm1, %zmm2
; AVX512BW-FALLBACK-NEXT: vpmaxsq %zmm0, %zmm1, %zmm0
; AVX512BW-FALLBACK-NEXT: vpsubq %xmm2, %xmm0, %xmm0
; AVX512BW-FALLBACK-NEXT: vpsrlq $1, %xmm0, %xmm2
; AVX512BW-FALLBACK-NEXT: vpsrlq $33, %xmm0, %xmm0
; AVX512BW-FALLBACK-NEXT: vpmuludq %xmm3, %xmm0, %xmm0
; AVX512BW-FALLBACK-NEXT: vpsrlq $32, %xmm3, %xmm4
; AVX512BW-FALLBACK-NEXT: vpmuludq %xmm4, %xmm2, %xmm4
; AVX512BW-FALLBACK-NEXT: vpaddq %xmm0, %xmm4, %xmm0
; AVX512BW-FALLBACK-NEXT: vpsllq $32, %xmm0, %xmm0
; AVX512BW-FALLBACK-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vpaddq %xmm1, %xmm2, %xmm1
; AVX512BW-FALLBACK-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX512BW-FALLBACK-NEXT: vzeroupper
; AVX512BW-FALLBACK-NEXT: retq
  %a1 = load <2 x i64>, ptr %a1_addr
  %t3 = icmp sgt <2 x i64> %a1, %a2 ; signed
  %t4 = select <2 x i1> %t3, <2 x i64> <i64 -1, i64 -1>, <2 x i64> <i64 1, i64 1>
  %t5 = select <2 x i1> %t3, <2 x i64> %a2, <2 x i64> %a1
  %t6 = select <2 x i1> %t3, <2 x i64> %a1, <2 x i64> %a2
  %t7 = sub <2 x i64> %t6, %t5
  %t8 = lshr <2 x i64> %t7, <i64 1, i64 1>
  %t9 = mul nsw <2 x i64> %t8, %t4 ; signed
  %a10 = add nsw <2 x i64> %t9, %a1 ; signed
  ret <2 x i64> %a10
}
define <2 x i64> @vec128_i64_signed_reg_mem(<2 x i64> %a1, ptr %a2_addr) nounwind {
; SSE2-LABEL: vec128_i64_signed_reg_mem:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm1
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
; SSE2-NEXT: pxor %xmm1, %xmm2
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: pcmpgtd %xmm2, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm3, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE2-NEXT: pand %xmm5, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
; SSE2-NEXT: por %xmm2, %xmm3
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [1,1]
; SSE2-NEXT: por %xmm3, %xmm2
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: pandn %xmm0, %xmm4
; SSE2-NEXT: movdqa %xmm3, %xmm5
; SSE2-NEXT: pandn %xmm1, %xmm5
; SSE2-NEXT: pand %xmm3, %xmm1
; SSE2-NEXT: por %xmm4, %xmm1
; SSE2-NEXT: pand %xmm0, %xmm3
; SSE2-NEXT: por %xmm5, %xmm3
; SSE2-NEXT: psubq %xmm1, %xmm3
; SSE2-NEXT: movdqa %xmm3, %xmm1
; SSE2-NEXT: psrlq $1, %xmm1
; SSE2-NEXT: psrlq $33, %xmm3
; SSE2-NEXT: pmuludq %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm2, %xmm4
; SSE2-NEXT: psrlq $32, %xmm4
; SSE2-NEXT: pmuludq %xmm1, %xmm4
; SSE2-NEXT: paddq %xmm3, %xmm4
; SSE2-NEXT: psllq $32, %xmm4
; SSE2-NEXT: pmuludq %xmm2, %xmm1
; SSE2-NEXT: paddq %xmm1, %xmm0
; SSE2-NEXT: paddq %xmm4, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: vec128_i64_signed_reg_mem:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: movdqa (%rdi), %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648]
; SSE41-NEXT: movdqa %xmm1, %xmm3
; SSE41-NEXT: pxor %xmm0, %xmm3
; SSE41-NEXT: pxor %xmm2, %xmm0
; SSE41-NEXT: movdqa %xmm3, %xmm4
; SSE41-NEXT: pcmpgtd %xmm0, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
; SSE41-NEXT: pcmpeqd %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; SSE41-NEXT: pand %xmm5, %xmm3
; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm4[1,1,3,3]
; SSE41-NEXT: por %xmm3, %xmm6
; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm6
; SSE41-NEXT: pand %xmm5, %xmm0
; SSE41-NEXT: por %xmm4, %xmm0
; SSE41-NEXT: movdqa %xmm1, %xmm3
; SSE41-NEXT: psubq %xmm2, %xmm3
; SSE41-NEXT: psubq %xmm1, %xmm2
; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm2
; SSE41-NEXT: movapd %xmm2, %xmm0
; SSE41-NEXT: psrlq $1, %xmm0
; SSE41-NEXT: psrlq $33, %xmm2
; SSE41-NEXT: pmuludq %xmm6, %xmm2
; SSE41-NEXT: movdqa %xmm6, %xmm3
; SSE41-NEXT: psrlq $32, %xmm3
; SSE41-NEXT: pmuludq %xmm0, %xmm3
; SSE41-NEXT: paddq %xmm2, %xmm3
; SSE41-NEXT: psllq $32, %xmm3
; SSE41-NEXT: pmuludq %xmm6, %xmm0
; SSE41-NEXT: paddq %xmm1, %xmm0
; SSE41-NEXT: paddq %xmm3, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: vec128_i64_signed_reg_mem:
; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm1
; AVX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; AVX-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
; AVX-NEXT: vpsubq %xmm1, %xmm0, %xmm4
; AVX-NEXT: vpsubq %xmm0, %xmm1, %xmm1
; AVX-NEXT: vblendvpd %xmm2, %xmm4, %xmm1, %xmm1
; AVX-NEXT: vpsrlq $1, %xmm1, %xmm2
; AVX-NEXT: vpsrlq $33, %xmm1, %xmm1
; AVX-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; AVX-NEXT: vpsrlq $32, %xmm3, %xmm4
; AVX-NEXT: vpmuludq %xmm4, %xmm2, %xmm4
; AVX-NEXT: vpaddq %xmm1, %xmm4, %xmm1
; AVX-NEXT: vpsllq $32, %xmm1, %xmm1
; AVX-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: vec128_i64_signed_reg_mem:
; XOP: # %bb.0:
; XOP-NEXT: vmovdqa (%rdi), %xmm1
; XOP-NEXT: vpcomgtq %xmm1, %xmm0, %xmm2
; XOP-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
; XOP-NEXT: vpsubq %xmm1, %xmm0, %xmm4
; XOP-NEXT: vpsubq %xmm0, %xmm1, %xmm1
; XOP-NEXT: vblendvpd %xmm2, %xmm4, %xmm1, %xmm1
; XOP-NEXT: vpsrlq $1, %xmm1, %xmm2
; XOP-NEXT: vpsrlq $33, %xmm1, %xmm1
; XOP-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; XOP-NEXT: vpsrlq $32, %xmm3, %xmm4
; XOP-NEXT: vpmuludq %xmm4, %xmm2, %xmm4
; XOP-NEXT: vpaddq %xmm1, %xmm4, %xmm1
; XOP-NEXT: vpsllq $32, %xmm1, %xmm1
; XOP-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
; XOP-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; XOP-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512F-LABEL: vec128_i64_signed_reg_mem:
; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vmovdqa (%rdi), %xmm1
; AVX512F-NEXT: vpcmpgtq %zmm1, %zmm0, %k1
; AVX512F-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vpbroadcastq {{.*#+}} xmm3 = [1,1]
; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1}
; AVX512F-NEXT: vpminsq %zmm1, %zmm0, %zmm2
; AVX512F-NEXT: vpmaxsq %zmm1, %zmm0, %zmm1
; AVX512F-NEXT: vpsubq %xmm2, %xmm1, %xmm1
; AVX512F-NEXT: vpsrlq $1, %xmm1, %xmm2
; AVX512F-NEXT: vpsrlq $33, %xmm1, %xmm1
; AVX512F-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; AVX512F-NEXT: vpsrlq $32, %xmm3, %xmm4
; AVX512F-NEXT: vpmuludq %xmm4, %xmm2, %xmm4
; AVX512F-NEXT: vpaddq %xmm1, %xmm4, %xmm1
; AVX512F-NEXT: vpsllq $32, %xmm1, %xmm1
; AVX512F-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
; AVX512F-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX512F-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: vec128_i64_signed_reg_mem:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm1
; AVX512VL-NEXT: vpcmpgtq %xmm1, %xmm0, %k1
; AVX512VL-NEXT: vpminsq %xmm1, %xmm0, %xmm2
; AVX512VL-NEXT: vpmaxsq %xmm1, %xmm0, %xmm1
; AVX512VL-NEXT: vpsubq %xmm2, %xmm1, %xmm1
; AVX512VL-NEXT: vpsrlq $1, %xmm1, %xmm1
; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512VL-NEXT: vpsubq %xmm1, %xmm2, %xmm1 {%k1}
; AVX512VL-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i64_signed_reg_mem:
; AVX512BW-FALLBACK: # %bb.0:
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX512BW-FALLBACK-NEXT: vpcmpgtq %zmm1, %zmm0, %k1
; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vpbroadcastq {{.*#+}} xmm3 = [1,1]
; AVX512BW-FALLBACK-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1}
; AVX512BW-FALLBACK-NEXT: vpminsq %zmm1, %zmm0, %zmm2
; AVX512BW-FALLBACK-NEXT: vpmaxsq %zmm1, %zmm0, %zmm1
; AVX512BW-FALLBACK-NEXT: vpsubq %xmm2, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsrlq $1, %xmm1, %xmm2
; AVX512BW-FALLBACK-NEXT: vpsrlq $33, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsrlq $32, %xmm3, %xmm4
; AVX512BW-FALLBACK-NEXT: vpmuludq %xmm4, %xmm2, %xmm4
; AVX512BW-FALLBACK-NEXT: vpaddq %xmm1, %xmm4, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsllq $32, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX512BW-FALLBACK-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX512BW-FALLBACK-NEXT: vzeroupper
; AVX512BW-FALLBACK-NEXT: retq
  %a2 = load <2 x i64>, ptr %a2_addr
  %t3 = icmp sgt <2 x i64> %a1, %a2 ; signed
  %t4 = select <2 x i1> %t3, <2 x i64> <i64 -1, i64 -1>, <2 x i64> <i64 1, i64 1>
  %t5 = select <2 x i1> %t3, <2 x i64> %a2, <2 x i64> %a1
  %t6 = select <2 x i1> %t3, <2 x i64> %a1, <2 x i64> %a2
  %t7 = sub <2 x i64> %t6, %t5
  %t8 = lshr <2 x i64> %t7, <i64 1, i64 1>
  %t9 = mul nsw <2 x i64> %t8, %t4 ; signed
  %a10 = add nsw <2 x i64> %t9, %a1 ; signed
  ret <2 x i64> %a10
}
define <2 x i64> @vec128_i64_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind {
; SSE2-LABEL: vec128_i64_signed_mem_mem:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm1
; SSE2-NEXT: movdqa (%rsi), %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
; SSE2-NEXT: pxor %xmm1, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm4
; SSE2-NEXT: pcmpgtd %xmm3, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm3, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE2-NEXT: pand %xmm5, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
; SSE2-NEXT: por %xmm2, %xmm3
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [1,1]
; SSE2-NEXT: por %xmm3, %xmm2
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: pandn %xmm1, %xmm4
; SSE2-NEXT: movdqa %xmm3, %xmm5
; SSE2-NEXT: pandn %xmm0, %xmm5
; SSE2-NEXT: pand %xmm3, %xmm0
; SSE2-NEXT: por %xmm4, %xmm0
; SSE2-NEXT: pand %xmm1, %xmm3
; SSE2-NEXT: por %xmm5, %xmm3
; SSE2-NEXT: psubq %xmm0, %xmm3
; SSE2-NEXT: movdqa %xmm3, %xmm0
; SSE2-NEXT: psrlq $1, %xmm0
; SSE2-NEXT: psrlq $33, %xmm3
; SSE2-NEXT: pmuludq %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm2, %xmm4
; SSE2-NEXT: psrlq $32, %xmm4
; SSE2-NEXT: pmuludq %xmm0, %xmm4
; SSE2-NEXT: paddq %xmm3, %xmm4
; SSE2-NEXT: psllq $32, %xmm4
; SSE2-NEXT: pmuludq %xmm2, %xmm0
; SSE2-NEXT: paddq %xmm1, %xmm0
; SSE2-NEXT: paddq %xmm4, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: vec128_i64_signed_mem_mem:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa (%rdi), %xmm1
; SSE41-NEXT: movdqa (%rsi), %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648]
; SSE41-NEXT: movdqa %xmm2, %xmm3
; SSE41-NEXT: pxor %xmm0, %xmm3
; SSE41-NEXT: pxor %xmm1, %xmm0
; SSE41-NEXT: movdqa %xmm0, %xmm4
; SSE41-NEXT: pcmpgtd %xmm3, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
; SSE41-NEXT: pcmpeqd %xmm3, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; SSE41-NEXT: pand %xmm5, %xmm3
; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm4[1,1,3,3]
; SSE41-NEXT: por %xmm3, %xmm6
; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm6
; SSE41-NEXT: pand %xmm5, %xmm0
; SSE41-NEXT: por %xmm4, %xmm0
; SSE41-NEXT: movdqa %xmm1, %xmm3
; SSE41-NEXT: psubq %xmm2, %xmm3
; SSE41-NEXT: psubq %xmm1, %xmm2
; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm2
; SSE41-NEXT: movapd %xmm2, %xmm0
; SSE41-NEXT: psrlq $1, %xmm0
; SSE41-NEXT: psrlq $33, %xmm2
; SSE41-NEXT: pmuludq %xmm6, %xmm2
; SSE41-NEXT: movdqa %xmm6, %xmm3
; SSE41-NEXT: psrlq $32, %xmm3
; SSE41-NEXT: pmuludq %xmm0, %xmm3
; SSE41-NEXT: paddq %xmm2, %xmm3
; SSE41-NEXT: psllq $32, %xmm3
; SSE41-NEXT: pmuludq %xmm6, %xmm0
; SSE41-NEXT: paddq %xmm1, %xmm0
; SSE41-NEXT: paddq %xmm3, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: vec128_i64_signed_mem_mem:
; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm0
; AVX-NEXT: vmovdqa (%rsi), %xmm1
; AVX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; AVX-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
; AVX-NEXT: vpsubq %xmm1, %xmm0, %xmm4
; AVX-NEXT: vpsubq %xmm0, %xmm1, %xmm1
; AVX-NEXT: vblendvpd %xmm2, %xmm4, %xmm1, %xmm1
; AVX-NEXT: vpsrlq $1, %xmm1, %xmm2
; AVX-NEXT: vpsrlq $33, %xmm1, %xmm1
; AVX-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; AVX-NEXT: vpsrlq $32, %xmm3, %xmm4
; AVX-NEXT: vpmuludq %xmm4, %xmm2, %xmm4
; AVX-NEXT: vpaddq %xmm1, %xmm4, %xmm1
; AVX-NEXT: vpsllq $32, %xmm1, %xmm1
; AVX-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: vec128_i64_signed_mem_mem:
; XOP: # %bb.0:
; XOP-NEXT: vmovdqa (%rdi), %xmm0
; XOP-NEXT: vmovdqa (%rsi), %xmm1
; XOP-NEXT: vpcomgtq %xmm1, %xmm0, %xmm2
; XOP-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
; XOP-NEXT: vpsubq %xmm1, %xmm0, %xmm4
; XOP-NEXT: vpsubq %xmm0, %xmm1, %xmm1
; XOP-NEXT: vblendvpd %xmm2, %xmm4, %xmm1, %xmm1
; XOP-NEXT: vpsrlq $1, %xmm1, %xmm2
; XOP-NEXT: vpsrlq $33, %xmm1, %xmm1
; XOP-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; XOP-NEXT: vpsrlq $32, %xmm3, %xmm4
; XOP-NEXT: vpmuludq %xmm4, %xmm2, %xmm4
; XOP-NEXT: vpaddq %xmm1, %xmm4, %xmm1
; XOP-NEXT: vpsllq $32, %xmm1, %xmm1
; XOP-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
; XOP-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; XOP-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512F-LABEL: vec128_i64_signed_mem_mem:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vmovdqa (%rsi), %xmm1
; AVX512F-NEXT: vpcmpgtq %zmm1, %zmm0, %k1
; AVX512F-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vpbroadcastq {{.*#+}} xmm3 = [1,1]
; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1}
; AVX512F-NEXT: vpminsq %zmm1, %zmm0, %zmm2
; AVX512F-NEXT: vpmaxsq %zmm1, %zmm0, %zmm1
; AVX512F-NEXT: vpsubq %xmm2, %xmm1, %xmm1
; AVX512F-NEXT: vpsrlq $1, %xmm1, %xmm2
; AVX512F-NEXT: vpsrlq $33, %xmm1, %xmm1
; AVX512F-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; AVX512F-NEXT: vpsrlq $32, %xmm3, %xmm4
; AVX512F-NEXT: vpmuludq %xmm4, %xmm2, %xmm4
; AVX512F-NEXT: vpaddq %xmm1, %xmm4, %xmm1
; AVX512F-NEXT: vpsllq $32, %xmm1, %xmm1
; AVX512F-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
; AVX512F-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX512F-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: vec128_i64_signed_mem_mem:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512VL-NEXT: vmovdqa (%rsi), %xmm1
; AVX512VL-NEXT: vpcmpgtq %xmm1, %xmm0, %k1
; AVX512VL-NEXT: vpminsq %xmm1, %xmm0, %xmm2
; AVX512VL-NEXT: vpmaxsq %xmm1, %xmm0, %xmm1
; AVX512VL-NEXT: vpsubq %xmm2, %xmm1, %xmm1
; AVX512VL-NEXT: vpsrlq $1, %xmm1, %xmm1
; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512VL-NEXT: vpsubq %xmm1, %xmm2, %xmm1 {%k1}
; AVX512VL-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i64_signed_mem_mem:
; AVX512BW-FALLBACK: # %bb.0:
; AVX512BW-FALLBACK-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-FALLBACK-NEXT: vmovdqa (%rsi), %xmm1
; AVX512BW-FALLBACK-NEXT: vpcmpgtq %zmm1, %zmm0, %k1
; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vpbroadcastq {{.*#+}} xmm3 = [1,1]
; AVX512BW-FALLBACK-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1}
; AVX512BW-FALLBACK-NEXT: vpminsq %zmm1, %zmm0, %zmm2
; AVX512BW-FALLBACK-NEXT: vpmaxsq %zmm1, %zmm0, %zmm1
; AVX512BW-FALLBACK-NEXT: vpsubq %xmm2, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsrlq $1, %xmm1, %xmm2
; AVX512BW-FALLBACK-NEXT: vpsrlq $33, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsrlq $32, %xmm3, %xmm4
; AVX512BW-FALLBACK-NEXT: vpmuludq %xmm4, %xmm2, %xmm4
; AVX512BW-FALLBACK-NEXT: vpaddq %xmm1, %xmm4, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsllq $32, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX512BW-FALLBACK-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX512BW-FALLBACK-NEXT: vzeroupper
; AVX512BW-FALLBACK-NEXT: retq
  %a1 = load <2 x i64>, ptr %a1_addr
  %a2 = load <2 x i64>, ptr %a2_addr
  %t3 = icmp sgt <2 x i64> %a1, %a2 ; signed
  %t4 = select <2 x i1> %t3, <2 x i64> <i64 -1, i64 -1>, <2 x i64> <i64 1, i64 1>
  %t5 = select <2 x i1> %t3, <2 x i64> %a2, <2 x i64> %a1
  %t6 = select <2 x i1> %t3, <2 x i64> %a1, <2 x i64> %a2
  %t7 = sub <2 x i64> %t6, %t5
  %t8 = lshr <2 x i64> %t7, <i64 1, i64 1>
  %t9 = mul nsw <2 x i64> %t8, %t4 ; signed
  %a10 = add nsw <2 x i64> %t9, %a1 ; signed
  ret <2 x i64> %a10
}
; ---------------------------------------------------------------------------- ;
; 16-bit width. 128 / 16 = 8 elts.
; ---------------------------------------------------------------------------- ;

; Values come from regs
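
; Each test below instantiates the same midpoint recipe as the i64 cases
; above, just at i16 granularity. As a scalar sketch (illustrative only; the
; %t3..%a10 names mirror the vector IR bodies):
;   %t3 = icmp sgt i16 %a1, %a2
;   %t4 = select i1 %t3, i16 -1, i16 1     ; -1 if a1 > a2, else +1
;   %t5 = select i1 %t3, i16 %a2, i16 %a1  ; min(a1, a2)
;   %t6 = select i1 %t3, i16 %a1, i16 %a2  ; max(a1, a2)
;   %t7 = sub i16 %t6, %t5                 ; non-negative difference
;   %t8 = lshr i16 %t7, 1                  ; halved without overflow
;   %t9 = mul i16 %t8, %t4
;   %a10 = add i16 %t9, %a1                ; a1 +/- half the distance
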
define <8 x i16> @vec128_i16_signed_reg_reg(<8 x i16> %a1, <8 x i16> %a2) nounwind {
; SSE-LABEL: vec128_i16_signed_reg_reg:
; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: pcmpgtw %xmm1, %xmm2
; SSE-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: pminsw %xmm1, %xmm3
; SSE-NEXT: pmaxsw %xmm0, %xmm1
; SSE-NEXT: psubw %xmm3, %xmm1
; SSE-NEXT: psrlw $1, %xmm1
; SSE-NEXT: pmullw %xmm1, %xmm2
; SSE-NEXT: paddw %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: vec128_i16_signed_reg_reg:
; AVX: # %bb.0:
; AVX-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm2
; AVX-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX-NEXT: vpminsw %xmm1, %xmm0, %xmm3
; AVX-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; AVX-NEXT: vpsubw %xmm3, %xmm1, %xmm1
; AVX-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: vec128_i16_signed_reg_reg:
; XOP: # %bb.0:
; XOP-NEXT: vpcomgtw %xmm1, %xmm0, %xmm2
; XOP-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; XOP-NEXT: vpminsw %xmm1, %xmm0, %xmm3
; XOP-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; XOP-NEXT: vpsubw %xmm3, %xmm1, %xmm1
; XOP-NEXT: vpsrlw $1, %xmm1, %xmm1
; XOP-NEXT: vpmacsww %xmm0, %xmm2, %xmm1, %xmm0
; XOP-NEXT: retq
;
; AVX512F-LABEL: vec128_i16_signed_reg_reg:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm2
; AVX512F-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX512F-NEXT: vpminsw %xmm1, %xmm0, %xmm3
; AVX512F-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; AVX512F-NEXT: vpsubw %xmm3, %xmm1, %xmm1
; AVX512F-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512F-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; AVX512F-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: retq
;
; AVX512VL-FALLBACK-LABEL: vec128_i16_signed_reg_reg:
; AVX512VL-FALLBACK: # %bb.0:
; AVX512VL-FALLBACK-NEXT: vpminsw %xmm1, %xmm0, %xmm2
; AVX512VL-FALLBACK-NEXT: vpmaxsw %xmm1, %xmm0, %xmm3
; AVX512VL-FALLBACK-NEXT: vpsubw %xmm2, %xmm3, %xmm2
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %xmm2, %xmm2
; AVX512VL-FALLBACK-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm1
; AVX512VL-FALLBACK-NEXT: vpxor %xmm1, %xmm2, %xmm2
; AVX512VL-FALLBACK-NEXT: vpsubw %xmm1, %xmm2, %xmm1
; AVX512VL-FALLBACK-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512VL-FALLBACK-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i16_signed_reg_reg:
; AVX512BW-FALLBACK: # %bb.0:
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-FALLBACK-NEXT: vpcmpgtw %zmm1, %zmm0, %k1
; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vpbroadcastw {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1]
; AVX512BW-FALLBACK-NEXT: vmovdqu16 %zmm2, %zmm3 {%k1}
; AVX512BW-FALLBACK-NEXT: vpminsw %xmm1, %xmm0, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsubw %xmm2, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpmullw %xmm3, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512BW-FALLBACK-NEXT: vzeroupper
; AVX512BW-FALLBACK-NEXT: retq
;
; AVX512VLBW-LABEL: vec128_i16_signed_reg_reg:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpcmpgtw %xmm1, %xmm0, %k1
; AVX512VLBW-NEXT: vpminsw %xmm1, %xmm0, %xmm2
; AVX512VLBW-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; AVX512VLBW-NEXT: vpsubw %xmm2, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512VLBW-NEXT: vpsubw %xmm1, %xmm2, %xmm1 {%k1}
; AVX512VLBW-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512VLBW-NEXT: retq
  %t3 = icmp sgt <8 x i16> %a1, %a2 ; signed
  %t4 = select <8 x i1> %t3, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %t5 = select <8 x i1> %t3, <8 x i16> %a2, <8 x i16> %a1
  %t6 = select <8 x i1> %t3, <8 x i16> %a1, <8 x i16> %a2
  %t7 = sub <8 x i16> %t6, %t5
  %t8 = lshr <8 x i16> %t7, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %t9 = mul nsw <8 x i16> %t8, %t4 ; signed
  %a10 = add nsw <8 x i16> %t9, %a1 ; signed
  ret <8 x i16> %a10
}
define <8 x i16> @vec128_i16_unsigned_reg_reg(<8 x i16> %a1, <8 x i16> %a2) nounwind {
; SSE2-LABEL: vec128_i16_unsigned_reg_reg:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
; SSE2-NEXT: pxor %xmm0, %xmm2
; SSE2-NEXT: pcmpgtw %xmm3, %xmm2
; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: psubusw %xmm1, %xmm3
; SSE2-NEXT: psubusw %xmm0, %xmm1
; SSE2-NEXT: por %xmm1, %xmm3
; SSE2-NEXT: psrlw $1, %xmm3
; SSE2-NEXT: pmullw %xmm2, %xmm3
; SSE2-NEXT: paddw %xmm3, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: vec128_i16_unsigned_reg_reg:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: pminuw %xmm1, %xmm2
; SSE41-NEXT: movdqa %xmm0, %xmm3
; SSE41-NEXT: pcmpeqw %xmm2, %xmm3
; SSE41-NEXT: pcmpeqd %xmm4, %xmm4
; SSE41-NEXT: pxor %xmm3, %xmm4
; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4
; SSE41-NEXT: pmaxuw %xmm0, %xmm1
; SSE41-NEXT: psubw %xmm2, %xmm1
; SSE41-NEXT: psrlw $1, %xmm1
; SSE41-NEXT: pmullw %xmm1, %xmm4
; SSE41-NEXT: paddw %xmm4, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: vec128_i16_unsigned_reg_reg:
; AVX: # %bb.0:
; AVX-NEXT: vpminuw %xmm1, %xmm0, %xmm2
; AVX-NEXT: vpcmpeqw %xmm2, %xmm0, %xmm3
; AVX-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
; AVX-NEXT: vpxor %xmm4, %xmm3, %xmm3
; AVX-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vpmaxuw %xmm1, %xmm0, %xmm1
; AVX-NEXT: vpsubw %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX-NEXT: vpmullw %xmm3, %xmm1, %xmm1
; AVX-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: vec128_i16_unsigned_reg_reg:
; XOP: # %bb.0:
; XOP-NEXT: vpcomgtuw %xmm1, %xmm0, %xmm2
; XOP-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; XOP-NEXT: vpminuw %xmm1, %xmm0, %xmm3
; XOP-NEXT: vpmaxuw %xmm1, %xmm0, %xmm1
; XOP-NEXT: vpsubw %xmm3, %xmm1, %xmm1
; XOP-NEXT: vpsrlw $1, %xmm1, %xmm1
; XOP-NEXT: vpmacsww %xmm0, %xmm2, %xmm1, %xmm0
; XOP-NEXT: retq
;
; AVX512F-LABEL: vec128_i16_unsigned_reg_reg:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpminuw %xmm1, %xmm0, %xmm2
; AVX512F-NEXT: vpcmpeqw %xmm2, %xmm0, %xmm3
; AVX512F-NEXT: vpternlogq $15, %zmm3, %zmm3, %zmm3
; AVX512F-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3
; AVX512F-NEXT: vpmaxuw %xmm1, %xmm0, %xmm1
; AVX512F-NEXT: vpsubw %xmm2, %xmm1, %xmm1
; AVX512F-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512F-NEXT: vpmullw %xmm3, %xmm1, %xmm1
; AVX512F-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-FALLBACK-LABEL: vec128_i16_unsigned_reg_reg:
; AVX512VL-FALLBACK: # %bb.0:
; AVX512VL-FALLBACK-NEXT: vpminuw %xmm1, %xmm0, %xmm2
; AVX512VL-FALLBACK-NEXT: vpmaxuw %xmm1, %xmm0, %xmm1
; AVX512VL-FALLBACK-NEXT: vpsubw %xmm2, %xmm1, %xmm1
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512VL-FALLBACK-NEXT: vpcmpeqw %xmm2, %xmm0, %xmm2
; AVX512VL-FALLBACK-NEXT: vpternlogq $15, %xmm2, %xmm2, %xmm2
; AVX512VL-FALLBACK-NEXT: vpxor %xmm2, %xmm1, %xmm1
; AVX512VL-FALLBACK-NEXT: vpsubw %xmm2, %xmm1, %xmm1
; AVX512VL-FALLBACK-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512VL-FALLBACK-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i16_unsigned_reg_reg:
; AVX512BW-FALLBACK: # %bb.0:
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-FALLBACK-NEXT: vpcmpnleuw %zmm1, %zmm0, %k1
; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vpbroadcastw {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1]
; AVX512BW-FALLBACK-NEXT: vmovdqu16 %zmm2, %zmm3 {%k1}
; AVX512BW-FALLBACK-NEXT: vpminuw %xmm1, %xmm0, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmaxuw %xmm1, %xmm0, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsubw %xmm2, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpmullw %xmm3, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512BW-FALLBACK-NEXT: vzeroupper
; AVX512BW-FALLBACK-NEXT: retq
;
; AVX512VLBW-LABEL: vec128_i16_unsigned_reg_reg:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpcmpnleuw %xmm1, %xmm0, %k1
; AVX512VLBW-NEXT: vpminuw %xmm1, %xmm0, %xmm2
; AVX512VLBW-NEXT: vpmaxuw %xmm1, %xmm0, %xmm1
; AVX512VLBW-NEXT: vpsubw %xmm2, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512VLBW-NEXT: vpsubw %xmm1, %xmm2, %xmm1 {%k1}
; AVX512VLBW-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512VLBW-NEXT: retq
  %t3 = icmp ugt <8 x i16> %a1, %a2
  %t4 = select <8 x i1> %t3, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %t5 = select <8 x i1> %t3, <8 x i16> %a2, <8 x i16> %a1
  %t6 = select <8 x i1> %t3, <8 x i16> %a1, <8 x i16> %a2
  %t7 = sub <8 x i16> %t6, %t5
  %t8 = lshr <8 x i16> %t7, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %t9 = mul <8 x i16> %t8, %t4
  %a10 = add <8 x i16> %t9, %a1
  ret <8 x i16> %a10
}
; Values are loaded. Only check signed case.

define <8 x i16> @vec128_i16_signed_mem_reg(ptr %a1_addr, <8 x i16> %a2) nounwind {
; SSE-LABEL: vec128_i16_signed_mem_reg:
; SSE: # %bb.0:
; SSE-NEXT: movdqa (%rdi), %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: pcmpgtw %xmm0, %xmm2
; SSE-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE-NEXT: movdqa %xmm1, %xmm3
; SSE-NEXT: pminsw %xmm0, %xmm3
; SSE-NEXT: pmaxsw %xmm1, %xmm0
; SSE-NEXT: psubw %xmm3, %xmm0
; SSE-NEXT: psrlw $1, %xmm0
; SSE-NEXT: pmullw %xmm2, %xmm0
; SSE-NEXT: paddw %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: vec128_i16_signed_mem_reg:
; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm1
; AVX-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm2
; AVX-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX-NEXT: vpminsw %xmm0, %xmm1, %xmm3
; AVX-NEXT: vpmaxsw %xmm0, %xmm1, %xmm0
; AVX-NEXT: vpsubw %xmm3, %xmm0, %xmm0
; AVX-NEXT: vpsrlw $1, %xmm0, %xmm0
; AVX-NEXT: vpmullw %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: vec128_i16_signed_mem_reg:
; XOP: # %bb.0:
; XOP-NEXT: vmovdqa (%rdi), %xmm1
; XOP-NEXT: vpcomgtw %xmm0, %xmm1, %xmm2
; XOP-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; XOP-NEXT: vpminsw %xmm0, %xmm1, %xmm3
; XOP-NEXT: vpmaxsw %xmm0, %xmm1, %xmm0
; XOP-NEXT: vpsubw %xmm3, %xmm0, %xmm0
; XOP-NEXT: vpsrlw $1, %xmm0, %xmm0
; XOP-NEXT: vpmacsww %xmm1, %xmm2, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512F-LABEL: vec128_i16_signed_mem_reg:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm1
; AVX512F-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm2
; AVX512F-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX512F-NEXT: vpminsw %xmm0, %xmm1, %xmm3
; AVX512F-NEXT: vpmaxsw %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: vpsubw %xmm3, %xmm0, %xmm0
; AVX512F-NEXT: vpsrlw $1, %xmm0, %xmm0
; AVX512F-NEXT: vpmullw %xmm2, %xmm0, %xmm0
; AVX512F-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; AVX512F-NEXT: retq
;
; AVX512VL-FALLBACK-LABEL: vec128_i16_signed_mem_reg:
; AVX512VL-FALLBACK: # %bb.0:
; AVX512VL-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX512VL-FALLBACK-NEXT: vpminsw %xmm0, %xmm1, %xmm2
; AVX512VL-FALLBACK-NEXT: vpmaxsw %xmm0, %xmm1, %xmm3
; AVX512VL-FALLBACK-NEXT: vpsubw %xmm2, %xmm3, %xmm2
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %xmm2, %xmm2
; AVX512VL-FALLBACK-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
; AVX512VL-FALLBACK-NEXT: vpxor %xmm0, %xmm2, %xmm2
; AVX512VL-FALLBACK-NEXT: vpsubw %xmm0, %xmm2, %xmm0
; AVX512VL-FALLBACK-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; AVX512VL-FALLBACK-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i16_signed_mem_reg:
; AVX512BW-FALLBACK: # %bb.0:
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX512BW-FALLBACK-NEXT: vpcmpgtw %zmm0, %zmm1, %k1
; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vpbroadcastw {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1]
; AVX512BW-FALLBACK-NEXT: vmovdqu16 %zmm2, %zmm3 {%k1}
; AVX512BW-FALLBACK-NEXT: vpminsw %xmm0, %xmm1, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmaxsw %xmm0, %xmm1, %xmm0
; AVX512BW-FALLBACK-NEXT: vpsubw %xmm2, %xmm0, %xmm0
; AVX512BW-FALLBACK-NEXT: vpsrlw $1, %xmm0, %xmm0
; AVX512BW-FALLBACK-NEXT: vpmullw %xmm3, %xmm0, %xmm0
; AVX512BW-FALLBACK-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; AVX512BW-FALLBACK-NEXT: vzeroupper
; AVX512BW-FALLBACK-NEXT: retq
;
; AVX512VLBW-LABEL: vec128_i16_signed_mem_reg:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vmovdqa (%rdi), %xmm1
; AVX512VLBW-NEXT: vpcmpgtw %xmm0, %xmm1, %k1
; AVX512VLBW-NEXT: vpminsw %xmm0, %xmm1, %xmm2
; AVX512VLBW-NEXT: vpmaxsw %xmm0, %xmm1, %xmm0
; AVX512VLBW-NEXT: vpsubw %xmm2, %xmm0, %xmm0
; AVX512VLBW-NEXT: vpsrlw $1, %xmm0, %xmm0
; AVX512VLBW-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512VLBW-NEXT: vpsubw %xmm0, %xmm2, %xmm0 {%k1}
; AVX512VLBW-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; AVX512VLBW-NEXT: retq
  %a1 = load <8 x i16>, ptr %a1_addr
  %t3 = icmp sgt <8 x i16> %a1, %a2 ; signed
  %t4 = select <8 x i1> %t3, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %t5 = select <8 x i1> %t3, <8 x i16> %a2, <8 x i16> %a1
  %t6 = select <8 x i1> %t3, <8 x i16> %a1, <8 x i16> %a2
  %t7 = sub <8 x i16> %t6, %t5
  %t8 = lshr <8 x i16> %t7, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %t9 = mul nsw <8 x i16> %t8, %t4 ; signed
  %a10 = add nsw <8 x i16> %t9, %a1 ; signed
  ret <8 x i16> %a10
}
define <8 x i16> @vec128_i16_signed_reg_mem(<8 x i16> %a1, ptr %a2_addr) nounwind {
; SSE-LABEL: vec128_i16_signed_reg_mem:
; SSE: # %bb.0:
; SSE-NEXT: movdqa (%rdi), %xmm1
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: pcmpgtw %xmm1, %xmm2
; SSE-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: pminsw %xmm1, %xmm3
; SSE-NEXT: pmaxsw %xmm0, %xmm1
; SSE-NEXT: psubw %xmm3, %xmm1
; SSE-NEXT: psrlw $1, %xmm1
; SSE-NEXT: pmullw %xmm2, %xmm1
; SSE-NEXT: paddw %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: vec128_i16_signed_reg_mem:
; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm1
; AVX-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm2
; AVX-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX-NEXT: vpminsw %xmm1, %xmm0, %xmm3
; AVX-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; AVX-NEXT: vpsubw %xmm3, %xmm1, %xmm1
; AVX-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: vec128_i16_signed_reg_mem:
; XOP: # %bb.0:
; XOP-NEXT: vmovdqa (%rdi), %xmm1
; XOP-NEXT: vpcomgtw %xmm1, %xmm0, %xmm2
; XOP-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; XOP-NEXT: vpminsw %xmm1, %xmm0, %xmm3
; XOP-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; XOP-NEXT: vpsubw %xmm3, %xmm1, %xmm1
; XOP-NEXT: vpsrlw $1, %xmm1, %xmm1
; XOP-NEXT: vpmacsww %xmm0, %xmm2, %xmm1, %xmm0
; XOP-NEXT: retq
;
; AVX512F-LABEL: vec128_i16_signed_reg_mem:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm1
; AVX512F-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm2
; AVX512F-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX512F-NEXT: vpminsw %xmm1, %xmm0, %xmm3
; AVX512F-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; AVX512F-NEXT: vpsubw %xmm3, %xmm1, %xmm1
; AVX512F-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512F-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; AVX512F-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: retq
;
; AVX512VL-FALLBACK-LABEL: vec128_i16_signed_reg_mem:
; AVX512VL-FALLBACK: # %bb.0:
; AVX512VL-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX512VL-FALLBACK-NEXT: vpminsw %xmm1, %xmm0, %xmm2
; AVX512VL-FALLBACK-NEXT: vpmaxsw %xmm1, %xmm0, %xmm3
; AVX512VL-FALLBACK-NEXT: vpsubw %xmm2, %xmm3, %xmm2
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %xmm2, %xmm2
; AVX512VL-FALLBACK-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm1
; AVX512VL-FALLBACK-NEXT: vpxor %xmm1, %xmm2, %xmm2
; AVX512VL-FALLBACK-NEXT: vpsubw %xmm1, %xmm2, %xmm1
; AVX512VL-FALLBACK-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512VL-FALLBACK-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i16_signed_reg_mem:
; AVX512BW-FALLBACK: # %bb.0:
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX512BW-FALLBACK-NEXT: vpcmpgtw %zmm1, %zmm0, %k1
; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vpbroadcastw {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1]
; AVX512BW-FALLBACK-NEXT: vmovdqu16 %zmm2, %zmm3 {%k1}
; AVX512BW-FALLBACK-NEXT: vpminsw %xmm1, %xmm0, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsubw %xmm2, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpmullw %xmm3, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512BW-FALLBACK-NEXT: vzeroupper
; AVX512BW-FALLBACK-NEXT: retq
;
; AVX512VLBW-LABEL: vec128_i16_signed_reg_mem:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vmovdqa (%rdi), %xmm1
; AVX512VLBW-NEXT: vpcmpgtw %xmm1, %xmm0, %k1
; AVX512VLBW-NEXT: vpminsw %xmm1, %xmm0, %xmm2
; AVX512VLBW-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; AVX512VLBW-NEXT: vpsubw %xmm2, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512VLBW-NEXT: vpsubw %xmm1, %xmm2, %xmm1 {%k1}
; AVX512VLBW-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512VLBW-NEXT: retq
  %a2 = load <8 x i16>, ptr %a2_addr
  %t3 = icmp sgt <8 x i16> %a1, %a2 ; signed
  %t4 = select <8 x i1> %t3, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %t5 = select <8 x i1> %t3, <8 x i16> %a2, <8 x i16> %a1
  %t6 = select <8 x i1> %t3, <8 x i16> %a1, <8 x i16> %a2
  %t7 = sub <8 x i16> %t6, %t5
  %t8 = lshr <8 x i16> %t7, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %t9 = mul nsw <8 x i16> %t8, %t4 ; signed
  %a10 = add nsw <8 x i16> %t9, %a1 ; signed
  ret <8 x i16> %a10
}
define <8 x i16> @vec128_i16_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind {
; SSE-LABEL: vec128_i16_signed_mem_mem:
; SSE: # %bb.0:
; SSE-NEXT: movdqa (%rdi), %xmm1
; SSE-NEXT: movdqa (%rsi), %xmm0
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: pcmpgtw %xmm0, %xmm2
; SSE-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE-NEXT: movdqa %xmm1, %xmm3
; SSE-NEXT: pminsw %xmm0, %xmm3
; SSE-NEXT: pmaxsw %xmm1, %xmm0
; SSE-NEXT: psubw %xmm3, %xmm0
; SSE-NEXT: psrlw $1, %xmm0
; SSE-NEXT: pmullw %xmm2, %xmm0
; SSE-NEXT: paddw %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: vec128_i16_signed_mem_mem:
; AVX: # %bb.0:
; AVX-NEXT: vmovdqa (%rdi), %xmm0
; AVX-NEXT: vmovdqa (%rsi), %xmm1
; AVX-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm2
; AVX-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX-NEXT: vpminsw %xmm1, %xmm0, %xmm3
; AVX-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; AVX-NEXT: vpsubw %xmm3, %xmm1, %xmm1
; AVX-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: vec128_i16_signed_mem_mem:
; XOP: # %bb.0:
; XOP-NEXT: vmovdqa (%rdi), %xmm0
; XOP-NEXT: vmovdqa (%rsi), %xmm1
; XOP-NEXT: vpcomgtw %xmm1, %xmm0, %xmm2
; XOP-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; XOP-NEXT: vpminsw %xmm1, %xmm0, %xmm3
; XOP-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; XOP-NEXT: vpsubw %xmm3, %xmm1, %xmm1
; XOP-NEXT: vpsrlw $1, %xmm1, %xmm1
; XOP-NEXT: vpmacsww %xmm0, %xmm2, %xmm1, %xmm0
; XOP-NEXT: retq
;
; AVX512F-LABEL: vec128_i16_signed_mem_mem:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vmovdqa (%rsi), %xmm1
; AVX512F-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm2
; AVX512F-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX512F-NEXT: vpminsw %xmm1, %xmm0, %xmm3
; AVX512F-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; AVX512F-NEXT: vpsubw %xmm3, %xmm1, %xmm1
; AVX512F-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512F-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; AVX512F-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: retq
;
; AVX512VL-FALLBACK-LABEL: vec128_i16_signed_mem_mem:
; AVX512VL-FALLBACK: # %bb.0:
; AVX512VL-FALLBACK-NEXT: vmovdqa (%rdi), %xmm0
; AVX512VL-FALLBACK-NEXT: vmovdqa (%rsi), %xmm1
; AVX512VL-FALLBACK-NEXT: vpminsw %xmm1, %xmm0, %xmm2
; AVX512VL-FALLBACK-NEXT: vpmaxsw %xmm1, %xmm0, %xmm3
; AVX512VL-FALLBACK-NEXT: vpsubw %xmm2, %xmm3, %xmm2
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %xmm2, %xmm2
; AVX512VL-FALLBACK-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm1
; AVX512VL-FALLBACK-NEXT: vpxor %xmm1, %xmm2, %xmm2
; AVX512VL-FALLBACK-NEXT: vpsubw %xmm1, %xmm2, %xmm1
; AVX512VL-FALLBACK-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512VL-FALLBACK-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i16_signed_mem_mem:
; AVX512BW-FALLBACK: # %bb.0:
; AVX512BW-FALLBACK-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-FALLBACK-NEXT: vmovdqa (%rsi), %xmm1
; AVX512BW-FALLBACK-NEXT: vpcmpgtw %zmm1, %zmm0, %k1
; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vpbroadcastw {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1]
; AVX512BW-FALLBACK-NEXT: vmovdqu16 %zmm2, %zmm3 {%k1}
; AVX512BW-FALLBACK-NEXT: vpminsw %xmm1, %xmm0, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsubw %xmm2, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpmullw %xmm3, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512BW-FALLBACK-NEXT: vzeroupper
; AVX512BW-FALLBACK-NEXT: retq
;
; AVX512VLBW-LABEL: vec128_i16_signed_mem_mem:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512VLBW-NEXT: vmovdqa (%rsi), %xmm1
; AVX512VLBW-NEXT: vpcmpgtw %xmm1, %xmm0, %k1
; AVX512VLBW-NEXT: vpminsw %xmm1, %xmm0, %xmm2
; AVX512VLBW-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; AVX512VLBW-NEXT: vpsubw %xmm2, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512VLBW-NEXT: vpsubw %xmm1, %xmm2, %xmm1 {%k1}
; AVX512VLBW-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512VLBW-NEXT: retq
  %a1 = load <8 x i16>, ptr %a1_addr
  %a2 = load <8 x i16>, ptr %a2_addr
  %t3 = icmp sgt <8 x i16> %a1, %a2 ; signed
  %t4 = select <8 x i1> %t3, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %t5 = select <8 x i1> %t3, <8 x i16> %a2, <8 x i16> %a1
  %t6 = select <8 x i1> %t3, <8 x i16> %a1, <8 x i16> %a2
  %t7 = sub <8 x i16> %t6, %t5
  %t8 = lshr <8 x i16> %t7, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %t9 = mul nsw <8 x i16> %t8, %t4 ; signed
  %a10 = add nsw <8 x i16> %t9, %a1 ; signed
  ret <8 x i16> %a10
}
; ---------------------------------------------------------------------------- ;
; 8-bit width. 128 / 8 = 16 elts.
; ---------------------------------------------------------------------------- ;

; Values come from regs
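
; Note: x86 has no per-byte vector shift or multiply, so in the i8 checks the
; halving shows up as a 16-bit shift plus a mask (lshr by 1 on each byte is
; psrlw $1 followed by pand with 0x7f in every byte), and the multiply is done
; by widening the bytes to 16-bit lanes (punpck/pmovzx + pmullw + pack).
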
define <16 x i8> @vec128_i8_signed_reg_reg(<16 x i8> %a1, <16 x i8> %a2) nounwind {
; SSE2-LABEL: vec128_i8_signed_reg_reg:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pcmpgtb %xmm1, %xmm3
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; SSE2-NEXT: por %xmm3, %xmm2
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: psubb %xmm1, %xmm4
; SSE2-NEXT: psubb %xmm0, %xmm1
; SSE2-NEXT: pand %xmm3, %xmm4
; SSE2-NEXT: pandn %xmm1, %xmm3
; SSE2-NEXT: por %xmm4, %xmm3
; SSE2-NEXT: psrlw $1, %xmm3
; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
; SSE2-NEXT: movdqa %xmm3, %xmm1
; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: movdqa %xmm2, %xmm4
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: pmullw %xmm1, %xmm4
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
; SSE2-NEXT: pand %xmm1, %xmm4
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pmullw %xmm3, %xmm2
; SSE2-NEXT: pand %xmm1, %xmm2
; SSE2-NEXT: packuswb %xmm4, %xmm2
; SSE2-NEXT: paddb %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: vec128_i8_signed_reg_reg:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: pcmpgtb %xmm1, %xmm2
; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE41-NEXT: movdqa %xmm0, %xmm3
; SSE41-NEXT: pminsb %xmm1, %xmm3
; SSE41-NEXT: pmaxsb %xmm0, %xmm1
; SSE41-NEXT: psubb %xmm3, %xmm1
; SSE41-NEXT: psrlw $1, %xmm1
; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; SSE41-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm4 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; SSE41-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE41-NEXT: pmullw %xmm1, %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
; SSE41-NEXT: pand %xmm1, %xmm2
; SSE41-NEXT: pmullw %xmm4, %xmm3
; SSE41-NEXT: pand %xmm1, %xmm3
; SSE41-NEXT: packuswb %xmm2, %xmm3
; SSE41-NEXT: paddb %xmm3, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: vec128_i8_signed_reg_reg:
; AVX1: # %bb.0:
; AVX1-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX1-NEXT: vpminsb %xmm1, %xmm0, %xmm3
; AVX1-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vpsubb %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpmullw %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vbroadcastss {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; AVX1-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
; AVX1-NEXT: vpackuswb %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: vec128_i8_signed_reg_reg:
; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm2
; AVX2-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX2-NEXT: vpminsb %xmm1, %xmm0, %xmm3
; AVX2-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
; AVX2-NEXT: vpsubb %xmm3, %xmm1, %xmm1
; AVX2-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
; AVX2-NEXT: vpmullw %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX2-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; XOP-FALLBACK-LABEL: vec128_i8_signed_reg_reg:
; XOP-FALLBACK: # %bb.0:
; XOP-FALLBACK-NEXT: vpcomgtb %xmm1, %xmm0, %xmm2
; XOP-FALLBACK-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; XOP-FALLBACK-NEXT: vpminsb %xmm1, %xmm0, %xmm3
; XOP-FALLBACK-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
; XOP-FALLBACK-NEXT: vpsubb %xmm3, %xmm1, %xmm1
; XOP-FALLBACK-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; XOP-FALLBACK-NEXT: vpshlb %xmm3, %xmm1, %xmm1
; XOP-FALLBACK-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; XOP-FALLBACK-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; XOP-FALLBACK-NEXT: vpmullw %xmm4, %xmm3, %xmm3
; XOP-FALLBACK-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; XOP-FALLBACK-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; XOP-FALLBACK-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; XOP-FALLBACK-NEXT: vpperm {{.*#+}} xmm1 = xmm1[0,2,4,6,8,10,12,14],xmm3[0,2,4,6,8,10,12,14]
; XOP-FALLBACK-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; XOP-FALLBACK-NEXT: retq
;
; XOPAVX1-LABEL: vec128_i8_signed_reg_reg:
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpcomgtb %xmm1, %xmm0, %xmm2
; XOPAVX1-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; XOPAVX1-NEXT: vpminsb %xmm1, %xmm0, %xmm3
; XOPAVX1-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
; XOPAVX1-NEXT: vpsubb %xmm3, %xmm1, %xmm1
; XOPAVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; XOPAVX1-NEXT: vpshlb %xmm3, %xmm1, %xmm1
; XOPAVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; XOPAVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; XOPAVX1-NEXT: vpmullw %xmm4, %xmm3, %xmm3
; XOPAVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; XOPAVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; XOPAVX1-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; XOPAVX1-NEXT: vpperm {{.*#+}} xmm1 = xmm1[0,2,4,6,8,10,12,14],xmm3[0,2,4,6,8,10,12,14]
; XOPAVX1-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: vec128_i8_signed_reg_reg:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpcomgtb %xmm1, %xmm0, %xmm2
; XOPAVX2-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; XOPAVX2-NEXT: vpminsb %xmm1, %xmm0, %xmm3
; XOPAVX2-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
; XOPAVX2-NEXT: vpsubb %xmm3, %xmm1, %xmm1
; XOPAVX2-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; XOPAVX2-NEXT: vpshlb %xmm3, %xmm1, %xmm1
; XOPAVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; XOPAVX2-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
; XOPAVX2-NEXT: vpmullw %ymm2, %ymm1, %ymm1
; XOPAVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; XOPAVX2-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
; XOPAVX2-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; XOPAVX2-NEXT: vzeroupper
; XOPAVX2-NEXT: retq
;
; AVX512F-LABEL: vec128_i8_signed_reg_reg:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm2
; AVX512F-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX512F-NEXT: vpminsb %xmm1, %xmm0, %xmm3
; AVX512F-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
; AVX512F-NEXT: vpsubb %xmm3, %xmm1, %xmm1
; AVX512F-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
; AVX512F-NEXT: vpmullw %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
; AVX512F-NEXT: vpmovdb %zmm1, %xmm1
; AVX512F-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-FALLBACK-LABEL: vec128_i8_signed_reg_reg:
; AVX512VL-FALLBACK: # %bb.0:
; AVX512VL-FALLBACK-NEXT: vpminsb %xmm1, %xmm0, %xmm2
; AVX512VL-FALLBACK-NEXT: vpmaxsb %xmm1, %xmm0, %xmm3
; AVX512VL-FALLBACK-NEXT: vpsubb %xmm2, %xmm3, %xmm2
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %xmm2, %xmm2
; AVX512VL-FALLBACK-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm1
; AVX512VL-FALLBACK-NEXT: vpternlogd $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm1, %xmm2
; AVX512VL-FALLBACK-NEXT: vpsubb %xmm1, %xmm2, %xmm1
; AVX512VL-FALLBACK-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; AVX512VL-FALLBACK-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i8_signed_reg_reg:
; AVX512BW-FALLBACK: # %bb.0:
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-FALLBACK-NEXT: vpcmpgtb %zmm1, %zmm0, %k1
; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vpbroadcastb {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512BW-FALLBACK-NEXT: vmovdqu8 %zmm2, %zmm3 {%k1}
; AVX512BW-FALLBACK-NEXT: vpminsb %xmm1, %xmm0, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsubb %xmm2, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX512BW-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
; AVX512BW-FALLBACK-NEXT: vpmullw %ymm2, %ymm1, %ymm1
; AVX512BW-FALLBACK-NEXT: vpmovwb %zmm1, %ymm1
; AVX512BW-FALLBACK-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; AVX512BW-FALLBACK-NEXT: vzeroupper
; AVX512BW-FALLBACK-NEXT: retq
;
; AVX512VLBW-LABEL: vec128_i8_signed_reg_reg:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpcmpgtb %xmm1, %xmm0, %k1
; AVX512VLBW-NEXT: vpminsb %xmm1, %xmm0, %xmm2
; AVX512VLBW-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
; AVX512VLBW-NEXT: vpsubb %xmm2, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512VLBW-NEXT: vpsubb %xmm1, %xmm2, %xmm1 {%k1}
; AVX512VLBW-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; AVX512VLBW-NEXT: retq
  %t3 = icmp sgt <16 x i8> %a1, %a2 ; signed
  %t4 = select <16 x i1> %t3, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %t5 = select <16 x i1> %t3, <16 x i8> %a2, <16 x i8> %a1
  %t6 = select <16 x i1> %t3, <16 x i8> %a1, <16 x i8> %a2
  %t7 = sub <16 x i8> %t6, %t5
  %t8 = lshr <16 x i8> %t7, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %t9 = mul nsw <16 x i8> %t8, %t4 ; signed
  %a10 = add nsw <16 x i8> %t9, %a1 ; signed
  ret <16 x i8> %a10
}
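
; pcmpgtb is signed-only, so the unsigned test below synthesizes `ugt` as
; NOT(%a1 == umin(%a1, %a2)): pminub + pcmpeqb, then pxor with all-ones.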
define <16 x i8> @vec128_i8_unsigned_reg_reg(<16 x i8> %a1, <16 x i8> %a2) nounwind {
; SSE2-LABEL: vec128_i8_unsigned_reg_reg:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: pminub %xmm1, %xmm3
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: pcmpeqb %xmm3, %xmm4
; SSE2-NEXT: pcmpeqd %xmm2, %xmm2
; SSE2-NEXT: pxor %xmm4, %xmm2
; SSE2-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE2-NEXT: pmaxub %xmm0, %xmm1
; SSE2-NEXT: psubb %xmm3, %xmm1
; SSE2-NEXT: psrlw $1, %xmm1
; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: movdqa %xmm2, %xmm4
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: pmullw %xmm3, %xmm4
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
; SSE2-NEXT: pand %xmm3, %xmm4
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pmullw %xmm1, %xmm2
; SSE2-NEXT: pand %xmm3, %xmm2
; SSE2-NEXT: packuswb %xmm4, %xmm2
; SSE2-NEXT: paddb %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: vec128_i8_unsigned_reg_reg:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm3
; SSE41-NEXT: pminub %xmm1, %xmm3
; SSE41-NEXT: movdqa %xmm0, %xmm4
; SSE41-NEXT: pcmpeqb %xmm3, %xmm4
; SSE41-NEXT: pcmpeqd %xmm2, %xmm2
; SSE41-NEXT: pxor %xmm4, %xmm2
; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE41-NEXT: pmaxub %xmm0, %xmm1
; SSE41-NEXT: psubb %xmm3, %xmm1
; SSE41-NEXT: psrlw $1, %xmm1
; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; SSE41-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm4 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; SSE41-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE41-NEXT: pmullw %xmm1, %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
; SSE41-NEXT: pand %xmm1, %xmm2
; SSE41-NEXT: pmullw %xmm4, %xmm3
; SSE41-NEXT: pand %xmm1, %xmm3
; SSE41-NEXT: packuswb %xmm2, %xmm3
; SSE41-NEXT: paddb %xmm3, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: vec128_i8_unsigned_reg_reg:
; AVX1: # %bb.0:
; AVX1-NEXT: vpminub %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vpcmpeqb %xmm2, %xmm0, %xmm3
; AVX1-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
; AVX1-NEXT: vpxor %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3
; AVX1-NEXT: vpmaxub %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vpsubb %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpmullw %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vbroadcastss {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
; AVX1-NEXT: vpmullw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
; AVX1-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: vec128_i8_unsigned_reg_reg:
; AVX2: # %bb.0:
; AVX2-NEXT: vpminub %xmm1, %xmm0, %xmm2
; AVX2-NEXT: vpcmpeqb %xmm2, %xmm0, %xmm3
; AVX2-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
; AVX2-NEXT: vpxor %xmm4, %xmm3, %xmm3
; AVX2-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3
; AVX2-NEXT: vpmaxub %xmm1, %xmm0, %xmm1
; AVX2-NEXT: vpsubb %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
; AVX2-NEXT: vpmullw %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX2-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; XOP-FALLBACK-LABEL: vec128_i8_unsigned_reg_reg:
; XOP-FALLBACK: # %bb.0:
; XOP-FALLBACK-NEXT: vpcomgtub %xmm1, %xmm0, %xmm2
; XOP-FALLBACK-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; XOP-FALLBACK-NEXT: vpminub %xmm1, %xmm0, %xmm3
; XOP-FALLBACK-NEXT: vpmaxub %xmm1, %xmm0, %xmm1
; XOP-FALLBACK-NEXT: vpsubb %xmm3, %xmm1, %xmm1
; XOP-FALLBACK-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; XOP-FALLBACK-NEXT: vpshlb %xmm3, %xmm1, %xmm1
; XOP-FALLBACK-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; XOP-FALLBACK-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; XOP-FALLBACK-NEXT: vpmullw %xmm4, %xmm3, %xmm3
; XOP-FALLBACK-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; XOP-FALLBACK-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; XOP-FALLBACK-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; XOP-FALLBACK-NEXT: vpperm {{.*#+}} xmm1 = xmm1[0,2,4,6,8,10,12,14],xmm3[0,2,4,6,8,10,12,14]
; XOP-FALLBACK-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; XOP-FALLBACK-NEXT: retq
;
; XOPAVX1-LABEL: vec128_i8_unsigned_reg_reg:
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpcomgtub %xmm1, %xmm0, %xmm2
; XOPAVX1-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; XOPAVX1-NEXT: vpminub %xmm1, %xmm0, %xmm3
; XOPAVX1-NEXT: vpmaxub %xmm1, %xmm0, %xmm1
; XOPAVX1-NEXT: vpsubb %xmm3, %xmm1, %xmm1
; XOPAVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; XOPAVX1-NEXT: vpshlb %xmm3, %xmm1, %xmm1
; XOPAVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; XOPAVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; XOPAVX1-NEXT: vpmullw %xmm4, %xmm3, %xmm3
; XOPAVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; XOPAVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; XOPAVX1-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; XOPAVX1-NEXT: vpperm {{.*#+}} xmm1 = xmm1[0,2,4,6,8,10,12,14],xmm3[0,2,4,6,8,10,12,14]
; XOPAVX1-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: vec128_i8_unsigned_reg_reg:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpcomgtub %xmm1, %xmm0, %xmm2
; XOPAVX2-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; XOPAVX2-NEXT: vpminub %xmm1, %xmm0, %xmm3
; XOPAVX2-NEXT: vpmaxub %xmm1, %xmm0, %xmm1
; XOPAVX2-NEXT: vpsubb %xmm3, %xmm1, %xmm1
; XOPAVX2-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; XOPAVX2-NEXT: vpshlb %xmm3, %xmm1, %xmm1
; XOPAVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; XOPAVX2-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
; XOPAVX2-NEXT: vpmullw %ymm2, %ymm1, %ymm1
; XOPAVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; XOPAVX2-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
; XOPAVX2-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; XOPAVX2-NEXT: vzeroupper
; XOPAVX2-NEXT: retq
;
; AVX512F-LABEL: vec128_i8_unsigned_reg_reg:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpminub %xmm1, %xmm0, %xmm2
; AVX512F-NEXT: vpcmpeqb %xmm2, %xmm0, %xmm3
; AVX512F-NEXT: vpternlogq $15, %zmm3, %zmm3, %zmm3
; AVX512F-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3
; AVX512F-NEXT: vpmaxub %xmm1, %xmm0, %xmm1
; AVX512F-NEXT: vpsubb %xmm2, %xmm1, %xmm1
; AVX512F-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
; AVX512F-NEXT: vpmullw %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
; AVX512F-NEXT: vpmovdb %zmm1, %xmm1
; AVX512F-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-FALLBACK-LABEL: vec128_i8_unsigned_reg_reg:
; AVX512VL-FALLBACK: # %bb.0:
; AVX512VL-FALLBACK-NEXT: vpminub %xmm1, %xmm0, %xmm2
; AVX512VL-FALLBACK-NEXT: vpmaxub %xmm1, %xmm0, %xmm1
; AVX512VL-FALLBACK-NEXT: vpsubb %xmm2, %xmm1, %xmm1
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512VL-FALLBACK-NEXT: vpcmpeqb %xmm2, %xmm0, %xmm2
; AVX512VL-FALLBACK-NEXT: vpternlogq $15, %xmm2, %xmm2, %xmm2
; AVX512VL-FALLBACK-NEXT: vpternlogd $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm2, %xmm1
; AVX512VL-FALLBACK-NEXT: vpsubb %xmm2, %xmm1, %xmm1
; AVX512VL-FALLBACK-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; AVX512VL-FALLBACK-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i8_unsigned_reg_reg:
; AVX512BW-FALLBACK: # %bb.0:
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-FALLBACK-NEXT: vpcmpnleub %zmm1, %zmm0, %k1
; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vpbroadcastb {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512BW-FALLBACK-NEXT: vmovdqu8 %zmm2, %zmm3 {%k1}
; AVX512BW-FALLBACK-NEXT: vpminub %xmm1, %xmm0, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmaxub %xmm1, %xmm0, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsubb %xmm2, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX512BW-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
; AVX512BW-FALLBACK-NEXT: vpmullw %ymm2, %ymm1, %ymm1
; AVX512BW-FALLBACK-NEXT: vpmovwb %zmm1, %ymm1
; AVX512BW-FALLBACK-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; AVX512BW-FALLBACK-NEXT: vzeroupper
; AVX512BW-FALLBACK-NEXT: retq
;
; AVX512VLBW-LABEL: vec128_i8_unsigned_reg_reg:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpcmpnleub %xmm1, %xmm0, %k1
; AVX512VLBW-NEXT: vpminub %xmm1, %xmm0, %xmm2
; AVX512VLBW-NEXT: vpmaxub %xmm1, %xmm0, %xmm1
; AVX512VLBW-NEXT: vpsubb %xmm2, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512VLBW-NEXT: vpsubb %xmm1, %xmm2, %xmm1 {%k1}
; AVX512VLBW-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; AVX512VLBW-NEXT: retq
  %t3 = icmp ugt <16 x i8> %a1, %a2
  %t4 = select <16 x i1> %t3, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %t5 = select <16 x i1> %t3, <16 x i8> %a2, <16 x i8> %a1
  %t6 = select <16 x i1> %t3, <16 x i8> %a1, <16 x i8> %a2
  %t7 = sub <16 x i8> %t6, %t5
  %t8 = lshr <16 x i8> %t7, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %t9 = mul <16 x i8> %t8, %t4
  %a10 = add <16 x i8> %t9, %a1
  ret <16 x i8> %a10
}
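
; The unsigned variant above differs from the signed one only in the compare
; (icmp ugt) and the min/max flavor (pminub/pmaxub). Targets without an
; unsigned byte compare recover the predicate as
;   a1 >u a2  <==>  not (a1 == umin(a1, a2))
; which is why the SSE lowerings synthesize it with pminub + pcmpeqb + pxor.
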
; Values are loaded. Only check signed case.

define <16 x i8> @vec128_i8_signed_mem_reg(ptr %a1_addr, <16 x i8> %a2) nounwind {
; SSE2-LABEL: vec128_i8_signed_mem_reg:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: movdqa (%rdi), %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: pcmpgtb %xmm0, %xmm3
; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; SSE2-NEXT: por %xmm3, %xmm0
; SSE2-NEXT: movdqa %xmm2, %xmm4
; SSE2-NEXT: psubb %xmm1, %xmm4
; SSE2-NEXT: psubb %xmm2, %xmm1
; SSE2-NEXT: pand %xmm3, %xmm4
; SSE2-NEXT: pandn %xmm1, %xmm3
; SSE2-NEXT: por %xmm4, %xmm3
; SSE2-NEXT: psrlw $1, %xmm3
; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
; SSE2-NEXT: movdqa %xmm3, %xmm1
; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: pmullw %xmm1, %xmm4
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
; SSE2-NEXT: pand %xmm1, %xmm4
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pmullw %xmm3, %xmm0
; SSE2-NEXT: pand %xmm1, %xmm0
; SSE2-NEXT: packuswb %xmm4, %xmm0
; SSE2-NEXT: paddb %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: vec128_i8_signed_mem_reg:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa (%rdi), %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm3
; SSE41-NEXT: pcmpgtb %xmm0, %xmm3
; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
; SSE41-NEXT: movdqa %xmm2, %xmm1
; SSE41-NEXT: pminsb %xmm0, %xmm1
; SSE41-NEXT: pmaxsb %xmm2, %xmm0
; SSE41-NEXT: psubb %xmm1, %xmm0
; SSE41-NEXT: psrlw $1, %xmm0
; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
; SSE41-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE41-NEXT: pmullw %xmm0, %xmm3
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [255,255,255,255,255,255,255,255]
; SSE41-NEXT: pand %xmm0, %xmm3
; SSE41-NEXT: pmullw %xmm4, %xmm1
; SSE41-NEXT: pand %xmm0, %xmm1
; SSE41-NEXT: packuswb %xmm3, %xmm1
; SSE41-NEXT: paddb %xmm2, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: vec128_i8_signed_mem_reg:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa (%rdi), %xmm1
; AVX1-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm2
; AVX1-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX1-NEXT: vpminsb %xmm0, %xmm1, %xmm3
; AVX1-NEXT: vpmaxsb %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vpsubb %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpmullw %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vbroadcastss {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; AVX1-NEXT: vpmullw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: vec128_i8_signed_mem_reg:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %xmm1
; AVX2-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm2
; AVX2-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX2-NEXT: vpminsb %xmm0, %xmm1, %xmm3
; AVX2-NEXT: vpmaxsb %xmm0, %xmm1, %xmm0
; AVX2-NEXT: vpsubb %xmm3, %xmm0, %xmm0
; AVX2-NEXT: vpsrlw $1, %xmm0, %xmm0
; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
; AVX2-NEXT: vpmullw %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX2-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; XOP-FALLBACK-LABEL: vec128_i8_signed_mem_reg:
; XOP-FALLBACK: # %bb.0:
; XOP-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; XOP-FALLBACK-NEXT: vpcomgtb %xmm0, %xmm1, %xmm2
; XOP-FALLBACK-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; XOP-FALLBACK-NEXT: vpminsb %xmm0, %xmm1, %xmm3
; XOP-FALLBACK-NEXT: vpmaxsb %xmm0, %xmm1, %xmm0
; XOP-FALLBACK-NEXT: vpsubb %xmm3, %xmm0, %xmm0
; XOP-FALLBACK-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; XOP-FALLBACK-NEXT: vpshlb %xmm3, %xmm0, %xmm0
; XOP-FALLBACK-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; XOP-FALLBACK-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; XOP-FALLBACK-NEXT: vpmullw %xmm4, %xmm3, %xmm3
; XOP-FALLBACK-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; XOP-FALLBACK-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; XOP-FALLBACK-NEXT: vpmullw %xmm2, %xmm0, %xmm0
; XOP-FALLBACK-NEXT: vpperm {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14],xmm3[0,2,4,6,8,10,12,14]
; XOP-FALLBACK-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; XOP-FALLBACK-NEXT: retq
;
; XOPAVX1-LABEL: vec128_i8_signed_mem_reg:
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vmovdqa (%rdi), %xmm1
; XOPAVX1-NEXT: vpcomgtb %xmm0, %xmm1, %xmm2
; XOPAVX1-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; XOPAVX1-NEXT: vpminsb %xmm0, %xmm1, %xmm3
; XOPAVX1-NEXT: vpmaxsb %xmm0, %xmm1, %xmm0
; XOPAVX1-NEXT: vpsubb %xmm3, %xmm0, %xmm0
; XOPAVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; XOPAVX1-NEXT: vpshlb %xmm3, %xmm0, %xmm0
; XOPAVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; XOPAVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; XOPAVX1-NEXT: vpmullw %xmm4, %xmm3, %xmm3
; XOPAVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; XOPAVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; XOPAVX1-NEXT: vpmullw %xmm2, %xmm0, %xmm0
; XOPAVX1-NEXT: vpperm {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14],xmm3[0,2,4,6,8,10,12,14]
; XOPAVX1-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: vec128_i8_signed_mem_reg:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vmovdqa (%rdi), %xmm1
; XOPAVX2-NEXT: vpcomgtb %xmm0, %xmm1, %xmm2
; XOPAVX2-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; XOPAVX2-NEXT: vpminsb %xmm0, %xmm1, %xmm3
; XOPAVX2-NEXT: vpmaxsb %xmm0, %xmm1, %xmm0
; XOPAVX2-NEXT: vpsubb %xmm3, %xmm0, %xmm0
; XOPAVX2-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; XOPAVX2-NEXT: vpshlb %xmm3, %xmm0, %xmm0
; XOPAVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; XOPAVX2-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
; XOPAVX2-NEXT: vpmullw %ymm2, %ymm0, %ymm0
; XOPAVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
; XOPAVX2-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; XOPAVX2-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: vzeroupper
; XOPAVX2-NEXT: retq
;
; AVX512F-LABEL: vec128_i8_signed_mem_reg:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm1
; AVX512F-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm2
; AVX512F-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX512F-NEXT: vpminsb %xmm0, %xmm1, %xmm3
; AVX512F-NEXT: vpmaxsb %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: vpsubb %xmm3, %xmm0, %xmm0
; AVX512F-NEXT: vpsrlw $1, %xmm0, %xmm0
; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
; AVX512F-NEXT: vpmullw %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
; AVX512F-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-FALLBACK-LABEL: vec128_i8_signed_mem_reg:
; AVX512VL-FALLBACK: # %bb.0:
; AVX512VL-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX512VL-FALLBACK-NEXT: vpminsb %xmm0, %xmm1, %xmm2
; AVX512VL-FALLBACK-NEXT: vpmaxsb %xmm0, %xmm1, %xmm3
; AVX512VL-FALLBACK-NEXT: vpsubb %xmm2, %xmm3, %xmm2
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %xmm2, %xmm2
; AVX512VL-FALLBACK-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm0
; AVX512VL-FALLBACK-NEXT: vpternlogd $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm2
; AVX512VL-FALLBACK-NEXT: vpsubb %xmm0, %xmm2, %xmm0
; AVX512VL-FALLBACK-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; AVX512VL-FALLBACK-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i8_signed_mem_reg:
; AVX512BW-FALLBACK: # %bb.0:
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX512BW-FALLBACK-NEXT: vpcmpgtb %zmm0, %zmm1, %k1
; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vpbroadcastb {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512BW-FALLBACK-NEXT: vmovdqu8 %zmm2, %zmm3 {%k1}
; AVX512BW-FALLBACK-NEXT: vpminsb %xmm0, %xmm1, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmaxsb %xmm0, %xmm1, %xmm0
; AVX512BW-FALLBACK-NEXT: vpsubb %xmm2, %xmm0, %xmm0
; AVX512BW-FALLBACK-NEXT: vpsrlw $1, %xmm0, %xmm0
; AVX512BW-FALLBACK-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX512BW-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX512BW-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
; AVX512BW-FALLBACK-NEXT: vpmullw %ymm2, %ymm0, %ymm0
; AVX512BW-FALLBACK-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-FALLBACK-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; AVX512BW-FALLBACK-NEXT: vzeroupper
; AVX512BW-FALLBACK-NEXT: retq
;
; AVX512VLBW-LABEL: vec128_i8_signed_mem_reg:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vmovdqa (%rdi), %xmm1
; AVX512VLBW-NEXT: vpcmpgtb %xmm0, %xmm1, %k1
; AVX512VLBW-NEXT: vpminsb %xmm0, %xmm1, %xmm2
; AVX512VLBW-NEXT: vpmaxsb %xmm0, %xmm1, %xmm0
; AVX512VLBW-NEXT: vpsubb %xmm2, %xmm0, %xmm0
; AVX512VLBW-NEXT: vpsrlw $1, %xmm0, %xmm0
; AVX512VLBW-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
; AVX512VLBW-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512VLBW-NEXT: vpsubb %xmm0, %xmm2, %xmm0 {%k1}
; AVX512VLBW-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; AVX512VLBW-NEXT: retq
  %a1 = load <16 x i8>, ptr %a1_addr
  %t3 = icmp sgt <16 x i8> %a1, %a2 ; signed
  %t4 = select <16 x i1> %t3, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %t5 = select <16 x i1> %t3, <16 x i8> %a2, <16 x i8> %a1
  %t6 = select <16 x i1> %t3, <16 x i8> %a1, <16 x i8> %a2
  %t7 = sub <16 x i8> %t6, %t5
  %t8 = lshr <16 x i8> %t7, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %t9 = mul nsw <16 x i8> %t8, %t4 ; signed
  %a10 = add nsw <16 x i8> %t9, %a1 ; signed
  ret <16 x i8> %a10
}

define <16 x i8> @vec128_i8_signed_reg_mem(<16 x i8> %a1, ptr %a2_addr) nounwind {
; SSE2-LABEL: vec128_i8_signed_reg_mem:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm3
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pcmpgtb %xmm3, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; SSE2-NEXT: por %xmm2, %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: psubb %xmm3, %xmm4
; SSE2-NEXT: psubb %xmm0, %xmm3
; SSE2-NEXT: pand %xmm2, %xmm4
; SSE2-NEXT: pandn %xmm3, %xmm2
; SSE2-NEXT: por %xmm4, %xmm2
; SSE2-NEXT: psrlw $1, %xmm2
; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: pmullw %xmm3, %xmm4
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
; SSE2-NEXT: pand %xmm3, %xmm4
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pmullw %xmm2, %xmm1
; SSE2-NEXT: pand %xmm3, %xmm1
; SSE2-NEXT: packuswb %xmm4, %xmm1
; SSE2-NEXT: paddb %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: vec128_i8_signed_reg_mem:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa (%rdi), %xmm2
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: pcmpgtb %xmm2, %xmm1
; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE41-NEXT: movdqa %xmm0, %xmm3
; SSE41-NEXT: pminsb %xmm2, %xmm3
; SSE41-NEXT: pmaxsb %xmm0, %xmm2
; SSE41-NEXT: psubb %xmm3, %xmm2
; SSE41-NEXT: psrlw $1, %xmm2
; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; SSE41-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm4 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; SSE41-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE41-NEXT: pmullw %xmm2, %xmm1
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
; SSE41-NEXT: pand %xmm2, %xmm1
; SSE41-NEXT: pmullw %xmm4, %xmm3
; SSE41-NEXT: pand %xmm2, %xmm3
; SSE41-NEXT: packuswb %xmm1, %xmm3
; SSE41-NEXT: paddb %xmm3, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: vec128_i8_signed_reg_mem:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa (%rdi), %xmm1
; AVX1-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX1-NEXT: vpminsb %xmm1, %xmm0, %xmm3
; AVX1-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vpsubb %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpmullw %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vbroadcastss {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; AVX1-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
; AVX1-NEXT: vpackuswb %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: vec128_i8_signed_reg_mem:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %xmm1
; AVX2-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm2
; AVX2-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX2-NEXT: vpminsb %xmm1, %xmm0, %xmm3
; AVX2-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
; AVX2-NEXT: vpsubb %xmm3, %xmm1, %xmm1
; AVX2-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
; AVX2-NEXT: vpmullw %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX2-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; XOP-FALLBACK-LABEL: vec128_i8_signed_reg_mem:
; XOP-FALLBACK: # %bb.0:
; XOP-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; XOP-FALLBACK-NEXT: vpcomgtb %xmm1, %xmm0, %xmm2
; XOP-FALLBACK-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; XOP-FALLBACK-NEXT: vpminsb %xmm1, %xmm0, %xmm3
; XOP-FALLBACK-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
; XOP-FALLBACK-NEXT: vpsubb %xmm3, %xmm1, %xmm1
; XOP-FALLBACK-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; XOP-FALLBACK-NEXT: vpshlb %xmm3, %xmm1, %xmm1
; XOP-FALLBACK-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; XOP-FALLBACK-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; XOP-FALLBACK-NEXT: vpmullw %xmm4, %xmm3, %xmm3
; XOP-FALLBACK-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; XOP-FALLBACK-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; XOP-FALLBACK-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; XOP-FALLBACK-NEXT: vpperm {{.*#+}} xmm1 = xmm1[0,2,4,6,8,10,12,14],xmm3[0,2,4,6,8,10,12,14]
; XOP-FALLBACK-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; XOP-FALLBACK-NEXT: retq
;
; XOPAVX1-LABEL: vec128_i8_signed_reg_mem:
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vmovdqa (%rdi), %xmm1
; XOPAVX1-NEXT: vpcomgtb %xmm1, %xmm0, %xmm2
; XOPAVX1-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; XOPAVX1-NEXT: vpminsb %xmm1, %xmm0, %xmm3
; XOPAVX1-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
; XOPAVX1-NEXT: vpsubb %xmm3, %xmm1, %xmm1
; XOPAVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; XOPAVX1-NEXT: vpshlb %xmm3, %xmm1, %xmm1
; XOPAVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; XOPAVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; XOPAVX1-NEXT: vpmullw %xmm4, %xmm3, %xmm3
; XOPAVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; XOPAVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; XOPAVX1-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; XOPAVX1-NEXT: vpperm {{.*#+}} xmm1 = xmm1[0,2,4,6,8,10,12,14],xmm3[0,2,4,6,8,10,12,14]
; XOPAVX1-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: vec128_i8_signed_reg_mem:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vmovdqa (%rdi), %xmm1
; XOPAVX2-NEXT: vpcomgtb %xmm1, %xmm0, %xmm2
; XOPAVX2-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; XOPAVX2-NEXT: vpminsb %xmm1, %xmm0, %xmm3
; XOPAVX2-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
; XOPAVX2-NEXT: vpsubb %xmm3, %xmm1, %xmm1
; XOPAVX2-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; XOPAVX2-NEXT: vpshlb %xmm3, %xmm1, %xmm1
; XOPAVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; XOPAVX2-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
; XOPAVX2-NEXT: vpmullw %ymm2, %ymm1, %ymm1
; XOPAVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; XOPAVX2-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
; XOPAVX2-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; XOPAVX2-NEXT: vzeroupper
; XOPAVX2-NEXT: retq
;
; AVX512F-LABEL: vec128_i8_signed_reg_mem:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm1
; AVX512F-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm2
; AVX512F-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX512F-NEXT: vpminsb %xmm1, %xmm0, %xmm3
; AVX512F-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
; AVX512F-NEXT: vpsubb %xmm3, %xmm1, %xmm1
; AVX512F-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
; AVX512F-NEXT: vpmullw %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
; AVX512F-NEXT: vpmovdb %zmm1, %xmm1
; AVX512F-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-FALLBACK-LABEL: vec128_i8_signed_reg_mem:
; AVX512VL-FALLBACK: # %bb.0:
; AVX512VL-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX512VL-FALLBACK-NEXT: vpminsb %xmm1, %xmm0, %xmm2
; AVX512VL-FALLBACK-NEXT: vpmaxsb %xmm1, %xmm0, %xmm3
; AVX512VL-FALLBACK-NEXT: vpsubb %xmm2, %xmm3, %xmm2
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %xmm2, %xmm2
; AVX512VL-FALLBACK-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm1
; AVX512VL-FALLBACK-NEXT: vpternlogd $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm1, %xmm2
; AVX512VL-FALLBACK-NEXT: vpsubb %xmm1, %xmm2, %xmm1
; AVX512VL-FALLBACK-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; AVX512VL-FALLBACK-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i8_signed_reg_mem:
; AVX512BW-FALLBACK: # %bb.0:
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX512BW-FALLBACK-NEXT: vpcmpgtb %zmm1, %zmm0, %k1
; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vpbroadcastb {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512BW-FALLBACK-NEXT: vmovdqu8 %zmm2, %zmm3 {%k1}
; AVX512BW-FALLBACK-NEXT: vpminsb %xmm1, %xmm0, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsubb %xmm2, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX512BW-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
; AVX512BW-FALLBACK-NEXT: vpmullw %ymm2, %ymm1, %ymm1
; AVX512BW-FALLBACK-NEXT: vpmovwb %zmm1, %ymm1
; AVX512BW-FALLBACK-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; AVX512BW-FALLBACK-NEXT: vzeroupper
; AVX512BW-FALLBACK-NEXT: retq
;
; AVX512VLBW-LABEL: vec128_i8_signed_reg_mem:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vmovdqa (%rdi), %xmm1
; AVX512VLBW-NEXT: vpcmpgtb %xmm1, %xmm0, %k1
; AVX512VLBW-NEXT: vpminsb %xmm1, %xmm0, %xmm2
; AVX512VLBW-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
; AVX512VLBW-NEXT: vpsubb %xmm2, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512VLBW-NEXT: vpsubb %xmm1, %xmm2, %xmm1 {%k1}
; AVX512VLBW-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; AVX512VLBW-NEXT: retq
  %a2 = load <16 x i8>, ptr %a2_addr
  %t3 = icmp sgt <16 x i8> %a1, %a2 ; signed
  %t4 = select <16 x i1> %t3, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %t5 = select <16 x i1> %t3, <16 x i8> %a2, <16 x i8> %a1
  %t6 = select <16 x i1> %t3, <16 x i8> %a1, <16 x i8> %a2
  %t7 = sub <16 x i8> %t6, %t5
  %t8 = lshr <16 x i8> %t7, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %t9 = mul nsw <16 x i8> %t8, %t4 ; signed
  %a10 = add nsw <16 x i8> %t9, %a1 ; signed
  ret <16 x i8> %a10
}

define <16 x i8> @vec128_i8_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind {
; SSE2-LABEL: vec128_i8_signed_mem_mem:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm1
; SSE2-NEXT: movdqa (%rsi), %xmm3
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: pcmpgtb %xmm3, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; SSE2-NEXT: por %xmm2, %xmm0
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: psubb %xmm3, %xmm4
; SSE2-NEXT: psubb %xmm1, %xmm3
; SSE2-NEXT: pand %xmm2, %xmm4
; SSE2-NEXT: pandn %xmm3, %xmm2
; SSE2-NEXT: por %xmm4, %xmm2
; SSE2-NEXT: psrlw $1, %xmm2
; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: pmullw %xmm3, %xmm4
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
; SSE2-NEXT: pand %xmm3, %xmm4
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pmullw %xmm2, %xmm0
; SSE2-NEXT: pand %xmm3, %xmm0
; SSE2-NEXT: packuswb %xmm4, %xmm0
; SSE2-NEXT: paddb %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: vec128_i8_signed_mem_mem:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa (%rdi), %xmm1
; SSE41-NEXT: movdqa (%rsi), %xmm3
; SSE41-NEXT: movdqa %xmm1, %xmm2
; SSE41-NEXT: pcmpgtb %xmm3, %xmm2
; SSE41-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: pminsb %xmm3, %xmm0
; SSE41-NEXT: pmaxsb %xmm1, %xmm3
; SSE41-NEXT: psubb %xmm0, %xmm3
; SSE41-NEXT: psrlw $1, %xmm3
; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
; SSE41-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm4 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; SSE41-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE41-NEXT: pmullw %xmm3, %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
; SSE41-NEXT: pand %xmm3, %xmm2
; SSE41-NEXT: pmullw %xmm4, %xmm0
; SSE41-NEXT: pand %xmm3, %xmm0
; SSE41-NEXT: packuswb %xmm2, %xmm0
; SSE41-NEXT: paddb %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: vec128_i8_signed_mem_mem:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa (%rdi), %xmm0
; AVX1-NEXT: vmovdqa (%rsi), %xmm1
; AVX1-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX1-NEXT: vpminsb %xmm1, %xmm0, %xmm3
; AVX1-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vpsubb %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpmullw %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vbroadcastss {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; AVX1-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
; AVX1-NEXT: vpackuswb %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: vec128_i8_signed_mem_mem:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %xmm0
; AVX2-NEXT: vmovdqa (%rsi), %xmm1
; AVX2-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm2
; AVX2-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX2-NEXT: vpminsb %xmm1, %xmm0, %xmm3
; AVX2-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
; AVX2-NEXT: vpsubb %xmm3, %xmm1, %xmm1
; AVX2-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
; AVX2-NEXT: vpmullw %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX2-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; XOP-FALLBACK-LABEL: vec128_i8_signed_mem_mem:
; XOP-FALLBACK: # %bb.0:
; XOP-FALLBACK-NEXT: vmovdqa (%rdi), %xmm0
; XOP-FALLBACK-NEXT: vmovdqa (%rsi), %xmm1
; XOP-FALLBACK-NEXT: vpcomgtb %xmm1, %xmm0, %xmm2
; XOP-FALLBACK-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; XOP-FALLBACK-NEXT: vpminsb %xmm1, %xmm0, %xmm3
; XOP-FALLBACK-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
; XOP-FALLBACK-NEXT: vpsubb %xmm3, %xmm1, %xmm1
; XOP-FALLBACK-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; XOP-FALLBACK-NEXT: vpshlb %xmm3, %xmm1, %xmm1
; XOP-FALLBACK-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; XOP-FALLBACK-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; XOP-FALLBACK-NEXT: vpmullw %xmm4, %xmm3, %xmm3
; XOP-FALLBACK-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; XOP-FALLBACK-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; XOP-FALLBACK-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; XOP-FALLBACK-NEXT: vpperm {{.*#+}} xmm1 = xmm1[0,2,4,6,8,10,12,14],xmm3[0,2,4,6,8,10,12,14]
; XOP-FALLBACK-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; XOP-FALLBACK-NEXT: retq
;
; XOPAVX1-LABEL: vec128_i8_signed_mem_mem:
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vmovdqa (%rdi), %xmm0
; XOPAVX1-NEXT: vmovdqa (%rsi), %xmm1
; XOPAVX1-NEXT: vpcomgtb %xmm1, %xmm0, %xmm2
; XOPAVX1-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; XOPAVX1-NEXT: vpminsb %xmm1, %xmm0, %xmm3
; XOPAVX1-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
; XOPAVX1-NEXT: vpsubb %xmm3, %xmm1, %xmm1
; XOPAVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; XOPAVX1-NEXT: vpshlb %xmm3, %xmm1, %xmm1
; XOPAVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; XOPAVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; XOPAVX1-NEXT: vpmullw %xmm4, %xmm3, %xmm3
; XOPAVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; XOPAVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; XOPAVX1-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; XOPAVX1-NEXT: vpperm {{.*#+}} xmm1 = xmm1[0,2,4,6,8,10,12,14],xmm3[0,2,4,6,8,10,12,14]
; XOPAVX1-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: vec128_i8_signed_mem_mem:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vmovdqa (%rdi), %xmm0
; XOPAVX2-NEXT: vmovdqa (%rsi), %xmm1
; XOPAVX2-NEXT: vpcomgtb %xmm1, %xmm0, %xmm2
; XOPAVX2-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; XOPAVX2-NEXT: vpminsb %xmm1, %xmm0, %xmm3
; XOPAVX2-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
; XOPAVX2-NEXT: vpsubb %xmm3, %xmm1, %xmm1
; XOPAVX2-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; XOPAVX2-NEXT: vpshlb %xmm3, %xmm1, %xmm1
; XOPAVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; XOPAVX2-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
; XOPAVX2-NEXT: vpmullw %ymm2, %ymm1, %ymm1
; XOPAVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; XOPAVX2-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
; XOPAVX2-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; XOPAVX2-NEXT: vzeroupper
; XOPAVX2-NEXT: retq
;
; AVX512F-LABEL: vec128_i8_signed_mem_mem:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vmovdqa (%rsi), %xmm1
; AVX512F-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm2
; AVX512F-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX512F-NEXT: vpminsb %xmm1, %xmm0, %xmm3
; AVX512F-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
; AVX512F-NEXT: vpsubb %xmm3, %xmm1, %xmm1
; AVX512F-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
; AVX512F-NEXT: vpmullw %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
; AVX512F-NEXT: vpmovdb %zmm1, %xmm1
; AVX512F-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-FALLBACK-LABEL: vec128_i8_signed_mem_mem:
; AVX512VL-FALLBACK: # %bb.0:
; AVX512VL-FALLBACK-NEXT: vmovdqa (%rdi), %xmm0
; AVX512VL-FALLBACK-NEXT: vmovdqa (%rsi), %xmm1
; AVX512VL-FALLBACK-NEXT: vpminsb %xmm1, %xmm0, %xmm2
; AVX512VL-FALLBACK-NEXT: vpmaxsb %xmm1, %xmm0, %xmm3
; AVX512VL-FALLBACK-NEXT: vpsubb %xmm2, %xmm3, %xmm2
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %xmm2, %xmm2
; AVX512VL-FALLBACK-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm1
; AVX512VL-FALLBACK-NEXT: vpternlogd $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm1, %xmm2
; AVX512VL-FALLBACK-NEXT: vpsubb %xmm1, %xmm2, %xmm1
; AVX512VL-FALLBACK-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; AVX512VL-FALLBACK-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i8_signed_mem_mem:
; AVX512BW-FALLBACK: # %bb.0:
; AVX512BW-FALLBACK-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-FALLBACK-NEXT: vmovdqa (%rsi), %xmm1
; AVX512BW-FALLBACK-NEXT: vpcmpgtb %zmm1, %zmm0, %k1
; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vpbroadcastb {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512BW-FALLBACK-NEXT: vmovdqu8 %zmm2, %zmm3 {%k1}
; AVX512BW-FALLBACK-NEXT: vpminsb %xmm1, %xmm0, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsubb %xmm2, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX512BW-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
; AVX512BW-FALLBACK-NEXT: vpmullw %ymm2, %ymm1, %ymm1
; AVX512BW-FALLBACK-NEXT: vpmovwb %zmm1, %ymm1
; AVX512BW-FALLBACK-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; AVX512BW-FALLBACK-NEXT: vzeroupper
; AVX512BW-FALLBACK-NEXT: retq
;
; AVX512VLBW-LABEL: vec128_i8_signed_mem_mem:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512VLBW-NEXT: vmovdqa (%rsi), %xmm1
; AVX512VLBW-NEXT: vpcmpgtb %xmm1, %xmm0, %k1
; AVX512VLBW-NEXT: vpminsb %xmm1, %xmm0, %xmm2
; AVX512VLBW-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
; AVX512VLBW-NEXT: vpsubb %xmm2, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512VLBW-NEXT: vpsubb %xmm1, %xmm2, %xmm1 {%k1}
; AVX512VLBW-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; AVX512VLBW-NEXT: retq
  %a1 = load <16 x i8>, ptr %a1_addr
  %a2 = load <16 x i8>, ptr %a2_addr
  %t3 = icmp sgt <16 x i8> %a1, %a2 ; signed
  %t4 = select <16 x i1> %t3, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %t5 = select <16 x i1> %t3, <16 x i8> %a2, <16 x i8> %a1
  %t6 = select <16 x i1> %t3, <16 x i8> %a1, <16 x i8> %a2
  %t7 = sub <16 x i8> %t6, %t5
  %t8 = lshr <16 x i8> %t7, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %t9 = mul nsw <16 x i8> %t8, %t4 ; signed
  %a10 = add nsw <16 x i8> %t9, %a1 ; signed