; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=ALL,SSE,SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=ALL,SSE,SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=ALL,AVX,AVX1,AVX1-FALLBACK
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=ALL,AVX,AVX2,AVX2-FALLBACK
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop | FileCheck %s --check-prefixes=ALL,XOP,XOP-FALLBACK
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefixes=ALL,XOP,AVX,AVX1,XOPAVX,XOPAVX1,XOPAVX1-FALLBACK
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefixes=ALL,XOP,AVX,AVX2,XOPAVX,XOPAVX2,XOPAVX2-FALLBACK
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=ALL,AVX512,AVX512F
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefixes=ALL,AVX512,AVX512VL,AVX512VL-FALLBACK
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefixes=ALL,AVX512,AVX512BW,AVX512BW-FALLBACK
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl,+avx512bw | FileCheck %s --check-prefixes=ALL,AVX512,AVX512VL,AVX512BW,AVX512VLBW

; These test cases are inspired by C++2a std::midpoint().
; See https://bugs.llvm.org/show_bug.cgi?id=40965
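;
; For reference, every function below instantiates the same scalar pattern
; (a sketch; the value names here are illustrative, not part of the tests):
;   %sign = select (icmp sgt %a1, %a2), -1, 1
;   %min  = select (icmp sgt %a1, %a2), %a2, %a1
;   %max  = select (icmp sgt %a1, %a2), %a1, %a2
;   %res  = %a1 + ((%max - %min) lshr 1) * %sign
; i.e. an overflow-safe midpoint(%a1, %a2).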

; Using 128-bit vector regs.

; ---------------------------------------------------------------------------- ;
; 32-bit width. 128 / 32 = 4 elts.
; ---------------------------------------------------------------------------- ;

; Values come from regs
define <4 x i32> @vec128_i32_signed_reg_reg(<4 x i32> %a1, <4 x i32> %a2) nounwind {
; SSE2-LABEL: vec128_i32_signed_reg_reg:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [1,1,1,1]
; SSE2-NEXT: por %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: pcmpgtd %xmm0, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: pand %xmm4, %xmm5
; SSE2-NEXT: pandn %xmm1, %xmm4
; SSE2-NEXT: por %xmm5, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: pand %xmm2, %xmm5
; SSE2-NEXT: pandn %xmm1, %xmm2
; SSE2-NEXT: por %xmm5, %xmm2
; SSE2-NEXT: psubd %xmm4, %xmm2
; SSE2-NEXT: psrld $1, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm3, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE2-NEXT: paddd %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: vec128_i32_signed_reg_reg:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: pcmpgtd %xmm1, %xmm2
; SSE41-NEXT: por {{.*}}(%rip), %xmm2
; SSE41-NEXT: movdqa %xmm0, %xmm3
; SSE41-NEXT: pminsd %xmm1, %xmm3
; SSE41-NEXT: pmaxsd %xmm0, %xmm1
; SSE41-NEXT: psubd %xmm3, %xmm1
; SSE41-NEXT: psrld $1, %xmm1
; SSE41-NEXT: pmulld %xmm1, %xmm2
; SSE41-NEXT: paddd %xmm0, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX1-FALLBACK-LABEL: vec128_i32_signed_reg_reg:
; AVX1-FALLBACK: # %bb.0:
; AVX1-FALLBACK-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm2
; AVX1-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; AVX1-FALLBACK-NEXT: vpminsd %xmm1, %xmm0, %xmm3
; AVX1-FALLBACK-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; AVX1-FALLBACK-NEXT: vpsubd %xmm3, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpmulld %xmm2, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX1-FALLBACK-NEXT: retq
;
; AVX2-FALLBACK-LABEL: vec128_i32_signed_reg_reg:
; AVX2-FALLBACK: # %bb.0:
; AVX2-FALLBACK-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm2
; AVX2-FALLBACK-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; AVX2-FALLBACK-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX2-FALLBACK-NEXT: vpminsd %xmm1, %xmm0, %xmm3
; AVX2-FALLBACK-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; AVX2-FALLBACK-NEXT: vpsubd %xmm3, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpmulld %xmm2, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX2-FALLBACK-NEXT: retq
;
; XOP-FALLBACK-LABEL: vec128_i32_signed_reg_reg:
; XOP-FALLBACK: # %bb.0:
; XOP-FALLBACK-NEXT: vpcomgtd %xmm1, %xmm0, %xmm2
; XOP-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; XOP-FALLBACK-NEXT: vpminsd %xmm1, %xmm0, %xmm3
; XOP-FALLBACK-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; XOP-FALLBACK-NEXT: vpsubd %xmm3, %xmm1, %xmm1
; XOP-FALLBACK-NEXT: vpsrld $1, %xmm1, %xmm1
; XOP-FALLBACK-NEXT: vpmacsdd %xmm0, %xmm2, %xmm1, %xmm0
; XOP-FALLBACK-NEXT: retq
;
; XOPAVX1-LABEL: vec128_i32_signed_reg_reg:
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpcomgtd %xmm1, %xmm0, %xmm2
; XOPAVX1-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; XOPAVX1-NEXT: vpminsd %xmm1, %xmm0, %xmm3
; XOPAVX1-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; XOPAVX1-NEXT: vpsubd %xmm3, %xmm1, %xmm1
; XOPAVX1-NEXT: vpsrld $1, %xmm1, %xmm1
; XOPAVX1-NEXT: vpmacsdd %xmm0, %xmm2, %xmm1, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: vec128_i32_signed_reg_reg:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpcomgtd %xmm1, %xmm0, %xmm2
; XOPAVX2-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; XOPAVX2-NEXT: vpor %xmm3, %xmm2, %xmm2
; XOPAVX2-NEXT: vpminsd %xmm1, %xmm0, %xmm3
; XOPAVX2-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; XOPAVX2-NEXT: vpsubd %xmm3, %xmm1, %xmm1
; XOPAVX2-NEXT: vpsrld $1, %xmm1, %xmm1
; XOPAVX2-NEXT: vpmacsdd %xmm0, %xmm2, %xmm1, %xmm0
; XOPAVX2-NEXT: retq
;
; AVX512F-LABEL: vec128_i32_signed_reg_reg:
; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vpcmpgtd %zmm1, %zmm0, %k1
; AVX512F-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; AVX512F-NEXT: vmovdqa32 %zmm2, %zmm3 {%k1}
; AVX512F-NEXT: vpminsd %xmm1, %xmm0, %xmm2
; AVX512F-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; AVX512F-NEXT: vpsubd %xmm2, %xmm1, %xmm1
; AVX512F-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX512F-NEXT: vpmulld %xmm3, %xmm1, %xmm1
; AVX512F-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: vec128_i32_signed_reg_reg:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpcmpgtd %xmm1, %xmm0, %k1
; AVX512VL-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512VL-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; AVX512VL-NEXT: vmovdqa32 %xmm2, %xmm3 {%k1}
; AVX512VL-NEXT: vpminsd %xmm1, %xmm0, %xmm2
; AVX512VL-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; AVX512VL-NEXT: vpsubd %xmm2, %xmm1, %xmm1
; AVX512VL-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX512VL-NEXT: vpmulld %xmm3, %xmm1, %xmm1
; AVX512VL-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i32_signed_reg_reg:
; AVX512BW-FALLBACK: # %bb.0:
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-FALLBACK-NEXT: vpcmpgtd %zmm1, %zmm0, %k1
; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; AVX512BW-FALLBACK-NEXT: vmovdqa32 %zmm2, %zmm3 {%k1}
; AVX512BW-FALLBACK-NEXT: vpminsd %xmm1, %xmm0, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsubd %xmm2, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpmulld %xmm3, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX512BW-FALLBACK-NEXT: vzeroupper
; AVX512BW-FALLBACK-NEXT: retq
  %t3 = icmp sgt <4 x i32> %a1, %a2 ; signed
  %t4 = select <4 x i1> %t3, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
  %t5 = select <4 x i1> %t3, <4 x i32> %a2, <4 x i32> %a1
  %t6 = select <4 x i1> %t3, <4 x i32> %a1, <4 x i32> %a2
  %t7 = sub <4 x i32> %t6, %t5
  %t8 = lshr <4 x i32> %t7, <i32 1, i32 1, i32 1, i32 1>
  %t9 = mul nsw <4 x i32> %t8, %t4 ; signed
  %a10 = add nsw <4 x i32> %t9, %a1 ; signed
  ret <4 x i32> %a10
}

define <4 x i32> @vec128_i32_unsigned_reg_reg(<4 x i32> %a1, <4 x i32> %a2) nounwind {
; SSE2-LABEL: vec128_i32_unsigned_reg_reg:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm3
; SSE2-NEXT: pxor %xmm0, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm4
; SSE2-NEXT: pcmpgtd %xmm3, %xmm4
; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [1,1,1,1]
; SSE2-NEXT: por %xmm4, %xmm5
; SSE2-NEXT: pcmpgtd %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pand %xmm3, %xmm2
; SSE2-NEXT: pandn %xmm1, %xmm3
; SSE2-NEXT: por %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pand %xmm4, %xmm2
; SSE2-NEXT: pandn %xmm1, %xmm4
; SSE2-NEXT: por %xmm2, %xmm4
; SSE2-NEXT: psubd %xmm3, %xmm4
; SSE2-NEXT: psrld $1, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm4[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm5, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm5[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE2-NEXT: paddd %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: vec128_i32_unsigned_reg_reg:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: pminud %xmm1, %xmm2
; SSE41-NEXT: movdqa %xmm0, %xmm3
; SSE41-NEXT: pcmpeqd %xmm2, %xmm3
; SSE41-NEXT: pcmpeqd %xmm4, %xmm4
; SSE41-NEXT: pxor %xmm3, %xmm4
; SSE41-NEXT: por {{.*}}(%rip), %xmm4
; SSE41-NEXT: pmaxud %xmm0, %xmm1
; SSE41-NEXT: psubd %xmm2, %xmm1
; SSE41-NEXT: psrld $1, %xmm1
; SSE41-NEXT: pmulld %xmm1, %xmm4
; SSE41-NEXT: paddd %xmm4, %xmm0
; SSE41-NEXT: retq
;
; AVX1-FALLBACK-LABEL: vec128_i32_unsigned_reg_reg:
; AVX1-FALLBACK: # %bb.0:
; AVX1-FALLBACK-NEXT: vpminud %xmm1, %xmm0, %xmm2
; AVX1-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm3
; AVX1-FALLBACK-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
; AVX1-FALLBACK-NEXT: vpxor %xmm4, %xmm3, %xmm3
; AVX1-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm3, %xmm3
; AVX1-FALLBACK-NEXT: vpmaxud %xmm1, %xmm0, %xmm1
; AVX1-FALLBACK-NEXT: vpsubd %xmm2, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpmulld %xmm3, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX1-FALLBACK-NEXT: retq
;
; AVX2-FALLBACK-LABEL: vec128_i32_unsigned_reg_reg:
; AVX2-FALLBACK: # %bb.0:
; AVX2-FALLBACK-NEXT: vpminud %xmm1, %xmm0, %xmm2
; AVX2-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm3
; AVX2-FALLBACK-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
; AVX2-FALLBACK-NEXT: vpxor %xmm4, %xmm3, %xmm3
; AVX2-FALLBACK-NEXT: vpbroadcastd {{.*#+}} xmm4 = [1,1,1,1]
; AVX2-FALLBACK-NEXT: vpor %xmm4, %xmm3, %xmm3
; AVX2-FALLBACK-NEXT: vpmaxud %xmm1, %xmm0, %xmm1
; AVX2-FALLBACK-NEXT: vpsubd %xmm2, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpmulld %xmm3, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX2-FALLBACK-NEXT: retq
;
; XOP-FALLBACK-LABEL: vec128_i32_unsigned_reg_reg:
; XOP-FALLBACK: # %bb.0:
; XOP-FALLBACK-NEXT: vpcomgtud %xmm1, %xmm0, %xmm2
; XOP-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; XOP-FALLBACK-NEXT: vpminud %xmm1, %xmm0, %xmm3
; XOP-FALLBACK-NEXT: vpmaxud %xmm1, %xmm0, %xmm1
; XOP-FALLBACK-NEXT: vpsubd %xmm3, %xmm1, %xmm1
; XOP-FALLBACK-NEXT: vpsrld $1, %xmm1, %xmm1
; XOP-FALLBACK-NEXT: vpmacsdd %xmm0, %xmm2, %xmm1, %xmm0
; XOP-FALLBACK-NEXT: retq
;
; XOPAVX1-LABEL: vec128_i32_unsigned_reg_reg:
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vpcomgtud %xmm1, %xmm0, %xmm2
; XOPAVX1-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; XOPAVX1-NEXT: vpminud %xmm1, %xmm0, %xmm3
; XOPAVX1-NEXT: vpmaxud %xmm1, %xmm0, %xmm1
; XOPAVX1-NEXT: vpsubd %xmm3, %xmm1, %xmm1
; XOPAVX1-NEXT: vpsrld $1, %xmm1, %xmm1
; XOPAVX1-NEXT: vpmacsdd %xmm0, %xmm2, %xmm1, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: vec128_i32_unsigned_reg_reg:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpcomgtud %xmm1, %xmm0, %xmm2
; XOPAVX2-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; XOPAVX2-NEXT: vpor %xmm3, %xmm2, %xmm2
; XOPAVX2-NEXT: vpminud %xmm1, %xmm0, %xmm3
; XOPAVX2-NEXT: vpmaxud %xmm1, %xmm0, %xmm1
; XOPAVX2-NEXT: vpsubd %xmm3, %xmm1, %xmm1
; XOPAVX2-NEXT: vpsrld $1, %xmm1, %xmm1
; XOPAVX2-NEXT: vpmacsdd %xmm0, %xmm2, %xmm1, %xmm0
; XOPAVX2-NEXT: retq
;
; AVX512F-LABEL: vec128_i32_unsigned_reg_reg:
; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vpcmpnleud %zmm1, %zmm0, %k1
; AVX512F-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; AVX512F-NEXT: vmovdqa32 %zmm2, %zmm3 {%k1}
; AVX512F-NEXT: vpminud %xmm1, %xmm0, %xmm2
; AVX512F-NEXT: vpmaxud %xmm1, %xmm0, %xmm1
; AVX512F-NEXT: vpsubd %xmm2, %xmm1, %xmm1
; AVX512F-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX512F-NEXT: vpmulld %xmm3, %xmm1, %xmm1
; AVX512F-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: vec128_i32_unsigned_reg_reg:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpcmpnleud %xmm1, %xmm0, %k1
; AVX512VL-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512VL-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; AVX512VL-NEXT: vmovdqa32 %xmm2, %xmm3 {%k1}
; AVX512VL-NEXT: vpminud %xmm1, %xmm0, %xmm2
; AVX512VL-NEXT: vpmaxud %xmm1, %xmm0, %xmm1
; AVX512VL-NEXT: vpsubd %xmm2, %xmm1, %xmm1
; AVX512VL-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX512VL-NEXT: vpmulld %xmm3, %xmm1, %xmm1
; AVX512VL-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i32_unsigned_reg_reg:
; AVX512BW-FALLBACK: # %bb.0:
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-FALLBACK-NEXT: vpcmpnleud %zmm1, %zmm0, %k1
; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; AVX512BW-FALLBACK-NEXT: vmovdqa32 %zmm2, %zmm3 {%k1}
; AVX512BW-FALLBACK-NEXT: vpminud %xmm1, %xmm0, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmaxud %xmm1, %xmm0, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsubd %xmm2, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpmulld %xmm3, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX512BW-FALLBACK-NEXT: vzeroupper
; AVX512BW-FALLBACK-NEXT: retq
  %t3 = icmp ugt <4 x i32> %a1, %a2
  %t4 = select <4 x i1> %t3, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
  %t5 = select <4 x i1> %t3, <4 x i32> %a2, <4 x i32> %a1
  %t6 = select <4 x i1> %t3, <4 x i32> %a1, <4 x i32> %a2
  %t7 = sub <4 x i32> %t6, %t5
  %t8 = lshr <4 x i32> %t7, <i32 1, i32 1, i32 1, i32 1>
  %t9 = mul <4 x i32> %t8, %t4
  %a10 = add <4 x i32> %t9, %a1
  ret <4 x i32> %a10
}

; Values are loaded. Only check signed case.

define <4 x i32> @vec128_i32_signed_mem_reg(<4 x i32>* %a1_addr, <4 x i32> %a2) nounwind {
; SSE2-LABEL: vec128_i32_signed_mem_reg:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: pcmpgtd %xmm0, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [1,1,1,1]
; SSE2-NEXT: por %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: pcmpgtd %xmm1, %xmm4
; SSE2-NEXT: movdqa %xmm1, %xmm5
; SSE2-NEXT: pand %xmm4, %xmm5
; SSE2-NEXT: pandn %xmm0, %xmm4
; SSE2-NEXT: por %xmm5, %xmm4
; SSE2-NEXT: movdqa %xmm1, %xmm5
; SSE2-NEXT: pand %xmm2, %xmm5
; SSE2-NEXT: pandn %xmm0, %xmm2
; SSE2-NEXT: por %xmm5, %xmm2
; SSE2-NEXT: psubd %xmm4, %xmm2
; SSE2-NEXT: psrld $1, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm3, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm4, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT: paddd %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: vec128_i32_signed_mem_reg:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa (%rdi), %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm2
; SSE41-NEXT: pcmpgtd %xmm0, %xmm2
; SSE41-NEXT: por {{.*}}(%rip), %xmm2
; SSE41-NEXT: movdqa %xmm1, %xmm3
; SSE41-NEXT: pminsd %xmm0, %xmm3
; SSE41-NEXT: pmaxsd %xmm1, %xmm0
; SSE41-NEXT: psubd %xmm3, %xmm0
; SSE41-NEXT: psrld $1, %xmm0
; SSE41-NEXT: pmulld %xmm2, %xmm0
; SSE41-NEXT: paddd %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX1-FALLBACK-LABEL: vec128_i32_signed_mem_reg:
; AVX1-FALLBACK: # %bb.0:
; AVX1-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX1-FALLBACK-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm2
; AVX1-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; AVX1-FALLBACK-NEXT: vpminsd %xmm0, %xmm1, %xmm3
; AVX1-FALLBACK-NEXT: vpmaxsd %xmm0, %xmm1, %xmm0
; AVX1-FALLBACK-NEXT: vpsubd %xmm3, %xmm0, %xmm0
; AVX1-FALLBACK-NEXT: vpsrld $1, %xmm0, %xmm0
; AVX1-FALLBACK-NEXT: vpmulld %xmm2, %xmm0, %xmm0
; AVX1-FALLBACK-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX1-FALLBACK-NEXT: retq
;
; AVX2-FALLBACK-LABEL: vec128_i32_signed_mem_reg:
; AVX2-FALLBACK: # %bb.0:
; AVX2-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX2-FALLBACK-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm2
; AVX2-FALLBACK-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; AVX2-FALLBACK-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX2-FALLBACK-NEXT: vpminsd %xmm0, %xmm1, %xmm3
; AVX2-FALLBACK-NEXT: vpmaxsd %xmm0, %xmm1, %xmm0
; AVX2-FALLBACK-NEXT: vpsubd %xmm3, %xmm0, %xmm0
; AVX2-FALLBACK-NEXT: vpsrld $1, %xmm0, %xmm0
; AVX2-FALLBACK-NEXT: vpmulld %xmm2, %xmm0, %xmm0
; AVX2-FALLBACK-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX2-FALLBACK-NEXT: retq
;
; XOP-FALLBACK-LABEL: vec128_i32_signed_mem_reg:
; XOP-FALLBACK: # %bb.0:
; XOP-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; XOP-FALLBACK-NEXT: vpcomgtd %xmm0, %xmm1, %xmm2
; XOP-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; XOP-FALLBACK-NEXT: vpminsd %xmm0, %xmm1, %xmm3
; XOP-FALLBACK-NEXT: vpmaxsd %xmm0, %xmm1, %xmm0
; XOP-FALLBACK-NEXT: vpsubd %xmm3, %xmm0, %xmm0
; XOP-FALLBACK-NEXT: vpsrld $1, %xmm0, %xmm0
; XOP-FALLBACK-NEXT: vpmacsdd %xmm1, %xmm2, %xmm0, %xmm0
; XOP-FALLBACK-NEXT: retq
;
; XOPAVX1-LABEL: vec128_i32_signed_mem_reg:
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vmovdqa (%rdi), %xmm1
; XOPAVX1-NEXT: vpcomgtd %xmm0, %xmm1, %xmm2
; XOPAVX1-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; XOPAVX1-NEXT: vpminsd %xmm0, %xmm1, %xmm3
; XOPAVX1-NEXT: vpmaxsd %xmm0, %xmm1, %xmm0
; XOPAVX1-NEXT: vpsubd %xmm3, %xmm0, %xmm0
; XOPAVX1-NEXT: vpsrld $1, %xmm0, %xmm0
; XOPAVX1-NEXT: vpmacsdd %xmm1, %xmm2, %xmm0, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: vec128_i32_signed_mem_reg:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vmovdqa (%rdi), %xmm1
; XOPAVX2-NEXT: vpcomgtd %xmm0, %xmm1, %xmm2
; XOPAVX2-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; XOPAVX2-NEXT: vpor %xmm3, %xmm2, %xmm2
; XOPAVX2-NEXT: vpminsd %xmm0, %xmm1, %xmm3
; XOPAVX2-NEXT: vpmaxsd %xmm0, %xmm1, %xmm0
; XOPAVX2-NEXT: vpsubd %xmm3, %xmm0, %xmm0
; XOPAVX2-NEXT: vpsrld $1, %xmm0, %xmm0
; XOPAVX2-NEXT: vpmacsdd %xmm1, %xmm2, %xmm0, %xmm0
; XOPAVX2-NEXT: retq
;
; AVX512F-LABEL: vec128_i32_signed_mem_reg:
; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vmovdqa (%rdi), %xmm1
; AVX512F-NEXT: vpcmpgtd %zmm0, %zmm1, %k1
; AVX512F-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; AVX512F-NEXT: vmovdqa32 %zmm2, %zmm3 {%k1}
; AVX512F-NEXT: vpminsd %xmm0, %xmm1, %xmm2
; AVX512F-NEXT: vpmaxsd %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: vpsubd %xmm2, %xmm0, %xmm0
; AVX512F-NEXT: vpsrld $1, %xmm0, %xmm0
; AVX512F-NEXT: vpmulld %xmm3, %xmm0, %xmm0
; AVX512F-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: vec128_i32_signed_mem_reg:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm1
; AVX512VL-NEXT: vpcmpgtd %xmm0, %xmm1, %k1
; AVX512VL-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512VL-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; AVX512VL-NEXT: vmovdqa32 %xmm2, %xmm3 {%k1}
; AVX512VL-NEXT: vpminsd %xmm0, %xmm1, %xmm2
; AVX512VL-NEXT: vpmaxsd %xmm0, %xmm1, %xmm0
; AVX512VL-NEXT: vpsubd %xmm2, %xmm0, %xmm0
; AVX512VL-NEXT: vpsrld $1, %xmm0, %xmm0
; AVX512VL-NEXT: vpmulld %xmm3, %xmm0, %xmm0
; AVX512VL-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i32_signed_mem_reg:
; AVX512BW-FALLBACK: # %bb.0:
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX512BW-FALLBACK-NEXT: vpcmpgtd %zmm0, %zmm1, %k1
; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; AVX512BW-FALLBACK-NEXT: vmovdqa32 %zmm2, %zmm3 {%k1}
; AVX512BW-FALLBACK-NEXT: vpminsd %xmm0, %xmm1, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmaxsd %xmm0, %xmm1, %xmm0
; AVX512BW-FALLBACK-NEXT: vpsubd %xmm2, %xmm0, %xmm0
; AVX512BW-FALLBACK-NEXT: vpsrld $1, %xmm0, %xmm0
; AVX512BW-FALLBACK-NEXT: vpmulld %xmm3, %xmm0, %xmm0
; AVX512BW-FALLBACK-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX512BW-FALLBACK-NEXT: vzeroupper
; AVX512BW-FALLBACK-NEXT: retq
  %a1 = load <4 x i32>, <4 x i32>* %a1_addr
  %t3 = icmp sgt <4 x i32> %a1, %a2 ; signed
  %t4 = select <4 x i1> %t3, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
  %t5 = select <4 x i1> %t3, <4 x i32> %a2, <4 x i32> %a1
  %t6 = select <4 x i1> %t3, <4 x i32> %a1, <4 x i32> %a2
  %t7 = sub <4 x i32> %t6, %t5
  %t8 = lshr <4 x i32> %t7, <i32 1, i32 1, i32 1, i32 1>
  %t9 = mul nsw <4 x i32> %t8, %t4 ; signed
  %a10 = add nsw <4 x i32> %t9, %a1 ; signed
  ret <4 x i32> %a10
}

define <4 x i32> @vec128_i32_signed_reg_mem(<4 x i32> %a1, <4 x i32>* %a2_addr) nounwind {
; SSE2-LABEL: vec128_i32_signed_reg_mem:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [1,1,1,1]
; SSE2-NEXT: por %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: pcmpgtd %xmm0, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: pand %xmm4, %xmm5
; SSE2-NEXT: pandn %xmm1, %xmm4
; SSE2-NEXT: por %xmm5, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: pand %xmm2, %xmm5
; SSE2-NEXT: pandn %xmm1, %xmm2
; SSE2-NEXT: por %xmm5, %xmm2
; SSE2-NEXT: psubd %xmm4, %xmm2
; SSE2-NEXT: psrld $1, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm3, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE2-NEXT: paddd %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: vec128_i32_signed_reg_mem:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa (%rdi), %xmm2
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: pcmpgtd %xmm2, %xmm1
; SSE41-NEXT: por {{.*}}(%rip), %xmm1
; SSE41-NEXT: movdqa %xmm0, %xmm3
; SSE41-NEXT: pminsd %xmm2, %xmm3
; SSE41-NEXT: pmaxsd %xmm0, %xmm2
; SSE41-NEXT: psubd %xmm3, %xmm2
; SSE41-NEXT: psrld $1, %xmm2
; SSE41-NEXT: pmulld %xmm2, %xmm1
; SSE41-NEXT: paddd %xmm0, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX1-FALLBACK-LABEL: vec128_i32_signed_reg_mem:
; AVX1-FALLBACK: # %bb.0:
; AVX1-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX1-FALLBACK-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm2
; AVX1-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; AVX1-FALLBACK-NEXT: vpminsd %xmm1, %xmm0, %xmm3
; AVX1-FALLBACK-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; AVX1-FALLBACK-NEXT: vpsubd %xmm3, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpmulld %xmm2, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX1-FALLBACK-NEXT: retq
;
; AVX2-FALLBACK-LABEL: vec128_i32_signed_reg_mem:
; AVX2-FALLBACK: # %bb.0:
; AVX2-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX2-FALLBACK-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm2
; AVX2-FALLBACK-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; AVX2-FALLBACK-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX2-FALLBACK-NEXT: vpminsd %xmm1, %xmm0, %xmm3
; AVX2-FALLBACK-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; AVX2-FALLBACK-NEXT: vpsubd %xmm3, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpmulld %xmm2, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX2-FALLBACK-NEXT: retq
;
; XOP-FALLBACK-LABEL: vec128_i32_signed_reg_mem:
; XOP-FALLBACK: # %bb.0:
; XOP-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; XOP-FALLBACK-NEXT: vpcomgtd %xmm1, %xmm0, %xmm2
; XOP-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; XOP-FALLBACK-NEXT: vpminsd %xmm1, %xmm0, %xmm3
; XOP-FALLBACK-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; XOP-FALLBACK-NEXT: vpsubd %xmm3, %xmm1, %xmm1
; XOP-FALLBACK-NEXT: vpsrld $1, %xmm1, %xmm1
; XOP-FALLBACK-NEXT: vpmacsdd %xmm0, %xmm2, %xmm1, %xmm0
; XOP-FALLBACK-NEXT: retq
;
; XOPAVX1-LABEL: vec128_i32_signed_reg_mem:
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vmovdqa (%rdi), %xmm1
; XOPAVX1-NEXT: vpcomgtd %xmm1, %xmm0, %xmm2
; XOPAVX1-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; XOPAVX1-NEXT: vpminsd %xmm1, %xmm0, %xmm3
; XOPAVX1-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; XOPAVX1-NEXT: vpsubd %xmm3, %xmm1, %xmm1
; XOPAVX1-NEXT: vpsrld $1, %xmm1, %xmm1
; XOPAVX1-NEXT: vpmacsdd %xmm0, %xmm2, %xmm1, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: vec128_i32_signed_reg_mem:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vmovdqa (%rdi), %xmm1
; XOPAVX2-NEXT: vpcomgtd %xmm1, %xmm0, %xmm2
; XOPAVX2-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; XOPAVX2-NEXT: vpor %xmm3, %xmm2, %xmm2
; XOPAVX2-NEXT: vpminsd %xmm1, %xmm0, %xmm3
; XOPAVX2-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; XOPAVX2-NEXT: vpsubd %xmm3, %xmm1, %xmm1
; XOPAVX2-NEXT: vpsrld $1, %xmm1, %xmm1
; XOPAVX2-NEXT: vpmacsdd %xmm0, %xmm2, %xmm1, %xmm0
; XOPAVX2-NEXT: retq
;
; AVX512F-LABEL: vec128_i32_signed_reg_mem:
; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vmovdqa (%rdi), %xmm1
; AVX512F-NEXT: vpcmpgtd %zmm1, %zmm0, %k1
; AVX512F-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; AVX512F-NEXT: vmovdqa32 %zmm2, %zmm3 {%k1}
; AVX512F-NEXT: vpminsd %xmm1, %xmm0, %xmm2
; AVX512F-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; AVX512F-NEXT: vpsubd %xmm2, %xmm1, %xmm1
; AVX512F-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX512F-NEXT: vpmulld %xmm3, %xmm1, %xmm1
; AVX512F-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: vec128_i32_signed_reg_mem:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm1
; AVX512VL-NEXT: vpcmpgtd %xmm1, %xmm0, %k1
; AVX512VL-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512VL-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; AVX512VL-NEXT: vmovdqa32 %xmm2, %xmm3 {%k1}
; AVX512VL-NEXT: vpminsd %xmm1, %xmm0, %xmm2
; AVX512VL-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; AVX512VL-NEXT: vpsubd %xmm2, %xmm1, %xmm1
; AVX512VL-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX512VL-NEXT: vpmulld %xmm3, %xmm1, %xmm1
; AVX512VL-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i32_signed_reg_mem:
; AVX512BW-FALLBACK: # %bb.0:
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX512BW-FALLBACK-NEXT: vpcmpgtd %zmm1, %zmm0, %k1
; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; AVX512BW-FALLBACK-NEXT: vmovdqa32 %zmm2, %zmm3 {%k1}
; AVX512BW-FALLBACK-NEXT: vpminsd %xmm1, %xmm0, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsubd %xmm2, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpmulld %xmm3, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX512BW-FALLBACK-NEXT: vzeroupper
; AVX512BW-FALLBACK-NEXT: retq
  %a2 = load <4 x i32>, <4 x i32>* %a2_addr
  %t3 = icmp sgt <4 x i32> %a1, %a2 ; signed
  %t4 = select <4 x i1> %t3, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
  %t5 = select <4 x i1> %t3, <4 x i32> %a2, <4 x i32> %a1
  %t6 = select <4 x i1> %t3, <4 x i32> %a1, <4 x i32> %a2
  %t7 = sub <4 x i32> %t6, %t5
  %t8 = lshr <4 x i32> %t7, <i32 1, i32 1, i32 1, i32 1>
  %t9 = mul nsw <4 x i32> %t8, %t4 ; signed
  %a10 = add nsw <4 x i32> %t9, %a1 ; signed
  ret <4 x i32> %a10
}

define <4 x i32> @vec128_i32_signed_mem_mem(<4 x i32>* %a1_addr, <4 x i32>* %a2_addr) nounwind {
; SSE2-LABEL: vec128_i32_signed_mem_mem:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm1
; SSE2-NEXT: movdqa (%rsi), %xmm0
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: pcmpgtd %xmm0, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [1,1,1,1]
; SSE2-NEXT: por %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: pcmpgtd %xmm1, %xmm4
; SSE2-NEXT: movdqa %xmm1, %xmm5
; SSE2-NEXT: pand %xmm4, %xmm5
; SSE2-NEXT: pandn %xmm0, %xmm4
; SSE2-NEXT: por %xmm5, %xmm4
; SSE2-NEXT: movdqa %xmm1, %xmm5
; SSE2-NEXT: pand %xmm2, %xmm5
; SSE2-NEXT: pandn %xmm0, %xmm2
; SSE2-NEXT: por %xmm5, %xmm2
; SSE2-NEXT: psubd %xmm4, %xmm2
; SSE2-NEXT: psrld $1, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm3, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm4, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT: paddd %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: vec128_i32_signed_mem_mem:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa (%rdi), %xmm1
; SSE41-NEXT: movdqa (%rsi), %xmm0
; SSE41-NEXT: movdqa %xmm1, %xmm2
; SSE41-NEXT: pcmpgtd %xmm0, %xmm2
; SSE41-NEXT: por {{.*}}(%rip), %xmm2
; SSE41-NEXT: movdqa %xmm1, %xmm3
; SSE41-NEXT: pminsd %xmm0, %xmm3
; SSE41-NEXT: pmaxsd %xmm1, %xmm0
; SSE41-NEXT: psubd %xmm3, %xmm0
; SSE41-NEXT: psrld $1, %xmm0
; SSE41-NEXT: pmulld %xmm2, %xmm0
; SSE41-NEXT: paddd %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX1-FALLBACK-LABEL: vec128_i32_signed_mem_mem:
; AVX1-FALLBACK: # %bb.0:
; AVX1-FALLBACK-NEXT: vmovdqa (%rdi), %xmm0
; AVX1-FALLBACK-NEXT: vmovdqa (%rsi), %xmm1
; AVX1-FALLBACK-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm2
; AVX1-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; AVX1-FALLBACK-NEXT: vpminsd %xmm1, %xmm0, %xmm3
; AVX1-FALLBACK-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; AVX1-FALLBACK-NEXT: vpsubd %xmm3, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpmulld %xmm2, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX1-FALLBACK-NEXT: retq
;
; AVX2-FALLBACK-LABEL: vec128_i32_signed_mem_mem:
; AVX2-FALLBACK: # %bb.0:
; AVX2-FALLBACK-NEXT: vmovdqa (%rdi), %xmm0
; AVX2-FALLBACK-NEXT: vmovdqa (%rsi), %xmm1
; AVX2-FALLBACK-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm2
; AVX2-FALLBACK-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; AVX2-FALLBACK-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX2-FALLBACK-NEXT: vpminsd %xmm1, %xmm0, %xmm3
; AVX2-FALLBACK-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; AVX2-FALLBACK-NEXT: vpsubd %xmm3, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpmulld %xmm2, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX2-FALLBACK-NEXT: retq
;
; XOP-FALLBACK-LABEL: vec128_i32_signed_mem_mem:
; XOP-FALLBACK: # %bb.0:
; XOP-FALLBACK-NEXT: vmovdqa (%rdi), %xmm0
; XOP-FALLBACK-NEXT: vmovdqa (%rsi), %xmm1
; XOP-FALLBACK-NEXT: vpcomgtd %xmm1, %xmm0, %xmm2
; XOP-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; XOP-FALLBACK-NEXT: vpminsd %xmm1, %xmm0, %xmm3
; XOP-FALLBACK-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; XOP-FALLBACK-NEXT: vpsubd %xmm3, %xmm1, %xmm1
; XOP-FALLBACK-NEXT: vpsrld $1, %xmm1, %xmm1
; XOP-FALLBACK-NEXT: vpmacsdd %xmm0, %xmm2, %xmm1, %xmm0
; XOP-FALLBACK-NEXT: retq
;
; XOPAVX1-LABEL: vec128_i32_signed_mem_mem:
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vmovdqa (%rdi), %xmm0
; XOPAVX1-NEXT: vmovdqa (%rsi), %xmm1
; XOPAVX1-NEXT: vpcomgtd %xmm1, %xmm0, %xmm2
; XOPAVX1-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; XOPAVX1-NEXT: vpminsd %xmm1, %xmm0, %xmm3
; XOPAVX1-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; XOPAVX1-NEXT: vpsubd %xmm3, %xmm1, %xmm1
; XOPAVX1-NEXT: vpsrld $1, %xmm1, %xmm1
; XOPAVX1-NEXT: vpmacsdd %xmm0, %xmm2, %xmm1, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: vec128_i32_signed_mem_mem:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vmovdqa (%rdi), %xmm0
; XOPAVX2-NEXT: vmovdqa (%rsi), %xmm1
; XOPAVX2-NEXT: vpcomgtd %xmm1, %xmm0, %xmm2
; XOPAVX2-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; XOPAVX2-NEXT: vpor %xmm3, %xmm2, %xmm2
; XOPAVX2-NEXT: vpminsd %xmm1, %xmm0, %xmm3
; XOPAVX2-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; XOPAVX2-NEXT: vpsubd %xmm3, %xmm1, %xmm1
; XOPAVX2-NEXT: vpsrld $1, %xmm1, %xmm1
; XOPAVX2-NEXT: vpmacsdd %xmm0, %xmm2, %xmm1, %xmm0
; XOPAVX2-NEXT: retq
;
; AVX512F-LABEL: vec128_i32_signed_mem_mem:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vmovdqa (%rsi), %xmm1
; AVX512F-NEXT: vpcmpgtd %zmm1, %zmm0, %k1
; AVX512F-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; AVX512F-NEXT: vmovdqa32 %zmm2, %zmm3 {%k1}
; AVX512F-NEXT: vpminsd %xmm1, %xmm0, %xmm2
; AVX512F-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; AVX512F-NEXT: vpsubd %xmm2, %xmm1, %xmm1
; AVX512F-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX512F-NEXT: vpmulld %xmm3, %xmm1, %xmm1
; AVX512F-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: vec128_i32_signed_mem_mem:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512VL-NEXT: vmovdqa (%rsi), %xmm1
; AVX512VL-NEXT: vpcmpgtd %xmm1, %xmm0, %k1
; AVX512VL-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512VL-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; AVX512VL-NEXT: vmovdqa32 %xmm2, %xmm3 {%k1}
; AVX512VL-NEXT: vpminsd %xmm1, %xmm0, %xmm2
; AVX512VL-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; AVX512VL-NEXT: vpsubd %xmm2, %xmm1, %xmm1
; AVX512VL-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX512VL-NEXT: vpmulld %xmm3, %xmm1, %xmm1
; AVX512VL-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i32_signed_mem_mem:
; AVX512BW-FALLBACK: # %bb.0:
; AVX512BW-FALLBACK-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-FALLBACK-NEXT: vmovdqa (%rsi), %xmm1
; AVX512BW-FALLBACK-NEXT: vpcmpgtd %zmm1, %zmm0, %k1
; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
; AVX512BW-FALLBACK-NEXT: vmovdqa32 %zmm2, %zmm3 {%k1}
; AVX512BW-FALLBACK-NEXT: vpminsd %xmm1, %xmm0, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmaxsd %xmm1, %xmm0, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsubd %xmm2, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsrld $1, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpmulld %xmm3, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX512BW-FALLBACK-NEXT: vzeroupper
; AVX512BW-FALLBACK-NEXT: retq
  %a1 = load <4 x i32>, <4 x i32>* %a1_addr
  %a2 = load <4 x i32>, <4 x i32>* %a2_addr
  %t3 = icmp sgt <4 x i32> %a1, %a2 ; signed
  %t4 = select <4 x i1> %t3, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
  %t5 = select <4 x i1> %t3, <4 x i32> %a2, <4 x i32> %a1
  %t6 = select <4 x i1> %t3, <4 x i32> %a1, <4 x i32> %a2
  %t7 = sub <4 x i32> %t6, %t5
  %t8 = lshr <4 x i32> %t7, <i32 1, i32 1, i32 1, i32 1>
  %t9 = mul nsw <4 x i32> %t8, %t4 ; signed
  %a10 = add nsw <4 x i32> %t9, %a1 ; signed
  ret <4 x i32> %a10
}

; ---------------------------------------------------------------------------- ;
; 64-bit width. 128 / 64 = 2 elts.
; ---------------------------------------------------------------------------- ;

; Values come from regs
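;
; Note (a reader aid, not part of the generated checks): before AVX512DQ there
; is no vector i64 multiply, so the i64 checks below build the 64-bit product
; from 32-bit halves with pmuludq, roughly
;   lo(a)*lo(b) + ((lo(a)*hi(b) + hi(a)*lo(b)) << 32)
; which is the psrlq $32 / pmuludq / psllq $32 / paddq sequence that recurs in
; the SSE, AVX, and AVX512 bodies.
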
define <2 x i64> @vec128_i64_signed_reg_reg(<2 x i64> %a1, <2 x i64> %a2) nounwind {
; SSE2-LABEL: vec128_i64_signed_reg_reg:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm1, %xmm5
; SSE2-NEXT: pxor %xmm4, %xmm5
; SSE2-NEXT: pxor %xmm0, %xmm4
; SSE2-NEXT: movdqa %xmm4, %xmm2
; SSE2-NEXT: pcmpgtd %xmm5, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
; SSE2-NEXT: movdqa %xmm4, %xmm6
; SSE2-NEXT: pcmpeqd %xmm5, %xmm6
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
; SSE2-NEXT: pand %xmm6, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE2-NEXT: por %xmm3, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [1,1]
; SSE2-NEXT: por %xmm2, %xmm3
; SSE2-NEXT: pcmpgtd %xmm4, %xmm5
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm5[0,0,2,2]
; SSE2-NEXT: pand %xmm6, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
; SSE2-NEXT: por %xmm4, %xmm5
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: pand %xmm5, %xmm4
; SSE2-NEXT: pandn %xmm1, %xmm5
; SSE2-NEXT: por %xmm4, %xmm5
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: pand %xmm2, %xmm4
; SSE2-NEXT: pandn %xmm1, %xmm2
; SSE2-NEXT: por %xmm4, %xmm2
; SSE2-NEXT: psubq %xmm5, %xmm2
; SSE2-NEXT: psrlq $1, %xmm2
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: psrlq $32, %xmm4
; SSE2-NEXT: pmuludq %xmm2, %xmm4
; SSE2-NEXT: movdqa %xmm2, %xmm1
; SSE2-NEXT: psrlq $32, %xmm1
; SSE2-NEXT: pmuludq %xmm3, %xmm1
; SSE2-NEXT: paddq %xmm4, %xmm1
; SSE2-NEXT: psllq $32, %xmm1
; SSE2-NEXT: pmuludq %xmm3, %xmm2
; SSE2-NEXT: paddq %xmm0, %xmm1
; SSE2-NEXT: paddq %xmm2, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: vec128_i64_signed_reg_reg:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648]
; SSE41-NEXT: movdqa %xmm1, %xmm5
; SSE41-NEXT: pxor %xmm0, %xmm5
; SSE41-NEXT: pxor %xmm2, %xmm0
; SSE41-NEXT: movdqa %xmm0, %xmm3
; SSE41-NEXT: pcmpgtd %xmm5, %xmm3
; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
; SSE41-NEXT: movdqa %xmm0, %xmm6
; SSE41-NEXT: pcmpeqd %xmm5, %xmm6
; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm6[1,1,3,3]
; SSE41-NEXT: pand %xmm4, %xmm7
; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
; SSE41-NEXT: por %xmm7, %xmm4
; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [1,1]
; SSE41-NEXT: por %xmm4, %xmm3
; SSE41-NEXT: pcmpgtd %xmm0, %xmm5
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm5[0,0,2,2]
; SSE41-NEXT: pand %xmm6, %xmm0
; SSE41-NEXT: por %xmm5, %xmm0
; SSE41-NEXT: movdqa %xmm1, %xmm5
; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm5
; SSE41-NEXT: movdqa %xmm4, %xmm0
; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm1
; SSE41-NEXT: psubq %xmm5, %xmm1
; SSE41-NEXT: psrlq $1, %xmm1
; SSE41-NEXT: movdqa %xmm3, %xmm0
; SSE41-NEXT: psrlq $32, %xmm0
; SSE41-NEXT: pmuludq %xmm1, %xmm0
; SSE41-NEXT: movdqa %xmm1, %xmm4
; SSE41-NEXT: psrlq $32, %xmm4
; SSE41-NEXT: pmuludq %xmm3, %xmm4
; SSE41-NEXT: paddq %xmm0, %xmm4
; SSE41-NEXT: psllq $32, %xmm4
; SSE41-NEXT: pmuludq %xmm1, %xmm3
; SSE41-NEXT: paddq %xmm2, %xmm4
; SSE41-NEXT: paddq %xmm4, %xmm3
; SSE41-NEXT: movdqa %xmm3, %xmm0
; SSE41-NEXT: retq
;
; AVX1-FALLBACK-LABEL: vec128_i64_signed_reg_reg:
; AVX1-FALLBACK: # %bb.0:
; AVX1-FALLBACK-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; AVX1-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm3
; AVX1-FALLBACK-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm4
; AVX1-FALLBACK-NEXT: vblendvpd %xmm4, %xmm0, %xmm1, %xmm4
; AVX1-FALLBACK-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpsubq %xmm4, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpsrlq $1, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpsrlq $32, %xmm3, %xmm2
; AVX1-FALLBACK-NEXT: vpmuludq %xmm2, %xmm1, %xmm2
; AVX1-FALLBACK-NEXT: vpsrlq $32, %xmm1, %xmm4
; AVX1-FALLBACK-NEXT: vpmuludq %xmm3, %xmm4, %xmm4
; AVX1-FALLBACK-NEXT: vpaddq %xmm4, %xmm2, %xmm2
; AVX1-FALLBACK-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX1-FALLBACK-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX1-FALLBACK-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX1-FALLBACK-NEXT: retq
;
; AVX2-FALLBACK-LABEL: vec128_i64_signed_reg_reg:
; AVX2-FALLBACK: # %bb.0:
; AVX2-FALLBACK-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; AVX2-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm3
; AVX2-FALLBACK-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm4
; AVX2-FALLBACK-NEXT: vblendvpd %xmm4, %xmm0, %xmm1, %xmm4
; AVX2-FALLBACK-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpsubq %xmm4, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpsrlq $1, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpsrlq $32, %xmm3, %xmm2
; AVX2-FALLBACK-NEXT: vpmuludq %xmm2, %xmm1, %xmm2
; AVX2-FALLBACK-NEXT: vpsrlq $32, %xmm1, %xmm4
; AVX2-FALLBACK-NEXT: vpmuludq %xmm3, %xmm4, %xmm4
; AVX2-FALLBACK-NEXT: vpaddq %xmm4, %xmm2, %xmm2
; AVX2-FALLBACK-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX2-FALLBACK-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX2-FALLBACK-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX2-FALLBACK-NEXT: retq
;
; XOP-LABEL: vec128_i64_signed_reg_reg:
; XOP: # %bb.0:
; XOP-NEXT: vpcomgtq %xmm1, %xmm0, %xmm2
; XOP-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm3
; XOP-NEXT: vpcomltq %xmm1, %xmm0, %xmm4
; XOP-NEXT: vblendvpd %xmm4, %xmm0, %xmm1, %xmm4
; XOP-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm1
; XOP-NEXT: vpsubq %xmm4, %xmm1, %xmm1
; XOP-NEXT: vpsrlq $1, %xmm1, %xmm1
; XOP-NEXT: vpsrlq $32, %xmm3, %xmm2
; XOP-NEXT: vpmuludq %xmm2, %xmm1, %xmm2
; XOP-NEXT: vpsrlq $32, %xmm1, %xmm4
; XOP-NEXT: vpmuludq %xmm3, %xmm4, %xmm4
; XOP-NEXT: vpaddq %xmm4, %xmm2, %xmm2
; XOP-NEXT: vpsllq $32, %xmm2, %xmm2
; XOP-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; XOP-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; XOP-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; XOP-NEXT: retq
;
; AVX512F-LABEL: vec128_i64_signed_reg_reg:
; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vpcmpgtq %zmm1, %zmm0, %k1
; AVX512F-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1]
; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1}
; AVX512F-NEXT: vpminsq %zmm1, %zmm0, %zmm2
; AVX512F-NEXT: vpmaxsq %zmm1, %zmm0, %zmm1
; AVX512F-NEXT: vpsubq %xmm2, %xmm1, %xmm1
; AVX512F-NEXT: vpsrlq $1, %xmm1, %xmm1
; AVX512F-NEXT: vpsrlq $32, %xmm1, %xmm2
; AVX512F-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
; AVX512F-NEXT: vpsrlq $32, %xmm3, %xmm4
; AVX512F-NEXT: vpmuludq %xmm4, %xmm1, %xmm4
; AVX512F-NEXT: vpaddq %xmm2, %xmm4, %xmm2
; AVX512F-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX512F-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; AVX512F-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX512F-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: vec128_i64_signed_reg_reg:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpcmpgtq %xmm1, %xmm0, %k1
; AVX512VL-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1]
; AVX512VL-NEXT: vmovdqa64 %xmm2, %xmm3 {%k1}
; AVX512VL-NEXT: vpminsq %xmm1, %xmm0, %xmm2
; AVX512VL-NEXT: vpmaxsq %xmm1, %xmm0, %xmm1
; AVX512VL-NEXT: vpsubq %xmm2, %xmm1, %xmm1
; AVX512VL-NEXT: vpsrlq $1, %xmm1, %xmm1
; AVX512VL-NEXT: vpsrlq $32, %xmm3, %xmm2
; AVX512VL-NEXT: vpmuludq %xmm2, %xmm1, %xmm2
; AVX512VL-NEXT: vpsrlq $32, %xmm1, %xmm4
; AVX512VL-NEXT: vpmuludq %xmm3, %xmm4, %xmm4
; AVX512VL-NEXT: vpaddq %xmm4, %xmm2, %xmm2
; AVX512VL-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX512VL-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; AVX512VL-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX512VL-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i64_signed_reg_reg:
; AVX512BW-FALLBACK: # %bb.0:
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-FALLBACK-NEXT: vpcmpgtq %zmm1, %zmm0, %k1
; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1]
; AVX512BW-FALLBACK-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1}
; AVX512BW-FALLBACK-NEXT: vpminsq %zmm1, %zmm0, %zmm2
; AVX512BW-FALLBACK-NEXT: vpmaxsq %zmm1, %zmm0, %zmm1
; AVX512BW-FALLBACK-NEXT: vpsubq %xmm2, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsrlq $1, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsrlq $32, %xmm1, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vpsrlq $32, %xmm3, %xmm4
; AVX512BW-FALLBACK-NEXT: vpmuludq %xmm4, %xmm1, %xmm4
; AVX512BW-FALLBACK-NEXT: vpaddq %xmm2, %xmm4, %xmm2
; AVX512BW-FALLBACK-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX512BW-FALLBACK-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX512BW-FALLBACK-NEXT: vzeroupper
; AVX512BW-FALLBACK-NEXT: retq
  %t3 = icmp sgt <2 x i64> %a1, %a2 ; signed
  %t4 = select <2 x i1> %t3, <2 x i64> <i64 -1, i64 -1>, <2 x i64> <i64 1, i64 1>
  %t5 = select <2 x i1> %t3, <2 x i64> %a2, <2 x i64> %a1
  %t6 = select <2 x i1> %t3, <2 x i64> %a1, <2 x i64> %a2
  %t7 = sub <2 x i64> %t6, %t5
  %t8 = lshr <2 x i64> %t7, <i64 1, i64 1>
  %t9 = mul nsw <2 x i64> %t8, %t4 ; signed
  %a10 = add nsw <2 x i64> %t9, %a1 ; signed
  ret <2 x i64> %a10
}
1108 define <2 x i64> @vec128_i64_unsigned_reg_reg(<2 x i64> %a1, <2 x i64> %a2) nounwind {
1109 ; SSE2-LABEL: vec128_i64_unsigned_reg_reg:
1111 ; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [9223372039002259456,9223372039002259456]
1112 ; SSE2-NEXT: movdqa %xmm1, %xmm5
1113 ; SSE2-NEXT: pxor %xmm4, %xmm5
1114 ; SSE2-NEXT: pxor %xmm0, %xmm4
1115 ; SSE2-NEXT: movdqa %xmm4, %xmm2
1116 ; SSE2-NEXT: pcmpgtd %xmm5, %xmm2
1117 ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
1118 ; SSE2-NEXT: movdqa %xmm4, %xmm6
1119 ; SSE2-NEXT: pcmpeqd %xmm5, %xmm6
1120 ; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
1121 ; SSE2-NEXT: pand %xmm6, %xmm3
1122 ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
1123 ; SSE2-NEXT: por %xmm3, %xmm2
1124 ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [1,1]
1125 ; SSE2-NEXT: por %xmm2, %xmm3
1126 ; SSE2-NEXT: pcmpgtd %xmm4, %xmm5
1127 ; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm5[0,0,2,2]
1128 ; SSE2-NEXT: pand %xmm6, %xmm4
1129 ; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
1130 ; SSE2-NEXT: por %xmm4, %xmm5
1131 ; SSE2-NEXT: movdqa %xmm0, %xmm4
1132 ; SSE2-NEXT: pand %xmm5, %xmm4
1133 ; SSE2-NEXT: pandn %xmm1, %xmm5
1134 ; SSE2-NEXT: por %xmm4, %xmm5
1135 ; SSE2-NEXT: movdqa %xmm0, %xmm4
1136 ; SSE2-NEXT: pand %xmm2, %xmm4
1137 ; SSE2-NEXT: pandn %xmm1, %xmm2
1138 ; SSE2-NEXT: por %xmm4, %xmm2
1139 ; SSE2-NEXT: psubq %xmm5, %xmm2
1140 ; SSE2-NEXT: psrlq $1, %xmm2
1141 ; SSE2-NEXT: movdqa %xmm3, %xmm4
1142 ; SSE2-NEXT: psrlq $32, %xmm4
1143 ; SSE2-NEXT: pmuludq %xmm2, %xmm4
1144 ; SSE2-NEXT: movdqa %xmm2, %xmm1
1145 ; SSE2-NEXT: psrlq $32, %xmm1
1146 ; SSE2-NEXT: pmuludq %xmm3, %xmm1
1147 ; SSE2-NEXT: paddq %xmm4, %xmm1
1148 ; SSE2-NEXT: psllq $32, %xmm1
1149 ; SSE2-NEXT: pmuludq %xmm3, %xmm2
1150 ; SSE2-NEXT: paddq %xmm0, %xmm1
1151 ; SSE2-NEXT: paddq %xmm2, %xmm1
1152 ; SSE2-NEXT: movdqa %xmm1, %xmm0
1155 ; SSE41-LABEL: vec128_i64_unsigned_reg_reg:
1157 ; SSE41-NEXT: movdqa %xmm0, %xmm2
1158 ; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [9223372039002259456,9223372039002259456]
1159 ; SSE41-NEXT: movdqa %xmm1, %xmm5
1160 ; SSE41-NEXT: pxor %xmm0, %xmm5
1161 ; SSE41-NEXT: pxor %xmm2, %xmm0
1162 ; SSE41-NEXT: movdqa %xmm0, %xmm3
1163 ; SSE41-NEXT: pcmpgtd %xmm5, %xmm3
1164 ; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
1165 ; SSE41-NEXT: movdqa %xmm0, %xmm6
1166 ; SSE41-NEXT: pcmpeqd %xmm5, %xmm6
1167 ; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm6[1,1,3,3]
1168 ; SSE41-NEXT: pand %xmm4, %xmm7
1169 ; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
1170 ; SSE41-NEXT: por %xmm7, %xmm4
1171 ; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [1,1]
1172 ; SSE41-NEXT: por %xmm4, %xmm3
1173 ; SSE41-NEXT: pcmpgtd %xmm0, %xmm5
1174 ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm5[0,0,2,2]
1175 ; SSE41-NEXT: pand %xmm6, %xmm0
1176 ; SSE41-NEXT: por %xmm5, %xmm0
1177 ; SSE41-NEXT: movdqa %xmm1, %xmm5
1178 ; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm5
1179 ; SSE41-NEXT: movdqa %xmm4, %xmm0
1180 ; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm1
1181 ; SSE41-NEXT: psubq %xmm5, %xmm1
1182 ; SSE41-NEXT: psrlq $1, %xmm1
1183 ; SSE41-NEXT: movdqa %xmm3, %xmm0
1184 ; SSE41-NEXT: psrlq $32, %xmm0
1185 ; SSE41-NEXT: pmuludq %xmm1, %xmm0
1186 ; SSE41-NEXT: movdqa %xmm1, %xmm4
1187 ; SSE41-NEXT: psrlq $32, %xmm4
1188 ; SSE41-NEXT: pmuludq %xmm3, %xmm4
1189 ; SSE41-NEXT: paddq %xmm0, %xmm4
1190 ; SSE41-NEXT: psllq $32, %xmm4
1191 ; SSE41-NEXT: pmuludq %xmm1, %xmm3
1192 ; SSE41-NEXT: paddq %xmm2, %xmm4
1193 ; SSE41-NEXT: paddq %xmm4, %xmm3
1194 ; SSE41-NEXT: movdqa %xmm3, %xmm0
1197 ; AVX1-FALLBACK-LABEL: vec128_i64_unsigned_reg_reg:
1198 ; AVX1-FALLBACK: # %bb.0:
1199 ; AVX1-FALLBACK-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
1200 ; AVX1-FALLBACK-NEXT: vpxor %xmm2, %xmm1, %xmm3
1201 ; AVX1-FALLBACK-NEXT: vpxor %xmm2, %xmm0, %xmm2
1202 ; AVX1-FALLBACK-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm4
1203 ; AVX1-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm4, %xmm5
1204 ; AVX1-FALLBACK-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
1205 ; AVX1-FALLBACK-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm2
1206 ; AVX1-FALLBACK-NEXT: vblendvpd %xmm4, %xmm0, %xmm1, %xmm1
1207 ; AVX1-FALLBACK-NEXT: vpsubq %xmm2, %xmm1, %xmm1
1208 ; AVX1-FALLBACK-NEXT: vpsrlq $1, %xmm1, %xmm1
1209 ; AVX1-FALLBACK-NEXT: vpsrlq $32, %xmm5, %xmm2
1210 ; AVX1-FALLBACK-NEXT: vpmuludq %xmm2, %xmm1, %xmm2
1211 ; AVX1-FALLBACK-NEXT: vpsrlq $32, %xmm1, %xmm3
1212 ; AVX1-FALLBACK-NEXT: vpmuludq %xmm5, %xmm3, %xmm3
1213 ; AVX1-FALLBACK-NEXT: vpaddq %xmm3, %xmm2, %xmm2
1214 ; AVX1-FALLBACK-NEXT: vpsllq $32, %xmm2, %xmm2
1215 ; AVX1-FALLBACK-NEXT: vpmuludq %xmm5, %xmm1, %xmm1
1216 ; AVX1-FALLBACK-NEXT: vpaddq %xmm0, %xmm2, %xmm0
1217 ; AVX1-FALLBACK-NEXT: vpaddq %xmm0, %xmm1, %xmm0
1218 ; AVX1-FALLBACK-NEXT: retq
1220 ; AVX2-FALLBACK-LABEL: vec128_i64_unsigned_reg_reg:
1221 ; AVX2-FALLBACK: # %bb.0:
1222 ; AVX2-FALLBACK-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
1223 ; AVX2-FALLBACK-NEXT: vpxor %xmm2, %xmm1, %xmm3
1224 ; AVX2-FALLBACK-NEXT: vpxor %xmm2, %xmm0, %xmm2
1225 ; AVX2-FALLBACK-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm4
1226 ; AVX2-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm4, %xmm5
1227 ; AVX2-FALLBACK-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
1228 ; AVX2-FALLBACK-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm2
1229 ; AVX2-FALLBACK-NEXT: vblendvpd %xmm4, %xmm0, %xmm1, %xmm1
1230 ; AVX2-FALLBACK-NEXT: vpsubq %xmm2, %xmm1, %xmm1
1231 ; AVX2-FALLBACK-NEXT: vpsrlq $1, %xmm1, %xmm1
1232 ; AVX2-FALLBACK-NEXT: vpsrlq $32, %xmm5, %xmm2
1233 ; AVX2-FALLBACK-NEXT: vpmuludq %xmm2, %xmm1, %xmm2
1234 ; AVX2-FALLBACK-NEXT: vpsrlq $32, %xmm1, %xmm3
1235 ; AVX2-FALLBACK-NEXT: vpmuludq %xmm5, %xmm3, %xmm3
1236 ; AVX2-FALLBACK-NEXT: vpaddq %xmm3, %xmm2, %xmm2
1237 ; AVX2-FALLBACK-NEXT: vpsllq $32, %xmm2, %xmm2
1238 ; AVX2-FALLBACK-NEXT: vpmuludq %xmm5, %xmm1, %xmm1
1239 ; AVX2-FALLBACK-NEXT: vpaddq %xmm0, %xmm2, %xmm0
1240 ; AVX2-FALLBACK-NEXT: vpaddq %xmm0, %xmm1, %xmm0
1241 ; AVX2-FALLBACK-NEXT: retq
1243 ; XOP-LABEL: vec128_i64_unsigned_reg_reg:
1245 ; XOP-NEXT: vpcomgtuq %xmm1, %xmm0, %xmm2
1246 ; XOP-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm3
1247 ; XOP-NEXT: vpcomltuq %xmm1, %xmm0, %xmm4
1248 ; XOP-NEXT: vblendvpd %xmm4, %xmm0, %xmm1, %xmm4
; XOP-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm1
; XOP-NEXT: vpsubq %xmm4, %xmm1, %xmm1
; XOP-NEXT: vpsrlq $1, %xmm1, %xmm1
; XOP-NEXT: vpsrlq $32, %xmm3, %xmm2
; XOP-NEXT: vpmuludq %xmm2, %xmm1, %xmm2
; XOP-NEXT: vpsrlq $32, %xmm1, %xmm4
; XOP-NEXT: vpmuludq %xmm3, %xmm4, %xmm4
; XOP-NEXT: vpaddq %xmm4, %xmm2, %xmm2
; XOP-NEXT: vpsllq $32, %xmm2, %xmm2
; XOP-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; XOP-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; XOP-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; XOP-NEXT: retq
;
; AVX512F-LABEL: vec128_i64_unsigned_reg_reg:
; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vpcmpnleuq %zmm1, %zmm0, %k1
; AVX512F-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1]
; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1}
; AVX512F-NEXT: vpminuq %zmm1, %zmm0, %zmm2
; AVX512F-NEXT: vpmaxuq %zmm1, %zmm0, %zmm1
; AVX512F-NEXT: vpsubq %xmm2, %xmm1, %xmm1
; AVX512F-NEXT: vpsrlq $1, %xmm1, %xmm1
; AVX512F-NEXT: vpsrlq $32, %xmm1, %xmm2
; AVX512F-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
; AVX512F-NEXT: vpsrlq $32, %xmm3, %xmm4
; AVX512F-NEXT: vpmuludq %xmm4, %xmm1, %xmm4
; AVX512F-NEXT: vpaddq %xmm2, %xmm4, %xmm2
; AVX512F-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX512F-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; AVX512F-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX512F-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: vec128_i64_unsigned_reg_reg:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpcmpnleuq %xmm1, %xmm0, %k1
; AVX512VL-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1]
; AVX512VL-NEXT: vmovdqa64 %xmm2, %xmm3 {%k1}
; AVX512VL-NEXT: vpminuq %xmm1, %xmm0, %xmm2
; AVX512VL-NEXT: vpmaxuq %xmm1, %xmm0, %xmm1
; AVX512VL-NEXT: vpsubq %xmm2, %xmm1, %xmm1
; AVX512VL-NEXT: vpsrlq $1, %xmm1, %xmm1
; AVX512VL-NEXT: vpsrlq $32, %xmm3, %xmm2
; AVX512VL-NEXT: vpmuludq %xmm2, %xmm1, %xmm2
; AVX512VL-NEXT: vpsrlq $32, %xmm1, %xmm4
; AVX512VL-NEXT: vpmuludq %xmm3, %xmm4, %xmm4
; AVX512VL-NEXT: vpaddq %xmm4, %xmm2, %xmm2
; AVX512VL-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX512VL-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; AVX512VL-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX512VL-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i64_unsigned_reg_reg:
; AVX512BW-FALLBACK: # %bb.0:
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-FALLBACK-NEXT: vpcmpnleuq %zmm1, %zmm0, %k1
; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1]
; AVX512BW-FALLBACK-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1}
; AVX512BW-FALLBACK-NEXT: vpminuq %zmm1, %zmm0, %zmm2
; AVX512BW-FALLBACK-NEXT: vpmaxuq %zmm1, %zmm0, %zmm1
; AVX512BW-FALLBACK-NEXT: vpsubq %xmm2, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsrlq $1, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsrlq $32, %xmm1, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vpsrlq $32, %xmm3, %xmm4
; AVX512BW-FALLBACK-NEXT: vpmuludq %xmm4, %xmm1, %xmm4
; AVX512BW-FALLBACK-NEXT: vpaddq %xmm2, %xmm4, %xmm2
; AVX512BW-FALLBACK-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX512BW-FALLBACK-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX512BW-FALLBACK-NEXT: vzeroupper
; AVX512BW-FALLBACK-NEXT: retq
  %t3 = icmp ugt <2 x i64> %a1, %a2
  %t4 = select <2 x i1> %t3, <2 x i64> <i64 -1, i64 -1>, <2 x i64> <i64 1, i64 1>
  %t5 = select <2 x i1> %t3, <2 x i64> %a2, <2 x i64> %a1
  %t6 = select <2 x i1> %t3, <2 x i64> %a1, <2 x i64> %a2
  %t7 = sub <2 x i64> %t6, %t5
  %t8 = lshr <2 x i64> %t7, <i64 1, i64 1>
  %t9 = mul <2 x i64> %t8, %t4
  %a10 = add <2 x i64> %t9, %a1
  ret <2 x i64> %a10
}
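
; For reference: the select chain above is the generic midpoint idiom that
; every test in this file instantiates. A minimal scalar sketch in C++
; (illustration only, not part of the test; the function name is hypothetical):
;
;   #include <cstdint>
;
;   uint64_t MidpointU64(uint64_t a1, uint64_t a2) {
;     uint64_t Mul = a1 > a2 ? UINT64_MAX : 1; // t4: -1 or 1 (mod 2^64)
;     uint64_t Lo  = a1 > a2 ? a2 : a1;        // t5: min(a1, a2)
;     uint64_t Hi  = a1 > a2 ? a1 : a2;        // t6: max(a1, a2)
;     return a1 + ((Hi - Lo) >> 1) * Mul;      // t7, t8, t9, a10
;   }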

; Values are loaded. Only check signed case.

define <2 x i64> @vec128_i64_signed_mem_reg(<2 x i64>* %a1_addr, <2 x i64> %a2) nounwind {
; SSE2-LABEL: vec128_i64_signed_mem_reg:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm1
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: pxor %xmm4, %xmm5
; SSE2-NEXT: pxor %xmm1, %xmm4
; SSE2-NEXT: movdqa %xmm4, %xmm2
; SSE2-NEXT: pcmpgtd %xmm5, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
; SSE2-NEXT: movdqa %xmm4, %xmm6
; SSE2-NEXT: pcmpeqd %xmm5, %xmm6
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
; SSE2-NEXT: pand %xmm6, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE2-NEXT: por %xmm3, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [1,1]
; SSE2-NEXT: por %xmm2, %xmm3
; SSE2-NEXT: pcmpgtd %xmm4, %xmm5
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm5[0,0,2,2]
; SSE2-NEXT: pand %xmm6, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
; SSE2-NEXT: por %xmm4, %xmm5
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: pand %xmm5, %xmm4
; SSE2-NEXT: pandn %xmm0, %xmm5
; SSE2-NEXT: por %xmm4, %xmm5
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: pand %xmm2, %xmm4
; SSE2-NEXT: pandn %xmm0, %xmm2
; SSE2-NEXT: por %xmm4, %xmm2
; SSE2-NEXT: psubq %xmm5, %xmm2
; SSE2-NEXT: psrlq $1, %xmm2
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: psrlq $32, %xmm4
; SSE2-NEXT: pmuludq %xmm2, %xmm4
; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: psrlq $32, %xmm0
; SSE2-NEXT: pmuludq %xmm3, %xmm0
; SSE2-NEXT: paddq %xmm4, %xmm0
; SSE2-NEXT: psllq $32, %xmm0
; SSE2-NEXT: pmuludq %xmm3, %xmm2
; SSE2-NEXT: paddq %xmm1, %xmm0
; SSE2-NEXT: paddq %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: vec128_i64_signed_mem_reg:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: movdqa (%rdi), %xmm3
; SSE41-NEXT: movdqa {{.*#+}} xmm5 = [2147483648,2147483648]
; SSE41-NEXT: pxor %xmm5, %xmm0
; SSE41-NEXT: pxor %xmm3, %xmm5
; SSE41-NEXT: movdqa %xmm5, %xmm2
; SSE41-NEXT: pcmpgtd %xmm0, %xmm2
; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
; SSE41-NEXT: movdqa %xmm5, %xmm6
; SSE41-NEXT: pcmpeqd %xmm0, %xmm6
; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm6[1,1,3,3]
; SSE41-NEXT: pand %xmm4, %xmm7
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE41-NEXT: por %xmm7, %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm4 = [1,1]
; SSE41-NEXT: por %xmm2, %xmm4
; SSE41-NEXT: pcmpgtd %xmm5, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm0[0,0,2,2]
; SSE41-NEXT: pand %xmm6, %xmm5
; SSE41-NEXT: por %xmm5, %xmm0
; SSE41-NEXT: movdqa %xmm1, %xmm5
; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm5
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm1
; SSE41-NEXT: psubq %xmm5, %xmm1
; SSE41-NEXT: psrlq $1, %xmm1
; SSE41-NEXT: movdqa %xmm4, %xmm2
; SSE41-NEXT: psrlq $32, %xmm2
; SSE41-NEXT: pmuludq %xmm1, %xmm2
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: psrlq $32, %xmm0
; SSE41-NEXT: pmuludq %xmm4, %xmm0
; SSE41-NEXT: paddq %xmm2, %xmm0
; SSE41-NEXT: psllq $32, %xmm0
; SSE41-NEXT: pmuludq %xmm4, %xmm1
; SSE41-NEXT: paddq %xmm3, %xmm0
; SSE41-NEXT: paddq %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX1-FALLBACK-LABEL: vec128_i64_signed_mem_reg:
; AVX1-FALLBACK: # %bb.0:
; AVX1-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX1-FALLBACK-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm2
; AVX1-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm3
; AVX1-FALLBACK-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm4
; AVX1-FALLBACK-NEXT: vblendvpd %xmm4, %xmm1, %xmm0, %xmm4
; AVX1-FALLBACK-NEXT: vblendvpd %xmm2, %xmm1, %xmm0, %xmm0
; AVX1-FALLBACK-NEXT: vpsubq %xmm4, %xmm0, %xmm0
; AVX1-FALLBACK-NEXT: vpsrlq $1, %xmm0, %xmm0
; AVX1-FALLBACK-NEXT: vpsrlq $32, %xmm3, %xmm2
; AVX1-FALLBACK-NEXT: vpmuludq %xmm2, %xmm0, %xmm2
; AVX1-FALLBACK-NEXT: vpsrlq $32, %xmm0, %xmm4
; AVX1-FALLBACK-NEXT: vpmuludq %xmm3, %xmm4, %xmm4
; AVX1-FALLBACK-NEXT: vpaddq %xmm4, %xmm2, %xmm2
; AVX1-FALLBACK-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX1-FALLBACK-NEXT: vpmuludq %xmm3, %xmm0, %xmm0
; AVX1-FALLBACK-NEXT: vpaddq %xmm1, %xmm2, %xmm1
; AVX1-FALLBACK-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX1-FALLBACK-NEXT: retq
;
; AVX2-FALLBACK-LABEL: vec128_i64_signed_mem_reg:
; AVX2-FALLBACK: # %bb.0:
; AVX2-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX2-FALLBACK-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm2
; AVX2-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm3
; AVX2-FALLBACK-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm4
; AVX2-FALLBACK-NEXT: vblendvpd %xmm4, %xmm1, %xmm0, %xmm4
; AVX2-FALLBACK-NEXT: vblendvpd %xmm2, %xmm1, %xmm0, %xmm0
; AVX2-FALLBACK-NEXT: vpsubq %xmm4, %xmm0, %xmm0
; AVX2-FALLBACK-NEXT: vpsrlq $1, %xmm0, %xmm0
; AVX2-FALLBACK-NEXT: vpsrlq $32, %xmm3, %xmm2
; AVX2-FALLBACK-NEXT: vpmuludq %xmm2, %xmm0, %xmm2
; AVX2-FALLBACK-NEXT: vpsrlq $32, %xmm0, %xmm4
; AVX2-FALLBACK-NEXT: vpmuludq %xmm3, %xmm4, %xmm4
; AVX2-FALLBACK-NEXT: vpaddq %xmm4, %xmm2, %xmm2
; AVX2-FALLBACK-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX2-FALLBACK-NEXT: vpmuludq %xmm3, %xmm0, %xmm0
; AVX2-FALLBACK-NEXT: vpaddq %xmm1, %xmm2, %xmm1
; AVX2-FALLBACK-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX2-FALLBACK-NEXT: retq
;
; XOP-LABEL: vec128_i64_signed_mem_reg:
; XOP: # %bb.0:
; XOP-NEXT: vmovdqa (%rdi), %xmm1
; XOP-NEXT: vpcomgtq %xmm0, %xmm1, %xmm2
; XOP-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm3
; XOP-NEXT: vpcomltq %xmm0, %xmm1, %xmm4
; XOP-NEXT: vblendvpd %xmm4, %xmm1, %xmm0, %xmm4
; XOP-NEXT: vblendvpd %xmm2, %xmm1, %xmm0, %xmm0
; XOP-NEXT: vpsubq %xmm4, %xmm0, %xmm0
; XOP-NEXT: vpsrlq $1, %xmm0, %xmm0
; XOP-NEXT: vpsrlq $32, %xmm3, %xmm2
; XOP-NEXT: vpmuludq %xmm2, %xmm0, %xmm2
; XOP-NEXT: vpsrlq $32, %xmm0, %xmm4
; XOP-NEXT: vpmuludq %xmm3, %xmm4, %xmm4
; XOP-NEXT: vpaddq %xmm4, %xmm2, %xmm2
; XOP-NEXT: vpsllq $32, %xmm2, %xmm2
; XOP-NEXT: vpmuludq %xmm3, %xmm0, %xmm0
; XOP-NEXT: vpaddq %xmm1, %xmm2, %xmm1
; XOP-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512F-LABEL: vec128_i64_signed_mem_reg:
; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vmovdqa (%rdi), %xmm1
; AVX512F-NEXT: vpcmpgtq %zmm0, %zmm1, %k1
; AVX512F-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1]
; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1}
; AVX512F-NEXT: vpminsq %zmm0, %zmm1, %zmm2
; AVX512F-NEXT: vpmaxsq %zmm0, %zmm1, %zmm0
; AVX512F-NEXT: vpsubq %xmm2, %xmm0, %xmm0
; AVX512F-NEXT: vpsrlq $1, %xmm0, %xmm0
; AVX512F-NEXT: vpsrlq $32, %xmm0, %xmm2
; AVX512F-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
; AVX512F-NEXT: vpsrlq $32, %xmm3, %xmm4
; AVX512F-NEXT: vpmuludq %xmm4, %xmm0, %xmm4
; AVX512F-NEXT: vpaddq %xmm2, %xmm4, %xmm2
; AVX512F-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX512F-NEXT: vpmuludq %xmm3, %xmm0, %xmm0
; AVX512F-NEXT: vpaddq %xmm1, %xmm2, %xmm1
; AVX512F-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: vec128_i64_signed_mem_reg:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm1
; AVX512VL-NEXT: vpcmpgtq %xmm0, %xmm1, %k1
; AVX512VL-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1]
; AVX512VL-NEXT: vmovdqa64 %xmm2, %xmm3 {%k1}
; AVX512VL-NEXT: vpminsq %xmm0, %xmm1, %xmm2
; AVX512VL-NEXT: vpmaxsq %xmm0, %xmm1, %xmm0
; AVX512VL-NEXT: vpsubq %xmm2, %xmm0, %xmm0
; AVX512VL-NEXT: vpsrlq $1, %xmm0, %xmm0
; AVX512VL-NEXT: vpsrlq $32, %xmm3, %xmm2
; AVX512VL-NEXT: vpmuludq %xmm2, %xmm0, %xmm2
; AVX512VL-NEXT: vpsrlq $32, %xmm0, %xmm4
; AVX512VL-NEXT: vpmuludq %xmm3, %xmm4, %xmm4
; AVX512VL-NEXT: vpaddq %xmm4, %xmm2, %xmm2
; AVX512VL-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX512VL-NEXT: vpmuludq %xmm3, %xmm0, %xmm0
; AVX512VL-NEXT: vpaddq %xmm1, %xmm2, %xmm1
; AVX512VL-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i64_signed_mem_reg:
; AVX512BW-FALLBACK: # %bb.0:
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX512BW-FALLBACK-NEXT: vpcmpgtq %zmm0, %zmm1, %k1
; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1]
; AVX512BW-FALLBACK-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1}
; AVX512BW-FALLBACK-NEXT: vpminsq %zmm0, %zmm1, %zmm2
; AVX512BW-FALLBACK-NEXT: vpmaxsq %zmm0, %zmm1, %zmm0
; AVX512BW-FALLBACK-NEXT: vpsubq %xmm2, %xmm0, %xmm0
; AVX512BW-FALLBACK-NEXT: vpsrlq $1, %xmm0, %xmm0
; AVX512BW-FALLBACK-NEXT: vpsrlq $32, %xmm0, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vpsrlq $32, %xmm3, %xmm4
; AVX512BW-FALLBACK-NEXT: vpmuludq %xmm4, %xmm0, %xmm4
; AVX512BW-FALLBACK-NEXT: vpaddq %xmm2, %xmm4, %xmm2
; AVX512BW-FALLBACK-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmuludq %xmm3, %xmm0, %xmm0
; AVX512BW-FALLBACK-NEXT: vpaddq %xmm1, %xmm2, %xmm1
; AVX512BW-FALLBACK-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX512BW-FALLBACK-NEXT: vzeroupper
; AVX512BW-FALLBACK-NEXT: retq
  %a1 = load <2 x i64>, <2 x i64>* %a1_addr
  %t3 = icmp sgt <2 x i64> %a1, %a2 ; signed
  %t4 = select <2 x i1> %t3, <2 x i64> <i64 -1, i64 -1>, <2 x i64> <i64 1, i64 1>
  %t5 = select <2 x i1> %t3, <2 x i64> %a2, <2 x i64> %a1
  %t6 = select <2 x i1> %t3, <2 x i64> %a1, <2 x i64> %a2
  %t7 = sub <2 x i64> %t6, %t5
  %t8 = lshr <2 x i64> %t7, <i64 1, i64 1>
  %t9 = mul nsw <2 x i64> %t8, %t4 ; signed
  %a10 = add nsw <2 x i64> %t9, %a1 ; signed
  ret <2 x i64> %a10
}
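
; None of the pre-AVX512DQ targets above have a full 64-bit vector multiply,
; so the multiply by the [1,1]/[-1,-1] constant is expanded into three 32x32
; pmuludq partial products. A hedged scalar sketch of that expansion in C++
; (the function name is hypothetical):
;
;   #include <cstdint>
;
;   uint64_t Mul64ViaHalves(uint64_t x, uint64_t y) {
;     uint64_t XLo = x & 0xffffffffu, XHi = x >> 32;
;     uint64_t YLo = y & 0xffffffffu, YHi = y >> 32;
;     // XHi * YHi would only feed bits >= 64, so it is dropped entirely.
;     return XLo * YLo + ((XLo * YHi + XHi * YLo) << 32);
;   }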

define <2 x i64> @vec128_i64_signed_reg_mem(<2 x i64> %a1, <2 x i64>* %a2_addr) nounwind {
; SSE2-LABEL: vec128_i64_signed_reg_mem:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm1
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: pxor %xmm4, %xmm5
; SSE2-NEXT: pxor %xmm1, %xmm4
; SSE2-NEXT: movdqa %xmm5, %xmm2
; SSE2-NEXT: pcmpgtd %xmm4, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
; SSE2-NEXT: movdqa %xmm5, %xmm6
; SSE2-NEXT: pcmpeqd %xmm4, %xmm6
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
; SSE2-NEXT: pand %xmm6, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE2-NEXT: por %xmm3, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [1,1]
; SSE2-NEXT: por %xmm2, %xmm3
; SSE2-NEXT: pcmpgtd %xmm5, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
; SSE2-NEXT: pand %xmm6, %xmm5
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
; SSE2-NEXT: por %xmm5, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: pand %xmm4, %xmm5
; SSE2-NEXT: pandn %xmm1, %xmm4
; SSE2-NEXT: por %xmm5, %xmm4
; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: pand %xmm2, %xmm5
; SSE2-NEXT: pandn %xmm1, %xmm2
; SSE2-NEXT: por %xmm5, %xmm2
; SSE2-NEXT: psubq %xmm4, %xmm2
; SSE2-NEXT: psrlq $1, %xmm2
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: psrlq $32, %xmm4
; SSE2-NEXT: pmuludq %xmm2, %xmm4
; SSE2-NEXT: movdqa %xmm2, %xmm1
; SSE2-NEXT: psrlq $32, %xmm1
; SSE2-NEXT: pmuludq %xmm3, %xmm1
; SSE2-NEXT: paddq %xmm4, %xmm1
; SSE2-NEXT: psllq $32, %xmm1
; SSE2-NEXT: pmuludq %xmm3, %xmm2
; SSE2-NEXT: paddq %xmm0, %xmm1
; SSE2-NEXT: paddq %xmm2, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: vec128_i64_signed_reg_mem:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: movdqa (%rdi), %xmm3
; SSE41-NEXT: movdqa {{.*#+}} xmm5 = [2147483648,2147483648]
; SSE41-NEXT: movdqa %xmm0, %xmm6
; SSE41-NEXT: pxor %xmm5, %xmm6
; SSE41-NEXT: pxor %xmm3, %xmm5
; SSE41-NEXT: movdqa %xmm6, %xmm2
; SSE41-NEXT: pcmpgtd %xmm5, %xmm2
; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
; SSE41-NEXT: movdqa %xmm6, %xmm0
; SSE41-NEXT: pcmpeqd %xmm5, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
; SSE41-NEXT: pand %xmm4, %xmm7
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE41-NEXT: por %xmm7, %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm4 = [1,1]
; SSE41-NEXT: por %xmm2, %xmm4
; SSE41-NEXT: pcmpgtd %xmm6, %xmm5
; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
; SSE41-NEXT: pand %xmm6, %xmm0
; SSE41-NEXT: por %xmm5, %xmm0
; SSE41-NEXT: movdqa %xmm3, %xmm5
; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm5
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm3
; SSE41-NEXT: psubq %xmm5, %xmm3
; SSE41-NEXT: psrlq $1, %xmm3
; SSE41-NEXT: movdqa %xmm4, %xmm2
; SSE41-NEXT: psrlq $32, %xmm2
; SSE41-NEXT: pmuludq %xmm3, %xmm2
; SSE41-NEXT: movdqa %xmm3, %xmm0
; SSE41-NEXT: psrlq $32, %xmm0
; SSE41-NEXT: pmuludq %xmm4, %xmm0
; SSE41-NEXT: paddq %xmm2, %xmm0
; SSE41-NEXT: psllq $32, %xmm0
; SSE41-NEXT: pmuludq %xmm4, %xmm3
; SSE41-NEXT: paddq %xmm1, %xmm0
; SSE41-NEXT: paddq %xmm3, %xmm0
; SSE41-NEXT: retq
;
; AVX1-FALLBACK-LABEL: vec128_i64_signed_reg_mem:
; AVX1-FALLBACK: # %bb.0:
; AVX1-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX1-FALLBACK-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; AVX1-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm3
; AVX1-FALLBACK-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm4
; AVX1-FALLBACK-NEXT: vblendvpd %xmm4, %xmm0, %xmm1, %xmm4
; AVX1-FALLBACK-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpsubq %xmm4, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpsrlq $1, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpsrlq $32, %xmm3, %xmm2
; AVX1-FALLBACK-NEXT: vpmuludq %xmm2, %xmm1, %xmm2
; AVX1-FALLBACK-NEXT: vpsrlq $32, %xmm1, %xmm4
; AVX1-FALLBACK-NEXT: vpmuludq %xmm3, %xmm4, %xmm4
; AVX1-FALLBACK-NEXT: vpaddq %xmm4, %xmm2, %xmm2
; AVX1-FALLBACK-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX1-FALLBACK-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX1-FALLBACK-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX1-FALLBACK-NEXT: retq
;
; AVX2-FALLBACK-LABEL: vec128_i64_signed_reg_mem:
; AVX2-FALLBACK: # %bb.0:
; AVX2-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX2-FALLBACK-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; AVX2-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm3
; AVX2-FALLBACK-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm4
; AVX2-FALLBACK-NEXT: vblendvpd %xmm4, %xmm0, %xmm1, %xmm4
; AVX2-FALLBACK-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpsubq %xmm4, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpsrlq $1, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpsrlq $32, %xmm3, %xmm2
; AVX2-FALLBACK-NEXT: vpmuludq %xmm2, %xmm1, %xmm2
; AVX2-FALLBACK-NEXT: vpsrlq $32, %xmm1, %xmm4
; AVX2-FALLBACK-NEXT: vpmuludq %xmm3, %xmm4, %xmm4
; AVX2-FALLBACK-NEXT: vpaddq %xmm4, %xmm2, %xmm2
; AVX2-FALLBACK-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX2-FALLBACK-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX2-FALLBACK-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX2-FALLBACK-NEXT: retq
;
; XOP-LABEL: vec128_i64_signed_reg_mem:
; XOP: # %bb.0:
; XOP-NEXT: vmovdqa (%rdi), %xmm1
; XOP-NEXT: vpcomgtq %xmm1, %xmm0, %xmm2
; XOP-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm3
; XOP-NEXT: vpcomltq %xmm1, %xmm0, %xmm4
; XOP-NEXT: vblendvpd %xmm4, %xmm0, %xmm1, %xmm4
; XOP-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm1
; XOP-NEXT: vpsubq %xmm4, %xmm1, %xmm1
; XOP-NEXT: vpsrlq $1, %xmm1, %xmm1
; XOP-NEXT: vpsrlq $32, %xmm3, %xmm2
; XOP-NEXT: vpmuludq %xmm2, %xmm1, %xmm2
; XOP-NEXT: vpsrlq $32, %xmm1, %xmm4
; XOP-NEXT: vpmuludq %xmm3, %xmm4, %xmm4
; XOP-NEXT: vpaddq %xmm4, %xmm2, %xmm2
; XOP-NEXT: vpsllq $32, %xmm2, %xmm2
; XOP-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; XOP-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; XOP-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; XOP-NEXT: retq
;
; AVX512F-LABEL: vec128_i64_signed_reg_mem:
; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vmovdqa (%rdi), %xmm1
; AVX512F-NEXT: vpcmpgtq %zmm1, %zmm0, %k1
; AVX512F-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1]
; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1}
; AVX512F-NEXT: vpminsq %zmm1, %zmm0, %zmm2
; AVX512F-NEXT: vpmaxsq %zmm1, %zmm0, %zmm1
; AVX512F-NEXT: vpsubq %xmm2, %xmm1, %xmm1
; AVX512F-NEXT: vpsrlq $1, %xmm1, %xmm1
; AVX512F-NEXT: vpsrlq $32, %xmm1, %xmm2
; AVX512F-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
; AVX512F-NEXT: vpsrlq $32, %xmm3, %xmm4
; AVX512F-NEXT: vpmuludq %xmm4, %xmm1, %xmm4
; AVX512F-NEXT: vpaddq %xmm2, %xmm4, %xmm2
; AVX512F-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX512F-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; AVX512F-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX512F-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: vec128_i64_signed_reg_mem:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm1
; AVX512VL-NEXT: vpcmpgtq %xmm1, %xmm0, %k1
; AVX512VL-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1]
; AVX512VL-NEXT: vmovdqa64 %xmm2, %xmm3 {%k1}
; AVX512VL-NEXT: vpminsq %xmm1, %xmm0, %xmm2
; AVX512VL-NEXT: vpmaxsq %xmm1, %xmm0, %xmm1
; AVX512VL-NEXT: vpsubq %xmm2, %xmm1, %xmm1
; AVX512VL-NEXT: vpsrlq $1, %xmm1, %xmm1
; AVX512VL-NEXT: vpsrlq $32, %xmm3, %xmm2
; AVX512VL-NEXT: vpmuludq %xmm2, %xmm1, %xmm2
; AVX512VL-NEXT: vpsrlq $32, %xmm1, %xmm4
; AVX512VL-NEXT: vpmuludq %xmm3, %xmm4, %xmm4
; AVX512VL-NEXT: vpaddq %xmm4, %xmm2, %xmm2
; AVX512VL-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX512VL-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; AVX512VL-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX512VL-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i64_signed_reg_mem:
; AVX512BW-FALLBACK: # %bb.0:
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX512BW-FALLBACK-NEXT: vpcmpgtq %zmm1, %zmm0, %k1
; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1]
; AVX512BW-FALLBACK-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1}
; AVX512BW-FALLBACK-NEXT: vpminsq %zmm1, %zmm0, %zmm2
; AVX512BW-FALLBACK-NEXT: vpmaxsq %zmm1, %zmm0, %zmm1
; AVX512BW-FALLBACK-NEXT: vpsubq %xmm2, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsrlq $1, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsrlq $32, %xmm1, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vpsrlq $32, %xmm3, %xmm4
; AVX512BW-FALLBACK-NEXT: vpmuludq %xmm4, %xmm1, %xmm4
; AVX512BW-FALLBACK-NEXT: vpaddq %xmm2, %xmm4, %xmm2
; AVX512BW-FALLBACK-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX512BW-FALLBACK-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX512BW-FALLBACK-NEXT: vzeroupper
; AVX512BW-FALLBACK-NEXT: retq
  %a2 = load <2 x i64>, <2 x i64>* %a2_addr
  %t3 = icmp sgt <2 x i64> %a1, %a2 ; signed
  %t4 = select <2 x i1> %t3, <2 x i64> <i64 -1, i64 -1>, <2 x i64> <i64 1, i64 1>
  %t5 = select <2 x i1> %t3, <2 x i64> %a2, <2 x i64> %a1
  %t6 = select <2 x i1> %t3, <2 x i64> %a1, <2 x i64> %a2
  %t7 = sub <2 x i64> %t6, %t5
  %t8 = lshr <2 x i64> %t7, <i64 1, i64 1>
  %t9 = mul nsw <2 x i64> %t8, %t4 ; signed
  %a10 = add nsw <2 x i64> %t9, %a1 ; signed
  ret <2 x i64> %a10
}

define <2 x i64> @vec128_i64_signed_mem_mem(<2 x i64>* %a1_addr, <2 x i64>* %a2_addr) nounwind {
; SSE2-LABEL: vec128_i64_signed_mem_mem:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm1
; SSE2-NEXT: movdqa (%rsi), %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648]
; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: pxor %xmm4, %xmm5
; SSE2-NEXT: pxor %xmm1, %xmm4
; SSE2-NEXT: movdqa %xmm4, %xmm2
; SSE2-NEXT: pcmpgtd %xmm5, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
; SSE2-NEXT: movdqa %xmm4, %xmm6
; SSE2-NEXT: pcmpeqd %xmm5, %xmm6
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
; SSE2-NEXT: pand %xmm6, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE2-NEXT: por %xmm3, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [1,1]
; SSE2-NEXT: por %xmm2, %xmm3
; SSE2-NEXT: pcmpgtd %xmm4, %xmm5
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm5[0,0,2,2]
; SSE2-NEXT: pand %xmm6, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
; SSE2-NEXT: por %xmm4, %xmm5
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: pand %xmm5, %xmm4
; SSE2-NEXT: pandn %xmm0, %xmm5
; SSE2-NEXT: por %xmm4, %xmm5
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: pand %xmm2, %xmm4
; SSE2-NEXT: pandn %xmm0, %xmm2
; SSE2-NEXT: por %xmm4, %xmm2
; SSE2-NEXT: psubq %xmm5, %xmm2
; SSE2-NEXT: psrlq $1, %xmm2
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: psrlq $32, %xmm4
; SSE2-NEXT: pmuludq %xmm2, %xmm4
; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: psrlq $32, %xmm0
; SSE2-NEXT: pmuludq %xmm3, %xmm0
; SSE2-NEXT: paddq %xmm4, %xmm0
; SSE2-NEXT: psllq $32, %xmm0
; SSE2-NEXT: pmuludq %xmm3, %xmm2
; SSE2-NEXT: paddq %xmm1, %xmm0
; SSE2-NEXT: paddq %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: vec128_i64_signed_mem_mem:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa (%rdi), %xmm3
; SSE41-NEXT: movdqa (%rsi), %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648]
; SSE41-NEXT: movdqa %xmm2, %xmm5
; SSE41-NEXT: pxor %xmm0, %xmm5
; SSE41-NEXT: pxor %xmm3, %xmm0
; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: pcmpgtd %xmm5, %xmm1
; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm1[0,0,2,2]
; SSE41-NEXT: movdqa %xmm0, %xmm6
; SSE41-NEXT: pcmpeqd %xmm5, %xmm6
; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm6[1,1,3,3]
; SSE41-NEXT: pand %xmm4, %xmm7
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE41-NEXT: por %xmm7, %xmm1
; SSE41-NEXT: movdqa {{.*#+}} xmm4 = [1,1]
; SSE41-NEXT: por %xmm1, %xmm4
; SSE41-NEXT: pcmpgtd %xmm0, %xmm5
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm5[0,0,2,2]
; SSE41-NEXT: pand %xmm6, %xmm0
; SSE41-NEXT: por %xmm5, %xmm0
; SSE41-NEXT: movdqa %xmm2, %xmm5
; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm5
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm2
; SSE41-NEXT: psubq %xmm5, %xmm2
; SSE41-NEXT: psrlq $1, %xmm2
; SSE41-NEXT: movdqa %xmm4, %xmm1
; SSE41-NEXT: psrlq $32, %xmm1
; SSE41-NEXT: pmuludq %xmm2, %xmm1
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: psrlq $32, %xmm0
; SSE41-NEXT: pmuludq %xmm4, %xmm0
; SSE41-NEXT: paddq %xmm1, %xmm0
; SSE41-NEXT: psllq $32, %xmm0
; SSE41-NEXT: pmuludq %xmm4, %xmm2
; SSE41-NEXT: paddq %xmm3, %xmm0
; SSE41-NEXT: paddq %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX1-FALLBACK-LABEL: vec128_i64_signed_mem_mem:
; AVX1-FALLBACK: # %bb.0:
; AVX1-FALLBACK-NEXT: vmovdqa (%rdi), %xmm0
; AVX1-FALLBACK-NEXT: vmovdqa (%rsi), %xmm1
; AVX1-FALLBACK-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; AVX1-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm3
; AVX1-FALLBACK-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm4
; AVX1-FALLBACK-NEXT: vblendvpd %xmm4, %xmm0, %xmm1, %xmm4
; AVX1-FALLBACK-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpsubq %xmm4, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpsrlq $1, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpsrlq $32, %xmm3, %xmm2
; AVX1-FALLBACK-NEXT: vpmuludq %xmm2, %xmm1, %xmm2
; AVX1-FALLBACK-NEXT: vpsrlq $32, %xmm1, %xmm4
; AVX1-FALLBACK-NEXT: vpmuludq %xmm3, %xmm4, %xmm4
; AVX1-FALLBACK-NEXT: vpaddq %xmm4, %xmm2, %xmm2
; AVX1-FALLBACK-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX1-FALLBACK-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX1-FALLBACK-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX1-FALLBACK-NEXT: retq
;
; AVX2-FALLBACK-LABEL: vec128_i64_signed_mem_mem:
; AVX2-FALLBACK: # %bb.0:
; AVX2-FALLBACK-NEXT: vmovdqa (%rdi), %xmm0
; AVX2-FALLBACK-NEXT: vmovdqa (%rsi), %xmm1
; AVX2-FALLBACK-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; AVX2-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm3
; AVX2-FALLBACK-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm4
; AVX2-FALLBACK-NEXT: vblendvpd %xmm4, %xmm0, %xmm1, %xmm4
; AVX2-FALLBACK-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpsubq %xmm4, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpsrlq $1, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpsrlq $32, %xmm3, %xmm2
; AVX2-FALLBACK-NEXT: vpmuludq %xmm2, %xmm1, %xmm2
; AVX2-FALLBACK-NEXT: vpsrlq $32, %xmm1, %xmm4
; AVX2-FALLBACK-NEXT: vpmuludq %xmm3, %xmm4, %xmm4
; AVX2-FALLBACK-NEXT: vpaddq %xmm4, %xmm2, %xmm2
; AVX2-FALLBACK-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX2-FALLBACK-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX2-FALLBACK-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX2-FALLBACK-NEXT: retq
;
; XOP-LABEL: vec128_i64_signed_mem_mem:
; XOP: # %bb.0:
; XOP-NEXT: vmovdqa (%rdi), %xmm0
; XOP-NEXT: vmovdqa (%rsi), %xmm1
; XOP-NEXT: vpcomgtq %xmm1, %xmm0, %xmm2
; XOP-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm3
; XOP-NEXT: vpcomltq %xmm1, %xmm0, %xmm4
; XOP-NEXT: vblendvpd %xmm4, %xmm0, %xmm1, %xmm4
; XOP-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm1
; XOP-NEXT: vpsubq %xmm4, %xmm1, %xmm1
; XOP-NEXT: vpsrlq $1, %xmm1, %xmm1
; XOP-NEXT: vpsrlq $32, %xmm3, %xmm2
; XOP-NEXT: vpmuludq %xmm2, %xmm1, %xmm2
; XOP-NEXT: vpsrlq $32, %xmm1, %xmm4
; XOP-NEXT: vpmuludq %xmm3, %xmm4, %xmm4
; XOP-NEXT: vpaddq %xmm4, %xmm2, %xmm2
; XOP-NEXT: vpsllq $32, %xmm2, %xmm2
; XOP-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; XOP-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; XOP-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; XOP-NEXT: retq
;
; AVX512F-LABEL: vec128_i64_signed_mem_mem:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vmovdqa (%rsi), %xmm1
; AVX512F-NEXT: vpcmpgtq %zmm1, %zmm0, %k1
; AVX512F-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1]
; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1}
; AVX512F-NEXT: vpminsq %zmm1, %zmm0, %zmm2
; AVX512F-NEXT: vpmaxsq %zmm1, %zmm0, %zmm1
; AVX512F-NEXT: vpsubq %xmm2, %xmm1, %xmm1
; AVX512F-NEXT: vpsrlq $1, %xmm1, %xmm1
; AVX512F-NEXT: vpsrlq $32, %xmm1, %xmm2
; AVX512F-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
; AVX512F-NEXT: vpsrlq $32, %xmm3, %xmm4
; AVX512F-NEXT: vpmuludq %xmm4, %xmm1, %xmm4
; AVX512F-NEXT: vpaddq %xmm2, %xmm4, %xmm2
; AVX512F-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX512F-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; AVX512F-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX512F-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: vec128_i64_signed_mem_mem:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqa (%rdi), %xmm0
; AVX512VL-NEXT: vmovdqa (%rsi), %xmm1
; AVX512VL-NEXT: vpcmpgtq %xmm1, %xmm0, %k1
; AVX512VL-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1]
; AVX512VL-NEXT: vmovdqa64 %xmm2, %xmm3 {%k1}
; AVX512VL-NEXT: vpminsq %xmm1, %xmm0, %xmm2
; AVX512VL-NEXT: vpmaxsq %xmm1, %xmm0, %xmm1
; AVX512VL-NEXT: vpsubq %xmm2, %xmm1, %xmm1
; AVX512VL-NEXT: vpsrlq $1, %xmm1, %xmm1
; AVX512VL-NEXT: vpsrlq $32, %xmm3, %xmm2
; AVX512VL-NEXT: vpmuludq %xmm2, %xmm1, %xmm2
; AVX512VL-NEXT: vpsrlq $32, %xmm1, %xmm4
; AVX512VL-NEXT: vpmuludq %xmm3, %xmm4, %xmm4
; AVX512VL-NEXT: vpaddq %xmm4, %xmm2, %xmm2
; AVX512VL-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX512VL-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; AVX512VL-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX512VL-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i64_signed_mem_mem:
; AVX512BW-FALLBACK: # %bb.0:
; AVX512BW-FALLBACK-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-FALLBACK-NEXT: vmovdqa (%rsi), %xmm1
; AVX512BW-FALLBACK-NEXT: vpcmpgtq %zmm1, %zmm0, %k1
; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1]
; AVX512BW-FALLBACK-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1}
; AVX512BW-FALLBACK-NEXT: vpminsq %zmm1, %zmm0, %zmm2
; AVX512BW-FALLBACK-NEXT: vpmaxsq %zmm1, %zmm0, %zmm1
; AVX512BW-FALLBACK-NEXT: vpsubq %xmm2, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsrlq $1, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsrlq $32, %xmm1, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vpsrlq $32, %xmm3, %xmm4
; AVX512BW-FALLBACK-NEXT: vpmuludq %xmm4, %xmm1, %xmm4
; AVX512BW-FALLBACK-NEXT: vpaddq %xmm2, %xmm4, %xmm2
; AVX512BW-FALLBACK-NEXT: vpsllq $32, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmuludq %xmm3, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX512BW-FALLBACK-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX512BW-FALLBACK-NEXT: vzeroupper
; AVX512BW-FALLBACK-NEXT: retq
  %a1 = load <2 x i64>, <2 x i64>* %a1_addr
  %a2 = load <2 x i64>, <2 x i64>* %a2_addr
  %t3 = icmp sgt <2 x i64> %a1, %a2 ; signed
  %t4 = select <2 x i1> %t3, <2 x i64> <i64 -1, i64 -1>, <2 x i64> <i64 1, i64 1>
  %t5 = select <2 x i1> %t3, <2 x i64> %a2, <2 x i64> %a1
  %t6 = select <2 x i1> %t3, <2 x i64> %a1, <2 x i64> %a2
  %t7 = sub <2 x i64> %t6, %t5
  %t8 = lshr <2 x i64> %t7, <i64 1, i64 1>
  %t9 = mul nsw <2 x i64> %t8, %t4 ; signed
  %a10 = add nsw <2 x i64> %t9, %a1 ; signed
  ret <2 x i64> %a10
}

; ---------------------------------------------------------------------------- ;
; 16-bit width. 128 / 16 = 8 elts.
; ---------------------------------------------------------------------------- ;

; Values come from regs

define <8 x i16> @vec128_i16_signed_reg_reg(<8 x i16> %a1, <8 x i16> %a2) nounwind {
; SSE-LABEL: vec128_i16_signed_reg_reg:
; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: pcmpgtw %xmm1, %xmm2
; SSE-NEXT: por {{.*}}(%rip), %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: pminsw %xmm1, %xmm3
; SSE-NEXT: pmaxsw %xmm0, %xmm1
; SSE-NEXT: psubw %xmm3, %xmm1
; SSE-NEXT: psrlw $1, %xmm1
; SSE-NEXT: pmullw %xmm1, %xmm2
; SSE-NEXT: paddw %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX1-FALLBACK-LABEL: vec128_i16_signed_reg_reg:
; AVX1-FALLBACK: # %bb.0:
; AVX1-FALLBACK-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm2
; AVX1-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; AVX1-FALLBACK-NEXT: vpminsw %xmm1, %xmm0, %xmm3
; AVX1-FALLBACK-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; AVX1-FALLBACK-NEXT: vpsubw %xmm3, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX1-FALLBACK-NEXT: retq
;
; AVX2-FALLBACK-LABEL: vec128_i16_signed_reg_reg:
; AVX2-FALLBACK: # %bb.0:
; AVX2-FALLBACK-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm2
; AVX2-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; AVX2-FALLBACK-NEXT: vpminsw %xmm1, %xmm0, %xmm3
; AVX2-FALLBACK-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; AVX2-FALLBACK-NEXT: vpsubw %xmm3, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX2-FALLBACK-NEXT: retq
;
; XOP-LABEL: vec128_i16_signed_reg_reg:
; XOP: # %bb.0:
; XOP-NEXT: vpcomgtw %xmm1, %xmm0, %xmm2
; XOP-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; XOP-NEXT: vpminsw %xmm1, %xmm0, %xmm3
; XOP-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; XOP-NEXT: vpsubw %xmm3, %xmm1, %xmm1
; XOP-NEXT: vpsrlw $1, %xmm1, %xmm1
; XOP-NEXT: vpmacsww %xmm0, %xmm2, %xmm1, %xmm0
; XOP-NEXT: retq
;
; AVX512F-LABEL: vec128_i16_signed_reg_reg:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm2
; AVX512F-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; AVX512F-NEXT: vpminsw %xmm1, %xmm0, %xmm3
; AVX512F-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; AVX512F-NEXT: vpsubw %xmm3, %xmm1, %xmm1
; AVX512F-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512F-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; AVX512F-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: retq
;
; AVX512VL-FALLBACK-LABEL: vec128_i16_signed_reg_reg:
; AVX512VL-FALLBACK: # %bb.0:
; AVX512VL-FALLBACK-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm2
; AVX512VL-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; AVX512VL-FALLBACK-NEXT: vpminsw %xmm1, %xmm0, %xmm3
; AVX512VL-FALLBACK-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; AVX512VL-FALLBACK-NEXT: vpsubw %xmm3, %xmm1, %xmm1
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512VL-FALLBACK-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; AVX512VL-FALLBACK-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512VL-FALLBACK-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i16_signed_reg_reg:
; AVX512BW-FALLBACK: # %bb.0:
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-FALLBACK-NEXT: vpcmpgtw %zmm1, %zmm0, %k1
; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1]
; AVX512BW-FALLBACK-NEXT: vmovdqu16 %zmm2, %zmm3 {%k1}
; AVX512BW-FALLBACK-NEXT: vpminsw %xmm1, %xmm0, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsubw %xmm2, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpmullw %xmm3, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512BW-FALLBACK-NEXT: vzeroupper
; AVX512BW-FALLBACK-NEXT: retq
;
; AVX512VLBW-LABEL: vec128_i16_signed_reg_reg:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpcmpgtw %xmm1, %xmm0, %k1
; AVX512VLBW-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512VLBW-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1]
; AVX512VLBW-NEXT: vmovdqu16 %xmm2, %xmm3 {%k1}
; AVX512VLBW-NEXT: vpminsw %xmm1, %xmm0, %xmm2
; AVX512VLBW-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; AVX512VLBW-NEXT: vpsubw %xmm2, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpmullw %xmm3, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512VLBW-NEXT: retq
  %t3 = icmp sgt <8 x i16> %a1, %a2 ; signed
  %t4 = select <8 x i1> %t3, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %t5 = select <8 x i1> %t3, <8 x i16> %a2, <8 x i16> %a1
  %t6 = select <8 x i1> %t3, <8 x i16> %a1, <8 x i16> %a2
  %t7 = sub <8 x i16> %t6, %t5
  %t8 = lshr <8 x i16> %t7, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %t9 = mul nsw <8 x i16> %t8, %t4 ; signed
  %a10 = add nsw <8 x i16> %t9, %a1 ; signed
  ret <8 x i16> %a10
}
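
; The i16 versions above need none of the 64-bit tricks: pminsw, pmaxsw and
; pmullw are native, so each block lowers the pattern directly (and XOP folds
; the final multiply and add into one vpmacsww). A hedged C++ intrinsics
; sketch mirroring the SSE lowering above (the function name is hypothetical):
;
;   #include <emmintrin.h>
;
;   __m128i MidpointI16(__m128i a1, __m128i a2) {
;     __m128i Gt   = _mm_cmpgt_epi16(a1, a2);              // pcmpgtw
;     __m128i Mul  = _mm_or_si128(Gt, _mm_set1_epi16(1));  // -1 or 1 per lane
;     __m128i Lo   = _mm_min_epi16(a1, a2);                // pminsw
;     __m128i Hi   = _mm_max_epi16(a1, a2);                // pmaxsw
;     __m128i Half = _mm_srli_epi16(_mm_sub_epi16(Hi, Lo), 1);
;     return _mm_add_epi16(_mm_mullo_epi16(Half, Mul), a1); // pmullw, paddw
;   }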

define <8 x i16> @vec128_i16_unsigned_reg_reg(<8 x i16> %a1, <8 x i16> %a2) nounwind {
; SSE2-LABEL: vec128_i16_unsigned_reg_reg:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [32768,32768,32768,32768,32768,32768,32768,32768]
; SSE2-NEXT: pxor %xmm3, %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pxor %xmm3, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm4
; SSE2-NEXT: pcmpgtw %xmm1, %xmm4
; SSE2-NEXT: por {{.*}}(%rip), %xmm4
; SSE2-NEXT: movdqa %xmm2, %xmm5
; SSE2-NEXT: pminsw %xmm1, %xmm5
; SSE2-NEXT: pxor %xmm3, %xmm5
; SSE2-NEXT: pmaxsw %xmm1, %xmm2
; SSE2-NEXT: pxor %xmm3, %xmm2
; SSE2-NEXT: psubw %xmm5, %xmm2
; SSE2-NEXT: psrlw $1, %xmm2
; SSE2-NEXT: pmullw %xmm4, %xmm2
; SSE2-NEXT: paddw %xmm0, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: vec128_i16_unsigned_reg_reg:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: pminuw %xmm1, %xmm2
; SSE41-NEXT: movdqa %xmm0, %xmm3
; SSE41-NEXT: pcmpeqw %xmm2, %xmm3
; SSE41-NEXT: pcmpeqd %xmm4, %xmm4
; SSE41-NEXT: pxor %xmm3, %xmm4
; SSE41-NEXT: por {{.*}}(%rip), %xmm4
; SSE41-NEXT: pmaxuw %xmm0, %xmm1
; SSE41-NEXT: psubw %xmm2, %xmm1
; SSE41-NEXT: psrlw $1, %xmm1
; SSE41-NEXT: pmullw %xmm1, %xmm4
; SSE41-NEXT: paddw %xmm4, %xmm0
; SSE41-NEXT: retq
;
; AVX1-FALLBACK-LABEL: vec128_i16_unsigned_reg_reg:
; AVX1-FALLBACK: # %bb.0:
; AVX1-FALLBACK-NEXT: vpminuw %xmm1, %xmm0, %xmm2
; AVX1-FALLBACK-NEXT: vpcmpeqw %xmm2, %xmm0, %xmm3
; AVX1-FALLBACK-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
; AVX1-FALLBACK-NEXT: vpxor %xmm4, %xmm3, %xmm3
; AVX1-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm3, %xmm3
; AVX1-FALLBACK-NEXT: vpmaxuw %xmm1, %xmm0, %xmm1
; AVX1-FALLBACK-NEXT: vpsubw %xmm2, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpmullw %xmm3, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX1-FALLBACK-NEXT: retq
;
; AVX2-FALLBACK-LABEL: vec128_i16_unsigned_reg_reg:
; AVX2-FALLBACK: # %bb.0:
; AVX2-FALLBACK-NEXT: vpminuw %xmm1, %xmm0, %xmm2
; AVX2-FALLBACK-NEXT: vpcmpeqw %xmm2, %xmm0, %xmm3
; AVX2-FALLBACK-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
; AVX2-FALLBACK-NEXT: vpxor %xmm4, %xmm3, %xmm3
; AVX2-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm3, %xmm3
; AVX2-FALLBACK-NEXT: vpmaxuw %xmm1, %xmm0, %xmm1
; AVX2-FALLBACK-NEXT: vpsubw %xmm2, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpmullw %xmm3, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX2-FALLBACK-NEXT: retq
;
; XOP-LABEL: vec128_i16_unsigned_reg_reg:
; XOP: # %bb.0:
; XOP-NEXT: vpcomgtuw %xmm1, %xmm0, %xmm2
; XOP-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; XOP-NEXT: vpminuw %xmm1, %xmm0, %xmm3
; XOP-NEXT: vpmaxuw %xmm1, %xmm0, %xmm1
; XOP-NEXT: vpsubw %xmm3, %xmm1, %xmm1
; XOP-NEXT: vpsrlw $1, %xmm1, %xmm1
; XOP-NEXT: vpmacsww %xmm0, %xmm2, %xmm1, %xmm0
; XOP-NEXT: retq
;
; AVX512F-LABEL: vec128_i16_unsigned_reg_reg:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpminuw %xmm1, %xmm0, %xmm2
; AVX512F-NEXT: vpcmpeqw %xmm2, %xmm0, %xmm3
; AVX512F-NEXT: vpternlogq $15, %zmm3, %zmm3, %zmm3
; AVX512F-NEXT: vpor {{.*}}(%rip), %xmm3, %xmm3
; AVX512F-NEXT: vpmaxuw %xmm1, %xmm0, %xmm1
; AVX512F-NEXT: vpsubw %xmm2, %xmm1, %xmm1
; AVX512F-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512F-NEXT: vpmullw %xmm3, %xmm1, %xmm1
; AVX512F-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-FALLBACK-LABEL: vec128_i16_unsigned_reg_reg:
; AVX512VL-FALLBACK: # %bb.0:
; AVX512VL-FALLBACK-NEXT: vpminuw %xmm1, %xmm0, %xmm2
; AVX512VL-FALLBACK-NEXT: vpcmpeqw %xmm2, %xmm0, %xmm3
; AVX512VL-FALLBACK-NEXT: vpternlogq $15, %xmm3, %xmm3, %xmm3
; AVX512VL-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm3, %xmm3
; AVX512VL-FALLBACK-NEXT: vpmaxuw %xmm1, %xmm0, %xmm1
; AVX512VL-FALLBACK-NEXT: vpsubw %xmm2, %xmm1, %xmm1
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512VL-FALLBACK-NEXT: vpmullw %xmm3, %xmm1, %xmm1
; AVX512VL-FALLBACK-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512VL-FALLBACK-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i16_unsigned_reg_reg:
; AVX512BW-FALLBACK: # %bb.0:
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-FALLBACK-NEXT: vpcmpnleuw %zmm1, %zmm0, %k1
; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1]
; AVX512BW-FALLBACK-NEXT: vmovdqu16 %zmm2, %zmm3 {%k1}
; AVX512BW-FALLBACK-NEXT: vpminuw %xmm1, %xmm0, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmaxuw %xmm1, %xmm0, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsubw %xmm2, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpmullw %xmm3, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512BW-FALLBACK-NEXT: vzeroupper
; AVX512BW-FALLBACK-NEXT: retq
;
; AVX512VLBW-LABEL: vec128_i16_unsigned_reg_reg:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpcmpnleuw %xmm1, %xmm0, %k1
; AVX512VLBW-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512VLBW-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1]
; AVX512VLBW-NEXT: vmovdqu16 %xmm2, %xmm3 {%k1}
; AVX512VLBW-NEXT: vpminuw %xmm1, %xmm0, %xmm2
; AVX512VLBW-NEXT: vpmaxuw %xmm1, %xmm0, %xmm1
; AVX512VLBW-NEXT: vpsubw %xmm2, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpmullw %xmm3, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512VLBW-NEXT: retq
  %t3 = icmp ugt <8 x i16> %a1, %a2
  %t4 = select <8 x i1> %t3, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %t5 = select <8 x i1> %t3, <8 x i16> %a2, <8 x i16> %a1
  %t6 = select <8 x i1> %t3, <8 x i16> %a1, <8 x i16> %a2
  %t7 = sub <8 x i16> %t6, %t5
  %t8 = lshr <8 x i16> %t7, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %t9 = mul <8 x i16> %t8, %t4
  %a10 = add <8 x i16> %t9, %a1
  ret <8 x i16> %a10
}
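
; Note on the unsigned blocks above: x86 has no unsigned word compare, so the
; ugt mask is synthesized. SSE2 flips the sign bits (pxor with 32768) and
; reuses the signed pcmpgtw/pminsw/pmaxsw; SSE4.1 and AVX instead compute
; a1 == pminuw(a1, a2) (i.e. a1 ule a2) and invert that equality to get ugt.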

; Values are loaded. Only check signed case.

define <8 x i16> @vec128_i16_signed_mem_reg(<8 x i16>* %a1_addr, <8 x i16> %a2) nounwind {
; SSE-LABEL: vec128_i16_signed_mem_reg:
; SSE: # %bb.0:
; SSE-NEXT: movdqa (%rdi), %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: pcmpgtw %xmm0, %xmm2
; SSE-NEXT: por {{.*}}(%rip), %xmm2
; SSE-NEXT: movdqa %xmm1, %xmm3
; SSE-NEXT: pminsw %xmm0, %xmm3
; SSE-NEXT: pmaxsw %xmm1, %xmm0
; SSE-NEXT: psubw %xmm3, %xmm0
; SSE-NEXT: psrlw $1, %xmm0
; SSE-NEXT: pmullw %xmm2, %xmm0
; SSE-NEXT: paddw %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX1-FALLBACK-LABEL: vec128_i16_signed_mem_reg:
; AVX1-FALLBACK: # %bb.0:
; AVX1-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX1-FALLBACK-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm2
; AVX1-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; AVX1-FALLBACK-NEXT: vpminsw %xmm0, %xmm1, %xmm3
; AVX1-FALLBACK-NEXT: vpmaxsw %xmm0, %xmm1, %xmm0
; AVX1-FALLBACK-NEXT: vpsubw %xmm3, %xmm0, %xmm0
; AVX1-FALLBACK-NEXT: vpsrlw $1, %xmm0, %xmm0
; AVX1-FALLBACK-NEXT: vpmullw %xmm2, %xmm0, %xmm0
; AVX1-FALLBACK-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; AVX1-FALLBACK-NEXT: retq
;
; AVX2-FALLBACK-LABEL: vec128_i16_signed_mem_reg:
; AVX2-FALLBACK: # %bb.0:
; AVX2-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX2-FALLBACK-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm2
; AVX2-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; AVX2-FALLBACK-NEXT: vpminsw %xmm0, %xmm1, %xmm3
; AVX2-FALLBACK-NEXT: vpmaxsw %xmm0, %xmm1, %xmm0
; AVX2-FALLBACK-NEXT: vpsubw %xmm3, %xmm0, %xmm0
; AVX2-FALLBACK-NEXT: vpsrlw $1, %xmm0, %xmm0
; AVX2-FALLBACK-NEXT: vpmullw %xmm2, %xmm0, %xmm0
; AVX2-FALLBACK-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; AVX2-FALLBACK-NEXT: retq
;
; XOP-LABEL: vec128_i16_signed_mem_reg:
; XOP: # %bb.0:
; XOP-NEXT: vmovdqa (%rdi), %xmm1
; XOP-NEXT: vpcomgtw %xmm0, %xmm1, %xmm2
; XOP-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; XOP-NEXT: vpminsw %xmm0, %xmm1, %xmm3
; XOP-NEXT: vpmaxsw %xmm0, %xmm1, %xmm0
; XOP-NEXT: vpsubw %xmm3, %xmm0, %xmm0
; XOP-NEXT: vpsrlw $1, %xmm0, %xmm0
; XOP-NEXT: vpmacsww %xmm1, %xmm2, %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512F-LABEL: vec128_i16_signed_mem_reg:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm1
; AVX512F-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm2
; AVX512F-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; AVX512F-NEXT: vpminsw %xmm0, %xmm1, %xmm3
; AVX512F-NEXT: vpmaxsw %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: vpsubw %xmm3, %xmm0, %xmm0
; AVX512F-NEXT: vpsrlw $1, %xmm0, %xmm0
; AVX512F-NEXT: vpmullw %xmm2, %xmm0, %xmm0
; AVX512F-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; AVX512F-NEXT: retq
;
; AVX512VL-FALLBACK-LABEL: vec128_i16_signed_mem_reg:
; AVX512VL-FALLBACK: # %bb.0:
; AVX512VL-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX512VL-FALLBACK-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm2
; AVX512VL-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; AVX512VL-FALLBACK-NEXT: vpminsw %xmm0, %xmm1, %xmm3
; AVX512VL-FALLBACK-NEXT: vpmaxsw %xmm0, %xmm1, %xmm0
; AVX512VL-FALLBACK-NEXT: vpsubw %xmm3, %xmm0, %xmm0
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %xmm0, %xmm0
; AVX512VL-FALLBACK-NEXT: vpmullw %xmm2, %xmm0, %xmm0
; AVX512VL-FALLBACK-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; AVX512VL-FALLBACK-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i16_signed_mem_reg:
; AVX512BW-FALLBACK: # %bb.0:
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX512BW-FALLBACK-NEXT: vpcmpgtw %zmm0, %zmm1, %k1
; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1]
; AVX512BW-FALLBACK-NEXT: vmovdqu16 %zmm2, %zmm3 {%k1}
; AVX512BW-FALLBACK-NEXT: vpminsw %xmm0, %xmm1, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmaxsw %xmm0, %xmm1, %xmm0
; AVX512BW-FALLBACK-NEXT: vpsubw %xmm2, %xmm0, %xmm0
; AVX512BW-FALLBACK-NEXT: vpsrlw $1, %xmm0, %xmm0
; AVX512BW-FALLBACK-NEXT: vpmullw %xmm3, %xmm0, %xmm0
; AVX512BW-FALLBACK-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; AVX512BW-FALLBACK-NEXT: vzeroupper
; AVX512BW-FALLBACK-NEXT: retq
;
; AVX512VLBW-LABEL: vec128_i16_signed_mem_reg:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vmovdqa (%rdi), %xmm1
; AVX512VLBW-NEXT: vpcmpgtw %xmm0, %xmm1, %k1
; AVX512VLBW-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512VLBW-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1]
; AVX512VLBW-NEXT: vmovdqu16 %xmm2, %xmm3 {%k1}
; AVX512VLBW-NEXT: vpminsw %xmm0, %xmm1, %xmm2
; AVX512VLBW-NEXT: vpmaxsw %xmm0, %xmm1, %xmm0
; AVX512VLBW-NEXT: vpsubw %xmm2, %xmm0, %xmm0
; AVX512VLBW-NEXT: vpsrlw $1, %xmm0, %xmm0
; AVX512VLBW-NEXT: vpmullw %xmm3, %xmm0, %xmm0
; AVX512VLBW-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; AVX512VLBW-NEXT: retq
  %a1 = load <8 x i16>, <8 x i16>* %a1_addr
  %t3 = icmp sgt <8 x i16> %a1, %a2 ; signed
  %t4 = select <8 x i1> %t3, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %t5 = select <8 x i1> %t3, <8 x i16> %a2, <8 x i16> %a1
  %t6 = select <8 x i1> %t3, <8 x i16> %a1, <8 x i16> %a2
  %t7 = sub <8 x i16> %t6, %t5
  %t8 = lshr <8 x i16> %t7, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %t9 = mul nsw <8 x i16> %t8, %t4 ; signed
  %a10 = add nsw <8 x i16> %t9, %a1 ; signed
  ret <8 x i16> %a10
}

define <8 x i16> @vec128_i16_signed_reg_mem(<8 x i16> %a1, <8 x i16>* %a2_addr) nounwind {
; SSE-LABEL: vec128_i16_signed_reg_mem:
; SSE: # %bb.0:
; SSE-NEXT: movdqa (%rdi), %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: pcmpgtw %xmm2, %xmm1
; SSE-NEXT: por {{.*}}(%rip), %xmm1
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: pminsw %xmm2, %xmm3
; SSE-NEXT: pmaxsw %xmm0, %xmm2
; SSE-NEXT: psubw %xmm3, %xmm2
; SSE-NEXT: psrlw $1, %xmm2
; SSE-NEXT: pmullw %xmm2, %xmm1
; SSE-NEXT: paddw %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX1-FALLBACK-LABEL: vec128_i16_signed_reg_mem:
; AVX1-FALLBACK: # %bb.0:
; AVX1-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX1-FALLBACK-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm2
; AVX1-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; AVX1-FALLBACK-NEXT: vpminsw %xmm1, %xmm0, %xmm3
; AVX1-FALLBACK-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; AVX1-FALLBACK-NEXT: vpsubw %xmm3, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX1-FALLBACK-NEXT: retq
;
; AVX2-FALLBACK-LABEL: vec128_i16_signed_reg_mem:
; AVX2-FALLBACK: # %bb.0:
; AVX2-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX2-FALLBACK-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm2
; AVX2-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; AVX2-FALLBACK-NEXT: vpminsw %xmm1, %xmm0, %xmm3
; AVX2-FALLBACK-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; AVX2-FALLBACK-NEXT: vpsubw %xmm3, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX2-FALLBACK-NEXT: retq
;
; XOP-LABEL: vec128_i16_signed_reg_mem:
; XOP: # %bb.0:
; XOP-NEXT: vmovdqa (%rdi), %xmm1
; XOP-NEXT: vpcomgtw %xmm1, %xmm0, %xmm2
; XOP-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; XOP-NEXT: vpminsw %xmm1, %xmm0, %xmm3
; XOP-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; XOP-NEXT: vpsubw %xmm3, %xmm1, %xmm1
; XOP-NEXT: vpsrlw $1, %xmm1, %xmm1
; XOP-NEXT: vpmacsww %xmm0, %xmm2, %xmm1, %xmm0
; XOP-NEXT: retq
;
; AVX512F-LABEL: vec128_i16_signed_reg_mem:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm1
; AVX512F-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm2
; AVX512F-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; AVX512F-NEXT: vpminsw %xmm1, %xmm0, %xmm3
; AVX512F-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; AVX512F-NEXT: vpsubw %xmm3, %xmm1, %xmm1
; AVX512F-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512F-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; AVX512F-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: retq
;
; AVX512VL-FALLBACK-LABEL: vec128_i16_signed_reg_mem:
; AVX512VL-FALLBACK: # %bb.0:
; AVX512VL-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX512VL-FALLBACK-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm2
; AVX512VL-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; AVX512VL-FALLBACK-NEXT: vpminsw %xmm1, %xmm0, %xmm3
; AVX512VL-FALLBACK-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; AVX512VL-FALLBACK-NEXT: vpsubw %xmm3, %xmm1, %xmm1
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512VL-FALLBACK-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; AVX512VL-FALLBACK-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512VL-FALLBACK-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i16_signed_reg_mem:
; AVX512BW-FALLBACK: # %bb.0:
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX512BW-FALLBACK-NEXT: vpcmpgtw %zmm1, %zmm0, %k1
; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1]
; AVX512BW-FALLBACK-NEXT: vmovdqu16 %zmm2, %zmm3 {%k1}
; AVX512BW-FALLBACK-NEXT: vpminsw %xmm1, %xmm0, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsubw %xmm2, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpmullw %xmm3, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512BW-FALLBACK-NEXT: vzeroupper
; AVX512BW-FALLBACK-NEXT: retq
;
; AVX512VLBW-LABEL: vec128_i16_signed_reg_mem:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vmovdqa (%rdi), %xmm1
; AVX512VLBW-NEXT: vpcmpgtw %xmm1, %xmm0, %k1
; AVX512VLBW-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512VLBW-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1]
; AVX512VLBW-NEXT: vmovdqu16 %xmm2, %xmm3 {%k1}
; AVX512VLBW-NEXT: vpminsw %xmm1, %xmm0, %xmm2
; AVX512VLBW-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; AVX512VLBW-NEXT: vpsubw %xmm2, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpmullw %xmm3, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512VLBW-NEXT: retq
  %a2 = load <8 x i16>, <8 x i16>* %a2_addr
  %t3 = icmp sgt <8 x i16> %a1, %a2 ; signed
  %t4 = select <8 x i1> %t3, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %t5 = select <8 x i1> %t3, <8 x i16> %a2, <8 x i16> %a1
  %t6 = select <8 x i1> %t3, <8 x i16> %a1, <8 x i16> %a2
  %t7 = sub <8 x i16> %t6, %t5
  %t8 = lshr <8 x i16> %t7, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %t9 = mul nsw <8 x i16> %t8, %t4 ; signed
  %a10 = add nsw <8 x i16> %t9, %a1 ; signed
  ret <8 x i16> %a10
}

define <8 x i16> @vec128_i16_signed_mem_mem(<8 x i16>* %a1_addr, <8 x i16>* %a2_addr) nounwind {
; SSE-LABEL: vec128_i16_signed_mem_mem:
; SSE: # %bb.0:
; SSE-NEXT: movdqa (%rdi), %xmm1
; SSE-NEXT: movdqa (%rsi), %xmm0
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: pcmpgtw %xmm0, %xmm2
; SSE-NEXT: por {{.*}}(%rip), %xmm2
; SSE-NEXT: movdqa %xmm1, %xmm3
; SSE-NEXT: pminsw %xmm0, %xmm3
; SSE-NEXT: pmaxsw %xmm1, %xmm0
; SSE-NEXT: psubw %xmm3, %xmm0
; SSE-NEXT: psrlw $1, %xmm0
; SSE-NEXT: pmullw %xmm2, %xmm0
; SSE-NEXT: paddw %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX1-FALLBACK-LABEL: vec128_i16_signed_mem_mem:
; AVX1-FALLBACK: # %bb.0:
; AVX1-FALLBACK-NEXT: vmovdqa (%rdi), %xmm0
; AVX1-FALLBACK-NEXT: vmovdqa (%rsi), %xmm1
; AVX1-FALLBACK-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm2
; AVX1-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; AVX1-FALLBACK-NEXT: vpminsw %xmm1, %xmm0, %xmm3
; AVX1-FALLBACK-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; AVX1-FALLBACK-NEXT: vpsubw %xmm3, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX1-FALLBACK-NEXT: retq
;
; AVX2-FALLBACK-LABEL: vec128_i16_signed_mem_mem:
; AVX2-FALLBACK: # %bb.0:
; AVX2-FALLBACK-NEXT: vmovdqa (%rdi), %xmm0
; AVX2-FALLBACK-NEXT: vmovdqa (%rsi), %xmm1
; AVX2-FALLBACK-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm2
; AVX2-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; AVX2-FALLBACK-NEXT: vpminsw %xmm1, %xmm0, %xmm3
; AVX2-FALLBACK-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; AVX2-FALLBACK-NEXT: vpsubw %xmm3, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX2-FALLBACK-NEXT: retq
;
; XOP-LABEL: vec128_i16_signed_mem_mem:
; XOP: # %bb.0:
; XOP-NEXT: vmovdqa (%rdi), %xmm0
; XOP-NEXT: vmovdqa (%rsi), %xmm1
; XOP-NEXT: vpcomgtw %xmm1, %xmm0, %xmm2
; XOP-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; XOP-NEXT: vpminsw %xmm1, %xmm0, %xmm3
; XOP-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; XOP-NEXT: vpsubw %xmm3, %xmm1, %xmm1
; XOP-NEXT: vpsrlw $1, %xmm1, %xmm1
; XOP-NEXT: vpmacsww %xmm0, %xmm2, %xmm1, %xmm0
; XOP-NEXT: retq
;
; AVX512F-LABEL: vec128_i16_signed_mem_mem:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vmovdqa (%rsi), %xmm1
; AVX512F-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm2
; AVX512F-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; AVX512F-NEXT: vpminsw %xmm1, %xmm0, %xmm3
; AVX512F-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; AVX512F-NEXT: vpsubw %xmm3, %xmm1, %xmm1
; AVX512F-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512F-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; AVX512F-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: retq
;
; AVX512VL-FALLBACK-LABEL: vec128_i16_signed_mem_mem:
; AVX512VL-FALLBACK: # %bb.0:
; AVX512VL-FALLBACK-NEXT: vmovdqa (%rdi), %xmm0
; AVX512VL-FALLBACK-NEXT: vmovdqa (%rsi), %xmm1
; AVX512VL-FALLBACK-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm2
; AVX512VL-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; AVX512VL-FALLBACK-NEXT: vpminsw %xmm1, %xmm0, %xmm3
; AVX512VL-FALLBACK-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; AVX512VL-FALLBACK-NEXT: vpsubw %xmm3, %xmm1, %xmm1
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512VL-FALLBACK-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; AVX512VL-FALLBACK-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512VL-FALLBACK-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i16_signed_mem_mem:
; AVX512BW-FALLBACK: # %bb.0:
; AVX512BW-FALLBACK-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-FALLBACK-NEXT: vmovdqa (%rsi), %xmm1
; AVX512BW-FALLBACK-NEXT: vpcmpgtw %zmm1, %zmm0, %k1
; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1]
; AVX512BW-FALLBACK-NEXT: vmovdqu16 %zmm2, %zmm3 {%k1}
; AVX512BW-FALLBACK-NEXT: vpminsw %xmm1, %xmm0, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsubw %xmm2, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpmullw %xmm3, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512BW-FALLBACK-NEXT: vzeroupper
; AVX512BW-FALLBACK-NEXT: retq
;
; AVX512VLBW-LABEL: vec128_i16_signed_mem_mem:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512VLBW-NEXT: vmovdqa (%rsi), %xmm1
; AVX512VLBW-NEXT: vpcmpgtw %xmm1, %xmm0, %k1
; AVX512VLBW-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512VLBW-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1]
; AVX512VLBW-NEXT: vmovdqu16 %xmm2, %xmm3 {%k1}
; AVX512VLBW-NEXT: vpminsw %xmm1, %xmm0, %xmm2
; AVX512VLBW-NEXT: vpmaxsw %xmm1, %xmm0, %xmm1
; AVX512VLBW-NEXT: vpsubw %xmm2, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpmullw %xmm3, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512VLBW-NEXT: retq
2683 %a1 = load <8 x i16>, <8 x i16>* %a1_addr
2684 %a2 = load <8 x i16>, <8 x i16>* %a2_addr
2685 %t3 = icmp sgt <8 x i16> %a1, %a2 ; signed
2686 %t4 = select <8 x i1> %t3, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
2687 %t5 = select <8 x i1> %t3, <8 x i16> %a2, <8 x i16> %a1
2688 %t6 = select <8 x i1> %t3, <8 x i16> %a1, <8 x i16> %a2
2689 %t7 = sub <8 x i16> %t6, %t5
2690 %t8 = lshr <8 x i16> %t7, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
2691 %t9 = mul nsw <8 x i16> %t8, %t4 ; signed
2692 %a10 = add nsw <8 x i16> %t9, %a1 ; signed
2693 ret <8 x i16> %a10
2694 }
2696 ; ---------------------------------------------------------------------------- ;
2697 ; 8-bit width. 128 / 8 = 16 elts.
2698 ; ---------------------------------------------------------------------------- ;
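; All i8 tests below exercise the same midpoint idiom as the wider-element
; tests above. As a scalar sketch (illustration only; these names are
; hypothetical and are not part of the checked output):
;   sign  = (a1 > a2) ? -1 : 1          ; cf. %t4
;   delta = max(a1, a2) - min(a1, a2)   ; cf. %t5, %t6, %t7
;   mid   = a1 + ((delta >> 1) * sign)  ; cf. %t8, %t9, %a10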
2700 ; Values come from regs
2702 define <16 x i8> @vec128_i8_signed_reg_reg(<16 x i8> %a1, <16 x i8> %a2) nounwind {
2703 ; SSE2-LABEL: vec128_i8_signed_reg_reg:
2704 ; SSE2: # %bb.0:
2705 ; SSE2-NEXT: movdqa %xmm0, %xmm2
2706 ; SSE2-NEXT: pcmpgtb %xmm1, %xmm2
2707 ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
2708 ; SSE2-NEXT: por %xmm2, %xmm3
2709 ; SSE2-NEXT: movdqa %xmm1, %xmm4
2710 ; SSE2-NEXT: pcmpgtb %xmm0, %xmm4
2711 ; SSE2-NEXT: movdqa %xmm0, %xmm5
2712 ; SSE2-NEXT: pand %xmm4, %xmm5
2713 ; SSE2-NEXT: pandn %xmm1, %xmm4
2714 ; SSE2-NEXT: por %xmm5, %xmm4
2715 ; SSE2-NEXT: movdqa %xmm0, %xmm5
2716 ; SSE2-NEXT: pand %xmm2, %xmm5
2717 ; SSE2-NEXT: pandn %xmm1, %xmm2
2718 ; SSE2-NEXT: por %xmm5, %xmm2
2719 ; SSE2-NEXT: psubb %xmm4, %xmm2
2720 ; SSE2-NEXT: psrlw $1, %xmm2
2721 ; SSE2-NEXT: pand {{.*}}(%rip), %xmm2
2722 ; SSE2-NEXT: movdqa %xmm2, %xmm1
2723 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
2724 ; SSE2-NEXT: movdqa %xmm3, %xmm4
2725 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
2726 ; SSE2-NEXT: pmullw %xmm1, %xmm4
2727 ; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
2728 ; SSE2-NEXT: pand %xmm1, %xmm4
2729 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
2730 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
2731 ; SSE2-NEXT: pmullw %xmm3, %xmm2
2732 ; SSE2-NEXT: pand %xmm1, %xmm2
2733 ; SSE2-NEXT: packuswb %xmm4, %xmm2
2734 ; SSE2-NEXT: paddb %xmm0, %xmm2
2735 ; SSE2-NEXT: movdqa %xmm2, %xmm0
2736 ; SSE2-NEXT: retq
2738 ; SSE41-LABEL: vec128_i8_signed_reg_reg:
2739 ; SSE41: # %bb.0:
2740 ; SSE41-NEXT: movdqa %xmm0, %xmm2
2741 ; SSE41-NEXT: pcmpgtb %xmm1, %xmm2
2742 ; SSE41-NEXT: por {{.*}}(%rip), %xmm2
2743 ; SSE41-NEXT: movdqa %xmm0, %xmm3
2744 ; SSE41-NEXT: pminsb %xmm1, %xmm3
2745 ; SSE41-NEXT: pmaxsb %xmm0, %xmm1
2746 ; SSE41-NEXT: psubb %xmm3, %xmm1
2747 ; SSE41-NEXT: psrlw $1, %xmm1
2748 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm1
2749 ; SSE41-NEXT: pmovzxbw {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
2750 ; SSE41-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
2751 ; SSE41-NEXT: pmovzxbw {{.*#+}} xmm4 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
2752 ; SSE41-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
2753 ; SSE41-NEXT: pmullw %xmm1, %xmm2
2754 ; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
2755 ; SSE41-NEXT: pand %xmm1, %xmm2
2756 ; SSE41-NEXT: pmullw %xmm4, %xmm3
2757 ; SSE41-NEXT: pand %xmm1, %xmm3
2758 ; SSE41-NEXT: packuswb %xmm2, %xmm3
2759 ; SSE41-NEXT: paddb %xmm3, %xmm0
2760 ; SSE41-NEXT: retq
2762 ; AVX1-FALLBACK-LABEL: vec128_i8_signed_reg_reg:
2763 ; AVX1-FALLBACK: # %bb.0:
2764 ; AVX1-FALLBACK-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm2
2765 ; AVX1-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
2766 ; AVX1-FALLBACK-NEXT: vpminsb %xmm1, %xmm0, %xmm3
2767 ; AVX1-FALLBACK-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
2768 ; AVX1-FALLBACK-NEXT: vpsubb %xmm3, %xmm1, %xmm1
2769 ; AVX1-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
2770 ; AVX1-FALLBACK-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
2771 ; AVX1-FALLBACK-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
2772 ; AVX1-FALLBACK-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
2773 ; AVX1-FALLBACK-NEXT: vpmullw %xmm4, %xmm3, %xmm3
2774 ; AVX1-FALLBACK-NEXT: vmovdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
2775 ; AVX1-FALLBACK-NEXT: vpand %xmm4, %xmm3, %xmm3
2776 ; AVX1-FALLBACK-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
2777 ; AVX1-FALLBACK-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
2778 ; AVX1-FALLBACK-NEXT: vpmullw %xmm2, %xmm1, %xmm1
2779 ; AVX1-FALLBACK-NEXT: vpand %xmm4, %xmm1, %xmm1
2780 ; AVX1-FALLBACK-NEXT: vpackuswb %xmm3, %xmm1, %xmm1
2781 ; AVX1-FALLBACK-NEXT: vpaddb %xmm0, %xmm1, %xmm0
2782 ; AVX1-FALLBACK-NEXT: retq
2784 ; AVX2-FALLBACK-LABEL: vec128_i8_signed_reg_reg:
2785 ; AVX2-FALLBACK: # %bb.0:
2786 ; AVX2-FALLBACK-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm2
2787 ; AVX2-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
2788 ; AVX2-FALLBACK-NEXT: vpminsb %xmm1, %xmm0, %xmm3
2789 ; AVX2-FALLBACK-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
2790 ; AVX2-FALLBACK-NEXT: vpsubb %xmm3, %xmm1, %xmm1
2791 ; AVX2-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
2792 ; AVX2-FALLBACK-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
2793 ; AVX2-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
2794 ; AVX2-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
2795 ; AVX2-FALLBACK-NEXT: vpmullw %ymm2, %ymm1, %ymm1
2796 ; AVX2-FALLBACK-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
2797 ; AVX2-FALLBACK-NEXT: vextracti128 $1, %ymm1, %xmm2
2798 ; AVX2-FALLBACK-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
2799 ; AVX2-FALLBACK-NEXT: vpaddb %xmm0, %xmm1, %xmm0
2800 ; AVX2-FALLBACK-NEXT: vzeroupper
2801 ; AVX2-FALLBACK-NEXT: retq
2803 ; XOP-FALLBACK-LABEL: vec128_i8_signed_reg_reg:
2804 ; XOP-FALLBACK: # %bb.0:
2805 ; XOP-FALLBACK-NEXT: vpcomgtb %xmm1, %xmm0, %xmm2
2806 ; XOP-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
2807 ; XOP-FALLBACK-NEXT: vpminsb %xmm1, %xmm0, %xmm3
2808 ; XOP-FALLBACK-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
2809 ; XOP-FALLBACK-NEXT: vpsubb %xmm3, %xmm1, %xmm1
2810 ; XOP-FALLBACK-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
2811 ; XOP-FALLBACK-NEXT: vpshlb %xmm3, %xmm1, %xmm1
2812 ; XOP-FALLBACK-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
2813 ; XOP-FALLBACK-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
2814 ; XOP-FALLBACK-NEXT: vpmullw %xmm4, %xmm3, %xmm3
2815 ; XOP-FALLBACK-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
2816 ; XOP-FALLBACK-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
2817 ; XOP-FALLBACK-NEXT: vpmullw %xmm2, %xmm1, %xmm1
2818 ; XOP-FALLBACK-NEXT: vpperm {{.*#+}} xmm1 = xmm1[0,2,4,6,8,10,12,14],xmm3[0,2,4,6,8,10,12,14]
2819 ; XOP-FALLBACK-NEXT: vpaddb %xmm0, %xmm1, %xmm0
2820 ; XOP-FALLBACK-NEXT: retq
2822 ; XOPAVX1-LABEL: vec128_i8_signed_reg_reg:
2823 ; XOPAVX1: # %bb.0:
2824 ; XOPAVX1-NEXT: vpcomgtb %xmm1, %xmm0, %xmm2
2825 ; XOPAVX1-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
2826 ; XOPAVX1-NEXT: vpminsb %xmm1, %xmm0, %xmm3
2827 ; XOPAVX1-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
2828 ; XOPAVX1-NEXT: vpsubb %xmm3, %xmm1, %xmm1
2829 ; XOPAVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
2830 ; XOPAVX1-NEXT: vpshlb %xmm3, %xmm1, %xmm1
2831 ; XOPAVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
2832 ; XOPAVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
2833 ; XOPAVX1-NEXT: vpmullw %xmm4, %xmm3, %xmm3
2834 ; XOPAVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
2835 ; XOPAVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
2836 ; XOPAVX1-NEXT: vpmullw %xmm2, %xmm1, %xmm1
2837 ; XOPAVX1-NEXT: vpperm {{.*#+}} xmm1 = xmm1[0,2,4,6,8,10,12,14],xmm3[0,2,4,6,8,10,12,14]
2838 ; XOPAVX1-NEXT: vpaddb %xmm0, %xmm1, %xmm0
2839 ; XOPAVX1-NEXT: retq
2841 ; XOPAVX2-LABEL: vec128_i8_signed_reg_reg:
2842 ; XOPAVX2: # %bb.0:
2843 ; XOPAVX2-NEXT: vpcomgtb %xmm1, %xmm0, %xmm2
2844 ; XOPAVX2-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
2845 ; XOPAVX2-NEXT: vpminsb %xmm1, %xmm0, %xmm3
2846 ; XOPAVX2-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
2847 ; XOPAVX2-NEXT: vpsubb %xmm3, %xmm1, %xmm1
2848 ; XOPAVX2-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
2849 ; XOPAVX2-NEXT: vpshlb %xmm3, %xmm1, %xmm1
2850 ; XOPAVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
2851 ; XOPAVX2-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
2852 ; XOPAVX2-NEXT: vpmullw %ymm2, %ymm1, %ymm1
2853 ; XOPAVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
2854 ; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
2855 ; XOPAVX2-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
2856 ; XOPAVX2-NEXT: vpaddb %xmm0, %xmm1, %xmm0
2857 ; XOPAVX2-NEXT: vzeroupper
2858 ; XOPAVX2-NEXT: retq
2860 ; AVX512F-LABEL: vec128_i8_signed_reg_reg:
2861 ; AVX512F: # %bb.0:
2862 ; AVX512F-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm2
2863 ; AVX512F-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
2864 ; AVX512F-NEXT: vpminsb %xmm1, %xmm0, %xmm3
2865 ; AVX512F-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
2866 ; AVX512F-NEXT: vpsubb %xmm3, %xmm1, %xmm1
2867 ; AVX512F-NEXT: vpsrlw $1, %xmm1, %xmm1
2868 ; AVX512F-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
2869 ; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
2870 ; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
2871 ; AVX512F-NEXT: vpmullw %ymm2, %ymm1, %ymm1
2872 ; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
2873 ; AVX512F-NEXT: vpmovdb %zmm1, %xmm1
2874 ; AVX512F-NEXT: vpaddb %xmm0, %xmm1, %xmm0
2875 ; AVX512F-NEXT: vzeroupper
2876 ; AVX512F-NEXT: retq
2878 ; AVX512VL-FALLBACK-LABEL: vec128_i8_signed_reg_reg:
2879 ; AVX512VL-FALLBACK: # %bb.0:
2880 ; AVX512VL-FALLBACK-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm2
2881 ; AVX512VL-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
2882 ; AVX512VL-FALLBACK-NEXT: vpminsb %xmm1, %xmm0, %xmm3
2883 ; AVX512VL-FALLBACK-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
2884 ; AVX512VL-FALLBACK-NEXT: vpsubb %xmm3, %xmm1, %xmm1
2885 ; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
2886 ; AVX512VL-FALLBACK-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
2887 ; AVX512VL-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
2888 ; AVX512VL-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
2889 ; AVX512VL-FALLBACK-NEXT: vpmullw %ymm2, %ymm1, %ymm1
2890 ; AVX512VL-FALLBACK-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
2891 ; AVX512VL-FALLBACK-NEXT: vpmovdb %zmm1, %xmm1
2892 ; AVX512VL-FALLBACK-NEXT: vpaddb %xmm0, %xmm1, %xmm0
2893 ; AVX512VL-FALLBACK-NEXT: vzeroupper
2894 ; AVX512VL-FALLBACK-NEXT: retq
2896 ; AVX512BW-FALLBACK-LABEL: vec128_i8_signed_reg_reg:
2897 ; AVX512BW-FALLBACK: # %bb.0:
2898 ; AVX512BW-FALLBACK-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
2899 ; AVX512BW-FALLBACK-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
2900 ; AVX512BW-FALLBACK-NEXT: vpcmpgtb %zmm1, %zmm0, %k1
2901 ; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
2902 ; AVX512BW-FALLBACK-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
2903 ; AVX512BW-FALLBACK-NEXT: vmovdqu8 %zmm2, %zmm3 {%k1}
2904 ; AVX512BW-FALLBACK-NEXT: vpminsb %xmm1, %xmm0, %xmm2
2905 ; AVX512BW-FALLBACK-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
2906 ; AVX512BW-FALLBACK-NEXT: vpsubb %xmm2, %xmm1, %xmm1
2907 ; AVX512BW-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
2908 ; AVX512BW-FALLBACK-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
2909 ; AVX512BW-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
2910 ; AVX512BW-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
2911 ; AVX512BW-FALLBACK-NEXT: vpmullw %ymm2, %ymm1, %ymm1
2912 ; AVX512BW-FALLBACK-NEXT: vpmovwb %zmm1, %ymm1
2913 ; AVX512BW-FALLBACK-NEXT: vpaddb %xmm0, %xmm1, %xmm0
2914 ; AVX512BW-FALLBACK-NEXT: vzeroupper
2915 ; AVX512BW-FALLBACK-NEXT: retq
2917 ; AVX512VLBW-LABEL: vec128_i8_signed_reg_reg:
2918 ; AVX512VLBW: # %bb.0:
2919 ; AVX512VLBW-NEXT: vpcmpgtb %xmm1, %xmm0, %k1
2920 ; AVX512VLBW-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
2921 ; AVX512VLBW-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
2922 ; AVX512VLBW-NEXT: vmovdqu8 %xmm2, %xmm3 {%k1}
2923 ; AVX512VLBW-NEXT: vpminsb %xmm1, %xmm0, %xmm2
2924 ; AVX512VLBW-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
2925 ; AVX512VLBW-NEXT: vpsubb %xmm2, %xmm1, %xmm1
2926 ; AVX512VLBW-NEXT: vpsrlw $1, %xmm1, %xmm1
2927 ; AVX512VLBW-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
2928 ; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
2929 ; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
2930 ; AVX512VLBW-NEXT: vpmullw %ymm2, %ymm1, %ymm1
2931 ; AVX512VLBW-NEXT: vpmovwb %ymm1, %xmm1
2932 ; AVX512VLBW-NEXT: vpaddb %xmm0, %xmm1, %xmm0
2933 ; AVX512VLBW-NEXT: vzeroupper
2934 ; AVX512VLBW-NEXT: retq
2935 %t3 = icmp sgt <16 x i8> %a1, %a2 ; signed
2936 %t4 = select <16 x i1> %t3, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
2937 %t5 = select <16 x i1> %t3, <16 x i8> %a2, <16 x i8> %a1
2938 %t6 = select <16 x i1> %t3, <16 x i8> %a1, <16 x i8> %a2
2939 %t7 = sub <16 x i8> %t6, %t5
2940 %t8 = lshr <16 x i8> %t7, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
2941 %t9 = mul nsw <16 x i8> %t8, %t4 ; signed
2942 %a10 = add nsw <16 x i8> %t9, %a1 ; signed
2943 ret <16 x i8> %a10
2944 }
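; In the unsigned variant below, SSE2 has no unsigned byte compare, so the
; expected lowering synthesizes 'icmp ugt' as NOT(a1 == umin(a1, a2)), i.e.
; the pminub + pcmpeqb + pxor-with-all-ones sequence in the SSE2 checks.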
2946 define <16 x i8> @vec128_i8_unsigned_reg_reg(<16 x i8> %a1, <16 x i8> %a2) nounwind {
2947 ; SSE2-LABEL: vec128_i8_unsigned_reg_reg:
2948 ; SSE2: # %bb.0:
2949 ; SSE2-NEXT: movdqa %xmm0, %xmm3
2950 ; SSE2-NEXT: pminub %xmm1, %xmm3
2951 ; SSE2-NEXT: movdqa %xmm0, %xmm4
2952 ; SSE2-NEXT: pcmpeqb %xmm3, %xmm4
2953 ; SSE2-NEXT: pcmpeqd %xmm2, %xmm2
2954 ; SSE2-NEXT: pxor %xmm4, %xmm2
2955 ; SSE2-NEXT: por {{.*}}(%rip), %xmm2
2956 ; SSE2-NEXT: pmaxub %xmm0, %xmm1
2957 ; SSE2-NEXT: psubb %xmm3, %xmm1
2958 ; SSE2-NEXT: psrlw $1, %xmm1
2959 ; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
2960 ; SSE2-NEXT: movdqa %xmm1, %xmm3
2961 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
2962 ; SSE2-NEXT: movdqa %xmm2, %xmm4
2963 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
2964 ; SSE2-NEXT: pmullw %xmm3, %xmm4
2965 ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
2966 ; SSE2-NEXT: pand %xmm3, %xmm4
2967 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
2968 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
2969 ; SSE2-NEXT: pmullw %xmm1, %xmm2
2970 ; SSE2-NEXT: pand %xmm3, %xmm2
2971 ; SSE2-NEXT: packuswb %xmm4, %xmm2
2972 ; SSE2-NEXT: paddb %xmm2, %xmm0
2973 ; SSE2-NEXT: retq
2975 ; SSE41-LABEL: vec128_i8_unsigned_reg_reg:
2976 ; SSE41: # %bb.0:
2977 ; SSE41-NEXT: movdqa %xmm0, %xmm2
2978 ; SSE41-NEXT: pminub %xmm1, %xmm2
2979 ; SSE41-NEXT: movdqa %xmm0, %xmm3
2980 ; SSE41-NEXT: pcmpeqb %xmm2, %xmm3
2981 ; SSE41-NEXT: pcmpeqd %xmm4, %xmm4
2982 ; SSE41-NEXT: pxor %xmm3, %xmm4
2983 ; SSE41-NEXT: por {{.*}}(%rip), %xmm4
2984 ; SSE41-NEXT: pmaxub %xmm0, %xmm1
2985 ; SSE41-NEXT: psubb %xmm2, %xmm1
2986 ; SSE41-NEXT: psrlw $1, %xmm1
2987 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm1
2988 ; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
2989 ; SSE41-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
2990 ; SSE41-NEXT: pmovzxbw {{.*#+}} xmm3 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
2991 ; SSE41-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
2992 ; SSE41-NEXT: pmullw %xmm1, %xmm4
2993 ; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
2994 ; SSE41-NEXT: pand %xmm1, %xmm4
2995 ; SSE41-NEXT: pmullw %xmm3, %xmm2
2996 ; SSE41-NEXT: pand %xmm1, %xmm2
2997 ; SSE41-NEXT: packuswb %xmm4, %xmm2
2998 ; SSE41-NEXT: paddb %xmm2, %xmm0
2999 ; SSE41-NEXT: retq
3001 ; AVX1-FALLBACK-LABEL: vec128_i8_unsigned_reg_reg:
3002 ; AVX1-FALLBACK: # %bb.0:
3003 ; AVX1-FALLBACK-NEXT: vpminub %xmm1, %xmm0, %xmm2
3004 ; AVX1-FALLBACK-NEXT: vpcmpeqb %xmm2, %xmm0, %xmm3
3005 ; AVX1-FALLBACK-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
3006 ; AVX1-FALLBACK-NEXT: vpxor %xmm4, %xmm3, %xmm3
3007 ; AVX1-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm3, %xmm3
3008 ; AVX1-FALLBACK-NEXT: vpmaxub %xmm1, %xmm0, %xmm1
3009 ; AVX1-FALLBACK-NEXT: vpsubb %xmm2, %xmm1, %xmm1
3010 ; AVX1-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
3011 ; AVX1-FALLBACK-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
3012 ; AVX1-FALLBACK-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
3013 ; AVX1-FALLBACK-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
3014 ; AVX1-FALLBACK-NEXT: vpmullw %xmm4, %xmm2, %xmm2
3015 ; AVX1-FALLBACK-NEXT: vmovdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
3016 ; AVX1-FALLBACK-NEXT: vpand %xmm4, %xmm2, %xmm2
3017 ; AVX1-FALLBACK-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
3018 ; AVX1-FALLBACK-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
3019 ; AVX1-FALLBACK-NEXT: vpmullw %xmm3, %xmm1, %xmm1
3020 ; AVX1-FALLBACK-NEXT: vpand %xmm4, %xmm1, %xmm1
3021 ; AVX1-FALLBACK-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
3022 ; AVX1-FALLBACK-NEXT: vpaddb %xmm0, %xmm1, %xmm0
3023 ; AVX1-FALLBACK-NEXT: retq
3025 ; AVX2-FALLBACK-LABEL: vec128_i8_unsigned_reg_reg:
3026 ; AVX2-FALLBACK: # %bb.0:
3027 ; AVX2-FALLBACK-NEXT: vpminub %xmm1, %xmm0, %xmm2
3028 ; AVX2-FALLBACK-NEXT: vpcmpeqb %xmm2, %xmm0, %xmm3
3029 ; AVX2-FALLBACK-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
3030 ; AVX2-FALLBACK-NEXT: vpxor %xmm4, %xmm3, %xmm3
3031 ; AVX2-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm3, %xmm3
3032 ; AVX2-FALLBACK-NEXT: vpmaxub %xmm1, %xmm0, %xmm1
3033 ; AVX2-FALLBACK-NEXT: vpsubb %xmm2, %xmm1, %xmm1
3034 ; AVX2-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
3035 ; AVX2-FALLBACK-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
3036 ; AVX2-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
3037 ; AVX2-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
3038 ; AVX2-FALLBACK-NEXT: vpmullw %ymm2, %ymm1, %ymm1
3039 ; AVX2-FALLBACK-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
3040 ; AVX2-FALLBACK-NEXT: vextracti128 $1, %ymm1, %xmm2
3041 ; AVX2-FALLBACK-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
3042 ; AVX2-FALLBACK-NEXT: vpaddb %xmm0, %xmm1, %xmm0
3043 ; AVX2-FALLBACK-NEXT: vzeroupper
3044 ; AVX2-FALLBACK-NEXT: retq
3046 ; XOP-FALLBACK-LABEL: vec128_i8_unsigned_reg_reg:
3047 ; XOP-FALLBACK: # %bb.0:
3048 ; XOP-FALLBACK-NEXT: vpcomgtub %xmm1, %xmm0, %xmm2
3049 ; XOP-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
3050 ; XOP-FALLBACK-NEXT: vpminub %xmm1, %xmm0, %xmm3
3051 ; XOP-FALLBACK-NEXT: vpmaxub %xmm1, %xmm0, %xmm1
3052 ; XOP-FALLBACK-NEXT: vpsubb %xmm3, %xmm1, %xmm1
3053 ; XOP-FALLBACK-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
3054 ; XOP-FALLBACK-NEXT: vpshlb %xmm3, %xmm1, %xmm1
3055 ; XOP-FALLBACK-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
3056 ; XOP-FALLBACK-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
3057 ; XOP-FALLBACK-NEXT: vpmullw %xmm4, %xmm3, %xmm3
3058 ; XOP-FALLBACK-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
3059 ; XOP-FALLBACK-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
3060 ; XOP-FALLBACK-NEXT: vpmullw %xmm2, %xmm1, %xmm1
3061 ; XOP-FALLBACK-NEXT: vpperm {{.*#+}} xmm1 = xmm1[0,2,4,6,8,10,12,14],xmm3[0,2,4,6,8,10,12,14]
3062 ; XOP-FALLBACK-NEXT: vpaddb %xmm0, %xmm1, %xmm0
3063 ; XOP-FALLBACK-NEXT: retq
3065 ; XOPAVX1-LABEL: vec128_i8_unsigned_reg_reg:
3066 ; XOPAVX1: # %bb.0:
3067 ; XOPAVX1-NEXT: vpcomgtub %xmm1, %xmm0, %xmm2
3068 ; XOPAVX1-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
3069 ; XOPAVX1-NEXT: vpminub %xmm1, %xmm0, %xmm3
3070 ; XOPAVX1-NEXT: vpmaxub %xmm1, %xmm0, %xmm1
3071 ; XOPAVX1-NEXT: vpsubb %xmm3, %xmm1, %xmm1
3072 ; XOPAVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
3073 ; XOPAVX1-NEXT: vpshlb %xmm3, %xmm1, %xmm1
3074 ; XOPAVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
3075 ; XOPAVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
3076 ; XOPAVX1-NEXT: vpmullw %xmm4, %xmm3, %xmm3
3077 ; XOPAVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
3078 ; XOPAVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
3079 ; XOPAVX1-NEXT: vpmullw %xmm2, %xmm1, %xmm1
3080 ; XOPAVX1-NEXT: vpperm {{.*#+}} xmm1 = xmm1[0,2,4,6,8,10,12,14],xmm3[0,2,4,6,8,10,12,14]
3081 ; XOPAVX1-NEXT: vpaddb %xmm0, %xmm1, %xmm0
3082 ; XOPAVX1-NEXT: retq
3084 ; XOPAVX2-LABEL: vec128_i8_unsigned_reg_reg:
3085 ; XOPAVX2: # %bb.0:
3086 ; XOPAVX2-NEXT: vpcomgtub %xmm1, %xmm0, %xmm2
3087 ; XOPAVX2-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
3088 ; XOPAVX2-NEXT: vpminub %xmm1, %xmm0, %xmm3
3089 ; XOPAVX2-NEXT: vpmaxub %xmm1, %xmm0, %xmm1
3090 ; XOPAVX2-NEXT: vpsubb %xmm3, %xmm1, %xmm1
3091 ; XOPAVX2-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
3092 ; XOPAVX2-NEXT: vpshlb %xmm3, %xmm1, %xmm1
3093 ; XOPAVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
3094 ; XOPAVX2-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
3095 ; XOPAVX2-NEXT: vpmullw %ymm2, %ymm1, %ymm1
3096 ; XOPAVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
3097 ; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
3098 ; XOPAVX2-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
3099 ; XOPAVX2-NEXT: vpaddb %xmm0, %xmm1, %xmm0
3100 ; XOPAVX2-NEXT: vzeroupper
3101 ; XOPAVX2-NEXT: retq
3103 ; AVX512F-LABEL: vec128_i8_unsigned_reg_reg:
3104 ; AVX512F: # %bb.0:
3105 ; AVX512F-NEXT: vpminub %xmm1, %xmm0, %xmm2
3106 ; AVX512F-NEXT: vpcmpeqb %xmm2, %xmm0, %xmm3
3107 ; AVX512F-NEXT: vpternlogq $15, %zmm3, %zmm3, %zmm3
3108 ; AVX512F-NEXT: vpor {{.*}}(%rip), %xmm3, %xmm3
3109 ; AVX512F-NEXT: vpmaxub %xmm1, %xmm0, %xmm1
3110 ; AVX512F-NEXT: vpsubb %xmm2, %xmm1, %xmm1
3111 ; AVX512F-NEXT: vpsrlw $1, %xmm1, %xmm1
3112 ; AVX512F-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
3113 ; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
3114 ; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
3115 ; AVX512F-NEXT: vpmullw %ymm2, %ymm1, %ymm1
3116 ; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
3117 ; AVX512F-NEXT: vpmovdb %zmm1, %xmm1
3118 ; AVX512F-NEXT: vpaddb %xmm0, %xmm1, %xmm0
3119 ; AVX512F-NEXT: vzeroupper
3120 ; AVX512F-NEXT: retq
3122 ; AVX512VL-FALLBACK-LABEL: vec128_i8_unsigned_reg_reg:
3123 ; AVX512VL-FALLBACK: # %bb.0:
3124 ; AVX512VL-FALLBACK-NEXT: vpminub %xmm1, %xmm0, %xmm2
3125 ; AVX512VL-FALLBACK-NEXT: vpcmpeqb %xmm2, %xmm0, %xmm3
3126 ; AVX512VL-FALLBACK-NEXT: vpternlogq $15, %xmm3, %xmm3, %xmm3
3127 ; AVX512VL-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm3, %xmm3
3128 ; AVX512VL-FALLBACK-NEXT: vpmaxub %xmm1, %xmm0, %xmm1
3129 ; AVX512VL-FALLBACK-NEXT: vpsubb %xmm2, %xmm1, %xmm1
3130 ; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
3131 ; AVX512VL-FALLBACK-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
3132 ; AVX512VL-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
3133 ; AVX512VL-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
3134 ; AVX512VL-FALLBACK-NEXT: vpmullw %ymm2, %ymm1, %ymm1
3135 ; AVX512VL-FALLBACK-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
3136 ; AVX512VL-FALLBACK-NEXT: vpmovdb %zmm1, %xmm1
3137 ; AVX512VL-FALLBACK-NEXT: vpaddb %xmm0, %xmm1, %xmm0
3138 ; AVX512VL-FALLBACK-NEXT: vzeroupper
3139 ; AVX512VL-FALLBACK-NEXT: retq
3141 ; AVX512BW-FALLBACK-LABEL: vec128_i8_unsigned_reg_reg:
3142 ; AVX512BW-FALLBACK: # %bb.0:
3143 ; AVX512BW-FALLBACK-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
3144 ; AVX512BW-FALLBACK-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
3145 ; AVX512BW-FALLBACK-NEXT: vpcmpnleub %zmm1, %zmm0, %k1
3146 ; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
3147 ; AVX512BW-FALLBACK-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
3148 ; AVX512BW-FALLBACK-NEXT: vmovdqu8 %zmm2, %zmm3 {%k1}
3149 ; AVX512BW-FALLBACK-NEXT: vpminub %xmm1, %xmm0, %xmm2
3150 ; AVX512BW-FALLBACK-NEXT: vpmaxub %xmm1, %xmm0, %xmm1
3151 ; AVX512BW-FALLBACK-NEXT: vpsubb %xmm2, %xmm1, %xmm1
3152 ; AVX512BW-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
3153 ; AVX512BW-FALLBACK-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
3154 ; AVX512BW-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
3155 ; AVX512BW-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
3156 ; AVX512BW-FALLBACK-NEXT: vpmullw %ymm2, %ymm1, %ymm1
3157 ; AVX512BW-FALLBACK-NEXT: vpmovwb %zmm1, %ymm1
3158 ; AVX512BW-FALLBACK-NEXT: vpaddb %xmm0, %xmm1, %xmm0
3159 ; AVX512BW-FALLBACK-NEXT: vzeroupper
3160 ; AVX512BW-FALLBACK-NEXT: retq
3162 ; AVX512VLBW-LABEL: vec128_i8_unsigned_reg_reg:
3163 ; AVX512VLBW: # %bb.0:
3164 ; AVX512VLBW-NEXT: vpcmpnleub %xmm1, %xmm0, %k1
3165 ; AVX512VLBW-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
3166 ; AVX512VLBW-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
3167 ; AVX512VLBW-NEXT: vmovdqu8 %xmm2, %xmm3 {%k1}
3168 ; AVX512VLBW-NEXT: vpminub %xmm1, %xmm0, %xmm2
3169 ; AVX512VLBW-NEXT: vpmaxub %xmm1, %xmm0, %xmm1
3170 ; AVX512VLBW-NEXT: vpsubb %xmm2, %xmm1, %xmm1
3171 ; AVX512VLBW-NEXT: vpsrlw $1, %xmm1, %xmm1
3172 ; AVX512VLBW-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
3173 ; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
3174 ; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
3175 ; AVX512VLBW-NEXT: vpmullw %ymm2, %ymm1, %ymm1
3176 ; AVX512VLBW-NEXT: vpmovwb %ymm1, %xmm1
3177 ; AVX512VLBW-NEXT: vpaddb %xmm0, %xmm1, %xmm0
3178 ; AVX512VLBW-NEXT: vzeroupper
3179 ; AVX512VLBW-NEXT: retq
3180 %t3 = icmp ugt <16 x i8> %a1, %a2
3181 %t4 = select <16 x i1> %t3, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
3182 %t5 = select <16 x i1> %t3, <16 x i8> %a2, <16 x i8> %a1
3183 %t6 = select <16 x i1> %t3, <16 x i8> %a1, <16 x i8> %a2
3184 %t7 = sub <16 x i8> %t6, %t5
3185 %t8 = lshr <16 x i8> %t7, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
3186 %t9 = mul <16 x i8> %t8, %t4
3187 %a10 = add <16 x i8> %t9, %a1
3188 ret <16 x i8> %a10
3189 }
3191 ; Values are loaded. Only check signed case.
3193 define <16 x i8> @vec128_i8_signed_mem_reg(<16 x i8>* %a1_addr, <16 x i8> %a2) nounwind {
3194 ; SSE2-LABEL: vec128_i8_signed_mem_reg:
3195 ; SSE2: # %bb.0:
3196 ; SSE2-NEXT: movdqa %xmm0, %xmm1
3197 ; SSE2-NEXT: movdqa (%rdi), %xmm2
3198 ; SSE2-NEXT: movdqa %xmm2, %xmm3
3199 ; SSE2-NEXT: pcmpgtb %xmm0, %xmm3
3200 ; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
3201 ; SSE2-NEXT: por %xmm3, %xmm0
3202 ; SSE2-NEXT: movdqa %xmm1, %xmm4
3203 ; SSE2-NEXT: pcmpgtb %xmm2, %xmm4
3204 ; SSE2-NEXT: movdqa %xmm2, %xmm5
3205 ; SSE2-NEXT: pand %xmm4, %xmm5
3206 ; SSE2-NEXT: pandn %xmm1, %xmm4
3207 ; SSE2-NEXT: por %xmm5, %xmm4
3208 ; SSE2-NEXT: movdqa %xmm2, %xmm5
3209 ; SSE2-NEXT: pand %xmm3, %xmm5
3210 ; SSE2-NEXT: pandn %xmm1, %xmm3
3211 ; SSE2-NEXT: por %xmm5, %xmm3
3212 ; SSE2-NEXT: psubb %xmm4, %xmm3
3213 ; SSE2-NEXT: psrlw $1, %xmm3
3214 ; SSE2-NEXT: pand {{.*}}(%rip), %xmm3
3215 ; SSE2-NEXT: movdqa %xmm3, %xmm1
3216 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
3217 ; SSE2-NEXT: movdqa %xmm0, %xmm4
3218 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
3219 ; SSE2-NEXT: pmullw %xmm1, %xmm4
3220 ; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
3221 ; SSE2-NEXT: pand %xmm1, %xmm4
3222 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
3223 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
3224 ; SSE2-NEXT: pmullw %xmm3, %xmm0
3225 ; SSE2-NEXT: pand %xmm1, %xmm0
3226 ; SSE2-NEXT: packuswb %xmm4, %xmm0
3227 ; SSE2-NEXT: paddb %xmm2, %xmm0
3228 ; SSE2-NEXT: retq
3230 ; SSE41-LABEL: vec128_i8_signed_mem_reg:
3231 ; SSE41: # %bb.0:
3232 ; SSE41-NEXT: movdqa (%rdi), %xmm2
3233 ; SSE41-NEXT: movdqa %xmm2, %xmm3
3234 ; SSE41-NEXT: pcmpgtb %xmm0, %xmm3
3235 ; SSE41-NEXT: por {{.*}}(%rip), %xmm3
3236 ; SSE41-NEXT: movdqa %xmm2, %xmm1
3237 ; SSE41-NEXT: pminsb %xmm0, %xmm1
3238 ; SSE41-NEXT: pmaxsb %xmm2, %xmm0
3239 ; SSE41-NEXT: psubb %xmm1, %xmm0
3240 ; SSE41-NEXT: psrlw $1, %xmm0
3241 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm0
3242 ; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
3243 ; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
3244 ; SSE41-NEXT: pmovzxbw {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
3245 ; SSE41-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
3246 ; SSE41-NEXT: pmullw %xmm0, %xmm3
3247 ; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [255,255,255,255,255,255,255,255]
3248 ; SSE41-NEXT: pand %xmm0, %xmm3
3249 ; SSE41-NEXT: pmullw %xmm4, %xmm1
3250 ; SSE41-NEXT: pand %xmm0, %xmm1
3251 ; SSE41-NEXT: packuswb %xmm3, %xmm1
3252 ; SSE41-NEXT: paddb %xmm2, %xmm1
3253 ; SSE41-NEXT: movdqa %xmm1, %xmm0
3254 ; SSE41-NEXT: retq
3256 ; AVX1-FALLBACK-LABEL: vec128_i8_signed_mem_reg:
3257 ; AVX1-FALLBACK: # %bb.0:
3258 ; AVX1-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
3259 ; AVX1-FALLBACK-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm2
3260 ; AVX1-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
3261 ; AVX1-FALLBACK-NEXT: vpminsb %xmm0, %xmm1, %xmm3
3262 ; AVX1-FALLBACK-NEXT: vpmaxsb %xmm0, %xmm1, %xmm0
3263 ; AVX1-FALLBACK-NEXT: vpsubb %xmm3, %xmm0, %xmm0
3264 ; AVX1-FALLBACK-NEXT: vpsrlw $1, %xmm0, %xmm0
3265 ; AVX1-FALLBACK-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
3266 ; AVX1-FALLBACK-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
3267 ; AVX1-FALLBACK-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
3268 ; AVX1-FALLBACK-NEXT: vpmullw %xmm4, %xmm3, %xmm3
3269 ; AVX1-FALLBACK-NEXT: vmovdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
3270 ; AVX1-FALLBACK-NEXT: vpand %xmm4, %xmm3, %xmm3
3271 ; AVX1-FALLBACK-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
3272 ; AVX1-FALLBACK-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
3273 ; AVX1-FALLBACK-NEXT: vpmullw %xmm2, %xmm0, %xmm0
3274 ; AVX1-FALLBACK-NEXT: vpand %xmm4, %xmm0, %xmm0
3275 ; AVX1-FALLBACK-NEXT: vpackuswb %xmm3, %xmm0, %xmm0
3276 ; AVX1-FALLBACK-NEXT: vpaddb %xmm1, %xmm0, %xmm0
3277 ; AVX1-FALLBACK-NEXT: retq
3279 ; AVX2-FALLBACK-LABEL: vec128_i8_signed_mem_reg:
3280 ; AVX2-FALLBACK: # %bb.0:
3281 ; AVX2-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
3282 ; AVX2-FALLBACK-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm2
3283 ; AVX2-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
3284 ; AVX2-FALLBACK-NEXT: vpminsb %xmm0, %xmm1, %xmm3
3285 ; AVX2-FALLBACK-NEXT: vpmaxsb %xmm0, %xmm1, %xmm0
3286 ; AVX2-FALLBACK-NEXT: vpsubb %xmm3, %xmm0, %xmm0
3287 ; AVX2-FALLBACK-NEXT: vpsrlw $1, %xmm0, %xmm0
3288 ; AVX2-FALLBACK-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
3289 ; AVX2-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
3290 ; AVX2-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
3291 ; AVX2-FALLBACK-NEXT: vpmullw %ymm2, %ymm0, %ymm0
3292 ; AVX2-FALLBACK-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
3293 ; AVX2-FALLBACK-NEXT: vextracti128 $1, %ymm0, %xmm2
3294 ; AVX2-FALLBACK-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
3295 ; AVX2-FALLBACK-NEXT: vpaddb %xmm1, %xmm0, %xmm0
3296 ; AVX2-FALLBACK-NEXT: vzeroupper
3297 ; AVX2-FALLBACK-NEXT: retq
3299 ; XOP-FALLBACK-LABEL: vec128_i8_signed_mem_reg:
3300 ; XOP-FALLBACK: # %bb.0:
3301 ; XOP-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
3302 ; XOP-FALLBACK-NEXT: vpcomgtb %xmm0, %xmm1, %xmm2
3303 ; XOP-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
3304 ; XOP-FALLBACK-NEXT: vpminsb %xmm0, %xmm1, %xmm3
3305 ; XOP-FALLBACK-NEXT: vpmaxsb %xmm0, %xmm1, %xmm0
3306 ; XOP-FALLBACK-NEXT: vpsubb %xmm3, %xmm0, %xmm0
3307 ; XOP-FALLBACK-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
3308 ; XOP-FALLBACK-NEXT: vpshlb %xmm3, %xmm0, %xmm0
3309 ; XOP-FALLBACK-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
3310 ; XOP-FALLBACK-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
3311 ; XOP-FALLBACK-NEXT: vpmullw %xmm4, %xmm3, %xmm3
3312 ; XOP-FALLBACK-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
3313 ; XOP-FALLBACK-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
3314 ; XOP-FALLBACK-NEXT: vpmullw %xmm2, %xmm0, %xmm0
3315 ; XOP-FALLBACK-NEXT: vpperm {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14],xmm3[0,2,4,6,8,10,12,14]
3316 ; XOP-FALLBACK-NEXT: vpaddb %xmm1, %xmm0, %xmm0
3317 ; XOP-FALLBACK-NEXT: retq
3319 ; XOPAVX1-LABEL: vec128_i8_signed_mem_reg:
3320 ; XOPAVX1: # %bb.0:
3321 ; XOPAVX1-NEXT: vmovdqa (%rdi), %xmm1
3322 ; XOPAVX1-NEXT: vpcomgtb %xmm0, %xmm1, %xmm2
3323 ; XOPAVX1-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
3324 ; XOPAVX1-NEXT: vpminsb %xmm0, %xmm1, %xmm3
3325 ; XOPAVX1-NEXT: vpmaxsb %xmm0, %xmm1, %xmm0
3326 ; XOPAVX1-NEXT: vpsubb %xmm3, %xmm0, %xmm0
3327 ; XOPAVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
3328 ; XOPAVX1-NEXT: vpshlb %xmm3, %xmm0, %xmm0
3329 ; XOPAVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
3330 ; XOPAVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
3331 ; XOPAVX1-NEXT: vpmullw %xmm4, %xmm3, %xmm3
3332 ; XOPAVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
3333 ; XOPAVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
3334 ; XOPAVX1-NEXT: vpmullw %xmm2, %xmm0, %xmm0
3335 ; XOPAVX1-NEXT: vpperm {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14],xmm3[0,2,4,6,8,10,12,14]
3336 ; XOPAVX1-NEXT: vpaddb %xmm1, %xmm0, %xmm0
3337 ; XOPAVX1-NEXT: retq
3339 ; XOPAVX2-LABEL: vec128_i8_signed_mem_reg:
3340 ; XOPAVX2: # %bb.0:
3341 ; XOPAVX2-NEXT: vmovdqa (%rdi), %xmm1
3342 ; XOPAVX2-NEXT: vpcomgtb %xmm0, %xmm1, %xmm2
3343 ; XOPAVX2-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
3344 ; XOPAVX2-NEXT: vpminsb %xmm0, %xmm1, %xmm3
3345 ; XOPAVX2-NEXT: vpmaxsb %xmm0, %xmm1, %xmm0
3346 ; XOPAVX2-NEXT: vpsubb %xmm3, %xmm0, %xmm0
3347 ; XOPAVX2-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
3348 ; XOPAVX2-NEXT: vpshlb %xmm3, %xmm0, %xmm0
3349 ; XOPAVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
3350 ; XOPAVX2-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
3351 ; XOPAVX2-NEXT: vpmullw %ymm2, %ymm0, %ymm0
3352 ; XOPAVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
3353 ; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
3354 ; XOPAVX2-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
3355 ; XOPAVX2-NEXT: vpaddb %xmm1, %xmm0, %xmm0
3356 ; XOPAVX2-NEXT: vzeroupper
3357 ; XOPAVX2-NEXT: retq
3359 ; AVX512F-LABEL: vec128_i8_signed_mem_reg:
3360 ; AVX512F: # %bb.0:
3361 ; AVX512F-NEXT: vmovdqa (%rdi), %xmm1
3362 ; AVX512F-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm2
3363 ; AVX512F-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
3364 ; AVX512F-NEXT: vpminsb %xmm0, %xmm1, %xmm3
3365 ; AVX512F-NEXT: vpmaxsb %xmm0, %xmm1, %xmm0
3366 ; AVX512F-NEXT: vpsubb %xmm3, %xmm0, %xmm0
3367 ; AVX512F-NEXT: vpsrlw $1, %xmm0, %xmm0
3368 ; AVX512F-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
3369 ; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
3370 ; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
3371 ; AVX512F-NEXT: vpmullw %ymm2, %ymm0, %ymm0
3372 ; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
3373 ; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
3374 ; AVX512F-NEXT: vpaddb %xmm1, %xmm0, %xmm0
3375 ; AVX512F-NEXT: vzeroupper
3376 ; AVX512F-NEXT: retq
3378 ; AVX512VL-FALLBACK-LABEL: vec128_i8_signed_mem_reg:
3379 ; AVX512VL-FALLBACK: # %bb.0:
3380 ; AVX512VL-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
3381 ; AVX512VL-FALLBACK-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm2
3382 ; AVX512VL-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
3383 ; AVX512VL-FALLBACK-NEXT: vpminsb %xmm0, %xmm1, %xmm3
3384 ; AVX512VL-FALLBACK-NEXT: vpmaxsb %xmm0, %xmm1, %xmm0
3385 ; AVX512VL-FALLBACK-NEXT: vpsubb %xmm3, %xmm0, %xmm0
3386 ; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %xmm0, %xmm0
3387 ; AVX512VL-FALLBACK-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
3388 ; AVX512VL-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
3389 ; AVX512VL-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
3390 ; AVX512VL-FALLBACK-NEXT: vpmullw %ymm2, %ymm0, %ymm0
3391 ; AVX512VL-FALLBACK-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
3392 ; AVX512VL-FALLBACK-NEXT: vpmovdb %zmm0, %xmm0
3393 ; AVX512VL-FALLBACK-NEXT: vpaddb %xmm1, %xmm0, %xmm0
3394 ; AVX512VL-FALLBACK-NEXT: vzeroupper
3395 ; AVX512VL-FALLBACK-NEXT: retq
3397 ; AVX512BW-FALLBACK-LABEL: vec128_i8_signed_mem_reg:
3398 ; AVX512BW-FALLBACK: # %bb.0:
3399 ; AVX512BW-FALLBACK-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
3400 ; AVX512BW-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
3401 ; AVX512BW-FALLBACK-NEXT: vpcmpgtb %zmm0, %zmm1, %k1
3402 ; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
3403 ; AVX512BW-FALLBACK-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
3404 ; AVX512BW-FALLBACK-NEXT: vmovdqu8 %zmm2, %zmm3 {%k1}
3405 ; AVX512BW-FALLBACK-NEXT: vpminsb %xmm0, %xmm1, %xmm2
3406 ; AVX512BW-FALLBACK-NEXT: vpmaxsb %xmm0, %xmm1, %xmm0
3407 ; AVX512BW-FALLBACK-NEXT: vpsubb %xmm2, %xmm0, %xmm0
3408 ; AVX512BW-FALLBACK-NEXT: vpsrlw $1, %xmm0, %xmm0
3409 ; AVX512BW-FALLBACK-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
3410 ; AVX512BW-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
3411 ; AVX512BW-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
3412 ; AVX512BW-FALLBACK-NEXT: vpmullw %ymm2, %ymm0, %ymm0
3413 ; AVX512BW-FALLBACK-NEXT: vpmovwb %zmm0, %ymm0
3414 ; AVX512BW-FALLBACK-NEXT: vpaddb %xmm1, %xmm0, %xmm0
3415 ; AVX512BW-FALLBACK-NEXT: vzeroupper
3416 ; AVX512BW-FALLBACK-NEXT: retq
3418 ; AVX512VLBW-LABEL: vec128_i8_signed_mem_reg:
3419 ; AVX512VLBW: # %bb.0:
3420 ; AVX512VLBW-NEXT: vmovdqa (%rdi), %xmm1
3421 ; AVX512VLBW-NEXT: vpcmpgtb %xmm0, %xmm1, %k1
3422 ; AVX512VLBW-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
3423 ; AVX512VLBW-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
3424 ; AVX512VLBW-NEXT: vmovdqu8 %xmm2, %xmm3 {%k1}
3425 ; AVX512VLBW-NEXT: vpminsb %xmm0, %xmm1, %xmm2
3426 ; AVX512VLBW-NEXT: vpmaxsb %xmm0, %xmm1, %xmm0
3427 ; AVX512VLBW-NEXT: vpsubb %xmm2, %xmm0, %xmm0
3428 ; AVX512VLBW-NEXT: vpsrlw $1, %xmm0, %xmm0
3429 ; AVX512VLBW-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
3430 ; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
3431 ; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
3432 ; AVX512VLBW-NEXT: vpmullw %ymm2, %ymm0, %ymm0
3433 ; AVX512VLBW-NEXT: vpmovwb %ymm0, %xmm0
3434 ; AVX512VLBW-NEXT: vpaddb %xmm1, %xmm0, %xmm0
3435 ; AVX512VLBW-NEXT: vzeroupper
3436 ; AVX512VLBW-NEXT: retq
3437 %a1 = load <16 x i8>, <16 x i8>* %a1_addr
3438 %t3 = icmp sgt <16 x i8> %a1, %a2 ; signed
3439 %t4 = select <16 x i1> %t3, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
3440 %t5 = select <16 x i1> %t3, <16 x i8> %a2, <16 x i8> %a1
3441 %t6 = select <16 x i1> %t3, <16 x i8> %a1, <16 x i8> %a2
3442 %t7 = sub <16 x i8> %t6, %t5
3443 %t8 = lshr <16 x i8> %t7, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
3444 %t9 = mul nsw <16 x i8> %t8, %t4 ; signed
3445 %a10 = add nsw <16 x i8> %t9, %a1 ; signed
3446 ret <16 x i8> %a10
3447 }
3449 define <16 x i8> @vec128_i8_signed_reg_mem(<16 x i8> %a1, <16 x i8>* %a2_addr) nounwind {
3450 ; SSE2-LABEL: vec128_i8_signed_reg_mem:
3451 ; SSE2: # %bb.0:
3452 ; SSE2-NEXT: movdqa (%rdi), %xmm3
3453 ; SSE2-NEXT: movdqa %xmm0, %xmm1
3454 ; SSE2-NEXT: pcmpgtb %xmm3, %xmm1
3455 ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
3456 ; SSE2-NEXT: por %xmm1, %xmm2
3457 ; SSE2-NEXT: movdqa %xmm3, %xmm4
3458 ; SSE2-NEXT: pcmpgtb %xmm0, %xmm4
3459 ; SSE2-NEXT: movdqa %xmm0, %xmm5
3460 ; SSE2-NEXT: pand %xmm4, %xmm5
3461 ; SSE2-NEXT: pandn %xmm3, %xmm4
3462 ; SSE2-NEXT: por %xmm5, %xmm4
3463 ; SSE2-NEXT: movdqa %xmm0, %xmm5
3464 ; SSE2-NEXT: pand %xmm1, %xmm5
3465 ; SSE2-NEXT: pandn %xmm3, %xmm1
3466 ; SSE2-NEXT: por %xmm5, %xmm1
3467 ; SSE2-NEXT: psubb %xmm4, %xmm1
3468 ; SSE2-NEXT: psrlw $1, %xmm1
3469 ; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
3470 ; SSE2-NEXT: movdqa %xmm1, %xmm3
3471 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
3472 ; SSE2-NEXT: movdqa %xmm2, %xmm4
3473 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
3474 ; SSE2-NEXT: pmullw %xmm3, %xmm4
3475 ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
3476 ; SSE2-NEXT: pand %xmm3, %xmm4
3477 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
3478 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
3479 ; SSE2-NEXT: pmullw %xmm2, %xmm1
3480 ; SSE2-NEXT: pand %xmm3, %xmm1
3481 ; SSE2-NEXT: packuswb %xmm4, %xmm1
3482 ; SSE2-NEXT: paddb %xmm0, %xmm1
3483 ; SSE2-NEXT: movdqa %xmm1, %xmm0
3484 ; SSE2-NEXT: retq
3486 ; SSE41-LABEL: vec128_i8_signed_reg_mem:
3487 ; SSE41: # %bb.0:
3488 ; SSE41-NEXT: movdqa (%rdi), %xmm1
3489 ; SSE41-NEXT: movdqa %xmm0, %xmm2
3490 ; SSE41-NEXT: pcmpgtb %xmm1, %xmm2
3491 ; SSE41-NEXT: por {{.*}}(%rip), %xmm2
3492 ; SSE41-NEXT: movdqa %xmm0, %xmm3
3493 ; SSE41-NEXT: pminsb %xmm1, %xmm3
3494 ; SSE41-NEXT: pmaxsb %xmm0, %xmm1
3495 ; SSE41-NEXT: psubb %xmm3, %xmm1
3496 ; SSE41-NEXT: psrlw $1, %xmm1
3497 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm1
3498 ; SSE41-NEXT: pmovzxbw {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
3499 ; SSE41-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
3500 ; SSE41-NEXT: pmovzxbw {{.*#+}} xmm4 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
3501 ; SSE41-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
3502 ; SSE41-NEXT: pmullw %xmm1, %xmm2
3503 ; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
3504 ; SSE41-NEXT: pand %xmm1, %xmm2
3505 ; SSE41-NEXT: pmullw %xmm4, %xmm3
3506 ; SSE41-NEXT: pand %xmm1, %xmm3
3507 ; SSE41-NEXT: packuswb %xmm2, %xmm3
3508 ; SSE41-NEXT: paddb %xmm3, %xmm0
; AVX1-FALLBACK-LABEL: vec128_i8_signed_reg_mem:
; AVX1-FALLBACK: # %bb.0:
; AVX1-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX1-FALLBACK-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm2
; AVX1-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; AVX1-FALLBACK-NEXT: vpminsb %xmm1, %xmm0, %xmm3
; AVX1-FALLBACK-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
; AVX1-FALLBACK-NEXT: vpsubb %xmm3, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
; AVX1-FALLBACK-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
; AVX1-FALLBACK-NEXT: vpmullw %xmm4, %xmm3, %xmm3
; AVX1-FALLBACK-NEXT: vmovdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
; AVX1-FALLBACK-NEXT: vpand %xmm4, %xmm3, %xmm3
; AVX1-FALLBACK-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX1-FALLBACK-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; AVX1-FALLBACK-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpand %xmm4, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpackuswb %xmm3, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; AVX1-FALLBACK-NEXT: retq
;
; AVX2-FALLBACK-LABEL: vec128_i8_signed_reg_mem:
; AVX2-FALLBACK: # %bb.0:
; AVX2-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX2-FALLBACK-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm2
; AVX2-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; AVX2-FALLBACK-NEXT: vpminsb %xmm1, %xmm0, %xmm3
; AVX2-FALLBACK-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
; AVX2-FALLBACK-NEXT: vpsubb %xmm3, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX2-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
; AVX2-FALLBACK-NEXT: vpmullw %ymm2, %ymm1, %ymm1
; AVX2-FALLBACK-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
; AVX2-FALLBACK-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX2-FALLBACK-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; AVX2-FALLBACK-NEXT: vzeroupper
; AVX2-FALLBACK-NEXT: retq
;
; XOP-FALLBACK-LABEL: vec128_i8_signed_reg_mem:
; XOP-FALLBACK: # %bb.0:
; XOP-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; XOP-FALLBACK-NEXT: vpcomgtb %xmm1, %xmm0, %xmm2
; XOP-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; XOP-FALLBACK-NEXT: vpminsb %xmm1, %xmm0, %xmm3
; XOP-FALLBACK-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
; XOP-FALLBACK-NEXT: vpsubb %xmm3, %xmm1, %xmm1
; XOP-FALLBACK-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; XOP-FALLBACK-NEXT: vpshlb %xmm3, %xmm1, %xmm1
; XOP-FALLBACK-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
; XOP-FALLBACK-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
; XOP-FALLBACK-NEXT: vpmullw %xmm4, %xmm3, %xmm3
; XOP-FALLBACK-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; XOP-FALLBACK-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; XOP-FALLBACK-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; XOP-FALLBACK-NEXT: vpperm {{.*#+}} xmm1 = xmm1[0,2,4,6,8,10,12,14],xmm3[0,2,4,6,8,10,12,14]
; XOP-FALLBACK-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; XOP-FALLBACK-NEXT: retq
;
; XOPAVX1-LABEL: vec128_i8_signed_reg_mem:
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vmovdqa (%rdi), %xmm1
; XOPAVX1-NEXT: vpcomgtb %xmm1, %xmm0, %xmm2
; XOPAVX1-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; XOPAVX1-NEXT: vpminsb %xmm1, %xmm0, %xmm3
; XOPAVX1-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
; XOPAVX1-NEXT: vpsubb %xmm3, %xmm1, %xmm1
; XOPAVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; XOPAVX1-NEXT: vpshlb %xmm3, %xmm1, %xmm1
; XOPAVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
; XOPAVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
; XOPAVX1-NEXT: vpmullw %xmm4, %xmm3, %xmm3
; XOPAVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; XOPAVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; XOPAVX1-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; XOPAVX1-NEXT: vpperm {{.*#+}} xmm1 = xmm1[0,2,4,6,8,10,12,14],xmm3[0,2,4,6,8,10,12,14]
; XOPAVX1-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: vec128_i8_signed_reg_mem:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vmovdqa (%rdi), %xmm1
; XOPAVX2-NEXT: vpcomgtb %xmm1, %xmm0, %xmm2
; XOPAVX2-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; XOPAVX2-NEXT: vpminsb %xmm1, %xmm0, %xmm3
; XOPAVX2-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
; XOPAVX2-NEXT: vpsubb %xmm3, %xmm1, %xmm1
; XOPAVX2-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; XOPAVX2-NEXT: vpshlb %xmm3, %xmm1, %xmm1
; XOPAVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; XOPAVX2-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
; XOPAVX2-NEXT: vpmullw %ymm2, %ymm1, %ymm1
; XOPAVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; XOPAVX2-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
; XOPAVX2-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; XOPAVX2-NEXT: vzeroupper
; XOPAVX2-NEXT: retq
;
; AVX512F-LABEL: vec128_i8_signed_reg_mem:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm1
; AVX512F-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm2
; AVX512F-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; AVX512F-NEXT: vpminsb %xmm1, %xmm0, %xmm3
; AVX512F-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
; AVX512F-NEXT: vpsubb %xmm3, %xmm1, %xmm1
; AVX512F-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512F-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
; AVX512F-NEXT: vpmullw %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
; AVX512F-NEXT: vpmovdb %zmm1, %xmm1
; AVX512F-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-FALLBACK-LABEL: vec128_i8_signed_reg_mem:
; AVX512VL-FALLBACK: # %bb.0:
; AVX512VL-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX512VL-FALLBACK-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm2
; AVX512VL-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; AVX512VL-FALLBACK-NEXT: vpminsb %xmm1, %xmm0, %xmm3
; AVX512VL-FALLBACK-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
; AVX512VL-FALLBACK-NEXT: vpsubb %xmm3, %xmm1, %xmm1
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512VL-FALLBACK-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX512VL-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX512VL-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
; AVX512VL-FALLBACK-NEXT: vpmullw %ymm2, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
; AVX512VL-FALLBACK-NEXT: vpmovdb %zmm1, %xmm1
; AVX512VL-FALLBACK-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; AVX512VL-FALLBACK-NEXT: vzeroupper
; AVX512VL-FALLBACK-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i8_signed_reg_mem:
; AVX512BW-FALLBACK: # %bb.0:
; AVX512BW-FALLBACK-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-FALLBACK-NEXT: vmovdqa (%rdi), %xmm1
; AVX512BW-FALLBACK-NEXT: vpcmpgtb %zmm1, %zmm0, %k1
; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512BW-FALLBACK-NEXT: vmovdqu8 %zmm2, %zmm3 {%k1}
; AVX512BW-FALLBACK-NEXT: vpminsb %xmm1, %xmm0, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsubb %xmm2, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX512BW-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
; AVX512BW-FALLBACK-NEXT: vpmullw %ymm2, %ymm1, %ymm1
; AVX512BW-FALLBACK-NEXT: vpmovwb %zmm1, %ymm1
; AVX512BW-FALLBACK-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; AVX512BW-FALLBACK-NEXT: vzeroupper
; AVX512BW-FALLBACK-NEXT: retq
;
; AVX512VLBW-LABEL: vec128_i8_signed_reg_mem:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vmovdqa (%rdi), %xmm1
; AVX512VLBW-NEXT: vpcmpgtb %xmm1, %xmm0, %k1
; AVX512VLBW-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512VLBW-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512VLBW-NEXT: vmovdqu8 %xmm2, %xmm3 {%k1}
; AVX512VLBW-NEXT: vpminsb %xmm1, %xmm0, %xmm2
; AVX512VLBW-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
; AVX512VLBW-NEXT: vpsubb %xmm2, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
; AVX512VLBW-NEXT: vpmullw %ymm2, %ymm1, %ymm1
; AVX512VLBW-NEXT: vpmovwb %ymm1, %xmm1
; AVX512VLBW-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; AVX512VLBW-NEXT: vzeroupper
; AVX512VLBW-NEXT: retq
  %a2 = load <16 x i8>, <16 x i8>* %a2_addr
  %t3 = icmp sgt <16 x i8> %a1, %a2 ; signed
  %t4 = select <16 x i1> %t3, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %t5 = select <16 x i1> %t3, <16 x i8> %a2, <16 x i8> %a1
  %t6 = select <16 x i1> %t3, <16 x i8> %a1, <16 x i8> %a2
  %t7 = sub <16 x i8> %t6, %t5
  %t8 = lshr <16 x i8> %t7, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %t9 = mul nsw <16 x i8> %t8, %t4 ; signed
  %a10 = add nsw <16 x i8> %t9, %a1 ; signed
  ret <16 x i8> %a10
}
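
; Same recipe again, now with both operands coming from memory: every
; prefix first loads the two <16 x i8> vectors from (%rdi) and (%rsi)
; before running the compare/min/max/sub/shift/mul/add sequence checked
; above.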
define <16 x i8> @vec128_i8_signed_mem_mem(<16 x i8>* %a1_addr, <16 x i8>* %a2_addr) nounwind {
; SSE2-LABEL: vec128_i8_signed_mem_mem:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rdi), %xmm1
; SSE2-NEXT: movdqa (%rsi), %xmm3
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: pcmpgtb %xmm3, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; SSE2-NEXT: por %xmm2, %xmm0
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: pcmpgtb %xmm1, %xmm4
; SSE2-NEXT: movdqa %xmm1, %xmm5
; SSE2-NEXT: pand %xmm4, %xmm5
; SSE2-NEXT: pandn %xmm3, %xmm4
; SSE2-NEXT: por %xmm5, %xmm4
; SSE2-NEXT: movdqa %xmm1, %xmm5
; SSE2-NEXT: pand %xmm2, %xmm5
; SSE2-NEXT: pandn %xmm3, %xmm2
; SSE2-NEXT: por %xmm5, %xmm2
; SSE2-NEXT: psubb %xmm4, %xmm2
; SSE2-NEXT: psrlw $1, %xmm2
; SSE2-NEXT: pand {{.*}}(%rip), %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
; SSE2-NEXT: pmullw %xmm3, %xmm4
; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
; SSE2-NEXT: pand %xmm3, %xmm4
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pmullw %xmm2, %xmm0
; SSE2-NEXT: pand %xmm3, %xmm0
; SSE2-NEXT: packuswb %xmm4, %xmm0
; SSE2-NEXT: paddb %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: vec128_i8_signed_mem_mem:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa (%rdi), %xmm1
; SSE41-NEXT: movdqa (%rsi), %xmm2
; SSE41-NEXT: movdqa %xmm1, %xmm3
; SSE41-NEXT: pcmpgtb %xmm2, %xmm3
; SSE41-NEXT: por {{.*}}(%rip), %xmm3
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: pminsb %xmm2, %xmm0
; SSE41-NEXT: pmaxsb %xmm1, %xmm2
; SSE41-NEXT: psubb %xmm0, %xmm2
; SSE41-NEXT: psrlw $1, %xmm2
; SSE41-NEXT: pand {{.*}}(%rip), %xmm2
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; SSE41-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
; SSE41-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
; SSE41-NEXT: pmullw %xmm2, %xmm3
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
; SSE41-NEXT: pand %xmm2, %xmm3
; SSE41-NEXT: pmullw %xmm4, %xmm0
; SSE41-NEXT: pand %xmm2, %xmm0
; SSE41-NEXT: packuswb %xmm3, %xmm0
; SSE41-NEXT: paddb %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX1-FALLBACK-LABEL: vec128_i8_signed_mem_mem:
; AVX1-FALLBACK: # %bb.0:
; AVX1-FALLBACK-NEXT: vmovdqa (%rdi), %xmm0
; AVX1-FALLBACK-NEXT: vmovdqa (%rsi), %xmm1
; AVX1-FALLBACK-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm2
; AVX1-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; AVX1-FALLBACK-NEXT: vpminsb %xmm1, %xmm0, %xmm3
; AVX1-FALLBACK-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
; AVX1-FALLBACK-NEXT: vpsubb %xmm3, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
; AVX1-FALLBACK-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
; AVX1-FALLBACK-NEXT: vpmullw %xmm4, %xmm3, %xmm3
; AVX1-FALLBACK-NEXT: vmovdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
; AVX1-FALLBACK-NEXT: vpand %xmm4, %xmm3, %xmm3
; AVX1-FALLBACK-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX1-FALLBACK-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; AVX1-FALLBACK-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpand %xmm4, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpackuswb %xmm3, %xmm1, %xmm1
; AVX1-FALLBACK-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; AVX1-FALLBACK-NEXT: retq
;
; AVX2-FALLBACK-LABEL: vec128_i8_signed_mem_mem:
; AVX2-FALLBACK: # %bb.0:
; AVX2-FALLBACK-NEXT: vmovdqa (%rdi), %xmm0
; AVX2-FALLBACK-NEXT: vmovdqa (%rsi), %xmm1
; AVX2-FALLBACK-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm2
; AVX2-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; AVX2-FALLBACK-NEXT: vpminsb %xmm1, %xmm0, %xmm3
; AVX2-FALLBACK-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
; AVX2-FALLBACK-NEXT: vpsubb %xmm3, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX2-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
; AVX2-FALLBACK-NEXT: vpmullw %ymm2, %ymm1, %ymm1
; AVX2-FALLBACK-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
; AVX2-FALLBACK-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX2-FALLBACK-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
; AVX2-FALLBACK-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; AVX2-FALLBACK-NEXT: vzeroupper
; AVX2-FALLBACK-NEXT: retq
;
; XOP-FALLBACK-LABEL: vec128_i8_signed_mem_mem:
; XOP-FALLBACK: # %bb.0:
; XOP-FALLBACK-NEXT: vmovdqa (%rdi), %xmm0
; XOP-FALLBACK-NEXT: vmovdqa (%rsi), %xmm1
; XOP-FALLBACK-NEXT: vpcomgtb %xmm1, %xmm0, %xmm2
; XOP-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; XOP-FALLBACK-NEXT: vpminsb %xmm1, %xmm0, %xmm3
; XOP-FALLBACK-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
; XOP-FALLBACK-NEXT: vpsubb %xmm3, %xmm1, %xmm1
; XOP-FALLBACK-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; XOP-FALLBACK-NEXT: vpshlb %xmm3, %xmm1, %xmm1
; XOP-FALLBACK-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
; XOP-FALLBACK-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
; XOP-FALLBACK-NEXT: vpmullw %xmm4, %xmm3, %xmm3
; XOP-FALLBACK-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; XOP-FALLBACK-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; XOP-FALLBACK-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; XOP-FALLBACK-NEXT: vpperm {{.*#+}} xmm1 = xmm1[0,2,4,6,8,10,12,14],xmm3[0,2,4,6,8,10,12,14]
; XOP-FALLBACK-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; XOP-FALLBACK-NEXT: retq
;
; XOPAVX1-LABEL: vec128_i8_signed_mem_mem:
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vmovdqa (%rdi), %xmm0
; XOPAVX1-NEXT: vmovdqa (%rsi), %xmm1
; XOPAVX1-NEXT: vpcomgtb %xmm1, %xmm0, %xmm2
; XOPAVX1-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; XOPAVX1-NEXT: vpminsb %xmm1, %xmm0, %xmm3
; XOPAVX1-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
; XOPAVX1-NEXT: vpsubb %xmm3, %xmm1, %xmm1
; XOPAVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; XOPAVX1-NEXT: vpshlb %xmm3, %xmm1, %xmm1
; XOPAVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
; XOPAVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
; XOPAVX1-NEXT: vpmullw %xmm4, %xmm3, %xmm3
; XOPAVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; XOPAVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; XOPAVX1-NEXT: vpmullw %xmm2, %xmm1, %xmm1
; XOPAVX1-NEXT: vpperm {{.*#+}} xmm1 = xmm1[0,2,4,6,8,10,12,14],xmm3[0,2,4,6,8,10,12,14]
; XOPAVX1-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: vec128_i8_signed_mem_mem:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vmovdqa (%rdi), %xmm0
; XOPAVX2-NEXT: vmovdqa (%rsi), %xmm1
; XOPAVX2-NEXT: vpcomgtb %xmm1, %xmm0, %xmm2
; XOPAVX2-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; XOPAVX2-NEXT: vpminsb %xmm1, %xmm0, %xmm3
; XOPAVX2-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
; XOPAVX2-NEXT: vpsubb %xmm3, %xmm1, %xmm1
; XOPAVX2-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; XOPAVX2-NEXT: vpshlb %xmm3, %xmm1, %xmm1
; XOPAVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; XOPAVX2-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
; XOPAVX2-NEXT: vpmullw %ymm2, %ymm1, %ymm1
; XOPAVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; XOPAVX2-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
; XOPAVX2-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; XOPAVX2-NEXT: vzeroupper
; XOPAVX2-NEXT: retq
;
; AVX512F-LABEL: vec128_i8_signed_mem_mem:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vmovdqa (%rsi), %xmm1
; AVX512F-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm2
; AVX512F-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; AVX512F-NEXT: vpminsb %xmm1, %xmm0, %xmm3
; AVX512F-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
; AVX512F-NEXT: vpsubb %xmm3, %xmm1, %xmm1
; AVX512F-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512F-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
; AVX512F-NEXT: vpmullw %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
; AVX512F-NEXT: vpmovdb %zmm1, %xmm1
; AVX512F-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-FALLBACK-LABEL: vec128_i8_signed_mem_mem:
; AVX512VL-FALLBACK: # %bb.0:
; AVX512VL-FALLBACK-NEXT: vmovdqa (%rdi), %xmm0
; AVX512VL-FALLBACK-NEXT: vmovdqa (%rsi), %xmm1
; AVX512VL-FALLBACK-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm2
; AVX512VL-FALLBACK-NEXT: vpor {{.*}}(%rip), %xmm2, %xmm2
; AVX512VL-FALLBACK-NEXT: vpminsb %xmm1, %xmm0, %xmm3
; AVX512VL-FALLBACK-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
; AVX512VL-FALLBACK-NEXT: vpsubb %xmm3, %xmm1, %xmm1
; AVX512VL-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512VL-FALLBACK-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX512VL-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX512VL-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
; AVX512VL-FALLBACK-NEXT: vpmullw %ymm2, %ymm1, %ymm1
; AVX512VL-FALLBACK-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
; AVX512VL-FALLBACK-NEXT: vpmovdb %zmm1, %xmm1
; AVX512VL-FALLBACK-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; AVX512VL-FALLBACK-NEXT: vzeroupper
; AVX512VL-FALLBACK-NEXT: retq
;
; AVX512BW-FALLBACK-LABEL: vec128_i8_signed_mem_mem:
; AVX512BW-FALLBACK: # %bb.0:
; AVX512BW-FALLBACK-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-FALLBACK-NEXT: vmovdqa (%rsi), %xmm1
; AVX512BW-FALLBACK-NEXT: vpcmpgtb %zmm1, %zmm0, %k1
; AVX512BW-FALLBACK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512BW-FALLBACK-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512BW-FALLBACK-NEXT: vmovdqu8 %zmm2, %zmm3 {%k1}
; AVX512BW-FALLBACK-NEXT: vpminsb %xmm1, %xmm0, %xmm2
; AVX512BW-FALLBACK-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsubb %xmm2, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX512BW-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX512BW-FALLBACK-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
; AVX512BW-FALLBACK-NEXT: vpmullw %ymm2, %ymm1, %ymm1
; AVX512BW-FALLBACK-NEXT: vpmovwb %zmm1, %ymm1
; AVX512BW-FALLBACK-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; AVX512BW-FALLBACK-NEXT: vzeroupper
; AVX512BW-FALLBACK-NEXT: retq
;
; AVX512VLBW-LABEL: vec128_i8_signed_mem_mem:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vmovdqa (%rdi), %xmm0
; AVX512VLBW-NEXT: vmovdqa (%rsi), %xmm1
; AVX512VLBW-NEXT: vpcmpgtb %xmm1, %xmm0, %k1
; AVX512VLBW-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; AVX512VLBW-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512VLBW-NEXT: vmovdqu8 %xmm2, %xmm3 {%k1}
; AVX512VLBW-NEXT: vpminsb %xmm1, %xmm0, %xmm2
; AVX512VLBW-NEXT: vpmaxsb %xmm1, %xmm0, %xmm1
; AVX512VLBW-NEXT: vpsubb %xmm2, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpsrlw $1, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
; AVX512VLBW-NEXT: vpmullw %ymm2, %ymm1, %ymm1
; AVX512VLBW-NEXT: vpmovwb %ymm1, %xmm1
; AVX512VLBW-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; AVX512VLBW-NEXT: vzeroupper
; AVX512VLBW-NEXT: retq
  %a1 = load <16 x i8>, <16 x i8>* %a1_addr
  %a2 = load <16 x i8>, <16 x i8>* %a2_addr
  %t3 = icmp sgt <16 x i8> %a1, %a2 ; signed
  %t4 = select <16 x i1> %t3, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %t5 = select <16 x i1> %t3, <16 x i8> %a2, <16 x i8> %a1
  %t6 = select <16 x i1> %t3, <16 x i8> %a1, <16 x i8> %a2
  %t7 = sub <16 x i8> %t6, %t5
  %t8 = lshr <16 x i8> %t7, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %t9 = mul nsw <16 x i8> %t8, %t4 ; signed
  %a10 = add nsw <16 x i8> %t9, %a1 ; signed