; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+sse2 < %s | FileCheck %s --check-prefixes=CHECK-SSE,CHECK-SSE2
; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+sse4.1 < %s | FileCheck %s --check-prefixes=CHECK-SSE,CHECK-SSE41
; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+avx < %s | FileCheck %s --check-prefixes=CHECK-AVX,CHECK-AVX1
; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+avx2 < %s | FileCheck %s --check-prefixes=CHECK-AVX,CHECK-AVX2
; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f,+avx512vl < %s | FileCheck %s --check-prefixes=CHECK-AVX,CHECK-AVX512VL
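
; Every lane folds at compile time: the divisors are 1 and 2, so each remainder
; is 0 or less than 2, and every lane's compare against <0, 1, 2, 3> is a known
; constant (true for lane 0, false for the rest).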
define <4 x i1> @t0_all_tautological(<4 x i32> %X) nounwind {
; CHECK-SSE-LABEL: t0_all_tautological:
; CHECK-SSE: # %bb.0:
; CHECK-SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE-NEXT: pcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE-NEXT: retq
;
; CHECK-AVX-LABEL: t0_all_tautological:
; CHECK-AVX: # %bb.0:
; CHECK-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX-NEXT: vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX-NEXT: retq
  %urem = urem <4 x i32> %X, <i32 1, i32 1, i32 2, i32 2>
  %cmp = icmp eq <4 x i32> %urem, <i32 0, i32 1, i32 2, i32 3>
  ret <4 x i1> %cmp
}
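
; All divisors are odd. Lanes 1 and 2 divide by 1 (remainder always 0), and
; lane 3's remainder (< 9) can never equal 42, so only lane 0
; (%X urem 3 == 0) needs a real check.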
define <4 x i1> @t1_all_odd_eq(<4 x i32> %X) nounwind {
; CHECK-SSE2-LABEL: t1_all_odd_eq:
; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [2863311531,2863311531,2863311531,2863311531]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm0
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm2
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-SSE2-NEXT: pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE2-NEXT: pandn {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE2-NEXT: retq
;
; CHECK-SSE41-LABEL: t1_all_odd_eq:
; CHECK-SSE41: # %bb.0:
; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [1431655765,4294967295,4294967295,4294967295]
; CHECK-SSE41-NEXT: pminud %xmm0, %xmm1
; CHECK-SSE41-NEXT: pcmpeqd %xmm1, %xmm0
; CHECK-SSE41-NEXT: pxor %xmm1, %xmm1
; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; CHECK-SSE41-NEXT: retq
;
; CHECK-AVX1-LABEL: t1_all_odd_eq:
; CHECK-AVX1: # %bb.0:
; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; CHECK-AVX1-NEXT: retq
;
; CHECK-AVX2-LABEL: t1_all_odd_eq:
; CHECK-AVX2: # %bb.0:
; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [2863311531,2863311531,2863311531,2863311531]
; CHECK-AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX2-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; CHECK-AVX2-NEXT: retq
;
; CHECK-AVX512VL-LABEL: t1_all_odd_eq:
; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; CHECK-AVX512VL-NEXT: retq
  %urem = urem <4 x i32> %X, <i32 3, i32 1, i32 1, i32 9>
  %cmp = icmp eq <4 x i32> %urem, <i32 0, i32 42, i32 0, i32 42>
  ret <4 x i1> %cmp
}
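
; Same divisors and compare constants as above, but with icmp ne; again only
; lane 0 (%X urem 3 != 0) requires a real check.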
define <4 x i1> @t1_all_odd_ne(<4 x i32> %X) nounwind {
; CHECK-SSE2-LABEL: t1_all_odd_ne:
; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [2863311531,2863311531,2863311531,2863311531]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm0
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm2
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; CHECK-SSE2-NEXT: pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE2-NEXT: pcmpeqd %xmm1, %xmm1
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-SSE2-NEXT: retq
;
; CHECK-SSE41-LABEL: t1_all_odd_ne:
; CHECK-SSE41: # %bb.0:
; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [1431655765,4294967295,4294967295,4294967295]
; CHECK-SSE41-NEXT: pminud %xmm0, %xmm1
; CHECK-SSE41-NEXT: pcmpeqd %xmm1, %xmm0
; CHECK-SSE41-NEXT: pcmpeqd %xmm1, %xmm1
; CHECK-SSE41-NEXT: pxor %xmm1, %xmm0
; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; CHECK-SSE41-NEXT: retq
;
; CHECK-AVX1-LABEL: t1_all_odd_ne:
; CHECK-AVX1: # %bb.0:
; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; CHECK-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; CHECK-AVX1-NEXT: retq
;
; CHECK-AVX2-LABEL: t1_all_odd_ne:
; CHECK-AVX2: # %bb.0:
; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [2863311531,2863311531,2863311531,2863311531]
; CHECK-AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX2-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; CHECK-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; CHECK-AVX2-NEXT: retq
;
; CHECK-AVX512VL-LABEL: t1_all_odd_ne:
; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpternlogq $15, %xmm0, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; CHECK-AVX512VL-NEXT: retq
  %urem = urem <4 x i32> %X, <i32 3, i32 1, i32 1, i32 9>
  %cmp = icmp ne <4 x i32> %urem, <i32 0, i32 42, i32 0, i32 42>
  ret <4 x i1> %cmp
}
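
; The same mix of real urem-by-3 checks and tautological lanes, on narrower
; i16 elements.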
define <8 x i1> @t2_narrow(<8 x i16> %X) nounwind {
; CHECK-SSE2-LABEL: t2_narrow:
; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE2-NEXT: psubusw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE2-NEXT: pxor %xmm1, %xmm1
; CHECK-SSE2-NEXT: pcmpeqw %xmm1, %xmm0
; CHECK-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE2-NEXT: retq
;
; CHECK-SSE41-LABEL: t2_narrow:
; CHECK-SSE41: # %bb.0:
; CHECK-SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [21845,65535,65535,65535,21845,65535,65535,65535]
; CHECK-SSE41-NEXT: pminuw %xmm0, %xmm1
; CHECK-SSE41-NEXT: pcmpeqw %xmm1, %xmm0
; CHECK-SSE41-NEXT: pxor %xmm1, %xmm1
; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; CHECK-SSE41-NEXT: retq
;
; CHECK-AVX1-LABEL: t2_narrow:
; CHECK-AVX1: # %bb.0:
; CHECK-AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpminuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX1-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; CHECK-AVX1-NEXT: retq
;
; CHECK-AVX2-LABEL: t2_narrow:
; CHECK-AVX2: # %bb.0:
; CHECK-AVX2-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpminuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX2-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; CHECK-AVX2-NEXT: retq
;
; CHECK-AVX512VL-LABEL: t2_narrow:
; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpminuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX512VL-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; CHECK-AVX512VL-NEXT: retq
  %urem = urem <8 x i16> %X, <i16 3, i16 1, i16 1, i16 9, i16 3, i16 1, i16 1, i16 9>
  %cmp = icmp eq <8 x i16> %urem, <i16 0, i16 0, i16 42, i16 42, i16 0, i16 0, i16 42, i16 42>
  ret <8 x i1> %cmp
}
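
; The same pattern on wider i64 elements: lane 0 (%X urem 3 == 0) is a real
; check, lane 1 (0 == 42) is always false.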
define <2 x i1> @t3_wide(<2 x i64> %X) nounwind {
; CHECK-SSE2-LABEL: t3_wide:
; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [12297829382473034411,12297829382473034411]
; CHECK-SSE2-NEXT: movdqa %xmm0, %xmm2
; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm2
; CHECK-SSE2-NEXT: movdqa %xmm0, %xmm3
; CHECK-SSE2-NEXT: psrlq $32, %xmm3
; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm3
; CHECK-SSE2-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE2-NEXT: paddq %xmm3, %xmm0
; CHECK-SSE2-NEXT: psllq $32, %xmm0
; CHECK-SSE2-NEXT: paddq %xmm2, %xmm0
; CHECK-SSE2-NEXT: pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; CHECK-SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,0,2,2]
; CHECK-SSE2-NEXT: pcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; CHECK-SSE2-NEXT: pand %xmm2, %xmm1
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; CHECK-SSE2-NEXT: por %xmm1, %xmm0
; CHECK-SSE2-NEXT: pcmpeqd %xmm1, %xmm1
; CHECK-SSE2-NEXT: pxor %xmm0, %xmm1
; CHECK-SSE2-NEXT: movq {{.*#+}} xmm0 = xmm1[0],zero
; CHECK-SSE2-NEXT: retq
;
; CHECK-SSE41-LABEL: t3_wide:
; CHECK-SSE41: # %bb.0:
; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [12297829382473034411,12297829382473034411]
; CHECK-SSE41-NEXT: movdqa %xmm0, %xmm2
; CHECK-SSE41-NEXT: pmuludq %xmm1, %xmm2
; CHECK-SSE41-NEXT: movdqa %xmm0, %xmm3
; CHECK-SSE41-NEXT: psrlq $32, %xmm3
; CHECK-SSE41-NEXT: pmuludq %xmm1, %xmm3
; CHECK-SSE41-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE41-NEXT: paddq %xmm3, %xmm0
; CHECK-SSE41-NEXT: psllq $32, %xmm0
; CHECK-SSE41-NEXT: paddq %xmm2, %xmm0
; CHECK-SSE41-NEXT: pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; CHECK-SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE41-NEXT: pmovsxdq %xmm0, %xmm2
; CHECK-SSE41-NEXT: pcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; CHECK-SSE41-NEXT: pand %xmm2, %xmm1
; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; CHECK-SSE41-NEXT: por %xmm1, %xmm0
; CHECK-SSE41-NEXT: pcmpeqd %xmm1, %xmm1
; CHECK-SSE41-NEXT: pxor %xmm0, %xmm1
; CHECK-SSE41-NEXT: movq {{.*#+}} xmm0 = xmm1[0],zero
; CHECK-SSE41-NEXT: retq
;
; CHECK-AVX1-LABEL: t3_wide:
; CHECK-AVX1: # %bb.0:
; CHECK-AVX1-NEXT: vmovddup {{.*#+}} xmm1 = [12297829382473034411,12297829382473034411]
; CHECK-AVX1-NEXT: # xmm1 = mem[0,0]
; CHECK-AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
; CHECK-AVX1-NEXT: vpsrlq $32, %xmm0, %xmm3
; CHECK-AVX1-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpsllq $32, %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; CHECK-AVX1-NEXT: vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; CHECK-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; CHECK-AVX1-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; CHECK-AVX1-NEXT: retq
;
; CHECK-AVX2-LABEL: t3_wide:
; CHECK-AVX2: # %bb.0:
; CHECK-AVX2-NEXT: vpbroadcastq {{.*#+}} xmm1 = [12297829382473034411,12297829382473034411]
; CHECK-AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
; CHECK-AVX2-NEXT: vpsrlq $32, %xmm0, %xmm3
; CHECK-AVX2-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
; CHECK-AVX2-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpsllq $32, %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; CHECK-AVX2-NEXT: vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; CHECK-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; CHECK-AVX2-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; CHECK-AVX2-NEXT: retq
;
; CHECK-AVX512VL-LABEL: t3_wide:
; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vpbroadcastq {{.*#+}} xmm1 = [12297829382473034411,12297829382473034411]
; CHECK-AVX512VL-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
; CHECK-AVX512VL-NEXT: vpsrlq $32, %xmm0, %xmm3
; CHECK-AVX512VL-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
; CHECK-AVX512VL-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpsllq $32, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; CHECK-AVX512VL-NEXT: vpminuq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX512VL-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; CHECK-AVX512VL-NEXT: retq
  %urem = urem <2 x i64> %X, <i64 3, i64 1>
  %cmp = icmp eq <2 x i64> %urem, <i64 0, i64 42>
  ret <2 x i1> %cmp
}