; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+sse2 < %s | FileCheck %s --check-prefixes=CHECK-SSE,CHECK-SSE2
; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+sse4.1 < %s | FileCheck %s --check-prefixes=CHECK-SSE,CHECK-SSE41
; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+avx < %s | FileCheck %s --check-prefixes=CHECK-AVX,CHECK-AVX1
; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+avx2 < %s | FileCheck %s --check-prefixes=CHECK-AVX,CHECK-AVX2
; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f,+avx512vl < %s | FileCheck %s --check-prefixes=CHECK-AVX,CHECK-AVX512VL

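; These tests cover lowering of a vector 'urem by constants, then icmp against
; constants' where some lanes are tautological (known true or false at compile
; time): the known lanes are folded away, and the remaining odd-divisor lanes
; are handled with the multiply-by-modular-inverse rewrite instead of a real
; division.

; Every lane here is tautological: the divisors are 1 or 2, and each lane's
; compare constant is either the only possible remainder or out of range.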
define <4 x i1> @t0_all_tautological(<4 x i32> %X) nounwind {
; CHECK-SSE-LABEL: t0_all_tautological:
; CHECK-SSE: # %bb.0:
; CHECK-SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE-NEXT: pcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE-NEXT: retq
;
; CHECK-AVX-LABEL: t0_all_tautological:
; CHECK-AVX: # %bb.0:
; CHECK-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX-NEXT: vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX-NEXT: retq
  %urem = urem <4 x i32> %X, <i32 1, i32 1, i32 2, i32 2>
  %cmp = icmp eq <4 x i32> %urem, <i32 0, i32 1, i32 2, i32 3>
  ret <4 x i1> %cmp
}

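; All divisors (<3, 1, 1, 9>) are odd. Lanes 1 and 3 compare the remainder
; against 42, which no remainder can equal, and lane 2 compares a known-zero
; remainder against 0, so only lane 0 needs the real urem-by-3 check.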
define <4 x i1> @t1_all_odd_eq(<4 x i32> %X) nounwind {
; CHECK-SSE2-LABEL: t1_all_odd_eq:
; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [2863311531,2863311531,2863311531,2863311531]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm0
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm2
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-SSE2-NEXT: pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE2-NEXT: pandn {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE2-NEXT: retq
;
; CHECK-SSE41-LABEL: t1_all_odd_eq:
; CHECK-SSE41: # %bb.0:
; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [1431655765,4294967295,4294967295,4294967295]
; CHECK-SSE41-NEXT: pminud %xmm0, %xmm1
; CHECK-SSE41-NEXT: pcmpeqd %xmm1, %xmm0
; CHECK-SSE41-NEXT: pxor %xmm1, %xmm1
; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; CHECK-SSE41-NEXT: retq
;
; CHECK-AVX1-LABEL: t1_all_odd_eq:
; CHECK-AVX1: # %bb.0:
; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; CHECK-AVX1-NEXT: retq
;
; CHECK-AVX2-LABEL: t1_all_odd_eq:
; CHECK-AVX2: # %bb.0:
; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [2863311531,2863311531,2863311531,2863311531]
; CHECK-AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX2-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; CHECK-AVX2-NEXT: retq
;
; CHECK-AVX512VL-LABEL: t1_all_odd_eq:
; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; CHECK-AVX512VL-NEXT: retq
  %urem = urem <4 x i32> %X, <i32 3, i32 1, i32 1, i32 9>
  %cmp = icmp eq <4 x i32> %urem, <i32 0, i32 42, i32 0, i32 42>
  ret <4 x i1> %cmp
}

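; Same divisors and compare constants as t1_all_odd_eq, but with icmp ne, so
; the tautologically known lanes are inverted.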
define <4 x i1> @t1_all_odd_ne(<4 x i32> %X) nounwind {
; CHECK-SSE2-LABEL: t1_all_odd_ne:
; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [2863311531,2863311531,2863311531,2863311531]
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm0
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm2
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-SSE2-NEXT: pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; CHECK-SSE2-NEXT: pcmpeqd %xmm1, %xmm1
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-SSE2-NEXT: retq
;
; CHECK-SSE41-LABEL: t1_all_odd_ne:
; CHECK-SSE41: # %bb.0:
; CHECK-SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [1431655765,4294967295,4294967295,4294967295]
; CHECK-SSE41-NEXT: pminud %xmm0, %xmm1
; CHECK-SSE41-NEXT: pcmpeqd %xmm1, %xmm0
; CHECK-SSE41-NEXT: pcmpeqd %xmm1, %xmm1
; CHECK-SSE41-NEXT: pxor %xmm1, %xmm0
; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; CHECK-SSE41-NEXT: retq
;
; CHECK-AVX1-LABEL: t1_all_odd_ne:
; CHECK-AVX1: # %bb.0:
; CHECK-AVX1-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; CHECK-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; CHECK-AVX1-NEXT: retq
;
; CHECK-AVX2-LABEL: t1_all_odd_ne:
; CHECK-AVX2: # %bb.0:
; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [2863311531,2863311531,2863311531,2863311531]
; CHECK-AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX2-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; CHECK-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; CHECK-AVX2-NEXT: retq
;
; CHECK-AVX512VL-LABEL: t1_all_odd_ne:
; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpternlogq $15, %xmm0, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; CHECK-AVX512VL-NEXT: retq
  %urem = urem <4 x i32> %X, <i32 3, i32 1, i32 1, i32 9>
  %cmp = icmp ne <4 x i32> %urem, <i32 0, i32 42, i32 0, i32 42>
  ret <4 x i1> %cmp
}

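; The same odd-divisor pattern with i16 elements: the divisors <3, 1, 1, 9>
; and compare constants <0, 0, 42, 42> repeat across the eight lanes.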
define <8 x i1> @t2_narrow(<8 x i16> %X) nounwind {
; CHECK-SSE2-LABEL: t2_narrow:
; CHECK-SSE2: # %bb.0:
; CHECK-SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE2-NEXT: psubusw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE2-NEXT: pxor %xmm1, %xmm1
; CHECK-SSE2-NEXT: pcmpeqw %xmm1, %xmm0
; CHECK-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE2-NEXT: retq
;
; CHECK-SSE41-LABEL: t2_narrow:
; CHECK-SSE41: # %bb.0:
; CHECK-SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [21845,65535,65535,65535,21845,65535,65535,65535]
; CHECK-SSE41-NEXT: pminuw %xmm0, %xmm1
; CHECK-SSE41-NEXT: pcmpeqw %xmm1, %xmm0
; CHECK-SSE41-NEXT: pxor %xmm1, %xmm1
; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; CHECK-SSE41-NEXT: retq
;
; CHECK-AVX1-LABEL: t2_narrow:
; CHECK-AVX1: # %bb.0:
; CHECK-AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpminuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX1-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; CHECK-AVX1-NEXT: retq
;
; CHECK-AVX2-LABEL: t2_narrow:
; CHECK-AVX2: # %bb.0:
; CHECK-AVX2-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpminuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX2-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; CHECK-AVX2-NEXT: retq
;
; CHECK-AVX512VL-LABEL: t2_narrow:
; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpminuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX512VL-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; CHECK-AVX512VL-NEXT: retq
  %urem = urem <8 x i16> %X, <i16 3, i16 1, i16 1, i16 9, i16 3, i16 1, i16 1, i16 9>
  %cmp = icmp eq <8 x i16> %urem, <i16 0, i16 0, i16 42, i16 42, i16 0, i16 0, i16 42, i16 42>
  ret <8 x i1> %cmp
}

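; The same pattern with i64 elements: lane 0 needs the real urem-by-3 check,
; while lane 1 divides by 1 and compares against 42, which can never match.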
define <2 x i1> @t3_wide(<2 x i64> %X) nounwind {
; CHECK-SSE-LABEL: t3_wide:
; CHECK-SSE: # %bb.0:
; CHECK-SSE-NEXT: movdqa {{.*#+}} xmm1 = [12297829382473034411,12297829382473034411]
; CHECK-SSE-NEXT: movdqa %xmm0, %xmm2
; CHECK-SSE-NEXT: pmuludq %xmm1, %xmm2
; CHECK-SSE-NEXT: movdqa %xmm0, %xmm3
; CHECK-SSE-NEXT: psrlq $32, %xmm3
; CHECK-SSE-NEXT: pmuludq %xmm1, %xmm3
; CHECK-SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE-NEXT: paddq %xmm3, %xmm0
; CHECK-SSE-NEXT: psllq $32, %xmm0
; CHECK-SSE-NEXT: paddq %xmm2, %xmm0
; CHECK-SSE-NEXT: pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; CHECK-SSE-NEXT: movdqa {{.*#+}} xmm1 = [15372286730238776661,9223372034707292159]
; CHECK-SSE-NEXT: movdqa %xmm0, %xmm2
; CHECK-SSE-NEXT: pcmpgtd %xmm1, %xmm2
; CHECK-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
; CHECK-SSE-NEXT: pcmpeqd %xmm1, %xmm0
; CHECK-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; CHECK-SSE-NEXT: pand %xmm3, %xmm0
; CHECK-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
; CHECK-SSE-NEXT: por %xmm0, %xmm1
; CHECK-SSE-NEXT: pcmpeqd %xmm0, %xmm0
; CHECK-SSE-NEXT: pxor %xmm1, %xmm0
; CHECK-SSE-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; CHECK-SSE-NEXT: retq
;
; CHECK-AVX1-LABEL: t3_wide:
; CHECK-AVX1: # %bb.0:
; CHECK-AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [12297829382473034411,12297829382473034411]
; CHECK-AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
; CHECK-AVX1-NEXT: vpsrlq $32, %xmm0, %xmm3
; CHECK-AVX1-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
; CHECK-AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpsllq $32, %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; CHECK-AVX1-NEXT: vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; CHECK-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; CHECK-AVX1-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; CHECK-AVX1-NEXT: retq
;
; CHECK-AVX2-LABEL: t3_wide:
; CHECK-AVX2: # %bb.0:
; CHECK-AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [12297829382473034411,12297829382473034411]
; CHECK-AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
; CHECK-AVX2-NEXT: vpsrlq $32, %xmm0, %xmm3
; CHECK-AVX2-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
; CHECK-AVX2-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpsllq $32, %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; CHECK-AVX2-NEXT: vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; CHECK-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; CHECK-AVX2-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; CHECK-AVX2-NEXT: retq
;
; CHECK-AVX512VL-LABEL: t3_wide:
; CHECK-AVX512VL: # %bb.0:
; CHECK-AVX512VL-NEXT: vmovdqa {{.*#+}} xmm1 = [12297829382473034411,12297829382473034411]
; CHECK-AVX512VL-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
; CHECK-AVX512VL-NEXT: vpsrlq $32, %xmm0, %xmm3
; CHECK-AVX512VL-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
; CHECK-AVX512VL-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpsllq $32, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; CHECK-AVX512VL-NEXT: vpminuq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; CHECK-AVX512VL-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; CHECK-AVX512VL-NEXT: retq
  %urem = urem <2 x i64> %X, <i64 3, i64 1>
  %cmp = icmp eq <2 x i64> %urem, <i64 0, i64 42>
  ret <2 x i1> %cmp
}