; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop | FileCheck %s --check-prefixes=XOP
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVX512F
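
; All of these tests exercise the vector bit-select pattern (x & C) | (y & ~C)
; with constant masks. On XOP targets the whole pattern should fold into a
; single vpcmov (which takes each bit from its first source where the selector
; bit is set and from the second source otherwise); SSE/AVX targets keep an
; and/andn/or style sequence with the masks loaded from the constant pool. The
; _rr/_rm/_mr/_mm suffixes describe whether each data operand comes from a
; register or from memory.

;
; 128-bit vectors
;
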
define <2 x i64> @bitselect_v2i64_rr(<2 x i64>, <2 x i64>) {
; SSE-LABEL: bitselect_v2i64_rr:
; SSE:       # %bb.0:
; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
; SSE-NEXT:    andps {{.*}}(%rip), %xmm1
; SSE-NEXT:    orps %xmm1, %xmm0
; SSE-NEXT:    retq
;
; XOP-LABEL: bitselect_v2i64_rr:
; XOP:       # %bb.0:
; XOP-NEXT:    vpcmov {{.*}}(%rip), %xmm0, %xmm1, %xmm0
; XOP-NEXT:    retq
;
; AVX-LABEL: bitselect_v2i64_rr:
; AVX:       # %bb.0:
; AVX-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vandps {{.*}}(%rip), %xmm1, %xmm1
; AVX-NEXT:    vorps %xmm0, %xmm1, %xmm0
; AVX-NEXT:    retq
  %3 = and <2 x i64> %0, <i64 4294967296, i64 12884901890>
  %4 = and <2 x i64> %1, <i64 -4294967297, i64 -12884901891>
  %5 = or <2 x i64> %4, %3
  ret <2 x i64> %5
}

define <2 x i64> @bitselect_v2i64_rm(<2 x i64>, <2 x i64>* nocapture readonly) {
; SSE-LABEL: bitselect_v2i64_rm:
; SSE:       # %bb.0:
; SSE-NEXT:    movaps (%rdi), %xmm1
; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
; SSE-NEXT:    andps {{.*}}(%rip), %xmm1
; SSE-NEXT:    orps %xmm1, %xmm0
; SSE-NEXT:    retq
;
; XOP-LABEL: bitselect_v2i64_rm:
; XOP:       # %bb.0:
; XOP-NEXT:    vmovdqa (%rdi), %xmm1
; XOP-NEXT:    vpcmov {{.*}}(%rip), %xmm0, %xmm1, %xmm0
; XOP-NEXT:    retq
;
; AVX-LABEL: bitselect_v2i64_rm:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovaps (%rdi), %xmm1
; AVX-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vandps {{.*}}(%rip), %xmm1, %xmm1
; AVX-NEXT:    vorps %xmm0, %xmm1, %xmm0
; AVX-NEXT:    retq
  %3 = load <2 x i64>, <2 x i64>* %1
  %4 = and <2 x i64> %0, <i64 8589934593, i64 3>
  %5 = and <2 x i64> %3, <i64 -8589934594, i64 -4>
  %6 = or <2 x i64> %5, %4
  ret <2 x i64> %6
}

define <2 x i64> @bitselect_v2i64_mr(<2 x i64>* nocapture readonly, <2 x i64>) {
; SSE-LABEL: bitselect_v2i64_mr:
; SSE:       # %bb.0:
; SSE-NEXT:    movaps (%rdi), %xmm1
; SSE-NEXT:    andps {{.*}}(%rip), %xmm1
; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
; SSE-NEXT:    orps %xmm1, %xmm0
; SSE-NEXT:    retq
;
; XOP-LABEL: bitselect_v2i64_mr:
; XOP:       # %bb.0:
; XOP-NEXT:    vmovdqa (%rdi), %xmm1
; XOP-NEXT:    vpcmov {{.*}}(%rip), %xmm0, %xmm1, %xmm0
; XOP-NEXT:    retq
;
; AVX-LABEL: bitselect_v2i64_mr:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovaps (%rdi), %xmm1
; AVX-NEXT:    vandps {{.*}}(%rip), %xmm1, %xmm1
; AVX-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vorps %xmm0, %xmm1, %xmm0
; AVX-NEXT:    retq
  %3 = load <2 x i64>, <2 x i64>* %0
  %4 = and <2 x i64> %3, <i64 12884901890, i64 4294967296>
  %5 = and <2 x i64> %1, <i64 -12884901891, i64 -4294967297>
  %6 = or <2 x i64> %4, %5
  ret <2 x i64> %6
}

define <2 x i64> @bitselect_v2i64_mm(<2 x i64>* nocapture readonly, <2 x i64>* nocapture readonly) {
; SSE-LABEL: bitselect_v2i64_mm:
; SSE:       # %bb.0:
; SSE-NEXT:    movaps (%rdi), %xmm1
; SSE-NEXT:    movaps (%rsi), %xmm0
; SSE-NEXT:    andps {{.*}}(%rip), %xmm1
; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
; SSE-NEXT:    orps %xmm1, %xmm0
; SSE-NEXT:    retq
;
; XOP-LABEL: bitselect_v2i64_mm:
; XOP:       # %bb.0:
; XOP-NEXT:    vmovdqa (%rsi), %xmm0
; XOP-NEXT:    vmovdqa {{.*#+}} xmm1 = [18446744073709551612,18446744065119617022]
; XOP-NEXT:    vpcmov %xmm1, (%rdi), %xmm0, %xmm0
; XOP-NEXT:    retq
;
; AVX-LABEL: bitselect_v2i64_mm:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovaps (%rdi), %xmm0
; AVX-NEXT:    vmovaps (%rsi), %xmm1
; AVX-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vandps {{.*}}(%rip), %xmm1, %xmm1
; AVX-NEXT:    vorps %xmm0, %xmm1, %xmm0
; AVX-NEXT:    retq
  %3 = load <2 x i64>, <2 x i64>* %0
  %4 = load <2 x i64>, <2 x i64>* %1
  %5 = and <2 x i64> %3, <i64 3, i64 8589934593>
  %6 = and <2 x i64> %4, <i64 -4, i64 -8589934594>
  %7 = or <2 x i64> %6, %5
  ret <2 x i64> %7
}

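;
; 256-bit vectors
;

; The 256-bit tests are split into two 128-bit halves by the SSE lowering, while
; XOP folds each select into a single ymm vpcmov and AVX uses ymm-wide and/or
; sequences.
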
define <4 x i64> @bitselect_v4i64_rr(<4 x i64>, <4 x i64>) {
; SSE-LABEL: bitselect_v4i64_rr:
; SSE:       # %bb.0:
; SSE-NEXT:    andps {{.*}}(%rip), %xmm1
; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
; SSE-NEXT:    andps {{.*}}(%rip), %xmm3
; SSE-NEXT:    orps %xmm3, %xmm1
; SSE-NEXT:    andps {{.*}}(%rip), %xmm2
; SSE-NEXT:    orps %xmm2, %xmm0
; SSE-NEXT:    retq
;
; XOP-LABEL: bitselect_v4i64_rr:
; XOP:       # %bb.0:
; XOP-NEXT:    vpcmov {{.*}}(%rip), %ymm0, %ymm1, %ymm0
; XOP-NEXT:    retq
;
; AVX-LABEL: bitselect_v4i64_rr:
; AVX:       # %bb.0:
; AVX-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
; AVX-NEXT:    vandps {{.*}}(%rip), %ymm1, %ymm1
; AVX-NEXT:    vorps %ymm0, %ymm1, %ymm0
; AVX-NEXT:    retq
  %3 = and <4 x i64> %0, <i64 4294967296, i64 12884901890, i64 12884901890, i64 12884901890>
  %4 = and <4 x i64> %1, <i64 -4294967297, i64 -12884901891, i64 -12884901891, i64 -12884901891>
  %5 = or <4 x i64> %4, %3
  ret <4 x i64> %5
}

define <4 x i64> @bitselect_v4i64_rm(<4 x i64>, <4 x i64>* nocapture readonly) {
; SSE-LABEL: bitselect_v4i64_rm:
; SSE:       # %bb.0:
; SSE-NEXT:    movaps {{.*#+}} xmm2 = [18446744065119617022,18446744073709551612]
; SSE-NEXT:    movaps 16(%rdi), %xmm4
; SSE-NEXT:    andps %xmm2, %xmm4
; SSE-NEXT:    movaps (%rdi), %xmm5
; SSE-NEXT:    andps %xmm2, %xmm5
; SSE-NEXT:    movaps %xmm2, %xmm3
; SSE-NEXT:    andnps %xmm0, %xmm3
; SSE-NEXT:    orps %xmm5, %xmm3
; SSE-NEXT:    andnps %xmm1, %xmm2
; SSE-NEXT:    orps %xmm4, %xmm2
; SSE-NEXT:    movaps %xmm3, %xmm0
; SSE-NEXT:    movaps %xmm2, %xmm1
; SSE-NEXT:    retq
;
; XOP-LABEL: bitselect_v4i64_rm:
; XOP:       # %bb.0:
; XOP-NEXT:    vmovdqa (%rdi), %ymm1
; XOP-NEXT:    vpcmov {{.*}}(%rip), %ymm0, %ymm1, %ymm0
; XOP-NEXT:    retq
;
; AVX-LABEL: bitselect_v4i64_rm:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovaps (%rdi), %ymm1
; AVX-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
; AVX-NEXT:    vandps {{.*}}(%rip), %ymm1, %ymm1
; AVX-NEXT:    vorps %ymm0, %ymm1, %ymm0
; AVX-NEXT:    retq
  %3 = load <4 x i64>, <4 x i64>* %1
  %4 = and <4 x i64> %0, <i64 8589934593, i64 3, i64 8589934593, i64 3>
  %5 = and <4 x i64> %3, <i64 -8589934594, i64 -4, i64 -8589934594, i64 -4>
  %6 = or <4 x i64> %5, %4
  ret <4 x i64> %6
}

define <4 x i64> @bitselect_v4i64_mr(<4 x i64>* nocapture readonly, <4 x i64>) {
; SSE-LABEL: bitselect_v4i64_mr:
; SSE:       # %bb.0:
; SSE-NEXT:    movaps {{.*#+}} xmm2 = [12884901890,4294967296]
; SSE-NEXT:    movaps 16(%rdi), %xmm4
; SSE-NEXT:    andps %xmm2, %xmm4
; SSE-NEXT:    movaps (%rdi), %xmm5
; SSE-NEXT:    andps %xmm2, %xmm5
; SSE-NEXT:    movaps %xmm2, %xmm3
; SSE-NEXT:    andnps %xmm0, %xmm3
; SSE-NEXT:    orps %xmm5, %xmm3
; SSE-NEXT:    andnps %xmm1, %xmm2
; SSE-NEXT:    orps %xmm4, %xmm2
; SSE-NEXT:    movaps %xmm3, %xmm0
; SSE-NEXT:    movaps %xmm2, %xmm1
; SSE-NEXT:    retq
;
; XOP-LABEL: bitselect_v4i64_mr:
; XOP:       # %bb.0:
; XOP-NEXT:    vmovdqa (%rdi), %ymm1
; XOP-NEXT:    vpcmov {{.*}}(%rip), %ymm0, %ymm1, %ymm0
; XOP-NEXT:    retq
;
; AVX-LABEL: bitselect_v4i64_mr:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovaps (%rdi), %ymm1
; AVX-NEXT:    vandps {{.*}}(%rip), %ymm1, %ymm1
; AVX-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
; AVX-NEXT:    vorps %ymm0, %ymm1, %ymm0
; AVX-NEXT:    retq
  %3 = load <4 x i64>, <4 x i64>* %0
  %4 = and <4 x i64> %3, <i64 12884901890, i64 4294967296, i64 12884901890, i64 4294967296>
  %5 = and <4 x i64> %1, <i64 -12884901891, i64 -4294967297, i64 -12884901891, i64 -4294967297>
  %6 = or <4 x i64> %4, %5
  ret <4 x i64> %6
}

define <4 x i64> @bitselect_v4i64_mm(<4 x i64>* nocapture readonly, <4 x i64>* nocapture readonly) {
; SSE-LABEL: bitselect_v4i64_mm:
; SSE:       # %bb.0:
; SSE-NEXT:    movaps {{.*#+}} xmm1 = [18446744073709551612,18446744065119617022]
; SSE-NEXT:    movaps 16(%rsi), %xmm2
; SSE-NEXT:    andps %xmm1, %xmm2
; SSE-NEXT:    movaps (%rsi), %xmm3
; SSE-NEXT:    andps %xmm1, %xmm3
; SSE-NEXT:    movaps %xmm1, %xmm0
; SSE-NEXT:    andnps (%rdi), %xmm0
; SSE-NEXT:    orps %xmm3, %xmm0
; SSE-NEXT:    andnps 16(%rdi), %xmm1
; SSE-NEXT:    orps %xmm2, %xmm1
; SSE-NEXT:    retq
;
; XOP-LABEL: bitselect_v4i64_mm:
; XOP:       # %bb.0:
; XOP-NEXT:    vmovdqa (%rsi), %ymm0
; XOP-NEXT:    vmovdqa {{.*#+}} ymm1 = [18446744073709551612,18446744065119617022,18446744073709551612,18446744065119617022]
; XOP-NEXT:    vpcmov %ymm1, (%rdi), %ymm0, %ymm0
; XOP-NEXT:    retq
;
; AVX-LABEL: bitselect_v4i64_mm:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovaps (%rdi), %ymm0
; AVX-NEXT:    vmovaps (%rsi), %ymm1
; AVX-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
; AVX-NEXT:    vandps {{.*}}(%rip), %ymm1, %ymm1
; AVX-NEXT:    vorps %ymm0, %ymm1, %ymm0
; AVX-NEXT:    retq
  %3 = load <4 x i64>, <4 x i64>* %0
  %4 = load <4 x i64>, <4 x i64>* %1
  %5 = and <4 x i64> %3, <i64 3, i64 8589934593, i64 3, i64 8589934593>
  %6 = and <4 x i64> %4, <i64 -4, i64 -8589934594, i64 -4, i64 -8589934594>
  %7 = or <4 x i64> %6, %5
  ret <4 x i64> %7
}

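;
; 512-bit vectors
;

; Only AVX512F keeps the 512-bit tests as single zmm operations (vpandq/vporq);
; the SSE lowering splits them into four 128-bit halves and XOP/AVX1/AVX2 into
; two 256-bit halves.
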
define <8 x i64> @bitselect_v8i64_rr(<8 x i64>, <8 x i64>) {
; SSE-LABEL: bitselect_v8i64_rr:
; SSE:       # %bb.0:
; SSE-NEXT:    movaps {{.*#+}} xmm8 = [18446744060824649725,18446744060824649725]
; SSE-NEXT:    andps %xmm8, %xmm7
; SSE-NEXT:    movaps {{.*#+}} xmm9 = [18446744069414584319,18446744060824649725]
; SSE-NEXT:    andps %xmm9, %xmm6
; SSE-NEXT:    andps %xmm8, %xmm5
; SSE-NEXT:    andps %xmm9, %xmm4
; SSE-NEXT:    movaps %xmm9, %xmm10
; SSE-NEXT:    andnps %xmm0, %xmm10
; SSE-NEXT:    orps %xmm4, %xmm10
; SSE-NEXT:    movaps %xmm8, %xmm4
; SSE-NEXT:    andnps %xmm1, %xmm4
; SSE-NEXT:    orps %xmm5, %xmm4
; SSE-NEXT:    andnps %xmm2, %xmm9
; SSE-NEXT:    orps %xmm6, %xmm9
; SSE-NEXT:    andnps %xmm3, %xmm8
; SSE-NEXT:    orps %xmm7, %xmm8
; SSE-NEXT:    movaps %xmm10, %xmm0
; SSE-NEXT:    movaps %xmm4, %xmm1
; SSE-NEXT:    movaps %xmm9, %xmm2
; SSE-NEXT:    movaps %xmm8, %xmm3
; SSE-NEXT:    retq
;
; XOP-LABEL: bitselect_v8i64_rr:
; XOP:       # %bb.0:
; XOP-NEXT:    vmovdqa {{.*#+}} ymm4 = [18446744069414584319,18446744060824649725,18446744060824649725,18446744060824649725]
; XOP-NEXT:    vpcmov %ymm4, %ymm0, %ymm2, %ymm0
; XOP-NEXT:    vpcmov %ymm4, %ymm1, %ymm3, %ymm1
; XOP-NEXT:    retq
;
; AVX1-LABEL: bitselect_v8i64_rr:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vmovaps {{.*#+}} ymm4 = [18446744069414584319,18446744060824649725,18446744060824649725,18446744060824649725]
; AVX1-NEXT:    vandps %ymm4, %ymm3, %ymm3
; AVX1-NEXT:    vandps %ymm4, %ymm2, %ymm2
; AVX1-NEXT:    vandnps %ymm0, %ymm4, %ymm0
; AVX1-NEXT:    vorps %ymm0, %ymm2, %ymm0
; AVX1-NEXT:    vandnps %ymm1, %ymm4, %ymm1
; AVX1-NEXT:    vorps %ymm1, %ymm3, %ymm1
; AVX1-NEXT:    retq
;
; AVX2-LABEL: bitselect_v8i64_rr:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vmovaps {{.*#+}} ymm4 = [18446744069414584319,18446744060824649725,18446744060824649725,18446744060824649725]
; AVX2-NEXT:    vandps %ymm4, %ymm3, %ymm3
; AVX2-NEXT:    vandps %ymm4, %ymm2, %ymm2
; AVX2-NEXT:    vandnps %ymm0, %ymm4, %ymm0
; AVX2-NEXT:    vorps %ymm0, %ymm2, %ymm0
; AVX2-NEXT:    vandnps %ymm1, %ymm4, %ymm1
; AVX2-NEXT:    vorps %ymm1, %ymm3, %ymm1
; AVX2-NEXT:    retq
;
; AVX512F-LABEL: bitselect_v8i64_rr:
; AVX512F:       # %bb.0:
; AVX512F-NEXT:    vpandq {{.*}}(%rip), %zmm0, %zmm0
; AVX512F-NEXT:    vpandq {{.*}}(%rip), %zmm1, %zmm1
; AVX512F-NEXT:    vporq %zmm0, %zmm1, %zmm0
; AVX512F-NEXT:    retq
  %3 = and <8 x i64> %0, <i64 4294967296, i64 12884901890, i64 12884901890, i64 12884901890, i64 4294967296, i64 12884901890, i64 12884901890, i64 12884901890>
  %4 = and <8 x i64> %1, <i64 -4294967297, i64 -12884901891, i64 -12884901891, i64 -12884901891, i64 -4294967297, i64 -12884901891, i64 -12884901891, i64 -12884901891>
  %5 = or <8 x i64> %4, %3
  ret <8 x i64> %5
}

define <8 x i64> @bitselect_v8i64_rm(<8 x i64>, <8 x i64>* nocapture readonly) {
; SSE-LABEL: bitselect_v8i64_rm:
; SSE:       # %bb.0:
; SSE-NEXT:    movaps {{.*#+}} xmm4 = [18446744065119617022,18446744073709551612]
; SSE-NEXT:    movaps 48(%rdi), %xmm8
; SSE-NEXT:    andps %xmm4, %xmm8
; SSE-NEXT:    movaps 32(%rdi), %xmm9
; SSE-NEXT:    andps %xmm4, %xmm9
; SSE-NEXT:    movaps 16(%rdi), %xmm7
; SSE-NEXT:    andps %xmm4, %xmm7
; SSE-NEXT:    movaps (%rdi), %xmm6
; SSE-NEXT:    andps %xmm4, %xmm6
; SSE-NEXT:    movaps %xmm4, %xmm5
; SSE-NEXT:    andnps %xmm0, %xmm5
; SSE-NEXT:    orps %xmm6, %xmm5
; SSE-NEXT:    movaps %xmm4, %xmm6
; SSE-NEXT:    andnps %xmm1, %xmm6
; SSE-NEXT:    orps %xmm7, %xmm6
; SSE-NEXT:    movaps %xmm4, %xmm7
; SSE-NEXT:    andnps %xmm2, %xmm7
; SSE-NEXT:    orps %xmm9, %xmm7
; SSE-NEXT:    andnps %xmm3, %xmm4
; SSE-NEXT:    orps %xmm8, %xmm4
; SSE-NEXT:    movaps %xmm5, %xmm0
; SSE-NEXT:    movaps %xmm6, %xmm1
; SSE-NEXT:    movaps %xmm7, %xmm2
; SSE-NEXT:    movaps %xmm4, %xmm3
; SSE-NEXT:    retq
;
; XOP-LABEL: bitselect_v8i64_rm:
; XOP:       # %bb.0:
; XOP-NEXT:    vmovdqa (%rdi), %ymm2
; XOP-NEXT:    vmovdqa 32(%rdi), %ymm3
; XOP-NEXT:    vbroadcastf128 {{.*#+}} ymm4 = [18446744065119617022,18446744073709551612,18446744065119617022,18446744073709551612]
; XOP-NEXT:    # ymm4 = mem[0,1,0,1]
; XOP-NEXT:    vpcmov %ymm4, %ymm0, %ymm2, %ymm0
; XOP-NEXT:    vpcmov %ymm4, %ymm1, %ymm3, %ymm1
; XOP-NEXT:    retq
;
; AVX1-LABEL: bitselect_v8i64_rm:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vbroadcastf128 {{.*#+}} ymm2 = [18446744065119617022,18446744073709551612,18446744065119617022,18446744073709551612]
; AVX1-NEXT:    # ymm2 = mem[0,1,0,1]
; AVX1-NEXT:    vandps 32(%rdi), %ymm2, %ymm3
; AVX1-NEXT:    vandps (%rdi), %ymm2, %ymm4
; AVX1-NEXT:    vandnps %ymm0, %ymm2, %ymm0
; AVX1-NEXT:    vorps %ymm0, %ymm4, %ymm0
; AVX1-NEXT:    vandnps %ymm1, %ymm2, %ymm1
; AVX1-NEXT:    vorps %ymm1, %ymm3, %ymm1
; AVX1-NEXT:    retq
;
; AVX2-LABEL: bitselect_v8i64_rm:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vbroadcastf128 {{.*#+}} ymm2 = [18446744065119617022,18446744073709551612,18446744065119617022,18446744073709551612]
; AVX2-NEXT:    # ymm2 = mem[0,1,0,1]
; AVX2-NEXT:    vandps 32(%rdi), %ymm2, %ymm3
; AVX2-NEXT:    vandps (%rdi), %ymm2, %ymm4
; AVX2-NEXT:    vandnps %ymm0, %ymm2, %ymm0
; AVX2-NEXT:    vorps %ymm0, %ymm4, %ymm0
; AVX2-NEXT:    vandnps %ymm1, %ymm2, %ymm1
; AVX2-NEXT:    vorps %ymm1, %ymm3, %ymm1
; AVX2-NEXT:    retq
;
; AVX512F-LABEL: bitselect_v8i64_rm:
; AVX512F:       # %bb.0:
; AVX512F-NEXT:    vmovdqa64 (%rdi), %zmm1
; AVX512F-NEXT:    vpandq {{.*}}(%rip), %zmm0, %zmm0
; AVX512F-NEXT:    vpandq {{.*}}(%rip), %zmm1, %zmm1
; AVX512F-NEXT:    vporq %zmm0, %zmm1, %zmm0
; AVX512F-NEXT:    retq
  %3 = load <8 x i64>, <8 x i64>* %1
  %4 = and <8 x i64> %0, <i64 8589934593, i64 3, i64 8589934593, i64 3, i64 8589934593, i64 3, i64 8589934593, i64 3>
  %5 = and <8 x i64> %3, <i64 -8589934594, i64 -4, i64 -8589934594, i64 -4, i64 -8589934594, i64 -4, i64 -8589934594, i64 -4>
  %6 = or <8 x i64> %5, %4
  ret <8 x i64> %6
}

define <8 x i64> @bitselect_v8i64_mr(<8 x i64>* nocapture readonly, <8 x i64>) {
; SSE-LABEL: bitselect_v8i64_mr:
; SSE:       # %bb.0:
; SSE-NEXT:    movaps {{.*#+}} xmm4 = [12884901890,4294967296]
; SSE-NEXT:    movaps 48(%rdi), %xmm8
; SSE-NEXT:    andps %xmm4, %xmm8
; SSE-NEXT:    movaps 32(%rdi), %xmm9
; SSE-NEXT:    andps %xmm4, %xmm9
; SSE-NEXT:    movaps 16(%rdi), %xmm7
; SSE-NEXT:    andps %xmm4, %xmm7
; SSE-NEXT:    movaps (%rdi), %xmm6
; SSE-NEXT:    andps %xmm4, %xmm6
; SSE-NEXT:    movaps %xmm4, %xmm5
; SSE-NEXT:    andnps %xmm0, %xmm5
; SSE-NEXT:    orps %xmm6, %xmm5
; SSE-NEXT:    movaps %xmm4, %xmm6
; SSE-NEXT:    andnps %xmm1, %xmm6
; SSE-NEXT:    orps %xmm7, %xmm6
; SSE-NEXT:    movaps %xmm4, %xmm7
; SSE-NEXT:    andnps %xmm2, %xmm7
; SSE-NEXT:    orps %xmm9, %xmm7
; SSE-NEXT:    andnps %xmm3, %xmm4
; SSE-NEXT:    orps %xmm8, %xmm4
; SSE-NEXT:    movaps %xmm5, %xmm0
; SSE-NEXT:    movaps %xmm6, %xmm1
; SSE-NEXT:    movaps %xmm7, %xmm2
; SSE-NEXT:    movaps %xmm4, %xmm3
; SSE-NEXT:    retq
;
; XOP-LABEL: bitselect_v8i64_mr:
; XOP:       # %bb.0:
; XOP-NEXT:    vmovdqa (%rdi), %ymm2
; XOP-NEXT:    vmovdqa 32(%rdi), %ymm3
; XOP-NEXT:    vbroadcastf128 {{.*#+}} ymm4 = [12884901890,4294967296,12884901890,4294967296]
; XOP-NEXT:    # ymm4 = mem[0,1,0,1]
; XOP-NEXT:    vpcmov %ymm4, %ymm0, %ymm2, %ymm0
; XOP-NEXT:    vpcmov %ymm4, %ymm1, %ymm3, %ymm1
; XOP-NEXT:    retq
;
; AVX1-LABEL: bitselect_v8i64_mr:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vbroadcastf128 {{.*#+}} ymm2 = [12884901890,4294967296,12884901890,4294967296]
; AVX1-NEXT:    # ymm2 = mem[0,1,0,1]
; AVX1-NEXT:    vandps 32(%rdi), %ymm2, %ymm3
; AVX1-NEXT:    vandps (%rdi), %ymm2, %ymm4
; AVX1-NEXT:    vandnps %ymm0, %ymm2, %ymm0
; AVX1-NEXT:    vorps %ymm0, %ymm4, %ymm0
; AVX1-NEXT:    vandnps %ymm1, %ymm2, %ymm1
; AVX1-NEXT:    vorps %ymm1, %ymm3, %ymm1
; AVX1-NEXT:    retq
;
; AVX2-LABEL: bitselect_v8i64_mr:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vbroadcastf128 {{.*#+}} ymm2 = [12884901890,4294967296,12884901890,4294967296]
; AVX2-NEXT:    # ymm2 = mem[0,1,0,1]
; AVX2-NEXT:    vandps 32(%rdi), %ymm2, %ymm3
; AVX2-NEXT:    vandps (%rdi), %ymm2, %ymm4
; AVX2-NEXT:    vandnps %ymm0, %ymm2, %ymm0
; AVX2-NEXT:    vorps %ymm0, %ymm4, %ymm0
; AVX2-NEXT:    vandnps %ymm1, %ymm2, %ymm1
; AVX2-NEXT:    vorps %ymm1, %ymm3, %ymm1
; AVX2-NEXT:    retq
;
; AVX512F-LABEL: bitselect_v8i64_mr:
; AVX512F:       # %bb.0:
; AVX512F-NEXT:    vmovdqa64 (%rdi), %zmm1
; AVX512F-NEXT:    vpandq {{.*}}(%rip), %zmm1, %zmm1
; AVX512F-NEXT:    vpandq {{.*}}(%rip), %zmm0, %zmm0
; AVX512F-NEXT:    vporq %zmm0, %zmm1, %zmm0
; AVX512F-NEXT:    retq
  %3 = load <8 x i64>, <8 x i64>* %0
  %4 = and <8 x i64> %3, <i64 12884901890, i64 4294967296, i64 12884901890, i64 4294967296, i64 12884901890, i64 4294967296, i64 12884901890, i64 4294967296>
  %5 = and <8 x i64> %1, <i64 -12884901891, i64 -4294967297, i64 -12884901891, i64 -4294967297, i64 -12884901891, i64 -4294967297, i64 -12884901891, i64 -4294967297>
  %6 = or <8 x i64> %4, %5
  ret <8 x i64> %6
}

define <8 x i64> @bitselect_v8i64_mm(<8 x i64>* nocapture readonly, <8 x i64>* nocapture readonly) {
; SSE-LABEL: bitselect_v8i64_mm:
; SSE:       # %bb.0:
; SSE-NEXT:    movaps {{.*#+}} xmm3 = [18446744073709551612,18446744065119617022]
; SSE-NEXT:    movaps 48(%rsi), %xmm4
; SSE-NEXT:    andps %xmm3, %xmm4
; SSE-NEXT:    movaps 32(%rsi), %xmm5
; SSE-NEXT:    andps %xmm3, %xmm5
; SSE-NEXT:    movaps 16(%rsi), %xmm2
; SSE-NEXT:    andps %xmm3, %xmm2
; SSE-NEXT:    movaps (%rsi), %xmm1
; SSE-NEXT:    andps %xmm3, %xmm1
; SSE-NEXT:    movaps %xmm3, %xmm0
; SSE-NEXT:    andnps (%rdi), %xmm0
; SSE-NEXT:    orps %xmm1, %xmm0
; SSE-NEXT:    movaps %xmm3, %xmm1
; SSE-NEXT:    andnps 16(%rdi), %xmm1
; SSE-NEXT:    orps %xmm2, %xmm1
; SSE-NEXT:    movaps %xmm3, %xmm2
; SSE-NEXT:    andnps 32(%rdi), %xmm2
; SSE-NEXT:    orps %xmm5, %xmm2
; SSE-NEXT:    andnps 48(%rdi), %xmm3
; SSE-NEXT:    orps %xmm4, %xmm3
; SSE-NEXT:    retq
;
; XOP-LABEL: bitselect_v8i64_mm:
; XOP:       # %bb.0:
; XOP-NEXT:    vmovdqa (%rsi), %ymm0
; XOP-NEXT:    vmovdqa 32(%rsi), %ymm1
; XOP-NEXT:    vbroadcastf128 {{.*#+}} ymm2 = [18446744073709551612,18446744065119617022,18446744073709551612,18446744065119617022]
; XOP-NEXT:    # ymm2 = mem[0,1,0,1]
; XOP-NEXT:    vpcmov %ymm2, (%rdi), %ymm0, %ymm0
; XOP-NEXT:    vpcmov %ymm2, 32(%rdi), %ymm1, %ymm1
; XOP-NEXT:    retq
;
; AVX1-LABEL: bitselect_v8i64_mm:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = [18446744073709551612,18446744065119617022,18446744073709551612,18446744065119617022]
; AVX1-NEXT:    # ymm1 = mem[0,1,0,1]
; AVX1-NEXT:    vandps 32(%rsi), %ymm1, %ymm2
; AVX1-NEXT:    vandps (%rsi), %ymm1, %ymm0
; AVX1-NEXT:    vandnps (%rdi), %ymm1, %ymm3
; AVX1-NEXT:    vorps %ymm3, %ymm0, %ymm0
; AVX1-NEXT:    vandnps 32(%rdi), %ymm1, %ymm1
; AVX1-NEXT:    vorps %ymm1, %ymm2, %ymm1
; AVX1-NEXT:    retq
;
; AVX2-LABEL: bitselect_v8i64_mm:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = [18446744073709551612,18446744065119617022,18446744073709551612,18446744065119617022]
; AVX2-NEXT:    # ymm1 = mem[0,1,0,1]
; AVX2-NEXT:    vandps 32(%rsi), %ymm1, %ymm2
; AVX2-NEXT:    vandps (%rsi), %ymm1, %ymm0
; AVX2-NEXT:    vandnps (%rdi), %ymm1, %ymm3
; AVX2-NEXT:    vorps %ymm3, %ymm0, %ymm0
; AVX2-NEXT:    vandnps 32(%rdi), %ymm1, %ymm1
; AVX2-NEXT:    vorps %ymm1, %ymm2, %ymm1
; AVX2-NEXT:    retq
;
; AVX512F-LABEL: bitselect_v8i64_mm:
; AVX512F:       # %bb.0:
; AVX512F-NEXT:    vmovdqa64 (%rdi), %zmm0
; AVX512F-NEXT:    vmovdqa64 (%rsi), %zmm1
; AVX512F-NEXT:    vpandq {{.*}}(%rip), %zmm0, %zmm0
; AVX512F-NEXT:    vpandq {{.*}}(%rip), %zmm1, %zmm1
; AVX512F-NEXT:    vporq %zmm0, %zmm1, %zmm0
; AVX512F-NEXT:    retq
  %3 = load <8 x i64>, <8 x i64>* %0
  %4 = load <8 x i64>, <8 x i64>* %1
  %5 = and <8 x i64> %3, <i64 3, i64 8589934593, i64 3, i64 8589934593, i64 3, i64 8589934593, i64 3, i64 8589934593>
  %6 = and <8 x i64> %4, <i64 -4, i64 -8589934594, i64 -4, i64 -8589934594, i64 -4, i64 -8589934594, i64 -4, i64 -8589934594>
  %7 = or <8 x i64> %6, %5
  ret <8 x i64> %7
}

; Check that mask registers don't get canonicalized.
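; On AVX512F the compares produce k-register masks and the select itself stays
; there (masked vptestmd/vptestnmd plus korw); vpternlogd is only used at the
; end to turn the final mask back into a <4 x i1> vector value.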
define <4 x i1> @bitselect_v4i1_loop(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-LABEL: bitselect_v4i1_loop:
; SSE:       # %bb.0: # %bb
; SSE-NEXT:    pxor %xmm2, %xmm2
; SSE-NEXT:    pcmpeqd %xmm0, %xmm2
; SSE-NEXT:    movdqa {{.*#+}} xmm0 = [12,12,12,12]
; SSE-NEXT:    pcmpeqd %xmm1, %xmm0
; SSE-NEXT:    pcmpeqd {{.*}}(%rip), %xmm1
; SSE-NEXT:    pand %xmm2, %xmm1
; SSE-NEXT:    pandn %xmm0, %xmm2
; SSE-NEXT:    por %xmm1, %xmm2
; SSE-NEXT:    movdqa %xmm2, %xmm0
; SSE-NEXT:    retq
;
; XOP-LABEL: bitselect_v4i1_loop:
; XOP:       # %bb.0: # %bb
; XOP-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; XOP-NEXT:    vpcomneqd %xmm2, %xmm0, %xmm0
; XOP-NEXT:    vpcomeqd {{.*}}(%rip), %xmm1, %xmm2
; XOP-NEXT:    vpcomeqd {{.*}}(%rip), %xmm1, %xmm1
; XOP-NEXT:    vblendvps %xmm0, %xmm2, %xmm1, %xmm0
; XOP-NEXT:    retq
;
; AVX1-LABEL: bitselect_v4i1_loop:
; AVX1:       # %bb.0: # %bb
; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT:    vpcmpeqd %xmm2, %xmm0, %xmm0
; AVX1-NEXT:    vpcmpeqd {{.*}}(%rip), %xmm1, %xmm2
; AVX1-NEXT:    vpcmpeqd {{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT:    vblendvps %xmm0, %xmm1, %xmm2, %xmm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: bitselect_v4i1_loop:
; AVX2:       # %bb.0: # %bb
; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm0, %xmm0
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [12,12,12,12]
; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm1, %xmm2
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [15,15,15,15]
; AVX2-NEXT:    vpcmpeqd %xmm3, %xmm1, %xmm1
; AVX2-NEXT:    vblendvps %xmm0, %xmm1, %xmm2, %xmm0
; AVX2-NEXT:    retq
;
; AVX512F-LABEL: bitselect_v4i1_loop:
; AVX512F:       # %bb.0: # %bb
; AVX512F-NEXT:    # kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [12,12,12,12]
; AVX512F-NEXT:    vpcmpeqd %zmm2, %zmm1, %k1
; AVX512F-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [15,15,15,15]
; AVX512F-NEXT:    vpcmpeqd %zmm2, %zmm1, %k2
; AVX512F-NEXT:    vptestnmd %zmm0, %zmm0, %k0 {%k2}
; AVX512F-NEXT:    vptestmd %zmm0, %zmm0, %k1 {%k1}
; AVX512F-NEXT:    korw %k0, %k1, %k1
; AVX512F-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512F-NEXT:    vzeroupper
; AVX512F-NEXT:    retq
bb:
  %tmp = icmp ne <4 x i32> %a0, zeroinitializer
  %tmp2 = icmp eq <4 x i32> %a1, <i32 12, i32 12, i32 12, i32 12>
  %tmp3 = icmp eq <4 x i32> %a1, <i32 15, i32 15, i32 15, i32 15>
  %tmp4 = select <4 x i1> %tmp, <4 x i1> %tmp2, <4 x i1> %tmp3
  ret <4 x i1> %tmp4
}