; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 < %s | FileCheck %s --check-prefixes=CHECK,SSE
; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+avx < %s | FileCheck %s --check-prefixes=CHECK,AVX,AVX1
; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+avx2 < %s | FileCheck %s --check-prefixes=CHECK,AVX,AVX2
; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+avx512dq,+avx512bw < %s | FileCheck %s --check-prefixes=CHECK,AVX,AVX512

define i32 @and_self(i32 %x) {
; CHECK-LABEL: and_self:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl %edi, %eax
; CHECK-NEXT:    retq
  %and = and i32 %x, %x
  ret i32 %and
}

define <4 x i32> @and_self_vec(<4 x i32> %x) {
; CHECK-LABEL: and_self_vec:
; CHECK:       # %bb.0:
; CHECK-NEXT:    retq
  %and = and <4 x i32> %x, %x
  ret <4 x i32> %and
}

;
; Verify that the DAGCombiner is able to fold a vector AND into a blend
; if one of the operands to the AND is a vector of all constants, and each
; constant element is either zero or all-ones.
;
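
; Illustrative sketch of the underlying equivalence (a hypothetical addition,
; not part of the autogenerated checks): an AND with a 0/-1 lane mask selects
; each lane from either %x or zero, i.e. it is exactly the shufflevector
; against zeroinitializer that lowers to a single blendps.
define <4 x i32> @blend_equivalence_sketch(<4 x i32> %x) {
  ; lanes 0 and 2 come from %x, lanes 1 and 3 from the zero vector, matching
  ; and <4 x i32> %x, <i32 -1, i32 0, i32 -1, i32 0>
  %s = shufflevector <4 x i32> %x, <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
  ret <4 x i32> %s
}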

define <4 x i32> @test1(<4 x i32> %A) {
; SSE-LABEL: test1:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm1, %xmm1
; SSE-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; SSE-NEXT:    retq
;
; AVX-LABEL: test1:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX-NEXT:    retq
  %1 = and <4 x i32> %A, <i32 -1, i32 0, i32 0, i32 0>
  ret <4 x i32> %1
}

define <4 x i32> @test2(<4 x i32> %A) {
; SSE-LABEL: test2:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm1, %xmm1
; SSE-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
; SSE-NEXT:    retq
;
; AVX-LABEL: test2:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
; AVX-NEXT:    retq
  %1 = and <4 x i32> %A, <i32 0, i32 -1, i32 0, i32 0>
  ret <4 x i32> %1
}

define <4 x i32> @test3(<4 x i32> %A) {
; SSE-LABEL: test3:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm1, %xmm1
; SSE-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3]
; SSE-NEXT:    retq
;
; AVX-LABEL: test3:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3]
; AVX-NEXT:    retq
  %1 = and <4 x i32> %A, <i32 0, i32 0, i32 -1, i32 0>
  ret <4 x i32> %1
}

define <4 x i32> @test4(<4 x i32> %A) {
; SSE-LABEL: test4:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm1, %xmm1
; SSE-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
; SSE-NEXT:    retq
;
; AVX-LABEL: test4:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
; AVX-NEXT:    retq
  %1 = and <4 x i32> %A, <i32 0, i32 0, i32 0, i32 -1>
  ret <4 x i32> %1
}

define <4 x i32> @test5(<4 x i32> %A) {
; SSE-LABEL: test5:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm1, %xmm1
; SSE-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; SSE-NEXT:    retq
;
; AVX-LABEL: test5:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; AVX-NEXT:    retq
  %1 = and <4 x i32> %A, <i32 -1, i32 0, i32 -1, i32 0>
  ret <4 x i32> %1
}

define <4 x i32> @test6(<4 x i32> %A) {
; SSE-LABEL: test6:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm1, %xmm1
; SSE-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
; SSE-NEXT:    retq
;
; AVX-LABEL: test6:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
; AVX-NEXT:    retq
  %1 = and <4 x i32> %A, <i32 0, i32 -1, i32 0, i32 -1>
  ret <4 x i32> %1
}

define <4 x i32> @test7(<4 x i32> %A) {
; SSE-LABEL: test7:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm1, %xmm1
; SSE-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; SSE-NEXT:    retq
;
; AVX-LABEL: test7:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; AVX-NEXT:    retq
  %1 = and <4 x i32> %A, <i32 0, i32 0, i32 -1, i32 -1>
  ret <4 x i32> %1
}

define <4 x i32> @test8(<4 x i32> %A) {
; SSE-LABEL: test8:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm1, %xmm1
; SSE-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3]
; SSE-NEXT:    retq
;
; AVX-LABEL: test8:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3]
; AVX-NEXT:    retq
  %1 = and <4 x i32> %A, <i32 -1, i32 0, i32 0, i32 -1>
  ret <4 x i32> %1
}

define <4 x i32> @test9(<4 x i32> %A) {
; SSE-LABEL: test9:
; SSE:       # %bb.0:
; SSE-NEXT:    movq {{.*#+}} xmm0 = xmm0[0],zero
; SSE-NEXT:    retq
;
; AVX-LABEL: test9:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovq {{.*#+}} xmm0 = xmm0[0],zero
; AVX-NEXT:    retq
  %1 = and <4 x i32> %A, <i32 -1, i32 -1, i32 0, i32 0>
  ret <4 x i32> %1
}

define <4 x i32> @test10(<4 x i32> %A) {
; SSE-LABEL: test10:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm1, %xmm1
; SSE-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3]
; SSE-NEXT:    retq
;
; AVX-LABEL: test10:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3]
; AVX-NEXT:    retq
  %1 = and <4 x i32> %A, <i32 0, i32 -1, i32 -1, i32 0>
  ret <4 x i32> %1
}

define <4 x i32> @test11(<4 x i32> %A) {
; SSE-LABEL: test11:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm1, %xmm1
; SSE-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; SSE-NEXT:    retq
;
; AVX-LABEL: test11:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX-NEXT:    retq
  %1 = and <4 x i32> %A, <i32 0, i32 -1, i32 -1, i32 -1>
  ret <4 x i32> %1
}

define <4 x i32> @test12(<4 x i32> %A) {
; SSE-LABEL: test12:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm1, %xmm1
; SSE-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
; SSE-NEXT:    retq
;
; AVX-LABEL: test12:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
; AVX-NEXT:    retq
  %1 = and <4 x i32> %A, <i32 -1, i32 -1, i32 -1, i32 0>
  ret <4 x i32> %1
}

define <4 x i32> @test13(<4 x i32> %A) {
; SSE-LABEL: test13:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm1, %xmm1
; SSE-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
; SSE-NEXT:    retq
;
; AVX-LABEL: test13:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
; AVX-NEXT:    retq
  %1 = and <4 x i32> %A, <i32 -1, i32 -1, i32 0, i32 -1>
  ret <4 x i32> %1
}

define <4 x i32> @test14(<4 x i32> %A) {
; SSE-LABEL: test14:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm1, %xmm1
; SSE-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
; SSE-NEXT:    retq
;
; AVX-LABEL: test14:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
; AVX-NEXT:    retq
  %1 = and <4 x i32> %A, <i32 -1, i32 0, i32 -1, i32 -1>
  ret <4 x i32> %1
}

; X & undef must fold to 0. So lane 0 must choose from the zero vector.

define <4 x i32> @undef_lane(<4 x i32> %x) {
; SSE-LABEL: undef_lane:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm1, %xmm1
; SSE-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
; SSE-NEXT:    retq
;
; AVX-LABEL: undef_lane:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
; AVX-NEXT:    retq
  %r = and <4 x i32> %x, <i32 undef, i32 4294967295, i32 0, i32 4294967295>
  ret <4 x i32> %r
}
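
; Scalar sketch of the same rule (illustrative, unchecked; a hypothetical
; addition): the compiler may pick any value for undef, and choosing 0 lets
; the whole AND fold to the constant 0, as in undef_lane above.
define i32 @and_undef_sketch(i32 %x) {
  %r = and i32 %x, undef
  ret i32 %r
}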

define <4 x i32> @test15(<4 x i32> %A, <4 x i32> %B) {
; SSE-LABEL: test15:
; SSE:       # %bb.0:
; SSE-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
; SSE-NEXT:    retq
;
; AVX-LABEL: test15:
; AVX:       # %bb.0:
; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
; AVX-NEXT:    retq
  %1 = and <4 x i32> %A, <i32 -1, i32 0, i32 -1, i32 -1>
  %2 = and <4 x i32> %B, <i32 0, i32 -1, i32 0, i32 0>
  %3 = or <4 x i32> %1, %2
  ret <4 x i32> %3
}

define <4 x i32> @test16(<4 x i32> %A, <4 x i32> %B) {
; SSE-LABEL: test16:
; SSE:       # %bb.0:
; SSE-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; SSE-NEXT:    retq
;
; AVX-LABEL: test16:
; AVX:       # %bb.0:
; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; AVX-NEXT:    retq
  %1 = and <4 x i32> %A, <i32 -1, i32 0, i32 -1, i32 0>
  %2 = and <4 x i32> %B, <i32 0, i32 -1, i32 0, i32 -1>
  %3 = or <4 x i32> %1, %2
  ret <4 x i32> %3
}

define <4 x i32> @test17(<4 x i32> %A, <4 x i32> %B) {
; SSE-LABEL: test17:
; SSE:       # %bb.0:
; SSE-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
; SSE-NEXT:    retq
;
; AVX-LABEL: test17:
; AVX:       # %bb.0:
; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
; AVX-NEXT:    retq
  %1 = and <4 x i32> %A, <i32 0, i32 -1, i32 0, i32 -1>
  %2 = and <4 x i32> %B, <i32 -1, i32 0, i32 -1, i32 0>
  %3 = or <4 x i32> %1, %2
  ret <4 x i32> %3
}

;
; fold (and (or x, C), D) -> D if (C & D) == D
;
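
; Scalar sketch of the rule (illustrative, unchecked; a hypothetical
; addition): with C = 255 and D = 8 we have (C & D) == D, so every bit kept
; by the AND is already set by the OR and the result is the constant 8.
define i32 @and_or_scalar_sketch(i32 %x) {
  %or = or i32 %x, 255
  %and = and i32 %or, 8
  ret i32 %and
}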

define <2 x i64> @and_or_v2i64(<2 x i64> %a0) {
; SSE-LABEL: and_or_v2i64:
; SSE:       # %bb.0:
; SSE-NEXT:    movaps {{.*#+}} xmm0 = [8,8]
; SSE-NEXT:    retq
;
; AVX-LABEL: and_or_v2i64:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovddup {{.*#+}} xmm0 = [8,8]
; AVX-NEXT:    # xmm0 = mem[0,0]
; AVX-NEXT:    retq
  %1 = or <2 x i64> %a0, <i64 255, i64 255>
  %2 = and <2 x i64> %1, <i64 8, i64 8>
  ret <2 x i64> %2
}

define <4 x i32> @and_or_v4i32(<4 x i32> %a0) {
; SSE-LABEL: and_or_v4i32:
; SSE:       # %bb.0:
; SSE-NEXT:    movaps {{.*#+}} xmm0 = [3,3,3,3]
; SSE-NEXT:    retq
;
; AVX-LABEL: and_or_v4i32:
; AVX:       # %bb.0:
; AVX-NEXT:    vbroadcastss {{.*#+}} xmm0 = [3,3,3,3]
; AVX-NEXT:    retq
  %1 = or <4 x i32> %a0, <i32 15, i32 15, i32 15, i32 15>
  %2 = and <4 x i32> %1, <i32 3, i32 3, i32 3, i32 3>
  ret <4 x i32> %2
}

define <8 x i16> @and_or_v8i16(<8 x i16> %a0) {
; SSE-LABEL: and_or_v8i16:
; SSE:       # %bb.0:
; SSE-NEXT:    movaps {{.*#+}} xmm0 = [15,7,3,1,14,10,2,32767]
; SSE-NEXT:    retq
;
; AVX-LABEL: and_or_v8i16:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [15,7,3,1,14,10,2,32767]
; AVX-NEXT:    retq
  %1 = or <8 x i16> %a0, <i16 255, i16 127, i16 63, i16 31, i16 15, i16 31, i16 63, i16 -1>
  %2 = and <8 x i16> %1, <i16 15, i16 7, i16 3, i16 1, i16 14, i16 10, i16 2, i16 32767>
  ret <8 x i16> %2
}

;
; Check we merge and(ext(and(x,c1)),c2) before an and gets folded to a shuffle clear mask
;
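
; Narrow sketch of the pattern (illustrative, unchecked; a hypothetical
; addition): the inner i16 mask and the outer i32 mask can merge into one
; constant mask on the extended type, before the outer 0/-1 mask is turned
; into a shuffle clear mask.
define <4 x i32> @merge_ext_and_sketch(<4 x i16> %x) {
  %m = and <4 x i16> %x, <i16 -1, i16 3, i16 7, i16 15>
  %e = sext <4 x i16> %m to <4 x i32>
  %r = and <4 x i32> %e, <i32 -1, i32 0, i32 -1, i32 0>
  ret <4 x i32> %r
}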

define <8 x i32> @clear_sext_and(<8 x i16> %x) {
; SSE-LABEL: clear_sext_and:
; SSE:       # %bb.0:
; SSE-NEXT:    pmovsxwd %xmm0, %xmm2
; SSE-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; SSE-NEXT:    pmovsxwd %xmm0, %xmm1
; SSE-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE-NEXT:    movdqa %xmm2, %xmm0
; SSE-NEXT:    retq
;
; AVX1-LABEL: clear_sext_and:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpmovsxwd %xmm0, %xmm1
; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; AVX1-NEXT:    vpmovsxwd %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: clear_sext_and:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpmovsxwd %xmm0, %ymm0
; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: clear_sext_and:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpmovsxwd %xmm0, %ymm0
; AVX512-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; AVX512-NEXT:    retq
  %1 = and <8 x i16> %x, <i16 -1, i16 3, i16 7, i16 15, i16 31, i16 63, i16 127, i16 -1>
  %2 = sext <8 x i16> %1 to <8 x i32>
  %3 = and <8 x i32> %2, <i32 -1, i32 0, i32 -1, i32 0, i32 0, i32 -1, i32 -1, i32 -1>
  ret <8 x i32> %3
}

define <8 x i32> @clear_zext_and(<8 x i16> %x) {
; SSE-LABEL: clear_zext_and:
; SSE:       # %bb.0:
; SSE-NEXT:    movdqa %xmm0, %xmm1
; SSE-NEXT:    pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; SSE-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
; SSE-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX1-LABEL: clear_zext_and:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm0[4,4,5,5,6,6,7,7]
; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: clear_zext_and:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: clear_zext_and:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; AVX512-NEXT:    retq
  %1 = and <8 x i16> %x, <i16 -1, i16 3, i16 7, i16 15, i16 31, i16 63, i16 127, i16 -1>
  %2 = zext <8 x i16> %1 to <8 x i32>
  %3 = and <8 x i32> %2, <i32 -1, i32 0, i32 -1, i32 0, i32 0, i32 -1, i32 -1, i32 -1>
  ret <8 x i32> %3
}

;
; known bits folding
;

define <2 x i64> @and_or_zext_v2i32(<2 x i32> %a0) {
; SSE-LABEL: and_or_zext_v2i32:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: and_or_zext_v2i32:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = zext <2 x i32> %a0 to <2 x i64>
  %2 = or <2 x i64> %1, <i64 1, i64 1>
  %3 = and <2 x i64> %2, <i64 4294967296, i64 4294967296>
  ret <2 x i64> %3
}

define <4 x i32> @and_or_zext_v4i16(<4 x i16> %a0) {
; SSE-LABEL: and_or_zext_v4i16:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: and_or_zext_v4i16:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = zext <4 x i16> %a0 to <4 x i32>
  %2 = or <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
  %3 = and <4 x i32> %2, <i32 65536, i32 65536, i32 65536, i32 65536>
  ret <4 x i32> %3
}

;
; known sign bits folding
;
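
; Scalar sketch (illustrative, unchecked; a hypothetical addition): after an
; ashr by 15 every bit of the i16 equals the sign bit, so masking with 1 is
; equivalent to a single logical shift right by 15.
define i16 @ashr_mask1_sketch(i16 %a0) {
  %1 = ashr i16 %a0, 15
  %2 = and i16 %1, 1
  ret i16 %2
}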

define <8 x i16> @ashr_mask1_v8i16(<8 x i16> %a0) {
; SSE-LABEL: ashr_mask1_v8i16:
; SSE:       # %bb.0:
; SSE-NEXT:    psrlw $15, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: ashr_mask1_v8i16:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsrlw $15, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = ashr <8 x i16> %a0, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
  %2 = and <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  ret <8 x i16> %2
}

define <4 x i32> @ashr_mask7_v4i32(<4 x i32> %a0) {
; SSE-LABEL: ashr_mask7_v4i32:
; SSE:       # %bb.0:
; SSE-NEXT:    psrad $31, %xmm0
; SSE-NEXT:    psrld $29, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: ashr_mask7_v4i32:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsrad $31, %xmm0, %xmm0
; AVX-NEXT:    vpsrld $29, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = ashr <4 x i32> %a0, <i32 31, i32 31, i32 31, i32 31>
  %2 = and <4 x i32> %1, <i32 7, i32 7, i32 7, i32 7>
  ret <4 x i32> %2
}

;
; SimplifyDemandedBits
;
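
; Scalar sketch (illustrative, unchecked; a hypothetical addition): lshr by 1
; already clears bit 7, so a mask that only re-clears known-zero bits is
; redundant and SimplifyDemandedBits can drop it.
define i8 @demanded_bits_sketch(i8 %a0) {
  %1 = lshr i8 %a0, 1
  %2 = and i8 %1, 127
  ret i8 %2
}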

; PR34620 - redundant PAND after vector shift of a byte vector (PSRLW)
define <16 x i8> @PR34620(<16 x i8> %a0, <16 x i8> %a1) {
; SSE-LABEL: PR34620:
; SSE:       # %bb.0:
; SSE-NEXT:    psrlw $1, %xmm0
; SSE-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    paddb %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX1-LABEL: PR34620:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpsrlw $1, %xmm0, %xmm0
; AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX1-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: PR34620:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpsrlw $1, %xmm0, %xmm0
; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX2-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: PR34620:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpsrlw $1, %xmm0, %xmm0
; AVX512-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
; AVX512-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    retq
  %1 = lshr <16 x i8> %a0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %2 = and <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %3 = add <16 x i8> %2, %a1
  ret <16 x i8> %3
}

;
; Simplify and with a broadcasted negated scalar
;
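
; Minimal form of the pattern these tests exercise (illustrative, unchecked;
; a hypothetical addition): the NOT is pulled through the broadcast so the
; AND can lower to a single andn/pandn against the un-negated broadcast.
define <4 x i32> @neg_broadcast_sketch(i32 %s, <4 x i32> %v) {
  %n = xor i32 %s, -1
  %i = insertelement <4 x i32> undef, i32 %n, i64 0
  %b = shufflevector <4 x i32> %i, <4 x i32> poison, <4 x i32> zeroinitializer
  %r = and <4 x i32> %b, %v
  ret <4 x i32> %r
}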

define <8 x i64> @neg_scalar_broadcast_v8i64_arg(i64 %a0, <8 x i64> %a1) {
; SSE-LABEL: neg_scalar_broadcast_v8i64_arg:
; SSE:       # %bb.0:
; SSE-NEXT:    notq %rdi
; SSE-NEXT:    movq %rdi, %xmm4
; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[0,1,0,1]
; SSE-NEXT:    pand %xmm4, %xmm0
; SSE-NEXT:    pand %xmm4, %xmm1
; SSE-NEXT:    pand %xmm4, %xmm2
; SSE-NEXT:    pand %xmm4, %xmm3
; SSE-NEXT:    retq
;
; AVX1-LABEL: neg_scalar_broadcast_v8i64_arg:
; AVX1:       # %bb.0:
; AVX1-NEXT:    notq %rdi
; AVX1-NEXT:    vmovq %rdi, %xmm2
; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,1,0,1]
; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm2, %ymm2
; AVX1-NEXT:    vandps %ymm0, %ymm2, %ymm0
; AVX1-NEXT:    vandps %ymm1, %ymm2, %ymm1
; AVX1-NEXT:    retq
;
; AVX2-LABEL: neg_scalar_broadcast_v8i64_arg:
; AVX2:       # %bb.0:
; AVX2-NEXT:    notq %rdi
; AVX2-NEXT:    vmovq %rdi, %xmm2
; AVX2-NEXT:    vpbroadcastq %xmm2, %ymm2
; AVX2-NEXT:    vpand %ymm0, %ymm2, %ymm0
; AVX2-NEXT:    vpand %ymm1, %ymm2, %ymm1
; AVX2-NEXT:    retq
;
; AVX512-LABEL: neg_scalar_broadcast_v8i64_arg:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpbroadcastq %rdi, %zmm1
; AVX512-NEXT:    vpandnq %zmm0, %zmm1, %zmm0
; AVX512-NEXT:    retq
  %1 = xor i64 %a0, -1
  %2 = insertelement <8 x i64> undef, i64 %1, i64 0
  %3 = shufflevector <8 x i64> %2, <8 x i64> poison, <8 x i32> zeroinitializer
  %4 = and <8 x i64> %3, %a1
  ret <8 x i64> %4
}

define <8 x i64> @neg_scalar_broadcast_v8i64(i64 %a0, <2 x i64> %a1) {
; SSE-LABEL: neg_scalar_broadcast_v8i64:
; SSE:       # %bb.0:
; SSE-NEXT:    movdqa %xmm0, %xmm2
; SSE-NEXT:    notq %rdi
; SSE-NEXT:    movq %rdi, %xmm0
; SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[0,1,0,1]
; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,1,0,1]
; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[2,3,2,3]
; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
; SSE-NEXT:    pand %xmm4, %xmm0
; SSE-NEXT:    pand %xmm4, %xmm1
; SSE-NEXT:    pand %xmm4, %xmm2
; SSE-NEXT:    pand %xmm4, %xmm3
; SSE-NEXT:    retq
;
; AVX1-LABEL: neg_scalar_broadcast_v8i64:
; AVX1:       # %bb.0:
; AVX1-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm0[0,1,0,1]
; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[1,0,3,3]
; AVX1-NEXT:    vmovq %rdi, %xmm2
; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,1,0,1]
; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm2, %ymm2
; AVX1-NEXT:    vandnpd %ymm0, %ymm2, %ymm0
; AVX1-NEXT:    vandnpd %ymm1, %ymm2, %ymm1
; AVX1-NEXT:    retq
;
; AVX2-LABEL: neg_scalar_broadcast_v8i64:
; AVX2:       # %bb.0:
; AVX2-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX2-NEXT:    vpermq {{.*#+}} ymm1 = ymm0[0,1,0,0]
; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[1,0,1,1]
; AVX2-NEXT:    vmovq %rdi, %xmm2
; AVX2-NEXT:    vpbroadcastq %xmm2, %ymm2
; AVX2-NEXT:    vpandn %ymm0, %ymm2, %ymm0
; AVX2-NEXT:    vpandn %ymm1, %ymm2, %ymm1
; AVX2-NEXT:    retq
;
; AVX512-LABEL: neg_scalar_broadcast_v8i64:
; AVX512:       # %bb.0:
; AVX512-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512-NEXT:    vpbroadcastq %rdi, %zmm1
; AVX512-NEXT:    vpmovsxbq {{.*#+}} zmm2 = [1,0,1,1,0,1,0,0]
; AVX512-NEXT:    vpermq %zmm0, %zmm2, %zmm0
; AVX512-NEXT:    vpandnq %zmm0, %zmm1, %zmm0
; AVX512-NEXT:    retq
  %1 = xor i64 %a0, -1
  %2 = insertelement <8 x i64> undef, i64 %1, i64 0
  %3 = shufflevector <8 x i64> %2, <8 x i64> poison, <8 x i32> zeroinitializer
  %4 = shufflevector <2 x i64> %a1, <2 x i64> poison, <8 x i32> <i32 1, i32 0, i32 1, i32 1, i32 0, i32 1, i32 0, i32 0>
  %5 = and <8 x i64> %4, %3
  ret <8 x i64> %5
}

define <4 x i64> @neg_scalar_broadcast_v4i64_arg(i64 %a0, <4 x i64> %a1) {
; SSE-LABEL: neg_scalar_broadcast_v4i64_arg:
; SSE:       # %bb.0:
; SSE-NEXT:    notq %rdi
; SSE-NEXT:    movq %rdi, %xmm2
; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,1,0,1]
; SSE-NEXT:    pand %xmm2, %xmm0
; SSE-NEXT:    pand %xmm2, %xmm1
; SSE-NEXT:    retq
;
; AVX1-LABEL: neg_scalar_broadcast_v4i64_arg:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vmovq %rdi, %xmm1
; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm1, %ymm1
; AVX1-NEXT:    vandnps %ymm0, %ymm1, %ymm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: neg_scalar_broadcast_v4i64_arg:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vmovq %rdi, %xmm1
; AVX2-NEXT:    vpbroadcastq %xmm1, %ymm1
; AVX2-NEXT:    vpandn %ymm0, %ymm1, %ymm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: neg_scalar_broadcast_v4i64_arg:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpbroadcastq %rdi, %ymm1
; AVX512-NEXT:    vpandn %ymm0, %ymm1, %ymm0
; AVX512-NEXT:    retq
  %1 = xor i64 %a0, -1
  %2 = insertelement <4 x i64> undef, i64 %1, i64 0
  %3 = shufflevector <4 x i64> %2, <4 x i64> poison, <4 x i32> zeroinitializer
  %4 = and <4 x i64> %3, %a1
  ret <4 x i64> %4
}

define <4 x i64> @neg_scalar_broadcast_v4i64(i64 %a0, <2 x i64> %a1) {
; SSE-LABEL: neg_scalar_broadcast_v4i64:
; SSE:       # %bb.0:
; SSE-NEXT:    notq %rdi
; SSE-NEXT:    movq %rdi, %xmm1
; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[0,1,0,1]
; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE-NEXT:    pand %xmm2, %xmm0
; SSE-NEXT:    pand %xmm2, %xmm1
; SSE-NEXT:    retq
;
; AVX1-LABEL: neg_scalar_broadcast_v4i64:
; AVX1:       # %bb.0:
; AVX1-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX1-NEXT:    vmovq %rdi, %xmm1
; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm1, %ymm1
; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[1,0,3,3]
; AVX1-NEXT:    vandnpd %ymm0, %ymm1, %ymm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: neg_scalar_broadcast_v4i64:
; AVX2:       # %bb.0:
; AVX2-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX2-NEXT:    vmovq %rdi, %xmm1
; AVX2-NEXT:    vpbroadcastq %xmm1, %ymm1
; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[1,0,1,1]
; AVX2-NEXT:    vpandn %ymm0, %ymm1, %ymm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: neg_scalar_broadcast_v4i64:
; AVX512:       # %bb.0:
; AVX512-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX512-NEXT:    vpbroadcastq %rdi, %ymm1
; AVX512-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[1,0,1,1]
; AVX512-NEXT:    vpandn %ymm0, %ymm1, %ymm0
; AVX512-NEXT:    retq
  %1 = xor i64 %a0, -1
  %2 = insertelement <4 x i64> undef, i64 %1, i64 0
  %3 = shufflevector <4 x i64> %2, <4 x i64> poison, <4 x i32> zeroinitializer
  %4 = shufflevector <2 x i64> %a1, <2 x i64> poison, <4 x i32> <i32 1, i32 0, i32 1, i32 1>
  %5 = and <4 x i64> %4, %3
  ret <4 x i64> %5
}

define <2 x i64> @neg_scalar_broadcast_v2i64(i64 %a0, <2 x i64> %a1) {
; SSE-LABEL: neg_scalar_broadcast_v2i64:
; SSE:       # %bb.0:
; SSE-NEXT:    movq %rdi, %xmm1
; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
; SSE-NEXT:    pandn %xmm0, %xmm1
; SSE-NEXT:    movdqa %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX1-LABEL: neg_scalar_broadcast_v2i64:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vmovq %rdi, %xmm1
; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
; AVX1-NEXT:    vpandn %xmm0, %xmm1, %xmm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: neg_scalar_broadcast_v2i64:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vmovq %rdi, %xmm1
; AVX2-NEXT:    vpbroadcastq %xmm1, %xmm1
; AVX2-NEXT:    vpandn %xmm0, %xmm1, %xmm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: neg_scalar_broadcast_v2i64:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpbroadcastq %rdi, %xmm1
; AVX512-NEXT:    vpandn %xmm0, %xmm1, %xmm0
; AVX512-NEXT:    retq
  %1 = xor i64 %a0, -1
  %2 = insertelement <2 x i64> undef, i64 %1, i64 0
  %3 = shufflevector <2 x i64> %2, <2 x i64> poison, <2 x i32> zeroinitializer
  %4 = and <2 x i64> %3, %a1
  ret <2 x i64> %4
}

define <2 x i64> @casted_neg_scalar_broadcast_v2i64(<2 x i32> %a0, <2 x i64> %a1) {
; SSE-LABEL: casted_neg_scalar_broadcast_v2i64:
; SSE:       # %bb.0:
; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; SSE-NEXT:    pandn %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX1-LABEL: casted_neg_scalar_broadcast_v2i64:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,1,0,1]
; AVX1-NEXT:    vandnps %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: casted_neg_scalar_broadcast_v2i64:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX2-NEXT:    vandnps %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: casted_neg_scalar_broadcast_v2i64:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX512-NEXT:    vandnps %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    retq
  %1 = xor <2 x i32> %a0, <i32 -1, i32 -1>
  %2 = bitcast <2 x i32> %1 to i64
  %3 = insertelement <2 x i64> undef, i64 %2, i64 0
  %4 = shufflevector <2 x i64> %3, <2 x i64> poison, <2 x i32> zeroinitializer
  %5 = and <2 x i64> %4, %a1
  ret <2 x i64> %5
}

define <8 x i32> @neg_scalar_broadcast_v8i32(i32 %a0, <8 x i32> %a1) {
; SSE-LABEL: neg_scalar_broadcast_v8i32:
; SSE:       # %bb.0:
; SSE-NEXT:    notl %edi
; SSE-NEXT:    movd %edi, %xmm2
; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; SSE-NEXT:    pand %xmm2, %xmm0
; SSE-NEXT:    pand %xmm2, %xmm1
; SSE-NEXT:    retq
;
; AVX1-LABEL: neg_scalar_broadcast_v8i32:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vmovd %edi, %xmm1
; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm1, %ymm1
; AVX1-NEXT:    vandnps %ymm0, %ymm1, %ymm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: neg_scalar_broadcast_v8i32:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vmovd %edi, %xmm1
; AVX2-NEXT:    vpbroadcastd %xmm1, %ymm1
; AVX2-NEXT:    vpandn %ymm0, %ymm1, %ymm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: neg_scalar_broadcast_v8i32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpbroadcastd %edi, %ymm1
; AVX512-NEXT:    vpandn %ymm0, %ymm1, %ymm0
; AVX512-NEXT:    retq
  %1 = xor i32 %a0, -1
  %2 = insertelement <8 x i32> undef, i32 %1, i64 0
  %3 = shufflevector <8 x i32> %2, <8 x i32> poison, <8 x i32> zeroinitializer
  %4 = and <8 x i32> %3, %a1
  ret <8 x i32> %4
}

define <8 x i16> @neg_scalar_broadcast_v8i16(i16 %a0, <8 x i16> %a1) {
; SSE-LABEL: neg_scalar_broadcast_v8i16:
; SSE:       # %bb.0:
; SSE-NEXT:    movd %edi, %xmm1
; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7]
; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; SSE-NEXT:    pandn %xmm0, %xmm1
; SSE-NEXT:    movdqa %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX1-LABEL: neg_scalar_broadcast_v8i16:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vmovd %edi, %xmm1
; AVX1-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7]
; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; AVX1-NEXT:    vpandn %xmm0, %xmm1, %xmm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: neg_scalar_broadcast_v8i16:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vmovd %edi, %xmm1
; AVX2-NEXT:    vpbroadcastw %xmm1, %xmm1
; AVX2-NEXT:    vpandn %xmm0, %xmm1, %xmm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: neg_scalar_broadcast_v8i16:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpbroadcastw %edi, %xmm1
; AVX512-NEXT:    vpandn %xmm0, %xmm1, %xmm0
; AVX512-NEXT:    retq
  %1 = xor i16 %a0, -1
  %2 = insertelement <8 x i16> undef, i16 %1, i64 0
  %3 = shufflevector <8 x i16> %2, <8 x i16> poison, <8 x i32> zeroinitializer
  %4 = and <8 x i16> %3, %a1
  ret <8 x i16> %4
}

define <16 x i8> @neg_scalar_broadcast_v16i8(i8 %a0, <16 x i8> %a1) {
; SSE-LABEL: neg_scalar_broadcast_v16i8:
; SSE:       # %bb.0:
; SSE-NEXT:    movd %edi, %xmm1
; SSE-NEXT:    pxor %xmm2, %xmm2
; SSE-NEXT:    pshufb %xmm2, %xmm1
; SSE-NEXT:    pandn %xmm0, %xmm1
; SSE-NEXT:    movdqa %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX1-LABEL: neg_scalar_broadcast_v16i8:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vmovd %edi, %xmm1
; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
; AVX1-NEXT:    vpandn %xmm0, %xmm1, %xmm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: neg_scalar_broadcast_v16i8:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vmovd %edi, %xmm1
; AVX2-NEXT:    vpbroadcastb %xmm1, %xmm1
; AVX2-NEXT:    vpandn %xmm0, %xmm1, %xmm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: neg_scalar_broadcast_v16i8:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpbroadcastb %edi, %xmm1
; AVX512-NEXT:    vpandn %xmm0, %xmm1, %xmm0
; AVX512-NEXT:    retq
  %1 = xor i8 %a0, -1
  %2 = insertelement <16 x i8> undef, i8 %1, i64 0
  %3 = shufflevector <16 x i8> %2, <16 x i8> poison, <16 x i32> zeroinitializer
  %4 = and <16 x i8> %3, %a1
  ret <16 x i8> %4
}

define <64 x i8> @neg_scalar_broadcast_v64i8(i8 %a0, <64 x i8> %a1) {
; SSE-LABEL: neg_scalar_broadcast_v64i8:
; SSE:       # %bb.0:
; SSE-NEXT:    notb %dil
; SSE-NEXT:    movzbl %dil, %eax
; SSE-NEXT:    movd %eax, %xmm4
; SSE-NEXT:    pxor %xmm5, %xmm5
; SSE-NEXT:    pshufb %xmm5, %xmm4
; SSE-NEXT:    pand %xmm4, %xmm0
; SSE-NEXT:    pand %xmm4, %xmm1
; SSE-NEXT:    pand %xmm4, %xmm2
; SSE-NEXT:    pand %xmm4, %xmm3
; SSE-NEXT:    retq
;
; AVX1-LABEL: neg_scalar_broadcast_v64i8:
; AVX1:       # %bb.0:
; AVX1-NEXT:    notb %dil
; AVX1-NEXT:    vmovd %edi, %xmm2
; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm2, %ymm2
; AVX1-NEXT:    vandps %ymm0, %ymm2, %ymm0
; AVX1-NEXT:    vandps %ymm1, %ymm2, %ymm1
; AVX1-NEXT:    retq
;
; AVX2-LABEL: neg_scalar_broadcast_v64i8:
; AVX2:       # %bb.0:
; AVX2-NEXT:    notb %dil
; AVX2-NEXT:    vmovd %edi, %xmm2
; AVX2-NEXT:    vpbroadcastb %xmm2, %ymm2
; AVX2-NEXT:    vpand %ymm0, %ymm2, %ymm0
; AVX2-NEXT:    vpand %ymm1, %ymm2, %ymm1
; AVX2-NEXT:    retq
;
; AVX512-LABEL: neg_scalar_broadcast_v64i8:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpbroadcastb %edi, %zmm1
; AVX512-NEXT:    vpandnq %zmm0, %zmm1, %zmm0
; AVX512-NEXT:    retq
  %1 = xor i8 %a0, -1
  %2 = insertelement <64 x i8> undef, i8 %1, i64 0
  %3 = shufflevector <64 x i8> %2, <64 x i8> poison, <64 x i32> zeroinitializer
  %4 = and <64 x i8> %3, %a1
  ret <64 x i8> %4
}

define <8 x i64> @neg_scalar_broadcast_v64i8_v8i64(i8 %a0, <8 x i64> %a1) {
; SSE-LABEL: neg_scalar_broadcast_v64i8_v8i64:
; SSE:       # %bb.0:
; SSE-NEXT:    notb %dil
; SSE-NEXT:    movzbl %dil, %eax
; SSE-NEXT:    movd %eax, %xmm4
; SSE-NEXT:    pxor %xmm5, %xmm5
; SSE-NEXT:    pshufb %xmm5, %xmm4
; SSE-NEXT:    pand %xmm4, %xmm0
; SSE-NEXT:    pand %xmm4, %xmm1
; SSE-NEXT:    pand %xmm4, %xmm2
; SSE-NEXT:    pand %xmm4, %xmm3
; SSE-NEXT:    retq
;
; AVX1-LABEL: neg_scalar_broadcast_v64i8_v8i64:
; AVX1:       # %bb.0:
; AVX1-NEXT:    notb %dil
; AVX1-NEXT:    vmovd %edi, %xmm2
; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm2, %ymm2
; AVX1-NEXT:    vandps %ymm0, %ymm2, %ymm0
; AVX1-NEXT:    vandps %ymm1, %ymm2, %ymm1
; AVX1-NEXT:    retq
;
; AVX2-LABEL: neg_scalar_broadcast_v64i8_v8i64:
; AVX2:       # %bb.0:
; AVX2-NEXT:    notb %dil
; AVX2-NEXT:    vmovd %edi, %xmm2
; AVX2-NEXT:    vpbroadcastb %xmm2, %ymm2
; AVX2-NEXT:    vpand %ymm0, %ymm2, %ymm0
; AVX2-NEXT:    vpand %ymm1, %ymm2, %ymm1
; AVX2-NEXT:    retq
;
; AVX512-LABEL: neg_scalar_broadcast_v64i8_v8i64:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpbroadcastb %edi, %zmm1
; AVX512-NEXT:    vpandnq %zmm0, %zmm1, %zmm0
; AVX512-NEXT:    retq
  %1 = xor i8 %a0, -1
  %2 = insertelement <64 x i8> undef, i8 %1, i64 0
  %3 = shufflevector <64 x i8> %2, <64 x i8> poison, <64 x i32> zeroinitializer
  %4 = bitcast <64 x i8> %3 to <8 x i64>
  %5 = and <8 x i64> %4, %a1
  ret <8 x i64> %5
}

define <4 x i64> @neg_scalar_broadcast_v32i8_v4i64(i8 %a0, <4 x i64> %a1) {
; SSE-LABEL: neg_scalar_broadcast_v32i8_v4i64:
; SSE:       # %bb.0:
; SSE-NEXT:    notb %dil
; SSE-NEXT:    movzbl %dil, %eax
; SSE-NEXT:    movd %eax, %xmm2
; SSE-NEXT:    pxor %xmm3, %xmm3
; SSE-NEXT:    pshufb %xmm3, %xmm2
; SSE-NEXT:    pand %xmm2, %xmm0
; SSE-NEXT:    pand %xmm2, %xmm1
; SSE-NEXT:    retq
;
; AVX1-LABEL: neg_scalar_broadcast_v32i8_v4i64:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vmovd %edi, %xmm1
; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm1, %ymm1
; AVX1-NEXT:    vandnps %ymm0, %ymm1, %ymm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: neg_scalar_broadcast_v32i8_v4i64:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vmovd %edi, %xmm1
; AVX2-NEXT:    vpbroadcastb %xmm1, %ymm1
; AVX2-NEXT:    vpandn %ymm0, %ymm1, %ymm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: neg_scalar_broadcast_v32i8_v4i64:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpbroadcastb %edi, %ymm1
; AVX512-NEXT:    vpandn %ymm0, %ymm1, %ymm0
; AVX512-NEXT:    retq
  %1 = xor i8 %a0, -1
  %2 = insertelement <32 x i8> undef, i8 %1, i64 0
  %3 = shufflevector <32 x i8> %2, <32 x i8> poison, <32 x i32> zeroinitializer
  %4 = bitcast <32 x i8> %3 to <4 x i64>
  %5 = and <4 x i64> %4, %a1
  ret <4 x i64> %5
}

define <2 x i64> @neg_scalar_broadcast_v16i8_v2i64(i8 %a0, <2 x i64> %a1) {
; SSE-LABEL: neg_scalar_broadcast_v16i8_v2i64:
; SSE:       # %bb.0:
; SSE-NEXT:    movd %edi, %xmm1
; SSE-NEXT:    pxor %xmm2, %xmm2
; SSE-NEXT:    pshufb %xmm2, %xmm1
; SSE-NEXT:    pandn %xmm0, %xmm1
; SSE-NEXT:    movdqa %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX1-LABEL: neg_scalar_broadcast_v16i8_v2i64:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vmovd %edi, %xmm1
; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
; AVX1-NEXT:    vpandn %xmm0, %xmm1, %xmm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: neg_scalar_broadcast_v16i8_v2i64:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vmovd %edi, %xmm1
; AVX2-NEXT:    vpbroadcastb %xmm1, %xmm1
; AVX2-NEXT:    vpandn %xmm0, %xmm1, %xmm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: neg_scalar_broadcast_v16i8_v2i64:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpbroadcastb %edi, %xmm1
; AVX512-NEXT:    vpandn %xmm0, %xmm1, %xmm0
; AVX512-NEXT:    retq
  %1 = xor i8 %a0, -1
  %2 = insertelement <16 x i8> undef, i8 %1, i64 0
  %3 = shufflevector <16 x i8> %2, <16 x i8> poison, <16 x i32> zeroinitializer
  %4 = bitcast <16 x i8> %3 to <2 x i64>
  %5 = and <2 x i64> %4, %a1
  ret <2 x i64> %5
}

define <4 x i64> @neg_scalar_broadcast_v8i32_v4i64(i32 %a0, <4 x i64> %a1) {
; SSE-LABEL: neg_scalar_broadcast_v8i32_v4i64:
; SSE:       # %bb.0:
; SSE-NEXT:    notl %edi
; SSE-NEXT:    movd %edi, %xmm2
; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; SSE-NEXT:    pand %xmm2, %xmm0
; SSE-NEXT:    pand %xmm2, %xmm1
; SSE-NEXT:    retq
;
; AVX1-LABEL: neg_scalar_broadcast_v8i32_v4i64:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vmovd %edi, %xmm1
; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm1, %ymm1
; AVX1-NEXT:    vandnps %ymm0, %ymm1, %ymm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: neg_scalar_broadcast_v8i32_v4i64:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vmovd %edi, %xmm1
; AVX2-NEXT:    vpbroadcastd %xmm1, %ymm1
; AVX2-NEXT:    vpandn %ymm0, %ymm1, %ymm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: neg_scalar_broadcast_v8i32_v4i64:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpbroadcastd %edi, %ymm1
; AVX512-NEXT:    vpandn %ymm0, %ymm1, %ymm0
; AVX512-NEXT:    retq
  %1 = xor i32 %a0, -1
  %2 = insertelement <8 x i32> undef, i32 %1, i64 0
  %3 = shufflevector <8 x i32> %2, <8 x i32> poison, <8 x i32> zeroinitializer
  %4 = bitcast <8 x i32> %3 to <4 x i64>
  %5 = and <4 x i64> %4, %a1
  ret <4 x i64> %5
}

define <4 x i32> @neg_scalar_broadcast_two_uses(i32 %a0, <4 x i32> %a1, ptr %a2) {
; SSE-LABEL: neg_scalar_broadcast_two_uses:
; SSE:       # %bb.0:
; SSE-NEXT:    notl %edi
; SSE-NEXT:    movd %edi, %xmm1
; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; SSE-NEXT:    movdqa %xmm1, (%rsi)
; SSE-NEXT:    pand %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX1-LABEL: neg_scalar_broadcast_two_uses:
; AVX1:       # %bb.0:
; AVX1-NEXT:    notl %edi
; AVX1-NEXT:    vmovd %edi, %xmm1
; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; AVX1-NEXT:    vmovdqa %xmm1, (%rsi)
; AVX1-NEXT:    vpand %xmm0, %xmm1, %xmm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: neg_scalar_broadcast_two_uses:
; AVX2:       # %bb.0:
; AVX2-NEXT:    notl %edi
; AVX2-NEXT:    vmovd %edi, %xmm1
; AVX2-NEXT:    vpbroadcastd %xmm1, %xmm1
; AVX2-NEXT:    vmovdqa %xmm1, (%rsi)
; AVX2-NEXT:    vpand %xmm0, %xmm1, %xmm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: neg_scalar_broadcast_two_uses:
; AVX512:       # %bb.0:
; AVX512-NEXT:    notl %edi
; AVX512-NEXT:    vpbroadcastd %edi, %xmm1
; AVX512-NEXT:    vmovdqa %xmm1, (%rsi)
; AVX512-NEXT:    vpand %xmm0, %xmm1, %xmm0
; AVX512-NEXT:    retq
  %1 = xor i32 %a0, -1
  %2 = insertelement <4 x i32> undef, i32 %1, i64 0
  %3 = shufflevector <4 x i32> %2, <4 x i32> poison, <4 x i32> zeroinitializer
  store <4 x i32> %3, ptr %a2, align 16
  %4 = and <4 x i32> %3, %a1
  ret <4 x i32> %4
}

; PR84660 - check for illegal types
define <2 x i128> @neg_scalar_broadcast_illegaltype(i128 %arg) {
; CHECK-LABEL: neg_scalar_broadcast_illegaltype:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movq %rdi, %rax
; CHECK-NEXT:    notl %esi
; CHECK-NEXT:    andl $1, %esi
; CHECK-NEXT:    movq %rsi, 16(%rdi)
; CHECK-NEXT:    movq %rsi, (%rdi)
; CHECK-NEXT:    movq $0, 24(%rdi)
; CHECK-NEXT:    movq $0, 8(%rdi)
; CHECK-NEXT:    retq
  %i = xor i128 %arg, 1
  %i1 = insertelement <2 x i128> zeroinitializer, i128 %i, i64 0
  %i2 = shufflevector <2 x i128> %i1, <2 x i128> zeroinitializer, <2 x i32> zeroinitializer
  %i3 = and <2 x i128> <i128 1, i128 1>, %i2
  ret <2 x i128> %i3
}

define <2 x i64> @andnp_xx(<2 x i64> %v0) nounwind {
; SSE-LABEL: andnp_xx:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: andnp_xx:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %x = xor <2 x i64> %v0, <i64 -1, i64 -1>
  %y = and <2 x i64> %v0, %x
  ret <2 x i64> %y
}

define <2 x i64> @andnp_xx_2(<2 x i64> %v0) nounwind {
; SSE-LABEL: andnp_xx_2:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: andnp_xx_2:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %x = xor <2 x i64> %v0, <i64 -1, i64 -1>
  %y = and <2 x i64> %x, %v0
  ret <2 x i64> %y
}

define i64 @andn_xx(i64 %v0) nounwind {
; CHECK-LABEL: andn_xx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xorl %eax, %eax
; CHECK-NEXT:    retq
  %x = xor i64 %v0, -1
  %y = and i64 %v0, %x
  ret i64 %y
}

define i64 @andn_xx_2(i64 %v0) nounwind {
; CHECK-LABEL: andn_xx_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xorl %eax, %eax
; CHECK-NEXT:    retq
  %x = xor i64 %v0, -1
  %y = and i64 %x, %v0
  ret i64 %y
}