; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 < %s | FileCheck %s

define i32 @and_self(i32 %x) {
; CHECK-LABEL: and_self:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl %edi, %eax
; CHECK-NEXT:    retq
  %and = and i32 %x, %x
  ret i32 %and
}

define <4 x i32> @and_self_vec(<4 x i32> %x) {
; CHECK-LABEL: and_self_vec:
; CHECK:       # %bb.0:
; CHECK-NEXT:    retq
  %and = and <4 x i32> %x, %x
  ret <4 x i32> %and
}

;
; Verify that the DAGCombiner is able to fold a vector AND into a blend
; if one of the operands to the AND is a vector of all constants, and each
; constant element is either zero or all-ones.
;
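; For example, 'and <4 x i32> %A, <i32 -1, i32 0, i32 0, i32 0>' keeps only
; element 0 of %A and zeroes the remaining elements, which is exactly a blend
; of %A with a zero vector (pxor + pblendw in the checks below).
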
define <4 x i32> @test1(<4 x i32> %A) {
; CHECK-LABEL: test1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pxor %xmm1, %xmm1
; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
; CHECK-NEXT:    retq
  %1 = and <4 x i32> %A, <i32 -1, i32 0, i32 0, i32 0>
  ret <4 x i32> %1
}

define <4 x i32> @test2(<4 x i32> %A) {
; CHECK-LABEL: test2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pxor %xmm1, %xmm1
; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5,6,7]
; CHECK-NEXT:    retq
  %1 = and <4 x i32> %A, <i32 0, i32 -1, i32 0, i32 0>
  ret <4 x i32> %1
}

define <4 x i32> @test3(<4 x i32> %A) {
; CHECK-LABEL: test3:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pxor %xmm1, %xmm1
; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5],xmm1[6,7]
; CHECK-NEXT:    retq
  %1 = and <4 x i32> %A, <i32 0, i32 0, i32 -1, i32 0>
  ret <4 x i32> %1
}

define <4 x i32> @test4(<4 x i32> %A) {
; CHECK-LABEL: test4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pxor %xmm1, %xmm1
; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
; CHECK-NEXT:    retq
  %1 = and <4 x i32> %A, <i32 0, i32 0, i32 0, i32 -1>
  ret <4 x i32> %1
}

define <4 x i32> @test5(<4 x i32> %A) {
; CHECK-LABEL: test5:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pxor %xmm1, %xmm1
; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; CHECK-NEXT:    retq
  %1 = and <4 x i32> %A, <i32 -1, i32 0, i32 -1, i32 0>
  ret <4 x i32> %1
}

define <4 x i32> @test6(<4 x i32> %A) {
; CHECK-LABEL: test6:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pxor %xmm1, %xmm1
; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
; CHECK-NEXT:    retq
  %1 = and <4 x i32> %A, <i32 0, i32 -1, i32 0, i32 -1>
  ret <4 x i32> %1
}

define <4 x i32> @test7(<4 x i32> %A) {
; CHECK-LABEL: test7:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pxor %xmm1, %xmm1
; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; CHECK-NEXT:    retq
  %1 = and <4 x i32> %A, <i32 0, i32 0, i32 -1, i32 -1>
  ret <4 x i32> %1
}

define <4 x i32> @test8(<4 x i32> %A) {
; CHECK-LABEL: test8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pxor %xmm1, %xmm1
; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5],xmm0[6,7]
; CHECK-NEXT:    retq
  %1 = and <4 x i32> %A, <i32 -1, i32 0, i32 0, i32 -1>
  ret <4 x i32> %1
}

define <4 x i32> @test9(<4 x i32> %A) {
; CHECK-LABEL: test9:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movq {{.*#+}} xmm0 = xmm0[0],zero
; CHECK-NEXT:    retq
  %1 = and <4 x i32> %A, <i32 -1, i32 -1, i32 0, i32 0>
  ret <4 x i32> %1
}

define <4 x i32> @test10(<4 x i32> %A) {
; CHECK-LABEL: test10:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pxor %xmm1, %xmm1
; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5],xmm1[6,7]
; CHECK-NEXT:    retq
  %1 = and <4 x i32> %A, <i32 0, i32 -1, i32 -1, i32 0>
  ret <4 x i32> %1
}

define <4 x i32> @test11(<4 x i32> %A) {
; CHECK-LABEL: test11:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pxor %xmm1, %xmm1
; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
; CHECK-NEXT:    retq
  %1 = and <4 x i32> %A, <i32 0, i32 -1, i32 -1, i32 -1>
  ret <4 x i32> %1
}

define <4 x i32> @test12(<4 x i32> %A) {
; CHECK-LABEL: test12:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pxor %xmm1, %xmm1
; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
; CHECK-NEXT:    retq
  %1 = and <4 x i32> %A, <i32 -1, i32 -1, i32 -1, i32 0>
  ret <4 x i32> %1
}

define <4 x i32> @test13(<4 x i32> %A) {
; CHECK-LABEL: test13:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pxor %xmm1, %xmm1
; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5],xmm0[6,7]
; CHECK-NEXT:    retq
  %1 = and <4 x i32> %A, <i32 -1, i32 -1, i32 0, i32 -1>
  ret <4 x i32> %1
}

define <4 x i32> @test14(<4 x i32> %A) {
; CHECK-LABEL: test14:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pxor %xmm1, %xmm1
; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
; CHECK-NEXT:    retq
  %1 = and <4 x i32> %A, <i32 -1, i32 0, i32 -1, i32 -1>
  ret <4 x i32> %1
}

define <4 x i32> @test15(<4 x i32> %A, <4 x i32> %B) {
; CHECK-LABEL: test15:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
; CHECK-NEXT:    retq
  %1 = and <4 x i32> %A, <i32 -1, i32 0, i32 -1, i32 -1>
  %2 = and <4 x i32> %B, <i32 0, i32 -1, i32 0, i32 0>
  %3 = or <4 x i32> %1, %2
  ret <4 x i32> %3
}

define <4 x i32> @test16(<4 x i32> %A, <4 x i32> %B) {
; CHECK-LABEL: test16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; CHECK-NEXT:    retq
  %1 = and <4 x i32> %A, <i32 -1, i32 0, i32 -1, i32 0>
  %2 = and <4 x i32> %B, <i32 0, i32 -1, i32 0, i32 -1>
  %3 = or <4 x i32> %1, %2
  ret <4 x i32> %3
}

define <4 x i32> @test17(<4 x i32> %A, <4 x i32> %B) {
; CHECK-LABEL: test17:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
; CHECK-NEXT:    retq
  %1 = and <4 x i32> %A, <i32 0, i32 -1, i32 0, i32 -1>
  %2 = and <4 x i32> %B, <i32 -1, i32 0, i32 -1, i32 0>
  %3 = or <4 x i32> %1, %2
  ret <4 x i32> %3
}

;
; fold (and (or x, C), D) -> D if (C & D) == D
;
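; e.g. in and_or_v2i64 below, C = 255 and D = 8: the OR forces every bit of
; D to 1 (255 & 8 == 8), so the AND always produces the constant 8 and the
; whole expression folds to a vector constant load.
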
define <2 x i64> @and_or_v2i64(<2 x i64> %a0) {
; CHECK-LABEL: and_or_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movaps {{.*#+}} xmm0 = [8,8]
; CHECK-NEXT:    retq
  %1 = or <2 x i64> %a0, <i64 255, i64 255>
  %2 = and <2 x i64> %1, <i64 8, i64 8>
  ret <2 x i64> %2
}

define <4 x i32> @and_or_v4i32(<4 x i32> %a0) {
; CHECK-LABEL: and_or_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movaps {{.*#+}} xmm0 = [3,3,3,3]
; CHECK-NEXT:    retq
  %1 = or <4 x i32> %a0, <i32 15, i32 15, i32 15, i32 15>
  %2 = and <4 x i32> %1, <i32 3, i32 3, i32 3, i32 3>
  ret <4 x i32> %2
}

;
; known bits folding
;
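; The zext guarantees that the high bits of each element are zero and the OR
; with 1 only sets bit 0, so an AND mask that tests a single higher bit
; (2^32 and 2^16 below) is known to yield zero, folding to an xorps.
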
define <2 x i64> @and_or_zext_v2i32(<2 x i32> %a0) {
; CHECK-LABEL: and_or_zext_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xorps %xmm0, %xmm0
; CHECK-NEXT:    retq
  %1 = zext <2 x i32> %a0 to <2 x i64>
  %2 = or <2 x i64> %1, <i64 1, i64 1>
  %3 = and <2 x i64> %2, <i64 4294967296, i64 4294967296>
  ret <2 x i64> %3
}

define <4 x i32> @and_or_zext_v4i16(<4 x i16> %a0) {
; CHECK-LABEL: and_or_zext_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xorps %xmm0, %xmm0
; CHECK-NEXT:    retq
  %1 = zext <4 x i16> %a0 to <4 x i32>
  %2 = or <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
  %3 = and <4 x i32> %2, <i32 65536, i32 65536, i32 65536, i32 65536>
  ret <4 x i32> %3
}

;
; known sign bits folding
;
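; An ashr that shifts in only sign bits leaves each element as either 0 or
; -1, so masking the low bit(s) is equivalent to a plain logical shift of
; the sign bit: 'ashr x, 15' then 'and 1' on i16 becomes 'lshr x, 15' (a
; single psrlw), and the mask-7 case needs only psrad + psrld.
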
define <8 x i16> @ashr_mask1_v8i16(<8 x i16> %a0) {
; CHECK-LABEL: ashr_mask1_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    psrlw $15, %xmm0
; CHECK-NEXT:    retq
  %1 = ashr <8 x i16> %a0, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
  %2 = and <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  ret <8 x i16> %2
}

define <4 x i32> @ashr_mask7_v4i32(<4 x i32> %a0) {
; CHECK-LABEL: ashr_mask7_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    psrad $31, %xmm0
; CHECK-NEXT:    psrld $29, %xmm0
; CHECK-NEXT:    retq
  %1 = ashr <4 x i32> %a0, <i32 31, i32 31, i32 31, i32 31>
  %2 = and <4 x i32> %1, <i32 7, i32 7, i32 7, i32 7>
  ret <4 x i32> %2
}

;
; SimplifyDemandedBits
;

; PR34620 - redundant PAND after vector shift of a byte vector (PSRLW)
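; (x86 has no byte-granularity shifts, so a v16i8 lshr is lowered as a word
; shift plus a PAND that clears the bits shifted in from the neighbouring
; byte; the IR's explicit 'and' with 1 already subsumes that mask, which is
; why one of the two PANDs checked below is redundant.)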
define <16 x i8> @PR34620(<16 x i8> %a0, <16 x i8> %a1) {
; CHECK-LABEL: PR34620:
; CHECK:       # %bb.0:
; CHECK-NEXT:    psrlw $1, %xmm0
; CHECK-NEXT:    pand {{.*}}(%rip), %xmm0
; CHECK-NEXT:    pand {{.*}}(%rip), %xmm0
; CHECK-NEXT:    paddb %xmm1, %xmm0
; CHECK-NEXT:    retq
  %1 = lshr <16 x i8> %a0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %2 = and <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %3 = add <16 x i8> %2, %a1
  ret <16 x i8> %3
}