; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse2 | FileCheck %s --check-prefix=ALL --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX

; First, check the generic pattern for any 2 vector constants. Then, check special cases where
; the constants are all off-by-one. Finally, check the extra special cases where the constants
; include 0 or -1.
; Each minimal select test is repeated with a more typical pattern that includes a compare to
; generate the condition value.

; TODO: If we don't have blendv, this can definitely be improved. There's also a selection of
; chips where it makes sense to transform the general case blendv to 2 bit-ops. That should be
; a uarch-specific transform. At some point (Ryzen?), the implementation should catch up to the
; architecture, so blendv is as fast as a single bit-op.
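
; For reference, one possible "2 bit-ops" form of the general case: once the
; condition is sign-extended to a 0/-1 mask, select C1, C2 can be computed as
; (mask & (C1 ^ C2)) ^ C2. A sketch in IR (illustrative names; Cx stands for
; the precomputed constant C1 ^ C2, C2 for the false-side constant):
;   %mask = sext <4 x i1> %cond to <4 x i32>
;   %d = and <4 x i32> %mask, Cx
;   %r = xor <4 x i32> %d, C2      ; mask all-ones -> C1, mask zero -> C2
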
define <4 x i32> @sel_C1_or_C2_vec(<4 x i1> %cond) {
; SSE-LABEL: sel_C1_or_C2_vec:
; SSE:       # BB#0:
; SSE-NEXT:    pslld $31, %xmm0
; SSE-NEXT:    psrad $31, %xmm0
; SSE-NEXT:    movdqa %xmm0, %xmm1
; SSE-NEXT:    pandn {{.*}}(%rip), %xmm1
; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE-NEXT:    por %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: sel_C1_or_C2_vec:
; AVX:       # BB#0:
; AVX-NEXT:    vpslld $31, %xmm0, %xmm0
; AVX-NEXT:    vmovaps {{.*#+}} xmm1 = [42,0,4294967294,4294967295]
; AVX-NEXT:    vblendvps %xmm0, {{.*}}(%rip), %xmm1, %xmm0
; AVX-NEXT:    retq
  %add = select <4 x i1> %cond, <4 x i32> <i32 3000, i32 1, i32 -1, i32 0>, <4 x i32> <i32 42, i32 0, i32 -2, i32 -1>
  ret <4 x i32> %add
}

define <4 x i32> @cmp_sel_C1_or_C2_vec(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: cmp_sel_C1_or_C2_vec:
; SSE:       # BB#0:
; SSE-NEXT:    pcmpeqd %xmm1, %xmm0
; SSE-NEXT:    movdqa %xmm0, %xmm1
; SSE-NEXT:    pandn {{.*}}(%rip), %xmm1
; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE-NEXT:    por %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: cmp_sel_C1_or_C2_vec:
; AVX:       # BB#0:
; AVX-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vmovaps {{.*#+}} xmm1 = [42,0,4294967294,4294967295]
; AVX-NEXT:    vblendvps %xmm0, {{.*}}(%rip), %xmm1, %xmm0
; AVX-NEXT:    retq
  %cond = icmp eq <4 x i32> %x, %y
  %add = select <4 x i1> %cond, <4 x i32> <i32 3000, i32 1, i32 -1, i32 0>, <4 x i32> <i32 42, i32 0, i32 -2, i32 -1>
  ret <4 x i32> %add
}

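; If the constants differ by exactly 1 (C+1 vs. C), no blend is needed: the
; select is equivalent to zext(cond) + C. A sketch with illustrative names:
;   %z = zext <4 x i1> %cond to <4 x i32>
;   %r = add <4 x i32> %z, <i32 42, i32 0, i32 -2, i32 -1>
; which is the and/add (pand/paddd) pair checked below.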
define <4 x i32> @sel_Cplus1_or_C_vec(<4 x i1> %cond) {
; SSE-LABEL: sel_Cplus1_or_C_vec:
; SSE:       # BB#0:
; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE-NEXT:    paddd {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: sel_Cplus1_or_C_vec:
; AVX:       # BB#0:
; AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %add = select <4 x i1> %cond, <4 x i32> <i32 43, i32 1, i32 -1, i32 0>, <4 x i32> <i32 42, i32 0, i32 -2, i32 -1>
  ret <4 x i32> %add
}

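; With a compare producing the condition, the 0/-1 compare mask is used
; directly: C - (-1) = C+1 and C - 0 = C, so a single subtract from the
; constant vector (psubd) performs the select.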
define <4 x i32> @cmp_sel_Cplus1_or_C_vec(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: cmp_sel_Cplus1_or_C_vec:
; SSE:       # BB#0:
; SSE-NEXT:    pcmpeqd %xmm1, %xmm0
; SSE-NEXT:    movdqa {{.*#+}} xmm1 = [42,0,4294967294,4294967295]
; SSE-NEXT:    psubd %xmm0, %xmm1
; SSE-NEXT:    movdqa %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: cmp_sel_Cplus1_or_C_vec:
; AVX:       # BB#0:
; AVX-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [42,0,4294967294,4294967295]
; AVX-NEXT:    vpsubd %xmm0, %xmm1, %xmm0
; AVX-NEXT:    retq
  %cond = icmp eq <4 x i32> %x, %y
  %add = select <4 x i1> %cond, <4 x i32> <i32 43, i32 1, i32 -1, i32 0>, <4 x i32> <i32 42, i32 0, i32 -2, i32 -1>
  ret <4 x i32> %add
}

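; If the constants differ by -1 (C-1 vs. C), sign-extend the condition
; instead: C + (-1) = C-1 and C + 0 = C, so the select is sext(cond) + C
; (pslld/psrad to sign-extend, then paddd).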
define <4 x i32> @sel_Cminus1_or_C_vec(<4 x i1> %cond) {
; SSE-LABEL: sel_Cminus1_or_C_vec:
; SSE:       # BB#0:
; SSE-NEXT:    pslld $31, %xmm0
; SSE-NEXT:    psrad $31, %xmm0
; SSE-NEXT:    paddd {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: sel_Cminus1_or_C_vec:
; AVX:       # BB#0:
; AVX-NEXT:    vpslld $31, %xmm0, %xmm0
; AVX-NEXT:    vpsrad $31, %xmm0, %xmm0
; AVX-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %add = select <4 x i1> %cond, <4 x i32> <i32 43, i32 1, i32 -1, i32 0>, <4 x i32> <i32 44, i32 2, i32 0, i32 1>
  ret <4 x i32> %add
}

define <4 x i32> @cmp_sel_Cminus1_or_C_vec(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: cmp_sel_Cminus1_or_C_vec:
; SSE:       # BB#0:
; SSE-NEXT:    pcmpeqd %xmm1, %xmm0
; SSE-NEXT:    paddd {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: cmp_sel_Cminus1_or_C_vec:
; AVX:       # BB#0:
; AVX-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %cond = icmp eq <4 x i32> %x, %y
  %add = select <4 x i1> %cond, <4 x i32> <i32 43, i32 1, i32 -1, i32 0>, <4 x i32> <i32 44, i32 2, i32 0, i32 1>
  ret <4 x i32> %add
}

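; Selecting between all-ones and zero is just the sign-extension of the
; condition itself; with a compare, the compare mask is the result directly.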
define <4 x i32> @sel_minus1_or_0_vec(<4 x i1> %cond) {
; SSE-LABEL: sel_minus1_or_0_vec:
; SSE:       # BB#0:
; SSE-NEXT:    pslld $31, %xmm0
; SSE-NEXT:    psrad $31, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: sel_minus1_or_0_vec:
; AVX:       # BB#0:
; AVX-NEXT:    vpslld $31, %xmm0, %xmm0
; AVX-NEXT:    vpsrad $31, %xmm0, %xmm0
; AVX-NEXT:    retq
  %add = select <4 x i1> %cond, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
  ret <4 x i32> %add
}

define <4 x i32> @cmp_sel_minus1_or_0_vec(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: cmp_sel_minus1_or_0_vec:
; SSE:       # BB#0:
; SSE-NEXT:    pcmpeqd %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: cmp_sel_minus1_or_0_vec:
; AVX:       # BB#0:
; AVX-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %cond = icmp eq <4 x i32> %x, %y
  %add = select <4 x i1> %cond, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
  ret <4 x i32> %add
}

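; The inverted case (0 or -1) is zext(cond) + -1: mask the condition to 0/1,
; then add all-ones. With a compare, it is simply the compare mask xor'd with
; all-ones (a vector NOT).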
define <4 x i32> @sel_0_or_minus1_vec(<4 x i1> %cond) {
; SSE-LABEL: sel_0_or_minus1_vec:
; SSE:       # BB#0:
; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE-NEXT:    pcmpeqd %xmm1, %xmm1
; SSE-NEXT:    paddd %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: sel_0_or_minus1_vec:
; AVX:       # BB#0:
; AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %add = select <4 x i1> %cond, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>
  ret <4 x i32> %add
}

define <4 x i32> @cmp_sel_0_or_minus1_vec(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: cmp_sel_0_or_minus1_vec:
; SSE:       # BB#0:
; SSE-NEXT:    pcmpeqd %xmm1, %xmm0
; SSE-NEXT:    pcmpeqd %xmm1, %xmm1
; SSE-NEXT:    pxor %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: cmp_sel_0_or_minus1_vec:
; AVX:       # BB#0:
; AVX-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %cond = icmp eq <4 x i32> %x, %y
  %add = select <4 x i1> %cond, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>
  ret <4 x i32> %add
}

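; Selecting 1 or 0 is zext(cond): a single AND with 1 when the i1 vector is
; the input, or a logical right-shift of the compare mask by 31.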
define <4 x i32> @sel_1_or_0_vec(<4 x i1> %cond) {
; SSE-LABEL: sel_1_or_0_vec:
; SSE:       # BB#0:
; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: sel_1_or_0_vec:
; AVX:       # BB#0:
; AVX-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %add = select <4 x i1> %cond, <4 x i32> <i32 1, i32 1, i32 1, i32 1>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
  ret <4 x i32> %add
}

define <4 x i32> @cmp_sel_1_or_0_vec(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: cmp_sel_1_or_0_vec:
; SSE:       # BB#0:
; SSE-NEXT:    pcmpeqd %xmm1, %xmm0
; SSE-NEXT:    psrld $31, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: cmp_sel_1_or_0_vec:
; AVX:       # BB#0:
; AVX-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpsrld $31, %xmm0, %xmm0
; AVX-NEXT:    retq
  %cond = icmp eq <4 x i32> %x, %y
  %add = select <4 x i1> %cond, <4 x i32> <i32 1, i32 1, i32 1, i32 1>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
  ret <4 x i32> %add
}

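; The inverted case (0 or 1) uses an ANDN to combine NOT(cond) with 1.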
define <4 x i32> @sel_0_or_1_vec(<4 x i1> %cond) {
; SSE-LABEL: sel_0_or_1_vec:
; SSE:       # BB#0:
; SSE-NEXT:    andnps {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: sel_0_or_1_vec:
; AVX:       # BB#0:
; AVX-NEXT:    vandnps {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %add = select <4 x i1> %cond, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %add
}

define <4 x i32> @cmp_sel_0_or_1_vec(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: cmp_sel_0_or_1_vec:
; SSE:       # BB#0:
; SSE-NEXT:    pcmpeqd %xmm1, %xmm0
; SSE-NEXT:    pandn {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: cmp_sel_0_or_1_vec:
; AVX:       # BB#0:
; AVX-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpandn {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %cond = icmp eq <4 x i32> %x, %y
  %add = select <4 x i1> %cond, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %add
}