; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-apple-macosx -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-apple-macosx -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2

target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
; For this test we used to optimize the <i1 true, i1 false, i1 false, i1 true>
; mask into <i32 2147483648, i32 0, i32 0, i32 2147483648> because we thought
; we would lower that into a blend where only the high bit is relevant.
; However, since the whole mask is constant, this is simplified incorrectly
; by the generic code, because it was expecting -1 in place of 2147483648.
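;
; Background: AVX variable blends such as vblendvps read only the sign bit of
; each mask element, so 2147483648 (0x80000000) selects a lane exactly as -1
; would. Generic vselect constant folding, however, expects an all-ones element
; for a selected lane, which is where the two interpretations diverged.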
;
; The problem does not occur without AVX, because vselect of v4i32 is not legal
; nor custom.
;
; <rdar://problem/18675020>

define void @test(<4 x i16>* %a, <4 x i16>* %b) {
; AVX-LABEL: test:
; AVX: ## %bb.0: ## %body
; AVX-NEXT: movabsq $4167800517033787389, %rax ## imm = 0x39D7007D007CFFFD
; AVX-NEXT: movq %rax, (%rdi)
; AVX-NEXT: movabsq $-281474976645121, %rax ## imm = 0xFFFF00000000FFFF
; AVX-NEXT: movq %rax, (%rsi)
; AVX-NEXT: retq
body:
  %predphi = select <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i16> <i16 -3, i16 545, i16 4385, i16 14807>, <4 x i16> <i16 123, i16 124, i16 125, i16 127>
  %predphi42 = select <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>, <4 x i16> zeroinitializer
  store <4 x i16> %predphi, <4 x i16>* %a, align 8
  store <4 x i16> %predphi42, <4 x i16>* %b, align 8
  ret void
}

; Improve code coverage.
;
; When shrinking the condition used in the select to match a blend, this
; test case exercises the path where the modified node is not the root
; of the condition.

define void @test2(double** %call1559, i64 %indvars.iv4198, <4 x i1> %tmp1895) {
; AVX1-LABEL: test2:
; AVX1: ## %bb.0: ## %bb
; AVX1-NEXT: vpslld $31, %xmm0, %xmm0
; AVX1-NEXT: vpmovsxdq %xmm0, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovsxdq %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: movq (%rdi,%rsi,8), %rax
; AVX1-NEXT: vmovapd {{.*#+}} ymm1 = [5.0E-1,5.0E-1,5.0E-1,5.0E-1]
; AVX1-NEXT: vblendvpd %ymm0, {{.*}}(%rip), %ymm1, %ymm0
; AVX1-NEXT: vmovupd %ymm0, (%rax)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test2:
; AVX2: ## %bb.0: ## %bb
; AVX2-NEXT: vpslld $31, %xmm0, %xmm0
; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
; AVX2-NEXT: movq (%rdi,%rsi,8), %rax
; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm2 = [5.0E-1,5.0E-1,5.0E-1,5.0E-1]
; AVX2-NEXT: vblendvpd %ymm0, %ymm1, %ymm2, %ymm0
; AVX2-NEXT: vmovupd %ymm0, (%rax)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
bb:
  %arrayidx1928 = getelementptr inbounds double*, double** %call1559, i64 %indvars.iv4198
  %tmp1888 = load double*, double** %arrayidx1928, align 8
  %predphi.v.v = select <4 x i1> %tmp1895, <4 x double> <double -5.000000e-01, double -5.000000e-01, double -5.000000e-01, double -5.000000e-01>, <4 x double> <double 5.000000e-01, double 5.000000e-01, double 5.000000e-01, double 5.000000e-01>
  %tmp1900 = bitcast double* %tmp1888 to <4 x double>*
  store <4 x double> %predphi.v.v, <4 x double>* %tmp1900, align 8
  ret void
}

; For this test, we used to optimize the conditional mask for the blend, i.e.,
; we shrunk some of its bits.
; However, this same mask was used in another select (%predphi31) that turned out
; to be optimized into an and. In that case, the conditional mask was wrong.
;
; Make sure that the and is fed by the original mask.
;
; <rdar://problem/18819506>
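;
; Background: %predphi31 selects between all-ones and zeroinitializer, i.e. it
; is just the condition sign-extended to <4 x i16>, so it is lowered to an and
; with the mask. That and needs the full 0/-1 mask, not a mask shrunk down to
; the single sign bit that the blend looks at.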

define void @test3(<4 x i32> %induction30, <4 x i16>* %tmp16, <4 x i16>* %tmp17, <4 x i16> %tmp3, <4 x i16> %tmp12) {
; AVX1-LABEL: test3:
; AVX1: ## %bb.0:
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [1431655766,1431655766,1431655766,1431655766]
; AVX1-NEXT: vpmuldq %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vpmuldq %xmm4, %xmm0, %xmm4
; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1],xmm3[2,3],xmm4[4,5],xmm3[6,7]
; AVX1-NEXT: vpsrld $31, %xmm3, %xmm4
; AVX1-NEXT: vpaddd %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm3, %xmm3
; AVX1-NEXT: vpsubd %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vblendvps %xmm0, %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vpackssdw %xmm0, %xmm0, %xmm0
; AVX1-NEXT: vmovq %xmm0, (%rdi)
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm1[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX1-NEXT: vmovq %xmm0, (%rsi)
; AVX1-NEXT: retq
;
; AVX2-LABEL: test3:
; AVX2: ## %bb.0:
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm4 = [1431655766,1431655766,1431655766,1431655766]
; AVX2-NEXT: vpmuldq %xmm4, %xmm3, %xmm3
; AVX2-NEXT: vpmuldq %xmm4, %xmm0, %xmm4
; AVX2-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
; AVX2-NEXT: vpblendd {{.*#+}} xmm3 = xmm4[0],xmm3[1],xmm4[2],xmm3[3]
; AVX2-NEXT: vpsrld $31, %xmm3, %xmm4
; AVX2-NEXT: vpaddd %xmm4, %xmm3, %xmm3
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm4 = [3,3,3,3]
; AVX2-NEXT: vpmulld %xmm4, %xmm3, %xmm3
; AVX2-NEXT: vpsubd %xmm3, %xmm0, %xmm0
; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX2-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0
; AVX2-NEXT: vblendvps %xmm0, %xmm1, %xmm2, %xmm1
; AVX2-NEXT: vpackssdw %xmm0, %xmm0, %xmm0
; AVX2-NEXT: vmovq %xmm0, (%rdi)
; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm1[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX2-NEXT: vmovq %xmm0, (%rsi)
; AVX2-NEXT: retq
  %tmp6 = srem <4 x i32> %induction30, <i32 3, i32 3, i32 3, i32 3>
  %tmp7 = icmp eq <4 x i32> %tmp6, zeroinitializer
  %predphi = select <4 x i1> %tmp7, <4 x i16> %tmp3, <4 x i16> %tmp12
  %predphi31 = select <4 x i1> %tmp7, <4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>, <4 x i16> zeroinitializer

  store <4 x i16> %predphi31, <4 x i16>* %tmp16, align 8
  store <4 x i16> %predphi, <4 x i16>* %tmp17, align 8
  ret void
}

; We shouldn't try to lower this directly using VSELECT because we don't have
; vpblendvb in AVX1, only in AVX2. Instead, it should be expanded.
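;
; Background: the expansion in the AVX1 checks below works on 128-bit halves:
; each i1 is shifted into the byte's sign bit (vpsllw $7 + vpand), turned into
; a 0/-1 byte mask with vpcmpgtb against zero, and then 2 is added so that -1
; lanes become 1 and 0 lanes become 2.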
define <32 x i8> @PR22706(<32 x i1> %x) {
; AVX1-LABEL: PR22706:
; AVX1: ## %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsllw $7, %xmm1, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpcmpgtb %xmm1, %xmm3, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2]
; AVX1-NEXT: vpaddb %xmm4, %xmm1, %xmm1
; AVX1-NEXT: vpsllw $7, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpcmpgtb %xmm0, %xmm3, %xmm0
; AVX1-NEXT: vpaddb %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: PR22706:
; AVX2: ## %bb.0:
; AVX2-NEXT: vpsllw $7, %ymm0, %ymm0
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vpaddb {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
  %tmp = select <32 x i1> %x, <32 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, <32 x i8> <i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2>
  ret <32 x i8> %tmp
}

; Split a 256-bit select into two 128-bit selects when the operands are concatenated.
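;
; Background: on AVX1 the 256-bit shifts below are already carried out as two
; 128-bit vpslld operations, so the select operands are concatenations of
; 128-bit halves; splitting the select blends each half directly with a 128-bit
; vblendvps instead of reassembling 256-bit vectors first.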
define void @blendv_split(<8 x i32>* %p, <8 x i32> %cond, <8 x i32> %a, <8 x i32> %x, <8 x i32> %y, <8 x i32> %z, <8 x i32> %w) {
; AVX1-LABEL: blendv_split:
; AVX1: ## %bb.0:
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
; AVX1-NEXT: vpslld %xmm2, %xmm4, %xmm5
; AVX1-NEXT: vpslld %xmm2, %xmm1, %xmm2
; AVX1-NEXT: vpslld %xmm3, %xmm4, %xmm4
; AVX1-NEXT: vpslld %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vblendvps %xmm0, %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vblendvps %xmm0, %xmm5, %xmm4, %xmm0
; AVX1-NEXT: vmovups %xmm0, 16(%rdi)
; AVX1-NEXT: vmovups %xmm1, (%rdi)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: blendv_split:
; AVX2: ## %bb.0:
; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero
; AVX2-NEXT: vpslld %xmm2, %ymm1, %ymm2
; AVX2-NEXT: vpslld %xmm3, %ymm1, %ymm1
; AVX2-NEXT: vblendvps %ymm0, %ymm2, %ymm1, %ymm0
; AVX2-NEXT: vmovups %ymm0, (%rdi)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
  %signbits = ashr <8 x i32> %cond, <i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31>
  %bool = trunc <8 x i32> %signbits to <8 x i1>
  %shamt1 = shufflevector <8 x i32> %x, <8 x i32> undef, <8 x i32> zeroinitializer
  %shamt2 = shufflevector <8 x i32> %y, <8 x i32> undef, <8 x i32> zeroinitializer
  %sh1 = shl <8 x i32> %a, %shamt1
  %sh2 = shl <8 x i32> %a, %shamt2
  %sel = select <8 x i1> %bool, <8 x i32> %sh1, <8 x i32> %sh2
  store <8 x i32> %sel, <8 x i32>* %p, align 4
  ret void
}