; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X32
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X64
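
; Each i64 element here is a sign-extension of an i32, so only the low 32 bits
; of each lane are significant and the v2i64->v2f64 sitofp can be lowered to a
; v2i32->v2f64 vcvtdq2pd.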
define <2 x double> @signbits_sext_v2i64_sitofp_v2f64(i32 %a0, i32 %a1) nounwind {
; X32-LABEL: signbits_sext_v2i64_sitofp_v2f64:
; X32:       # BB#0:
; X32-NEXT:    vcvtdq2pd {{[0-9]+}}(%esp), %xmm0
; X32-NEXT:    retl
;
; X64-LABEL: signbits_sext_v2i64_sitofp_v2f64:
; X64:       # BB#0:
; X64-NEXT:    vmovd %edi, %xmm0
; X64-NEXT:    vpinsrd $1, %esi, %xmm0, %xmm0
; X64-NEXT:    vcvtdq2pd %xmm0, %xmm0
; X64-NEXT:    retq
  %1 = sext i32 %a0 to i64
  %2 = sext i32 %a1 to i64
  %3 = insertelement <2 x i64> undef, i64 %1, i32 0
  %4 = insertelement <2 x i64> %3, i64 %2, i32 1
  %5 = sitofp <2 x i64> %4 to <2 x double>
  ret <2 x double> %5
}
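
; All four i64 elements are sign-extensions of narrower (i8/i16/i32) sources,
; so the v4i64->v4f32 sitofp only needs the low dwords and can use vcvtdq2ps.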
define <4 x float> @signbits_sext_v4i64_sitofp_v4f32(i8 signext %a0, i16 signext %a1, i32 %a2, i32 %a3) nounwind {
; X32-LABEL: signbits_sext_v4i64_sitofp_v4f32:
; X32:       # BB#0:
; X32-NEXT:    movswl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movsbl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    vmovd %ecx, %xmm0
; X32-NEXT:    vpinsrd $1, %eax, %xmm0, %xmm0
; X32-NEXT:    vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
; X32-NEXT:    vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0
; X32-NEXT:    vcvtdq2ps %xmm0, %xmm0
; X32-NEXT:    retl
;
; X64-LABEL: signbits_sext_v4i64_sitofp_v4f32:
; X64:       # BB#0:
; X64-NEXT:    movslq %edi, %rax
; X64-NEXT:    movslq %esi, %rsi
; X64-NEXT:    movslq %edx, %rdx
; X64-NEXT:    movslq %ecx, %rcx
; X64-NEXT:    vmovq %rcx, %xmm0
; X64-NEXT:    vmovq %rdx, %xmm1
; X64-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; X64-NEXT:    vmovq %rsi, %xmm1
; X64-NEXT:    vmovq %rax, %xmm2
; X64-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; X64-NEXT:    vshufps {{.*#+}} xmm0 = xmm1[0,2],xmm0[0,2]
; X64-NEXT:    vcvtdq2ps %xmm0, %xmm0
; X64-NEXT:    retq
  %1 = sext i8 %a0 to i64
  %2 = sext i16 %a1 to i64
  %3 = sext i32 %a2 to i64
  %4 = sext i32 %a3 to i64
  %5 = insertelement <4 x i64> undef, i64 %1, i32 0
  %6 = insertelement <4 x i64> %5, i64 %2, i32 1
  %7 = insertelement <4 x i64> %6, i64 %3, i32 2
  %8 = insertelement <4 x i64> %7, i64 %4, i32 3
  %9 = sitofp <4 x i64> %8 to <4 x float>
  ret <4 x float> %9
}
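
; An ashr by 32 leaves at most 32 significant bits, so the extracted element
; fits in an i32 and a 32-bit scalar convert (vcvtsi2ssl) suffices.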
define float @signbits_ashr_extract_sitofp_0(<2 x i64> %a0) nounwind {
; X32-LABEL: signbits_ashr_extract_sitofp_0:
; X32:       # BB#0:
; X32-NEXT:    pushl %eax
; X32-NEXT:    vextractps $1, %xmm0, %eax
; X32-NEXT:    vcvtsi2ssl %eax, %xmm1, %xmm0
; X32-NEXT:    vmovss %xmm0, (%esp)
; X32-NEXT:    flds (%esp)
; X32-NEXT:    popl %eax
; X32-NEXT:    retl
;
; X64-LABEL: signbits_ashr_extract_sitofp_0:
; X64:       # BB#0:
; X64-NEXT:    vmovq %xmm0, %rax
; X64-NEXT:    shrq $32, %rax
; X64-NEXT:    vcvtsi2ssl %eax, %xmm1, %xmm0
; X64-NEXT:    retq
  %1 = ashr <2 x i64> %a0, <i64 32, i64 32>
  %2 = extractelement <2 x i64> %1, i32 0
  %3 = sitofp i64 %2 to float
  ret float %3
}
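
; As above but with non-uniform shift amounts - element 0 (the extracted one)
; is still shifted by 32 bits, so the X64 lowering can again use a 32-bit
; scalar convert.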
define float @signbits_ashr_extract_sitofp_1(<2 x i64> %a0) nounwind {
; X32-LABEL: signbits_ashr_extract_sitofp_1:
; X32:       # BB#0:
; X32-NEXT:    pushl %eax
; X32-NEXT:    vpsrlq $32, %xmm0, %xmm0
; X32-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,32768,0,0,1,0,0,0]
; X32-NEXT:    vpxor %xmm1, %xmm0, %xmm0
; X32-NEXT:    vpsubq %xmm1, %xmm0, %xmm0
; X32-NEXT:    vcvtdq2ps %xmm0, %xmm0
; X32-NEXT:    vmovss %xmm0, (%esp)
; X32-NEXT:    flds (%esp)
; X32-NEXT:    popl %eax
; X32-NEXT:    retl
;
; X64-LABEL: signbits_ashr_extract_sitofp_1:
; X64:       # BB#0:
; X64-NEXT:    vmovq %xmm0, %rax
; X64-NEXT:    shrq $32, %rax
; X64-NEXT:    vcvtsi2ssl %eax, %xmm1, %xmm0
; X64-NEXT:    retq
  %1 = ashr <2 x i64> %a0, <i64 32, i64 63>
  %2 = extractelement <2 x i64> %1, i32 0
  %3 = sitofp i64 %2 to float
  ret float %3
}
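
; The ashr by 61 leaves a 3-bit value, and the shl by 20 keeps the result well
; within 32 bits, so the extract+sitofp reduces to scalar i32 arithmetic.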
define float @signbits_ashr_shl_extract_sitofp(<2 x i64> %a0) nounwind {
; X32-LABEL: signbits_ashr_shl_extract_sitofp:
; X32:       # BB#0:
; X32-NEXT:    pushl %eax
; X32-NEXT:    vpsrlq $61, %xmm0, %xmm0
; X32-NEXT:    vmovdqa {{.*#+}} xmm1 = [4,0,0,0,8,0,0,0]
; X32-NEXT:    vpxor %xmm1, %xmm0, %xmm0
; X32-NEXT:    vpsubq %xmm1, %xmm0, %xmm0
; X32-NEXT:    vpsllq $20, %xmm0, %xmm0
; X32-NEXT:    vcvtdq2ps %xmm0, %xmm0
; X32-NEXT:    vmovss %xmm0, (%esp)
; X32-NEXT:    flds (%esp)
; X32-NEXT:    popl %eax
; X32-NEXT:    retl
;
; X64-LABEL: signbits_ashr_shl_extract_sitofp:
; X64:       # BB#0:
; X64-NEXT:    vmovq %xmm0, %rax
; X64-NEXT:    sarq $61, %rax
; X64-NEXT:    shll $20, %eax
; X64-NEXT:    vcvtsi2ssl %eax, %xmm1, %xmm0
; X64-NEXT:    retq
  %1 = ashr <2 x i64> %a0, <i64 61, i64 60>
  %2 = shl <2 x i64> %1, <i64 20, i64 16>
  %3 = extractelement <2 x i64> %2, i32 0
  %4 = sitofp i64 %3 to float
  ret float %4
}
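
; Sign bits survive the scalar ashr, the insertelement and the vector ashr,
; so the extracted element is known to fit in an i32.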
define float @signbits_ashr_insert_ashr_extract_sitofp(i64 %a0, i64 %a1) nounwind {
; X32-LABEL: signbits_ashr_insert_ashr_extract_sitofp:
; X32:       # BB#0:
; X32-NEXT:    pushl %eax
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    shrdl $30, %ecx, %eax
; X32-NEXT:    sarl $30, %ecx
; X32-NEXT:    vmovd %eax, %xmm0
; X32-NEXT:    vpinsrd $1, %ecx, %xmm0, %xmm0
; X32-NEXT:    vpsrlq $3, %xmm0, %xmm0
; X32-NEXT:    vcvtdq2ps %xmm0, %xmm0
; X32-NEXT:    vmovss %xmm0, (%esp)
; X32-NEXT:    flds (%esp)
; X32-NEXT:    popl %eax
; X32-NEXT:    retl
;
; X64-LABEL: signbits_ashr_insert_ashr_extract_sitofp:
; X64:       # BB#0:
; X64-NEXT:    sarq $30, %rdi
; X64-NEXT:    shrq $3, %rdi
; X64-NEXT:    vcvtsi2ssl %edi, %xmm0, %xmm0
; X64-NEXT:    retq
  %1 = ashr i64 %a0, 30
  %2 = insertelement <2 x i64> undef, i64 %1, i32 0
  %3 = insertelement <2 x i64> %2, i64 %a1, i32 1
  %4 = ashr <2 x i64> %3, <i64 3, i64 3>
  %5 = extractelement <2 x i64> %4, i32 0
  %6 = sitofp i64 %5 to float
  ret float %6
}
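
; Sign bit information must be preserved through the shufflevector so that the
; v4i64->v4f64 sitofp can still be lowered via a packed v4i32->v4f64 vcvtdq2pd.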
define <4 x double> @signbits_sext_shuffle_sitofp(<4 x i32> %a0, <4 x i64> %a1) nounwind {
; X32-LABEL: signbits_sext_shuffle_sitofp:
; X32:       # BB#0:
; X32-NEXT:    vpmovsxdq %xmm0, %xmm1
; X32-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; X32-NEXT:    vpmovsxdq %xmm0, %xmm0
; X32-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X32-NEXT:    vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
; X32-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
; X32-NEXT:    vextractf128 $1, %ymm0, %xmm1
; X32-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; X32-NEXT:    vcvtdq2pd %xmm0, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: signbits_sext_shuffle_sitofp:
; X64:       # BB#0:
; X64-NEXT:    vpmovsxdq %xmm0, %xmm1
; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; X64-NEXT:    vpmovsxdq %xmm0, %xmm0
; X64-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X64-NEXT:    vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
; X64-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
; X64-NEXT:    vextractf128 $1, %ymm0, %xmm1
; X64-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; X64-NEXT:    vcvtdq2pd %xmm0, %ymm0
; X64-NEXT:    retq
  %1 = sext <4 x i32> %a0 to <4 x i64>
  %2 = shufflevector <4 x i64> %1, <4 x i64> %a1, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
  %3 = sitofp <4 x i64> %2 to <4 x double>
  ret <4 x double> %3
}
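
; Sign bits are tracked through the concatenation (the shufflevector pair) and
; the second ashr before the final extract+sitofp.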
; TODO: Fix vpshufd+vpsrlq -> vpshufd/vpermilps
define <2 x double> @signbits_ashr_concat_ashr_extract_sitofp(<2 x i64> %a0, <4 x i64> %a1) nounwind {
; X32-LABEL: signbits_ashr_concat_ashr_extract_sitofp:
; X32:       # BB#0:
; X32-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[1,3,2,3]
; X32-NEXT:    vcvtdq2pd %xmm0, %xmm0
; X32-NEXT:    retl
;
; X64-LABEL: signbits_ashr_concat_ashr_extract_sitofp:
; X64:       # BB#0:
; X64-NEXT:    vpsrlq $32, %xmm0, %xmm0
; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X64-NEXT:    vcvtdq2pd %xmm0, %xmm0
; X64-NEXT:    retq
  %1 = ashr <2 x i64> %a0, <i64 16, i64 16>
  %2 = shufflevector <2 x i64> %1, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
  %3 = shufflevector <4 x i64> %a1, <4 x i64> %2, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
  %4 = ashr <4 x i64> %3, <i64 16, i64 16, i64 16, i64 16>
  %5 = shufflevector <4 x i64> %4, <4 x i64> undef, <2 x i32> <i32 2, i32 3>
  %6 = sitofp <2 x i64> %5 to <2 x double>
  ret <2 x double> %6
}
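
; Sign bits are tracked through the ashr, the sext, the sign_extend_inreg
; pattern (shl+ashr by 20) and the 'and', allowing a 32-bit scalar convert.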
define float @signbits_ashr_sext_sextinreg_and_extract_sitofp(<2 x i64> %a0, <2 x i64> %a1, i32 %a2) nounwind {
; X32-LABEL: signbits_ashr_sext_sextinreg_and_extract_sitofp:
; X32:       # BB#0:
; X32-NEXT:    pushl %eax
; X32-NEXT:    vpsrlq $61, %xmm0, %xmm0
; X32-NEXT:    vmovdqa {{.*#+}} xmm1 = [4,0,0,0,8,0,0,0]
; X32-NEXT:    vpxor %xmm1, %xmm0, %xmm0
; X32-NEXT:    vpsubq %xmm1, %xmm0, %xmm0
; X32-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT:    vpand %xmm1, %xmm0, %xmm0
; X32-NEXT:    vcvtdq2ps %xmm0, %xmm0
; X32-NEXT:    vmovss %xmm0, (%esp)
; X32-NEXT:    flds (%esp)
; X32-NEXT:    popl %eax
; X32-NEXT:    retl
;
; X64-LABEL: signbits_ashr_sext_sextinreg_and_extract_sitofp:
; X64:       # BB#0:
; X64-NEXT:    vpsrlq $61, %xmm0, %xmm0
; X64-NEXT:    vmovdqa {{.*#+}} xmm1 = [4,8]
; X64-NEXT:    vpxor %xmm1, %xmm0, %xmm0
; X64-NEXT:    vpsubq %xmm1, %xmm0, %xmm0
; X64-NEXT:    movslq %edi, %rax
; X64-NEXT:    vmovq %rax, %xmm1
; X64-NEXT:    vpand %xmm1, %xmm0, %xmm0
; X64-NEXT:    vmovq %xmm0, %rax
; X64-NEXT:    vcvtsi2ssl %eax, %xmm2, %xmm0
; X64-NEXT:    retq
  %1 = ashr <2 x i64> %a0, <i64 61, i64 60>
  %2 = sext i32 %a2 to i64
  %3 = insertelement <2 x i64> %a1, i64 %2, i32 0
  %4 = shl <2 x i64> %3, <i64 20, i64 20>
  %5 = ashr <2 x i64> %4, <i64 20, i64 20>
  %6 = and <2 x i64> %1, %5
  %7 = extractelement <2 x i64> %6, i32 0
  %8 = sitofp i64 %7 to float
  ret float %8
}
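
; Sign bits are tracked through the and/or/xor bitops, so the extracted
; element is again known to fit in an i32.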
define float @signbits_ashr_sextvecinreg_bitops_extract_sitofp(<2 x i64> %a0, <4 x i32> %a1) nounwind {
; X32-LABEL: signbits_ashr_sextvecinreg_bitops_extract_sitofp:
; X32:       # BB#0:
; X32-NEXT:    pushl %eax
; X32-NEXT:    vpsrlq $60, %xmm0, %xmm2
; X32-NEXT:    vpsrlq $61, %xmm0, %xmm0
; X32-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; X32-NEXT:    vmovdqa {{.*#+}} xmm2 = [4,0,0,0,8,0,0,0]
; X32-NEXT:    vpxor %xmm2, %xmm0, %xmm0
; X32-NEXT:    vpsubq %xmm2, %xmm0, %xmm0
; X32-NEXT:    vpmovsxdq %xmm1, %xmm1
; X32-NEXT:    vpand %xmm1, %xmm0, %xmm2
; X32-NEXT:    vpor %xmm1, %xmm2, %xmm1
; X32-NEXT:    vpxor %xmm0, %xmm1, %xmm0
; X32-NEXT:    vcvtdq2ps %xmm0, %xmm0
; X32-NEXT:    vmovss %xmm0, (%esp)
; X32-NEXT:    flds (%esp)
; X32-NEXT:    popl %eax
; X32-NEXT:    retl
;
; X64-LABEL: signbits_ashr_sextvecinreg_bitops_extract_sitofp:
; X64:       # BB#0:
; X64-NEXT:    vpsrlq $60, %xmm0, %xmm2
; X64-NEXT:    vpsrlq $61, %xmm0, %xmm0
; X64-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; X64-NEXT:    vmovdqa {{.*#+}} xmm2 = [4,8]
; X64-NEXT:    vpxor %xmm2, %xmm0, %xmm0
; X64-NEXT:    vpsubq %xmm2, %xmm0, %xmm0
; X64-NEXT:    vpmovsxdq %xmm1, %xmm1
; X64-NEXT:    vpand %xmm1, %xmm0, %xmm2
; X64-NEXT:    vpor %xmm1, %xmm2, %xmm1
; X64-NEXT:    vpxor %xmm0, %xmm1, %xmm0
; X64-NEXT:    vmovq %xmm0, %rax
; X64-NEXT:    vcvtsi2ssl %eax, %xmm3, %xmm0
; X64-NEXT:    retq
  %1 = ashr <2 x i64> %a0, <i64 61, i64 60>
  %2 = shufflevector <4 x i32> %a1, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
  %3 = sext <2 x i32> %2 to <2 x i64>
  %4 = and <2 x i64> %1, %3
  %5 = or <2 x i64> %4, %3
  %6 = xor <2 x i64> %5, %1
  %7 = extractelement <2 x i64> %6, i32 0
  %8 = sitofp i64 %7 to float
  ret float %8
}
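
; Sign bits are tracked through the select (lowered to vblendvpd) and the
; shuffle (vmovddup), so the v4i64->v4f32 sitofp can finish with vcvtdq2ps.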
define <4 x float> @signbits_ashr_sext_select_shuffle_sitofp(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> %a2, <4 x i32> %a3) nounwind {
; X32-LABEL: signbits_ashr_sext_select_shuffle_sitofp:
; X32:       # BB#0:
; X32-NEXT:    pushl %ebp
; X32-NEXT:    movl %esp, %ebp
; X32-NEXT:    andl $-16, %esp
; X32-NEXT:    subl $16, %esp
; X32-NEXT:    vpmovsxdq 16(%ebp), %xmm3
; X32-NEXT:    vpmovsxdq 8(%ebp), %xmm4
; X32-NEXT:    vextractf128 $1, %ymm2, %xmm5
; X32-NEXT:    vpsrlq $33, %xmm5, %xmm5
; X32-NEXT:    vmovdqa {{.*#+}} xmm6 = [0,16384,0,0,1,0,0,0]
; X32-NEXT:    vpxor %xmm6, %xmm5, %xmm5
; X32-NEXT:    vpsubq %xmm6, %xmm5, %xmm5
; X32-NEXT:    vpsrlq $33, %xmm2, %xmm2
; X32-NEXT:    vpxor %xmm6, %xmm2, %xmm2
; X32-NEXT:    vpsubq %xmm6, %xmm2, %xmm2
; X32-NEXT:    vinsertf128 $1, %xmm5, %ymm2, %ymm2
; X32-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
; X32-NEXT:    vextractf128 $1, %ymm1, %xmm4
; X32-NEXT:    vextractf128 $1, %ymm0, %xmm5
; X32-NEXT:    vpcmpeqq %xmm4, %xmm5, %xmm4
; X32-NEXT:    vpcmpeqq %xmm1, %xmm0, %xmm0
; X32-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm0
; X32-NEXT:    vblendvpd %ymm0, %ymm2, %ymm3, %ymm0
; X32-NEXT:    vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
; X32-NEXT:    vextractf128 $1, %ymm0, %xmm1
; X32-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; X32-NEXT:    vcvtdq2ps %xmm0, %xmm0
; X32-NEXT:    movl %ebp, %esp
; X32-NEXT:    popl %ebp
; X32-NEXT:    vzeroupper
; X32-NEXT:    retl
;
; X64-LABEL: signbits_ashr_sext_select_shuffle_sitofp:
; X64:       # BB#0:
; X64-NEXT:    vextractf128 $1, %ymm2, %xmm4
; X64-NEXT:    vpsrlq $33, %xmm4, %xmm4
; X64-NEXT:    vmovdqa {{.*#+}} xmm5 = [1073741824,1]
; X64-NEXT:    vpxor %xmm5, %xmm4, %xmm4
; X64-NEXT:    vpsubq %xmm5, %xmm4, %xmm4
; X64-NEXT:    vpsrlq $33, %xmm2, %xmm2
; X64-NEXT:    vpxor %xmm5, %xmm2, %xmm2
; X64-NEXT:    vpsubq %xmm5, %xmm2, %xmm2
; X64-NEXT:    vinsertf128 $1, %xmm4, %ymm2, %ymm2
; X64-NEXT:    vpmovsxdq %xmm3, %xmm4
; X64-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
; X64-NEXT:    vpmovsxdq %xmm3, %xmm3
; X64-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
; X64-NEXT:    vextractf128 $1, %ymm1, %xmm4
; X64-NEXT:    vextractf128 $1, %ymm0, %xmm5
; X64-NEXT:    vpcmpeqq %xmm4, %xmm5, %xmm4
; X64-NEXT:    vpcmpeqq %xmm1, %xmm0, %xmm0
; X64-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm0
; X64-NEXT:    vblendvpd %ymm0, %ymm2, %ymm3, %ymm0
; X64-NEXT:    vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
; X64-NEXT:    vextractf128 $1, %ymm0, %xmm1
; X64-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; X64-NEXT:    vcvtdq2ps %xmm0, %xmm0
; X64-NEXT:    vzeroupper
; X64-NEXT:    retq
  %1 = ashr <4 x i64> %a2, <i64 33, i64 63, i64 33, i64 63>
  %2 = sext <4 x i32> %a3 to <4 x i64>
  %3 = icmp eq <4 x i64> %a0, %a1
  %4 = select <4 x i1> %3, <4 x i64> %1, <4 x i64> %2
  %5 = shufflevector <4 x i64> %4, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
  %6 = sitofp <4 x i64> %5 to <4 x float>
  ret <4 x float> %6
}
; Make sure we can preserve sign bit information into the second basic block
; so we can avoid having to shift bit 0 into bit 7 for each element due to
; v32i1->v32i8 promotion and the splitting of v32i8 into 2xv16i8. This requires
; ComputeNumSignBits handling for insert_subvector.
define void @cross_bb_signbits_insert_subvec(<32 x i8>* %ptr, <32 x i8> %x, <32 x i8> %z) {
; X32-LABEL: cross_bb_signbits_insert_subvec:
; X32:       # BB#0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    vextractf128 $1, %ymm0, %xmm2
; X32-NEXT:    vpxor %xmm3, %xmm3, %xmm3
; X32-NEXT:    vpcmpeqb %xmm3, %xmm2, %xmm2
; X32-NEXT:    vpcmpeqb %xmm3, %xmm0, %xmm0
; X32-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; X32-NEXT:    vandnps %ymm1, %ymm0, %ymm1
; X32-NEXT:    vandps {{\.LCPI.*}}, %ymm0, %ymm0
; X32-NEXT:    vorps %ymm1, %ymm0, %ymm0
; X32-NEXT:    vmovaps %ymm0, (%eax)
; X32-NEXT:    vzeroupper
; X32-NEXT:    retl
;
; X64-LABEL: cross_bb_signbits_insert_subvec:
; X64:       # BB#0:
; X64-NEXT:    vextractf128 $1, %ymm0, %xmm2
; X64-NEXT:    vpxor %xmm3, %xmm3, %xmm3
; X64-NEXT:    vpcmpeqb %xmm3, %xmm2, %xmm2
; X64-NEXT:    vpcmpeqb %xmm3, %xmm0, %xmm0
; X64-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; X64-NEXT:    vandnps %ymm1, %ymm0, %ymm1
; X64-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT:    vorps %ymm1, %ymm0, %ymm0
; X64-NEXT:    vmovaps %ymm0, (%rdi)
; X64-NEXT:    vzeroupper
; X64-NEXT:    retq
  %a = icmp eq <32 x i8> %x, zeroinitializer
  %b = icmp eq <32 x i8> %x, zeroinitializer
  %c = and <32 x i1> %a, %b
  br label %block

block:
  %d = select <32 x i1> %c, <32 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, <32 x i8> %z
  store <32 x i8> %d, <32 x i8>* %ptr, align 32
  ret void
}