; RUN: llc < %s -mtriple=x86_64-apple-darwin | FileCheck %s

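; Check that zext of a setcc result followed by sitofp is combined into a
; compare plus an AND with a constant-pool splat of 1.0, rather than a
; separate int-to-fp conversion.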
define <4 x float> @foo(<4 x float> %val, <4 x float> %test) nounwind {
; CHECK-LABEL: LCPI0_0:
; CHECK-NEXT: .long 1065353216 ## 0x3f800000
; CHECK-NEXT: .long 1065353216 ## 0x3f800000
; CHECK-NEXT: .long 1065353216 ## 0x3f800000
; CHECK-NEXT: .long 1065353216 ## 0x3f800000
; CHECK-LABEL: foo:
; CHECK: ## %bb.0:
; CHECK-NEXT: cmpeqps %xmm1, %xmm0
; CHECK-NEXT: andps {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
  %cmp = fcmp oeq <4 x float> %val, %test
  %ext = zext <4 x i1> %cmp to <4 x i32>
  %result = sitofp <4 x i32> %ext to <4 x float>
  ret <4 x float> %result
}

; Make sure the operation doesn't try to get folded when the sizes don't match,
; as that ends up crashing later when trying to form a bitcast operation for
; types of different sizes.
define void @foo1(<4 x float> %val, <4 x float> %test, ptr %p) nounwind {
; CHECK-LABEL: LCPI1_0:
; CHECK-NEXT: .long 1 ## 0x1
; CHECK-NEXT: .long 1 ## 0x1
; CHECK-NEXT: .long 1 ## 0x1
; CHECK-NEXT: .long 1 ## 0x1
; CHECK-LABEL: foo1:
; CHECK: ## %bb.0:
; CHECK-NEXT: cmpeqps %xmm1, %xmm0
; CHECK-NEXT: andps {{.*}}(%rip), %xmm0
; CHECK-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; CHECK-NEXT: cvtdq2pd %xmm1, %xmm1
; CHECK-NEXT: cvtdq2pd %xmm0, %xmm0
; CHECK-NEXT: movaps %xmm0, (%rdi)
; CHECK-NEXT: movaps %xmm1, 16(%rdi)
; CHECK-NEXT: retq
  %cmp = fcmp oeq <4 x float> %val, %test
  %ext = zext <4 x i1> %cmp to <4 x i32>
  %result = sitofp <4 x i32> %ext to <4 x double>
  store <4 x double> %result, ptr %p
  ret void
}

; Also test the general purpose constant folding of int->fp.
define void @foo2(ptr noalias %result) nounwind {
; CHECK-LABEL: LCPI2_0:
; CHECK-NEXT: .long 0x40800000 ## float 4
; CHECK-NEXT: .long 0x40a00000 ## float 5
; CHECK-NEXT: .long 0x40c00000 ## float 6
; CHECK-NEXT: .long 0x40e00000 ## float 7
; CHECK-LABEL: foo2:
; CHECK: ## %bb.0:
; CHECK-NEXT: movaps {{.*#+}} xmm0 = [4.0E+0,5.0E+0,6.0E+0,7.0E+0]
; CHECK-NEXT: movaps %xmm0, (%rdi)
; CHECK-NEXT: retq
  %val = uitofp <4 x i32> <i32 4, i32 5, i32 6, i32 7> to <4 x float>
  store <4 x float> %val, ptr %result
  ret void
}

; Fold explicit AND operations when the constant isn't a splat of a single
; scalar value like what the zext creates.
define <4 x float> @foo3(<4 x float> %val, <4 x float> %test) nounwind {
; CHECK-LABEL: LCPI3_0:
; CHECK-NEXT: .long 1065353216 ## 0x3f800000
; CHECK-NEXT: .long 0 ## 0x0
; CHECK-NEXT: .long 1065353216 ## 0x3f800000
; CHECK-NEXT: .long 0 ## 0x0
; CHECK-LABEL: foo3:
; CHECK: ## %bb.0:
; CHECK-NEXT: cmpeqps %xmm1, %xmm0
; CHECK-NEXT: andps {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
  %cmp = fcmp oeq <4 x float> %val, %test
  %ext = zext <4 x i1> %cmp to <4 x i32>
  %and = and <4 x i32> %ext, <i32 255, i32 256, i32 257, i32 258>
  %result = sitofp <4 x i32> %and to <4 x float>
  ret <4 x float> %result
}

; Test the general purpose constant folding of uint->fp.
define void @foo4(ptr noalias %result) nounwind {
; CHECK-LABEL: LCPI4_0:
; CHECK-NEXT: .long 0x3f800000 ## float 1
; CHECK-NEXT: .long 0x42fe0000 ## float 127
; CHECK-NEXT: .long 0x43000000 ## float 128
; CHECK-NEXT: .long 0x437f0000 ## float 255
; CHECK-LABEL: foo4:
; CHECK: ## %bb.0:
; CHECK-NEXT: movaps {{.*#+}} xmm0 = [1.0E+0,1.27E+2,1.28E+2,2.55E+2]
; CHECK-NEXT: movaps %xmm0, (%rdi)
; CHECK-NEXT: retq
  %val = uitofp <4 x i8> <i8 1, i8 127, i8 -128, i8 -1> to <4 x float>
  store <4 x float> %val, ptr %result
  ret void
}

; Test when we're masking against a sign-extended setcc.
define <4 x float> @foo5(<4 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: LCPI5_0:
; CHECK-NEXT: .long 1065353216 ## 0x3f800000
; CHECK-NEXT: .long 0 ## 0x0
; CHECK-NEXT: .long 1065353216 ## 0x3f800000
; CHECK-NEXT: .long 0 ## 0x0
; CHECK-LABEL: foo5:
; CHECK: ## %bb.0:
; CHECK-NEXT: pcmpgtd %xmm1, %xmm0
; CHECK-NEXT: pand {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
  %1 = icmp sgt <4 x i32> %a0, %a1
  %2 = sext <4 x i1> %1 to <4 x i32>
  %3 = and <4 x i32> %2, <i32 1, i32 0, i32 1, i32 0>
  %4 = uitofp <4 x i32> %3 to <4 x float>
  ret <4 x float> %4
}

; Test when we're masking against mask arithmetic, not the setccs directly.
define <4 x float> @foo6(<4 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: LCPI6_0:
; CHECK-NEXT: .long 1065353216 ## 0x3f800000
; CHECK-NEXT: .long 0 ## 0x0
; CHECK-NEXT: .long 1065353216 ## 0x3f800000
; CHECK-NEXT: .long 0 ## 0x0
; CHECK-LABEL: foo6:
; CHECK: ## %bb.0:
; CHECK-NEXT: movdqa %xmm0, %xmm2
; CHECK-NEXT: pcmpgtd %xmm1, %xmm2
; CHECK-NEXT: pxor %xmm1, %xmm1
; CHECK-NEXT: pcmpgtd %xmm1, %xmm0
; CHECK-NEXT: pand %xmm2, %xmm0
; CHECK-NEXT: pand {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
  %1 = icmp sgt <4 x i32> %a0, %a1
  %2 = icmp sgt <4 x i32> %a0, zeroinitializer
  %3 = and <4 x i1> %1, %2
  %4 = sext <4 x i1> %3 to <4 x i32>
  %5 = and <4 x i32> %4, <i32 1, i32 0, i32 1, i32 0>
  %6 = uitofp <4 x i32> %5 to <4 x float>
  ret <4 x float> %6
}

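; The two AND masks combine so the remaining values fit in the positive i32
; range; the <4 x i64> uitofp should therefore narrow to a 32-bit conversion
; (shufps + andps + cvtdq2ps) instead of a full 64-bit unsigned conversion.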
define <4 x float> @foo7(<4 x i64> %a) {
; CHECK-LABEL: LCPI7_0:
; CHECK-NEXT: .byte 0 ## 0x0
; CHECK-NEXT: .byte 255 ## 0xff
; CHECK-NEXT: .byte 0 ## 0x0
; CHECK-NEXT: .byte 0 ## 0x0
; CHECK-NEXT: .byte 0 ## 0x0
; CHECK-NEXT: .byte 255 ## 0xff
; CHECK-NEXT: .byte 0 ## 0x0
; CHECK-NEXT: .byte 0 ## 0x0
; CHECK-NEXT: .byte 0 ## 0x0
; CHECK-NEXT: .byte 255 ## 0xff
; CHECK-NEXT: .byte 0 ## 0x0
; CHECK-NEXT: .byte 0 ## 0x0
; CHECK-NEXT: .byte 0 ## 0x0
; CHECK-NEXT: .byte 255 ## 0xff
; CHECK-NEXT: .byte 0 ## 0x0
; CHECK-NEXT: .byte 0 ## 0x0
; CHECK-LABEL: foo7:
; CHECK: ## %bb.0:
; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; CHECK-NEXT: andps {{.*}}(%rip), %xmm0
; CHECK-NEXT: cvtdq2ps %xmm0, %xmm0
; CHECK-NEXT: retq
  %b = and <4 x i64> %a, <i64 4278255360, i64 4278255360, i64 4278255360, i64 4278255360>
  %c = and <4 x i64> %b, <i64 65535, i64 65535, i64 65535, i64 65535>
  %d = uitofp <4 x i64> %c to <4 x float>
  ret <4 x float> %d
}