; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse | FileCheck %s --check-prefixes=X86-SSE,X86-SSE1
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=X86-SSE,X86-SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=-sse2 | FileCheck %s --check-prefixes=X64-SSE,X64-SSE1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=X64-SSE,X64-SSE2

; FNEG is defined as subtraction from -0.0.

; This test verifies that we use an xor with a constant to flip the sign bits; no subtraction needed.
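; (Descriptive note, not checked by FileCheck: the constant-pool value loaded by the
; xorps below is expected to be a per-lane sign mask, i.e. 0x80000000 in each f32
; element, so only the sign bit of each lane is flipped.)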
define <4 x float> @t1(<4 x float> %Q) nounwind {
; X86-SSE-LABEL: t1:
; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
;
; X64-SSE-LABEL: t1:
; X64-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
  %tmp = fsub <4 x float> <float -0.0, float -0.0, float -0.0, float -0.0>, %Q
  ret <4 x float> %tmp
}

; Possibly misplaced tests, but they cover the remaining undef scenarios for fsub/fneg.
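; (Descriptive note: "fsub -0.0, undef" and "fneg undef" are expected to fold to undef,
; so no sign-flipping code should be required for the functions below.)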
define float @scalar_fsub_neg0_undef(float %x) nounwind {
; X86-SSE-LABEL: scalar_fsub_neg0_undef:
;
; X64-SSE-LABEL: scalar_fsub_neg0_undef:
  %r = fsub float -0.0, undef
  ret float %r
}

define float @scalar_fneg_undef(float %x) nounwind {
; X86-SSE-LABEL: scalar_fneg_undef:
;
; X64-SSE-LABEL: scalar_fneg_undef:
  %r = fneg float undef
  ret float %r
}

define <4 x float> @fsub_neg0_undef(<4 x float> %Q) nounwind {
; X86-SSE-LABEL: fsub_neg0_undef:
;
; X64-SSE-LABEL: fsub_neg0_undef:
  %r = fsub <4 x float> <float -0.0, float -0.0, float -0.0, float -0.0>, undef
  ret <4 x float> %r
}

define <4 x float> @fneg_undef(<4 x float> %Q) nounwind {
; X86-SSE-LABEL: fneg_undef:
;
; X64-SSE-LABEL: fneg_undef:
  %r = fneg <4 x float> undef
  ret <4 x float> %r
}

define <4 x float> @fsub_neg0_undef_elts_undef(<4 x float> %x) {
; X86-SSE-LABEL: fsub_neg0_undef_elts_undef:
;
; X64-SSE-LABEL: fsub_neg0_undef_elts_undef:
  %r = fsub <4 x float> <float -0.0, float undef, float undef, float -0.0>, undef
  ret <4 x float> %r
}

; This test verifies that we generate an FP subtraction because "0.0 - x" is not an fneg
; (for x = 0.0, "0.0 - 0.0" is +0.0, but fneg(0.0) is -0.0).
define <4 x float> @t2(<4 x float> %Q) nounwind {
; X86-SSE-LABEL: t2:
; X86-SSE-NEXT:    xorps %xmm1, %xmm1
; X86-SSE-NEXT:    subps %xmm0, %xmm1
; X86-SSE-NEXT:    movaps %xmm1, %xmm0
;
; X64-SSE-LABEL: t2:
; X64-SSE-NEXT:    xorps %xmm1, %xmm1
; X64-SSE-NEXT:    subps %xmm0, %xmm1
; X64-SSE-NEXT:    movaps %xmm1, %xmm0
  %tmp = fsub <4 x float> zeroinitializer, %Q
  ret <4 x float> %tmp
}

; If we're bitcasting an integer to an FP vector, we should avoid the FPU/vector unit entirely.
; Make sure that we're flipping the sign bit and only the sign bit of each float.
; So instead of something like this:
;    movd    %rdi, %xmm0
;    xorps   .LCPI2_0(%rip), %xmm0
;
; We should generate:
;    movabsq (put sign bit mask in integer register)
;    xorq    (flip sign bits)
;    movd    (move to xmm return register)
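; (Descriptive note: the 0x8000000080000000 immediate below is two packed f32 sign
; masks. With only SSE1, GPR<->XMM moves (movd/movq) are unavailable, which is
; presumably why the SSE1 variants bounce the value through the stack and reload it
; with movaps.)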
define <2 x float> @fneg_bitcast(i64 %i) nounwind {
; X86-SSE1-LABEL: fneg_bitcast:
; X86-SSE1:       # %bb.0:
; X86-SSE1-NEXT:    pushl %ebp
; X86-SSE1-NEXT:    movl %esp, %ebp
; X86-SSE1-NEXT:    andl $-16, %esp
; X86-SSE1-NEXT:    subl $16, %esp
; X86-SSE1-NEXT:    movl $-2147483648, %eax # imm = 0x80000000
; X86-SSE1-NEXT:    movl 12(%ebp), %ecx
; X86-SSE1-NEXT:    xorl %eax, %ecx
; X86-SSE1-NEXT:    movl %ecx, {{[0-9]+}}(%esp)
; X86-SSE1-NEXT:    xorl 8(%ebp), %eax
; X86-SSE1-NEXT:    movl %eax, (%esp)
; X86-SSE1-NEXT:    movaps (%esp), %xmm0
; X86-SSE1-NEXT:    movl %ebp, %esp
; X86-SSE1-NEXT:    popl %ebp
; X86-SSE1-NEXT:    retl
;
; X86-SSE2-LABEL: fneg_bitcast:
; X86-SSE2:       # %bb.0:
; X86-SSE2-NEXT:    movl $-2147483648, %eax # imm = 0x80000000
; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-SSE2-NEXT:    xorl %eax, %ecx
; X86-SSE2-NEXT:    movd %ecx, %xmm1
; X86-SSE2-NEXT:    xorl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT:    movd %eax, %xmm0
; X86-SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X86-SSE2-NEXT:    retl
;
; X64-SSE1-LABEL: fneg_bitcast:
; X64-SSE1:       # %bb.0:
; X64-SSE1-NEXT:    movabsq $-9223372034707292160, %rax # imm = 0x8000000080000000
; X64-SSE1-NEXT:    xorq %rdi, %rax
; X64-SSE1-NEXT:    movq %rax, -{{[0-9]+}}(%rsp)
; X64-SSE1-NEXT:    movaps -{{[0-9]+}}(%rsp), %xmm0
; X64-SSE1-NEXT:    retq
;
; X64-SSE2-LABEL: fneg_bitcast:
; X64-SSE2:       # %bb.0:
; X64-SSE2-NEXT:    movabsq $-9223372034707292160, %rax # imm = 0x8000000080000000
; X64-SSE2-NEXT:    xorq %rdi, %rax
; X64-SSE2-NEXT:    movq %rax, %xmm0
; X64-SSE2-NEXT:    retq
  %bitcast = bitcast i64 %i to <2 x float>
  %fneg = fsub <2 x float> <float -0.0, float -0.0>, %bitcast
  ret <2 x float> %fneg
}

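; (Descriptive note: undef elements in the -0.0 constant are not expected to block the
; fneg fold; a full sign-mask xor should still be emitted for the next function.)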
define <4 x float> @fneg_undef_elts_v4f32(<4 x float> %x) {
; X86-SSE-LABEL: fneg_undef_elts_v4f32:
; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
;
; X64-SSE-LABEL: fneg_undef_elts_v4f32:
; X64-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
  %r = fsub <4 x float> <float -0.0, float undef, float undef, float -0.0>, %x
  ret <4 x float> %r
}

; This isn't fneg, but similarly check that (X - 0.0) is simplified
; (subtracting +0.0 never changes the value, so the sub should fold to X).
define <4 x float> @fsub0_undef_elts_v4f32(<4 x float> %x) {
; X86-SSE-LABEL: fsub0_undef_elts_v4f32:
;
; X64-SSE-LABEL: fsub0_undef_elts_v4f32:
  %r = fsub <4 x float> %x, <float 0.0, float undef, float 0.0, float undef>
  ret <4 x float> %r
}

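; (Descriptive note: the dedicated fneg instruction is expected to lower exactly like
; the "fsub -0.0, x" idiom in @t1 above, i.e. to the same sign-mask xorps.)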
define <4 x float> @fneg(<4 x float> %Q) nounwind {
; X86-SSE-LABEL: fneg:
; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
;
; X64-SSE-LABEL: fneg:
; X64-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
  %tmp = fneg <4 x float> %Q
  ret <4 x float> %tmp
}