; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX

; NOTE: this is generated by utils/update_llc_test_checks.py but we can't check NAN types (PR30443),
; so we need to edit it to remove the NAN constant comments
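
; There is no dedicated fabs instruction on x86, so fabs is expected to lower to
; a bitwise AND with the sign-clearing mask 0x7fffffff. The asm-comment printer
; renders that bit pattern as NaN, which is why NaN appears in the vector
; constant comments below.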

; fabs(c1) -> c2
define float @combine_fabs_constant() {
; SSE-LABEL: combine_fabs_constant:
; SSE:       # %bb.0:
; SSE-NEXT:    movss {{.*#+}} xmm0 = [2.0E+0,0.0E+0,0.0E+0,0.0E+0]
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_fabs_constant:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovss {{.*#+}} xmm0 = [2.0E+0,0.0E+0,0.0E+0,0.0E+0]
; AVX-NEXT:    retq
  %1 = call float @llvm.fabs.f32(float -2.0)
  ret float %1
}
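
; The vector form should fold per lane: fabs(<0.0, -0.0, 2.0, -2.0>) becomes
; <0.0, 0.0, 2.0, 2.0>, materialized as a single constant load.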
define <4 x float> @combine_vec_fabs_constant() {
; SSE-LABEL: combine_vec_fabs_constant:
; SSE:       # %bb.0:
; SSE-NEXT:    movaps {{.*#+}} xmm0 = [0.0E+0,0.0E+0,2.0E+0,2.0E+0]
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_fabs_constant:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [0.0E+0,0.0E+0,2.0E+0,2.0E+0]
; AVX-NEXT:    retq
  %1 = call <4 x float> @llvm.fabs.v4f32(<4 x float> <float 0.0, float -0.0, float 2.0, float -2.0>)
  ret <4 x float> %1
}

; fabs(fabs(x)) -> fabs(x)
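; Both fabs calls should collapse into a single AND with the sign-clearing mask.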
define float @combine_fabs_fabs(float %a) {
; SSE-LABEL: combine_fabs_fabs:
; SSE:       # %bb.0:
; SSE-NEXT:    andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_fabs_fabs:
; AVX:       # %bb.0:
; AVX-NEXT:    vbroadcastss {{.*#+}} xmm1 = [NaN,NaN,NaN,NaN]
; AVX-NEXT:    vandps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = call float @llvm.fabs.f32(float %a)
  %2 = call float @llvm.fabs.f32(float %1)
  ret float %2
}

define <4 x float> @combine_vec_fabs_fabs(<4 x float> %a) {
; SSE-LABEL: combine_vec_fabs_fabs:
; SSE:       # %bb.0:
; SSE-NEXT:    andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_fabs_fabs:
; AVX:       # %bb.0:
; AVX-NEXT:    vbroadcastss {{.*#+}} xmm1 = [NaN,NaN,NaN,NaN]
; AVX-NEXT:    vandps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = call <4 x float> @llvm.fabs.v4f32(<4 x float> %a)
  %2 = call <4 x float> @llvm.fabs.v4f32(<4 x float> %1)
  ret <4 x float> %2
}

; fabs(fneg(x)) -> fabs(x)
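; The negation should be absorbed: no XOR is emitted, only the single AND.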
define float @combine_fabs_fneg(float %a) {
; SSE-LABEL: combine_fabs_fneg:
; SSE:       # %bb.0:
; SSE-NEXT:    andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_fabs_fneg:
; AVX:       # %bb.0:
; AVX-NEXT:    vbroadcastss {{.*#+}} xmm1 = [NaN,NaN,NaN,NaN]
; AVX-NEXT:    vandps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = fsub float -0.0, %a
  %2 = call float @llvm.fabs.f32(float %1)
  ret float %2
}

define <4 x float> @combine_vec_fabs_fneg(<4 x float> %a) {
; SSE-LABEL: combine_vec_fabs_fneg:
; SSE:       # %bb.0:
; SSE-NEXT:    andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_fabs_fneg:
; AVX:       # %bb.0:
; AVX-NEXT:    vbroadcastss {{.*#+}} xmm1 = [NaN,NaN,NaN,NaN]
; AVX-NEXT:    vandps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = fsub <4 x float> <float -0.0, float -0.0, float -0.0, float -0.0>, %a
  %2 = call <4 x float> @llvm.fabs.v4f32(<4 x float> %1)
  ret <4 x float> %2
}

; fabs(fcopysign(x, y)) -> fabs(x)
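; fabs makes the copied sign irrelevant, so the copysign should disappear and
; only a single AND of the magnitude operand remains.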
define float @combine_fabs_fcopysign(float %a, float %b) {
; SSE-LABEL: combine_fabs_fcopysign:
; SSE:       # %bb.0:
; SSE-NEXT:    andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_fabs_fcopysign:
; AVX:       # %bb.0:
; AVX-NEXT:    vbroadcastss {{.*#+}} xmm1 = [NaN,NaN,NaN,NaN]
; AVX-NEXT:    vandps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = call float @llvm.copysign.f32(float %a, float %b)
  %2 = call float @llvm.fabs.f32(float %1)
  ret float %2
}

define <4 x float> @combine_vec_fabs_fcopysign(<4 x float> %a, <4 x float> %b) {
; SSE-LABEL: combine_vec_fabs_fcopysign:
; SSE:       # %bb.0:
; SSE-NEXT:    andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_fabs_fcopysign:
; AVX:       # %bb.0:
; AVX-NEXT:    vbroadcastss {{.*#+}} xmm1 = [NaN,NaN,NaN,NaN]
; AVX-NEXT:    vandps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = call <4 x float> @llvm.copysign.v4f32(<4 x float> %a, <4 x float> %b)
  %2 = call <4 x float> @llvm.fabs.v4f32(<4 x float> %1)
  ret <4 x float> %2
}

declare float @llvm.fabs.f32(float %p)
declare float @llvm.copysign.f32(float %Mag, float %Sgn)

declare <4 x float> @llvm.fabs.v4f32(<4 x float> %p)
declare <4 x float> @llvm.copysign.v4f32(<4 x float> %Mag, <4 x float> %Sgn)