1 // RUN: %clang_cc1 -x c -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +sse -emit-llvm -ffp-exception-behavior=strict -o - -Wall -Werror | FileCheck %s
2 // RUN: %clang_cc1 -x c++ -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +sse -emit-llvm -ffp-exception-behavior=strict -o - -Wall -Werror | FileCheck %s
__m128 test_mm_cmpeq_ps(__m128 __a, __m128 __b) {
  // CHECK-LABEL: test_mm_cmpeq_ps
  // CHECK: [[CMP:%.*]] = call <4 x i1> @llvm.experimental.constrained.fcmp.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, metadata !"oeq", metadata !"fpexcept.strict")
  // CHECK-NEXT: [[SEXT:%.*]] = sext <4 x i1> [[CMP]] to <4 x i32>
  // CHECK-NEXT: [[BC:%.*]] = bitcast <4 x i32> [[SEXT]] to <4 x float>
  // CHECK-NEXT: ret <4 x float> [[BC]]
  return _mm_cmpeq_ps(__a, __b);
}
__m128 test_mm_cmpge_ps(__m128 __a, __m128 __b) {
  // CHECK-LABEL: test_mm_cmpge_ps
  // CHECK: [[CMP:%.*]] = call <4 x i1> @llvm.experimental.constrained.fcmps.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, metadata !"ole", metadata !"fpexcept.strict")
  // CHECK-NEXT: [[SEXT:%.*]] = sext <4 x i1> [[CMP]] to <4 x i32>
  // CHECK-NEXT: [[BC:%.*]] = bitcast <4 x i32> [[SEXT]] to <4 x float>
  // CHECK-NEXT: ret <4 x float> [[BC]]
  return _mm_cmpge_ps(__a, __b);
}
__m128 test_mm_cmpgt_ps(__m128 __a, __m128 __b) {
  // CHECK-LABEL: test_mm_cmpgt_ps
  // CHECK: [[CMP:%.*]] = call <4 x i1> @llvm.experimental.constrained.fcmps.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, metadata !"olt", metadata !"fpexcept.strict")
  // CHECK-NEXT: [[SEXT:%.*]] = sext <4 x i1> [[CMP]] to <4 x i32>
  // CHECK-NEXT: [[BC:%.*]] = bitcast <4 x i32> [[SEXT]] to <4 x float>
  // CHECK-NEXT: ret <4 x float> [[BC]]
  return _mm_cmpgt_ps(__a, __b);
}
__m128 test_mm_cmple_ps(__m128 __a, __m128 __b) {
  // CHECK-LABEL: test_mm_cmple_ps
  // CHECK: [[CMP:%.*]] = call <4 x i1> @llvm.experimental.constrained.fcmps.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, metadata !"ole", metadata !"fpexcept.strict")
  // CHECK-NEXT: [[SEXT:%.*]] = sext <4 x i1> [[CMP]] to <4 x i32>
  // CHECK-NEXT: [[BC:%.*]] = bitcast <4 x i32> [[SEXT]] to <4 x float>
  // CHECK-NEXT: ret <4 x float> [[BC]]
  return _mm_cmple_ps(__a, __b);
}
__m128 test_mm_cmplt_ps(__m128 __a, __m128 __b) {
  // CHECK-LABEL: test_mm_cmplt_ps
  // CHECK: [[CMP:%.*]] = call <4 x i1> @llvm.experimental.constrained.fcmps.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, metadata !"olt", metadata !"fpexcept.strict")
  // CHECK-NEXT: [[SEXT:%.*]] = sext <4 x i1> [[CMP]] to <4 x i32>
  // CHECK-NEXT: [[BC:%.*]] = bitcast <4 x i32> [[SEXT]] to <4 x float>
  // CHECK-NEXT: ret <4 x float> [[BC]]
  return _mm_cmplt_ps(__a, __b);
}
__m128 test_mm_cmpneq_ps(__m128 __a, __m128 __b) {
  // CHECK-LABEL: test_mm_cmpneq_ps
  // CHECK: [[CMP:%.*]] = call <4 x i1> @llvm.experimental.constrained.fcmp.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, metadata !"une", metadata !"fpexcept.strict")
  // CHECK-NEXT: [[SEXT:%.*]] = sext <4 x i1> [[CMP]] to <4 x i32>
  // CHECK-NEXT: [[BC:%.*]] = bitcast <4 x i32> [[SEXT]] to <4 x float>
  // CHECK-NEXT: ret <4 x float> [[BC]]
  return _mm_cmpneq_ps(__a, __b);
}
__m128 test_mm_cmpnge_ps(__m128 __a, __m128 __b) {
  // CHECK-LABEL: test_mm_cmpnge_ps
  // CHECK: [[CMP:%.*]] = call <4 x i1> @llvm.experimental.constrained.fcmps.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, metadata !"ugt", metadata !"fpexcept.strict")
  // CHECK-NEXT: [[SEXT:%.*]] = sext <4 x i1> [[CMP]] to <4 x i32>
  // CHECK-NEXT: [[BC:%.*]] = bitcast <4 x i32> [[SEXT]] to <4 x float>
  // CHECK-NEXT: ret <4 x float> [[BC]]
  return _mm_cmpnge_ps(__a, __b);
}
__m128 test_mm_cmpngt_ps(__m128 __a, __m128 __b) {
  // CHECK-LABEL: test_mm_cmpngt_ps
  // CHECK: [[CMP:%.*]] = call <4 x i1> @llvm.experimental.constrained.fcmps.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, metadata !"uge", metadata !"fpexcept.strict")
  // CHECK-NEXT: [[SEXT:%.*]] = sext <4 x i1> [[CMP]] to <4 x i32>
  // CHECK-NEXT: [[BC:%.*]] = bitcast <4 x i32> [[SEXT]] to <4 x float>
  // CHECK-NEXT: ret <4 x float> [[BC]]
  return _mm_cmpngt_ps(__a, __b);
}
__m128 test_mm_cmpnle_ps(__m128 __a, __m128 __b) {
  // CHECK-LABEL: test_mm_cmpnle_ps
  // CHECK: [[CMP:%.*]] = call <4 x i1> @llvm.experimental.constrained.fcmps.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, metadata !"ugt", metadata !"fpexcept.strict")
  // CHECK-NEXT: [[SEXT:%.*]] = sext <4 x i1> [[CMP]] to <4 x i32>
  // CHECK-NEXT: [[BC:%.*]] = bitcast <4 x i32> [[SEXT]] to <4 x float>
  // CHECK-NEXT: ret <4 x float> [[BC]]
  return _mm_cmpnle_ps(__a, __b);
}
__m128 test_mm_cmpnlt_ps(__m128 __a, __m128 __b) {
  // CHECK-LABEL: test_mm_cmpnlt_ps
  // CHECK: [[CMP:%.*]] = call <4 x i1> @llvm.experimental.constrained.fcmps.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, metadata !"uge", metadata !"fpexcept.strict")
  // CHECK-NEXT: [[SEXT:%.*]] = sext <4 x i1> [[CMP]] to <4 x i32>
  // CHECK-NEXT: [[BC:%.*]] = bitcast <4 x i32> [[SEXT]] to <4 x float>
  // CHECK-NEXT: ret <4 x float> [[BC]]
  return _mm_cmpnlt_ps(__a, __b);
}
__m128 test_mm_cmpord_ps(__m128 __a, __m128 __b) {
  // CHECK-LABEL: test_mm_cmpord_ps
  // CHECK: [[CMP:%.*]] = call <4 x i1> @llvm.experimental.constrained.fcmp.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, metadata !"ord", metadata !"fpexcept.strict")
  // CHECK-NEXT: [[SEXT:%.*]] = sext <4 x i1> [[CMP]] to <4 x i32>
  // CHECK-NEXT: [[BC:%.*]] = bitcast <4 x i32> [[SEXT]] to <4 x float>
  // CHECK-NEXT: ret <4 x float> [[BC]]
  return _mm_cmpord_ps(__a, __b);
}
__m128 test_mm_cmpunord_ps(__m128 __a, __m128 __b) {
  // CHECK-LABEL: test_mm_cmpunord_ps
  // CHECK: [[CMP:%.*]] = call <4 x i1> @llvm.experimental.constrained.fcmp.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, metadata !"uno", metadata !"fpexcept.strict")
  // CHECK-NEXT: [[SEXT:%.*]] = sext <4 x i1> [[CMP]] to <4 x i32>
  // CHECK-NEXT: [[BC:%.*]] = bitcast <4 x i32> [[SEXT]] to <4 x float>
  // CHECK-NEXT: ret <4 x float> [[BC]]
  return _mm_cmpunord_ps(__a, __b);
}