1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
2 // RUN: %clang_cc1 %s -emit-llvm -ffp-exception-behavior=maytrap -o - -triple x86_64-unknown-unknown | FileCheck %s
4 // Test that the constrained intrinsics are picking up the exception
5 // metadata from the AST instead of the global default from the command line.
6 // FIXME: these functions shouldn't trap on SNaN.
8 #pragma float_control(except, on)
10 int printf(const char *, ...);
// CHECK-LABEL: @p(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[STR_ADDR:%.*]] = alloca ptr, align 8
15 // CHECK-NEXT: [[X_ADDR:%.*]] = alloca i32, align 4
16 // CHECK-NEXT: store ptr [[STR:%.*]], ptr [[STR_ADDR]], align 8
17 // CHECK-NEXT: store i32 [[X:%.*]], ptr [[X_ADDR]], align 4
18 // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[STR_ADDR]], align 8
19 // CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[X_ADDR]], align 4
20 // CHECK-NEXT: [[CALL:%.*]] = call i32 (ptr, ...) @printf(ptr noundef @.str, ptr noundef [[TMP0]], i32 noundef [[TMP1]]) #[[ATTR4:[0-9]+]]
21 // CHECK-NEXT: ret void
// Reporting helper: prints "<label>: <value>" for each builtin tested below.
// Called through the P() macro, which stringizes the builtin name and its
// argument list into the label. (Closing brace restored; it was lost in a
// bad merge/extraction.)
void p(char *str, int x) {
  printf("%s: %d\n", str, x);
}
27 #define P(n,args) p(#n #args, __builtin_##n args)
// CHECK-LABEL: @test_fpclassify(
// CHECK-NEXT: entry:
31 // CHECK-NEXT: [[D_ADDR:%.*]] = alloca double, align 8
32 // CHECK-NEXT: store double [[D:%.*]], ptr [[D_ADDR]], align 8
33 // CHECK-NEXT: [[TMP0:%.*]] = load double, ptr [[D_ADDR]], align 8
34 // CHECK-NEXT: [[ISZERO:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP0]], double 0.000000e+00, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR4]]
35 // CHECK-NEXT: br i1 [[ISZERO]], label [[FPCLASSIFY_END:%.*]], label [[FPCLASSIFY_NOT_ZERO:%.*]]
36 // CHECK: fpclassify_end:
37 // CHECK-NEXT: [[FPCLASSIFY_RESULT:%.*]] = phi i32 [ 4, [[ENTRY:%.*]] ], [ 0, [[FPCLASSIFY_NOT_ZERO]] ], [ 1, [[FPCLASSIFY_NOT_NAN:%.*]] ], [ [[TMP2:%.*]], [[FPCLASSIFY_NOT_INF:%.*]] ]
38 // CHECK-NEXT: call void @p(ptr noundef @.str.1, i32 noundef [[FPCLASSIFY_RESULT]]) #[[ATTR4]]
39 // CHECK-NEXT: ret void
40 // CHECK: fpclassify_not_zero:
41 // CHECK-NEXT: [[CMP:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP0]], double [[TMP0]], metadata !"uno", metadata !"fpexcept.strict") #[[ATTR4]]
42 // CHECK-NEXT: br i1 [[CMP]], label [[FPCLASSIFY_END]], label [[FPCLASSIFY_NOT_NAN]]
43 // CHECK: fpclassify_not_nan:
44 // CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[TMP0]]) #[[ATTR5:[0-9]+]]
45 // CHECK-NEXT: [[ISINF:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x7FF0000000000000, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR4]]
46 // CHECK-NEXT: br i1 [[ISINF]], label [[FPCLASSIFY_END]], label [[FPCLASSIFY_NOT_INF]]
47 // CHECK: fpclassify_not_inf:
48 // CHECK-NEXT: [[ISNORMAL:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x10000000000000, metadata !"uge", metadata !"fpexcept.strict") #[[ATTR4]]
49 // CHECK-NEXT: [[TMP2]] = select i1 [[ISNORMAL]], i32 2, i32 3
50 // CHECK-NEXT: br label [[FPCLASSIFY_END]]
52 void test_fpclassify(double d
) {
53 P(fpclassify
, (0, 1, 2, 3, 4, d
));
// CHECK-LABEL: @test_fp16_isinf(
// CHECK-NEXT: entry:
60 // CHECK-NEXT: [[H_ADDR:%.*]] = alloca half, align 2
61 // CHECK-NEXT: store half [[H:%.*]], ptr [[H_ADDR]], align 2
62 // CHECK-NEXT: [[TMP0:%.*]] = load half, ptr [[H_ADDR]], align 2
63 // CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f16(half [[TMP0]], i32 516) #[[ATTR4]]
64 // CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
65 // CHECK-NEXT: call void @p(ptr noundef @.str.2, i32 noundef [[TMP2]]) #[[ATTR4]]
66 // CHECK-NEXT: ret void
68 void test_fp16_isinf(_Float16 h
) {
// CHECK-LABEL: @test_float_isinf(
// CHECK-NEXT: entry:
76 // CHECK-NEXT: [[F_ADDR:%.*]] = alloca float, align 4
77 // CHECK-NEXT: store float [[F:%.*]], ptr [[F_ADDR]], align 4
78 // CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F_ADDR]], align 4
79 // CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f32(float [[TMP0]], i32 516) #[[ATTR4]]
80 // CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
81 // CHECK-NEXT: call void @p(ptr noundef @.str.3, i32 noundef [[TMP2]]) #[[ATTR4]]
82 // CHECK-NEXT: ret void
84 void test_float_isinf(float f
) {
// CHECK-LABEL: @test_double_isinf(
// CHECK-NEXT: entry:
92 // CHECK-NEXT: [[D_ADDR:%.*]] = alloca double, align 8
93 // CHECK-NEXT: store double [[D:%.*]], ptr [[D_ADDR]], align 8
94 // CHECK-NEXT: [[TMP0:%.*]] = load double, ptr [[D_ADDR]], align 8
95 // CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[TMP0]], i32 516) #[[ATTR4]]
96 // CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
97 // CHECK-NEXT: call void @p(ptr noundef @.str.4, i32 noundef [[TMP2]]) #[[ATTR4]]
98 // CHECK-NEXT: ret void
100 void test_double_isinf(double d
) {
106 // CHECK-LABEL: @test_fp16_isfinite(
107 // CHECK-NEXT: entry:
108 // CHECK-NEXT: [[H_ADDR:%.*]] = alloca half, align 2
109 // CHECK-NEXT: store half [[H:%.*]], ptr [[H_ADDR]], align 2
110 // CHECK-NEXT: [[TMP0:%.*]] = load half, ptr [[H_ADDR]], align 2
111 // CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f16(half [[TMP0]], i32 504) #[[ATTR4]]
112 // CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
113 // CHECK-NEXT: call void @p(ptr noundef @.str.5, i32 noundef [[TMP2]]) #[[ATTR4]]
114 // CHECK-NEXT: ret void
116 void test_fp16_isfinite(_Float16 h
) {
122 // CHECK-LABEL: @test_float_isfinite(
123 // CHECK-NEXT: entry:
124 // CHECK-NEXT: [[F_ADDR:%.*]] = alloca float, align 4
125 // CHECK-NEXT: store float [[F:%.*]], ptr [[F_ADDR]], align 4
126 // CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F_ADDR]], align 4
127 // CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f32(float [[TMP0]], i32 504) #[[ATTR4]]
128 // CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
129 // CHECK-NEXT: call void @p(ptr noundef @.str.6, i32 noundef [[TMP2]]) #[[ATTR4]]
130 // CHECK-NEXT: ret void
132 void test_float_isfinite(float f
) {
138 // CHECK-LABEL: @test_double_isfinite(
139 // CHECK-NEXT: entry:
140 // CHECK-NEXT: [[D_ADDR:%.*]] = alloca double, align 8
141 // CHECK-NEXT: store double [[D:%.*]], ptr [[D_ADDR]], align 8
142 // CHECK-NEXT: [[TMP0:%.*]] = load double, ptr [[D_ADDR]], align 8
143 // CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[TMP0]], i32 504) #[[ATTR4]]
144 // CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
145 // CHECK-NEXT: call void @p(ptr noundef @.str.7, i32 noundef [[TMP2]]) #[[ATTR4]]
146 // CHECK-NEXT: ret void
148 void test_double_isfinite(double d
) {
154 // CHECK-LABEL: @test_isinf_sign(
155 // CHECK-NEXT: entry:
156 // CHECK-NEXT: [[D_ADDR:%.*]] = alloca double, align 8
157 // CHECK-NEXT: store double [[D:%.*]], ptr [[D_ADDR]], align 8
158 // CHECK-NEXT: [[TMP0:%.*]] = load double, ptr [[D_ADDR]], align 8
159 // CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[TMP0]]) #[[ATTR5]]
160 // CHECK-NEXT: [[ISINF:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x7FF0000000000000, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR4]]
161 // CHECK-NEXT: [[TMP2:%.*]] = bitcast double [[TMP0]] to i64
162 // CHECK-NEXT: [[TMP3:%.*]] = icmp slt i64 [[TMP2]], 0
163 // CHECK-NEXT: [[TMP4:%.*]] = select i1 [[TMP3]], i32 -1, i32 1
164 // CHECK-NEXT: [[TMP5:%.*]] = select i1 [[ISINF]], i32 [[TMP4]], i32 0
165 // CHECK-NEXT: call void @p(ptr noundef @.str.8, i32 noundef [[TMP5]]) #[[ATTR4]]
166 // CHECK-NEXT: ret void
168 void test_isinf_sign(double d
) {
174 // CHECK-LABEL: @test_fp16_isnan(
175 // CHECK-NEXT: entry:
176 // CHECK-NEXT: [[H_ADDR:%.*]] = alloca half, align 2
177 // CHECK-NEXT: store half [[H:%.*]], ptr [[H_ADDR]], align 2
178 // CHECK-NEXT: [[TMP0:%.*]] = load half, ptr [[H_ADDR]], align 2
179 // CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f16(half [[TMP0]], i32 3) #[[ATTR4]]
180 // CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
181 // CHECK-NEXT: call void @p(ptr noundef @.str.9, i32 noundef [[TMP2]]) #[[ATTR4]]
182 // CHECK-NEXT: ret void
184 void test_fp16_isnan(_Float16 h
) {
190 // CHECK-LABEL: @test_float_isnan(
191 // CHECK-NEXT: entry:
192 // CHECK-NEXT: [[F_ADDR:%.*]] = alloca float, align 4
193 // CHECK-NEXT: store float [[F:%.*]], ptr [[F_ADDR]], align 4
194 // CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F_ADDR]], align 4
195 // CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f32(float [[TMP0]], i32 3) #[[ATTR4]]
196 // CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
197 // CHECK-NEXT: call void @p(ptr noundef @.str.10, i32 noundef [[TMP2]]) #[[ATTR4]]
198 // CHECK-NEXT: ret void
200 void test_float_isnan(float f
) {
206 // CHECK-LABEL: @test_double_isnan(
207 // CHECK-NEXT: entry:
208 // CHECK-NEXT: [[D_ADDR:%.*]] = alloca double, align 8
209 // CHECK-NEXT: store double [[D:%.*]], ptr [[D_ADDR]], align 8
210 // CHECK-NEXT: [[TMP0:%.*]] = load double, ptr [[D_ADDR]], align 8
211 // CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[TMP0]], i32 3) #[[ATTR4]]
212 // CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
213 // CHECK-NEXT: call void @p(ptr noundef @.str.11, i32 noundef [[TMP2]]) #[[ATTR4]]
214 // CHECK-NEXT: ret void
216 void test_double_isnan(double d
) {
222 // CHECK-LABEL: @test_isnormal(
223 // CHECK-NEXT: entry:
224 // CHECK-NEXT: [[D_ADDR:%.*]] = alloca double, align 8
225 // CHECK-NEXT: store double [[D:%.*]], ptr [[D_ADDR]], align 8
226 // CHECK-NEXT: [[TMP0:%.*]] = load double, ptr [[D_ADDR]], align 8
227 // CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[TMP0]], i32 264) #[[ATTR4]]
228 // CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
229 // CHECK-NEXT: call void @p(ptr noundef @.str.12, i32 noundef [[TMP2]]) #[[ATTR4]]
230 // CHECK-NEXT: ret void
232 void test_isnormal(double d
) {