// RUN: %clang_cc1 -emit-llvm -triple x86_64-windows-pc -ffp-exception-behavior=maytrap -o - %s | FileCheck %s --check-prefixes=CHECK,FP16
// RUN: %clang_cc1 -emit-llvm -triple ppc64-be -ffp-exception-behavior=maytrap -o - %s | FileCheck %s --check-prefixes=CHECK,NOFP16
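
// A note on the two prefixes, based on the checks below: FP16 covers the
// x86_64 run, where __fp16 values load as the IR 'half' type; NOFP16 covers
// the ppc64 run, where __fp16 is storage-only, loading as i16 and going
// through @llvm.convert.from.fp16 before use.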

// Test that these builtins don't do the variadic promotion of float->double.

// Test that the constrained intrinsics pick up the exception
// metadata from the AST instead of the global default from the command line.

#pragma float_control(except, on)
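// With the pragma in effect, strict exception semantics are attached to each
// operation in the AST, so the intrinsics below carry "fpexcept.strict" even
// though the command line asked for "maytrap".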

// CHECK-LABEL: @test_half
void test_half(__fp16 *H, __fp16 *H2) {
  (void)__builtin_isgreater(*H, *H2);
  // FP16: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
  // FP16: call float @llvm.experimental.constrained.fpext.f32.f16(half %{{.*}}, metadata !"fpexcept.strict")
  // CHECK: call i1 @llvm.experimental.constrained.fcmp.f32(float %{{.*}}, float %{{.*}}, metadata !"ogt", metadata !"fpexcept.strict")
  // CHECK-NEXT: zext i1
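  // Note that the half operands are extended only to float (fpext to f32, not
  // f64), and the builtin's int result comes from widening the i1 compare,
  // hence the 'zext i1' above.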
  (void)__builtin_isinf(*H);
  // NOFP16: [[LDADDR:%.*]] = load ptr, ptr %{{.*}}, align 8
  // NOFP16-NEXT: [[IHALF:%.*]] = load i16, ptr [[LDADDR]], align 2
  // NOFP16-NEXT: [[CONV:%.*]] = call float @llvm.convert.from.fp16.f32(i16 [[IHALF]])
  // NOFP16-NEXT: [[RES1:%.*]] = call i1 @llvm.is.fpclass.f32(float [[CONV]], i32 516)
  // NOFP16-NEXT: zext i1 [[RES1]] to i32
  // FP16: [[LDADDR:%.*]] = load ptr, ptr %{{.*}}, align 8
  // FP16-NEXT: [[HALF:%.*]] = load half, ptr [[LDADDR]], align 2
  // FP16-NEXT: [[RES1:%.*]] = call i1 @llvm.is.fpclass.f16(half [[HALF]], i32 516)
  // FP16-NEXT: zext i1 [[RES1]] to i32
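  // 516 is the FPClassTest mask 0x204 = fcPosInf|fcNegInf, i.e. "is infinite".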
}

// CHECK-LABEL: @test_mixed
void test_mixed(double d1, float f2) {
  (void)__builtin_isgreater(d1, f2);
  // CHECK: [[CONV:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float %{{.*}}, metadata !"fpexcept.strict")
  // CHECK-NEXT: call i1 @llvm.experimental.constrained.fcmp.f64(double %{{.*}}, double [[CONV]], metadata !"ogt", metadata !"fpexcept.strict")
  // CHECK-NEXT: zext i1
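  // Each remaining builtin follows the same pattern: the float operand is
  // extended to double with a constrained fpext, then compared using the
  // predicate that matches the builtin.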
  (void)__builtin_isgreaterequal(d1, f2);
  // CHECK: [[CONV:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float %{{.*}}, metadata !"fpexcept.strict")
  // CHECK-NEXT: call i1 @llvm.experimental.constrained.fcmp.f64(double %{{.*}}, double [[CONV]], metadata !"oge", metadata !"fpexcept.strict")
  // CHECK-NEXT: zext i1
  (void)__builtin_isless(d1, f2);
  // CHECK: [[CONV:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float %{{.*}}, metadata !"fpexcept.strict")
  // CHECK-NEXT: call i1 @llvm.experimental.constrained.fcmp.f64(double %{{.*}}, double [[CONV]], metadata !"olt", metadata !"fpexcept.strict")
  // CHECK-NEXT: zext i1
  (void)__builtin_islessequal(d1, f2);
  // CHECK: [[CONV:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float %{{.*}}, metadata !"fpexcept.strict")
  // CHECK-NEXT: call i1 @llvm.experimental.constrained.fcmp.f64(double %{{.*}}, double [[CONV]], metadata !"ole", metadata !"fpexcept.strict")
  // CHECK-NEXT: zext i1
  (void)__builtin_islessgreater(d1, f2);
  // CHECK: [[CONV:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float %{{.*}}, metadata !"fpexcept.strict")
  // CHECK-NEXT: call i1 @llvm.experimental.constrained.fcmp.f64(double %{{.*}}, double [[CONV]], metadata !"one", metadata !"fpexcept.strict")
  // CHECK-NEXT: zext i1
  (void)__builtin_isunordered(d1, f2);
  // CHECK: [[CONV:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float %{{.*}}, metadata !"fpexcept.strict")
  // CHECK-NEXT: call i1 @llvm.experimental.constrained.fcmp.f64(double %{{.*}}, double [[CONV]], metadata !"uno", metadata !"fpexcept.strict")
  // CHECK-NEXT: zext i1
}
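
// Summary of the fcmp predicates exercised above: isgreater -> "ogt",
// isgreaterequal -> "oge", isless -> "olt", islessequal -> "ole",
// islessgreater -> "one", isunordered -> "uno".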