// REQUIRES: x86-registered-target
// RUN: %clang_cc1 -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux-gnu -target-feature +avx512f -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck --check-prefix=COMMON --check-prefix=COMMONIR --check-prefix=UNCONSTRAINED %s
// RUN: %clang_cc1 -flax-vector-conversions=none -fms-extensions -fms-compatibility -ffreestanding %s -triple=x86_64-windows-msvc -target-feature +avx512f -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck --check-prefix=COMMON --check-prefix=COMMONIR --check-prefix=UNCONSTRAINED %s
// RUN: %clang_cc1 -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-linux-gnu -target-feature +avx512f -target-feature +avx512vl -ffp-exception-behavior=strict -emit-llvm -o - -Wall -Werror | FileCheck --check-prefix=COMMON --check-prefix=COMMONIR --check-prefix=CONSTRAINED %s
// RUN: %clang_cc1 -flax-vector-conversions=none -fms-compatibility -ffreestanding %s -triple=x86_64-unknown-linux-gnu -target-feature +avx512f -target-feature +avx512vl -ffp-exception-behavior=strict -emit-llvm -o - -Wall -Werror | FileCheck --check-prefix=COMMON --check-prefix=COMMONIR --check-prefix=CONSTRAINED %s
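
// This test checks the IR emitted for the AVX-512VL half<->float conversion
// intrinsics, both in the default FP environment (UNCONSTRAINED checks) and
// under -ffp-exception-behavior=strict (CONSTRAINED checks).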
#include <immintrin.h>

__m128 test_mm_mask_cvtph_ps(__m128 __W, __mmask8 __U, __m128i __A) {
  // COMMON-LABEL: @test_mm_mask_cvtph_ps
  // COMMONIR: bitcast <2 x i64> %{{.*}} to <8 x i16>
  // COMMONIR: shufflevector <8 x i16> %{{.*}}, <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  // COMMONIR: bitcast <4 x i16> %{{.*}} to <4 x half>
  // UNCONSTRAINED: fpext <4 x half> %{{.*}} to <4 x float>
  // CONSTRAINED: call <4 x float> @llvm.experimental.constrained.fpext.v4f32.v4f16(<4 x half> %{{.*}}, metadata !"fpexcept.strict")
  // COMMONIR: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
  return _mm_mask_cvtph_ps(__W, __U, __A);
}

__m128 test_mm_maskz_cvtph_ps(__mmask8 __U, __m128i __A) {
  // COMMON-LABEL: @test_mm_maskz_cvtph_ps
  // COMMONIR: bitcast <2 x i64> %{{.*}} to <8 x i16>
  // COMMONIR: shufflevector <8 x i16> %{{.*}}, <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  // COMMONIR: bitcast <4 x i16> %{{.*}} to <4 x half>
  // UNCONSTRAINED: fpext <4 x half> %{{.*}} to <4 x float>
  // CONSTRAINED: call <4 x float> @llvm.experimental.constrained.fpext.v4f32.v4f16(<4 x half> %{{.*}}, metadata !"fpexcept.strict")
  // COMMONIR: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
  return _mm_maskz_cvtph_ps(__U, __A);
}
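
// The 256-bit cvtph_ps variants convert all eight half elements, so no
// low-half shufflevector is expected before the (possibly constrained) fpext.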
__m256 test_mm256_mask_cvtph_ps(__m256 __W, __mmask8 __U, __m128i __A) {
  // COMMON-LABEL: @test_mm256_mask_cvtph_ps
  // COMMONIR: bitcast <2 x i64> %{{.*}} to <8 x i16>
  // COMMONIR: bitcast <8 x i16> %{{.*}} to <8 x half>
  // UNCONSTRAINED: fpext <8 x half> %{{.*}} to <8 x float>
  // CONSTRAINED: call <8 x float> @llvm.experimental.constrained.fpext.v8f32.v8f16(<8 x half> %{{.*}}, metadata !"fpexcept.strict")
  // COMMONIR: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
  return _mm256_mask_cvtph_ps(__W, __U, __A);
}

__m256 test_mm256_maskz_cvtph_ps(__mmask8 __U, __m128i __A) {
  // COMMON-LABEL: @test_mm256_maskz_cvtph_ps
  // COMMONIR: bitcast <2 x i64> %{{.*}} to <8 x i16>
  // COMMONIR: bitcast <8 x i16> %{{.*}} to <8 x half>
  // UNCONSTRAINED: fpext <8 x half> %{{.*}} to <8 x float>
  // CONSTRAINED: call <8 x float> @llvm.experimental.constrained.fpext.v8f32.v8f16(<8 x half> %{{.*}}, metadata !"fpexcept.strict")
  // COMMONIR: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
  return _mm256_maskz_cvtph_ps(__U, __A);
}
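
// The float->half conversions take a rounding-control immediate; in both FP
// exception modes they are checked to lower to the target-specific masked
// vcvtps2ph intrinsics.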
__m128i test_mm_mask_cvtps_ph(__m128i __W, __mmask8 __U, __m128 __A) {
  // COMMON-LABEL: @test_mm_mask_cvtps_ph
  // COMMONIR: @llvm.x86.avx512.mask.vcvtps2ph.128
  return _mm_mask_cvtps_ph(__W, __U, __A, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}

__m128i test_mm_maskz_cvtps_ph(__mmask8 __U, __m128 __A) {
  // COMMON-LABEL: @test_mm_maskz_cvtps_ph
  // COMMONIR: @llvm.x86.avx512.mask.vcvtps2ph.128
  return _mm_maskz_cvtps_ph(__U, __A, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}

__m128i test_mm256_mask_cvtps_ph(__m128i __W, __mmask8 __U, __m256 __A) {
  // COMMON-LABEL: @test_mm256_mask_cvtps_ph
  // COMMONIR: @llvm.x86.avx512.mask.vcvtps2ph.256
  return _mm256_mask_cvtps_ph(__W, __U, __A, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}

__m128i test_mm256_maskz_cvtps_ph(__mmask8 __U, __m256 __A) {
  // COMMON-LABEL: @test_mm256_maskz_cvtps_ph
  // COMMONIR: @llvm.x86.avx512.mask.vcvtps2ph.256
  return _mm256_maskz_cvtps_ph(__U, __A, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}
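
// The cvt_roundps_ph variants pass the rounding mode directly and are likewise
// expected to lower to the masked vcvtps2ph intrinsics.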
__m128i test_mm_mask_cvt_roundps_ph(__m128i __W, __mmask8 __U, __m128 __A) {
  // COMMON-LABEL: @test_mm_mask_cvt_roundps_ph
  // COMMONIR: @llvm.x86.avx512.mask.vcvtps2ph.128
  return _mm_mask_cvt_roundps_ph(__W, __U, __A, _MM_FROUND_TO_ZERO);
}

__m128i test_mm_maskz_cvt_roundps_ph(__mmask8 __U, __m128 __A) {
  // COMMON-LABEL: @test_mm_maskz_cvt_roundps_ph
  // COMMONIR: @llvm.x86.avx512.mask.vcvtps2ph.128
  return _mm_maskz_cvt_roundps_ph(__U, __A, _MM_FROUND_TO_ZERO);
}

__m128i test_mm256_mask_cvt_roundps_ph(__m128i __W, __mmask8 __U, __m256 __A) {
  // COMMON-LABEL: @test_mm256_mask_cvt_roundps_ph
  // COMMONIR: @llvm.x86.avx512.mask.vcvtps2ph.256
  return _mm256_mask_cvt_roundps_ph(__W, __U, __A, _MM_FROUND_TO_ZERO);
}

__m128i test_mm256_maskz_cvt_roundps_ph(__mmask8 __U, __m256 __A) {
  // COMMON-LABEL: @test_mm256_maskz_cvt_roundps_ph
  // COMMONIR: @llvm.x86.avx512.mask.vcvtps2ph.256
  return _mm256_maskz_cvt_roundps_ph(__U, __A, _MM_FROUND_TO_ZERO);
}