; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=ilp32d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=lp64d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s
declare <2 x float> @llvm.experimental.constrained.fptrunc.v2f32.v2f64(<2 x double>, metadata, metadata)
; Single vfncvt.f.f.w narrows f64 -> f32 directly; strictfp so rounding mode is dynamic.
define <2 x float> @vfptrunc_v2f64_v2f32(<2 x double> %va) strictfp {
; CHECK-LABEL: vfptrunc_v2f64_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <2 x float> @llvm.experimental.constrained.fptrunc.v2f32.v2f64(<2 x double> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x float> %evec
}
declare <2 x half> @llvm.experimental.constrained.fptrunc.v2f16.v2f64(<2 x double>, metadata, metadata)
; f64 -> f16 needs two narrowing steps: round-toward-odd to f32, then a final
; dynamic-rounded narrow to f16 (rod avoids double rounding error).
define <2 x half> @vfptrunc_v2f64_v2f16(<2 x double> %va) strictfp {
; CHECK-LABEL: vfptrunc_v2f64_v2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vfncvt.rod.f.f.w v9, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v8, v9
; CHECK-NEXT:    ret
  %evec = call <2 x half> @llvm.experimental.constrained.fptrunc.v2f16.v2f64(<2 x double> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x half> %evec
}
declare <2 x half> @llvm.experimental.constrained.fptrunc.v2f16.v2f32(<2 x float>, metadata, metadata)
; Single vfncvt.f.f.w narrows f32 -> f16 directly.
define <2 x half> @vfptrunc_v2f32_v2f16(<2 x float> %va) strictfp {
; CHECK-LABEL: vfptrunc_v2f32_v2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <2 x half> @llvm.experimental.constrained.fptrunc.v2f16.v2f32(<2 x float> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x half> %evec
}
declare <4 x float> @llvm.experimental.constrained.fptrunc.v4f32.v4f64(<4 x double>, metadata, metadata)
; m2 source narrows into an m1 result; whole-register move copies it back to v8.
define <4 x float> @vfptrunc_v4f64_v4f32(<4 x double> %va) strictfp {
; CHECK-LABEL: vfptrunc_v4f64_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v10, v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <4 x float> @llvm.experimental.constrained.fptrunc.v4f32.v4f64(<4 x double> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x float> %evec
}
declare <4 x half> @llvm.experimental.constrained.fptrunc.v4f16.v4f64(<4 x double>, metadata, metadata)
; Two-step f64 -> f16: round-toward-odd narrow to f32, then dynamic-rounded narrow to f16.
define <4 x half> @vfptrunc_v4f64_v4f16(<4 x double> %va) strictfp {
; CHECK-LABEL: vfptrunc_v4f64_v4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vfncvt.rod.f.f.w v10, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v8, v10
; CHECK-NEXT:    ret
  %evec = call <4 x half> @llvm.experimental.constrained.fptrunc.v4f16.v4f64(<4 x double> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x half> %evec
}
declare <4 x half> @llvm.experimental.constrained.fptrunc.v4f16.v4f32(<4 x float>, metadata, metadata)
; Single vfncvt.f.f.w narrows f32 -> f16 directly.
define <4 x half> @vfptrunc_v4f32_v4f16(<4 x float> %va) strictfp {
; CHECK-LABEL: vfptrunc_v4f32_v4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = call <4 x half> @llvm.experimental.constrained.fptrunc.v4f16.v4f32(<4 x float> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <4 x half> %evec
}
declare <8 x float> @llvm.experimental.constrained.fptrunc.v8f32.v8f64(<8 x double>, metadata, metadata)
; m4 source narrows into an m2 result; whole-register move copies it back to v8.
define <8 x float> @vfptrunc_v8f64_v8f32(<8 x double> %va) strictfp {
; CHECK-LABEL: vfptrunc_v8f64_v8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v12, v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
  %evec = call <8 x float> @llvm.experimental.constrained.fptrunc.v8f32.v8f64(<8 x double> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x float> %evec
}
declare <8 x half> @llvm.experimental.constrained.fptrunc.v8f16.v8f64(<8 x double>, metadata, metadata)
; Two-step f64 -> f16: round-toward-odd narrow to f32, then dynamic-rounded narrow to f16.
define <8 x half> @vfptrunc_v8f64_v8f16(<8 x double> %va) strictfp {
; CHECK-LABEL: vfptrunc_v8f64_v8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vfncvt.rod.f.f.w v12, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v8, v12
; CHECK-NEXT:    ret
  %evec = call <8 x half> @llvm.experimental.constrained.fptrunc.v8f16.v8f64(<8 x double> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x half> %evec
}
declare <8 x half> @llvm.experimental.constrained.fptrunc.v8f16.v8f32(<8 x float>, metadata, metadata)
; Single vfncvt.f.f.w narrows f32 -> f16 directly.
define <8 x half> @vfptrunc_v8f32_v8f16(<8 x float> %va) strictfp {
; CHECK-LABEL: vfptrunc_v8f32_v8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v10, v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %evec = call <8 x half> @llvm.experimental.constrained.fptrunc.v8f16.v8f32(<8 x float> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x half> %evec
}