1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfh,+f,+d -verify-machineinstrs < %s | FileCheck %s
3 ; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfh,+f,+d -verify-machineinstrs < %s | FileCheck %s
; Single-step widening: <2 x half> -> <2 x float> is one vfwcvt at mf4.
define void @fpext_v2f16_v2f32(ptr %x, ptr %y) {
; CHECK-LABEL: fpext_v2f16_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: vfwcvt.f.f.v v9, v8
; CHECK-NEXT: vse32.v v9, (a1)
; CHECK-NEXT: ret
  %a = load <2 x half>, ptr %x
  %d = fpext <2 x half> %a to <2 x float>
  store <2 x float> %d, ptr %y
  ret void
}
; Two-step widening: <2 x half> -> <2 x double> needs vfwcvt via f32
; (half->float, then float->double after re-setting SEW to e32).
define void @fpext_v2f16_v2f64(ptr %x, ptr %y) {
; CHECK-LABEL: fpext_v2f16_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: vfwcvt.f.f.v v9, v8
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT: vfwcvt.f.f.v v8, v9
; CHECK-NEXT: vse64.v v8, (a1)
; CHECK-NEXT: ret
  %a = load <2 x half>, ptr %x
  %d = fpext <2 x half> %a to <2 x double>
  store <2 x double> %d, ptr %y
  ret void
}
; Same single-step widening at a larger VL/LMUL (m1 source, m2 result).
define void @fpext_v8f16_v8f32(ptr %x, ptr %y) {
; CHECK-LABEL: fpext_v8f16_v8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: vfwcvt.f.f.v v10, v8
; CHECK-NEXT: vse32.v v10, (a1)
; CHECK-NEXT: ret
  %a = load <8 x half>, ptr %x
  %d = fpext <8 x half> %a to <8 x float>
  store <8 x float> %d, ptr %y
  ret void
}
; Two-step widening at larger LMUL: m1 halves -> m2 floats -> m4 doubles.
define void @fpext_v8f16_v8f64(ptr %x, ptr %y) {
; CHECK-LABEL: fpext_v8f16_v8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: vfwcvt.f.f.v v10, v8
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vfwcvt.f.f.v v12, v10
; CHECK-NEXT: vse64.v v12, (a1)
; CHECK-NEXT: ret
  %a = load <8 x half>, ptr %x
  %d = fpext <8 x half> %a to <8 x double>
  store <8 x double> %d, ptr %y
  ret void
}
; Single-step narrowing: <2 x float> -> <2 x half> is one vfncvt.
define void @fpround_v2f32_v2f16(ptr %x, ptr %y) {
; CHECK-LABEL: fpround_v2f32_v2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vfncvt.f.f.w v9, v8
; CHECK-NEXT: vse16.v v9, (a1)
; CHECK-NEXT: ret
  %a = load <2 x float>, ptr %x
  %d = fptrunc <2 x float> %a to <2 x half>
  store <2 x half> %d, ptr %y
  ret void
}
; Two-step narrowing: double -> half goes through f32 using
; round-toward-odd (vfncvt.rod) on the first step to avoid double rounding.
define void @fpround_v2f64_v2f16(ptr %x, ptr %y) {
; CHECK-LABEL: fpround_v2f64_v2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: vfncvt.rod.f.f.w v9, v8
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT: vfncvt.f.f.w v8, v9
; CHECK-NEXT: vse16.v v8, (a1)
; CHECK-NEXT: ret
  %a = load <2 x double>, ptr %x
  %d = fptrunc <2 x double> %a to <2 x half>
  store <2 x half> %d, ptr %y
  ret void
}
; Single-step narrowing at larger VL/LMUL (m2 source, m1 result).
define void @fpround_v8f32_v8f16(ptr %x, ptr %y) {
; CHECK-LABEL: fpround_v8f32_v8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vfncvt.f.f.w v10, v8
; CHECK-NEXT: vse16.v v10, (a1)
; CHECK-NEXT: ret
  %a = load <8 x float>, ptr %x
  %d = fptrunc <8 x float> %a to <8 x half>
  store <8 x half> %d, ptr %y
  ret void
}
; Two-step narrowing at larger LMUL: m4 doubles -> m2 floats (round-to-odd)
; -> m1 halves.
define void @fpround_v8f64_v8f16(ptr %x, ptr %y) {
; CHECK-LABEL: fpround_v8f64_v8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: vfncvt.rod.f.f.w v12, v8
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT: vfncvt.f.f.w v8, v12
; CHECK-NEXT: vse16.v v8, (a1)
; CHECK-NEXT: ret
  %a = load <8 x double>, ptr %x
  %d = fptrunc <8 x double> %a to <8 x half>
  store <8 x half> %d, ptr %y
  ret void
}