; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v,+m,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v,+m,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfhmin,+v,+m,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v,+m,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s
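
; The zvfh and zvfhmin run lines share one set of checks: these tests only
; need the f16<->f32 conversion instructions, which zvfhmin also provides.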
declare <vscale x 2 x half> @llvm.vp.fptrunc.nxv2f16.nxv2f32(<vscale x 2 x float>, <vscale x 2 x i1>, i32)

define <vscale x 2 x half> @vfptrunc_nxv2f16_nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vfptrunc_nxv2f16_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v9, v8, v0.t
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x half> @llvm.vp.fptrunc.nxv2f16.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> %m, i32 %vl)
  ret <vscale x 2 x half> %v
}
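
; With an all-true mask, the unmasked form of the instruction is selected.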
define <vscale x 2 x half> @vfptrunc_nxv2f16_nxv2f32_unmasked(<vscale x 2 x float> %a, i32 zeroext %vl) {
; CHECK-LABEL: vfptrunc_nxv2f16_nxv2f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x half> @llvm.vp.fptrunc.nxv2f16.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> splat (i1 true), i32 %vl)
  ret <vscale x 2 x half> %v
}
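
; f64 -> f16 narrows in two steps: a round-to-odd conversion to f32
; (vfncvt.rod.f.f.w) first, so the final rounding to f16 cannot double-round.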
declare <vscale x 2 x half> @llvm.vp.fptrunc.nxv2f16.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)

define <vscale x 2 x half> @vfptrunc_nxv2f16_nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vfptrunc_nxv2f16_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfncvt.rod.f.f.w v10, v8, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x half> @llvm.vp.fptrunc.nxv2f16.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x i1> %m, i32 %vl)
  ret <vscale x 2 x half> %v
}

define <vscale x 2 x half> @vfptrunc_nxv2f16_nxv2f64_unmasked(<vscale x 2 x double> %a, i32 zeroext %vl) {
; CHECK-LABEL: vfptrunc_nxv2f16_nxv2f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfncvt.rod.f.f.w v10, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x half> @llvm.vp.fptrunc.nxv2f16.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x i1> splat (i1 true), i32 %vl)
  ret <vscale x 2 x half> %v
}

declare <vscale x 2 x float> @llvm.vp.fptrunc.nxv2f64.nxv2f32(<vscale x 2 x double>, <vscale x 2 x i1>, i32)

define <vscale x 2 x float> @vfptrunc_nxv2f32_nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vfptrunc_nxv2f32_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v10, v8, v0.t
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x float> @llvm.vp.fptrunc.nxv2f64.nxv2f32(<vscale x 2 x double> %a, <vscale x 2 x i1> %m, i32 %vl)
  ret <vscale x 2 x float> %v
}

define <vscale x 2 x float> @vfptrunc_nxv2f32_nxv2f64_unmasked(<vscale x 2 x double> %a, i32 zeroext %vl) {
; CHECK-LABEL: vfptrunc_nxv2f32_nxv2f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v10, v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x float> @llvm.vp.fptrunc.nxv2f64.nxv2f32(<vscale x 2 x double> %a, <vscale x 2 x i1> splat (i1 true), i32 %vl)
  ret <vscale x 2 x float> %v
}
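
; Non-power-of-two element count: nxv7f64 is widened to nxv8f64 during
; legalization, so the code matches the power-of-two case.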
declare <vscale x 7 x float> @llvm.vp.fptrunc.nxv7f64.nxv7f32(<vscale x 7 x double>, <vscale x 7 x i1>, i32)

define <vscale x 7 x float> @vfptrunc_nxv7f32_nxv7f64(<vscale x 7 x double> %a, <vscale x 7 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vfptrunc_nxv7f32_nxv7f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v16, v8, v0.t
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 7 x float> @llvm.vp.fptrunc.nxv7f64.nxv7f32(<vscale x 7 x double> %a, <vscale x 7 x i1> %m, i32 %vl)
  ret <vscale x 7 x float> %v
}
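
; nxv16f64 does not fit in a single register group, so the conversion is split
; in two: the upper half of the mask is extracted with vslidedown and each half
; runs under its own clamped VL.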
declare <vscale x 16 x float> @llvm.vp.fptrunc.nxv16f64.nxv16f32(<vscale x 16 x double>, <vscale x 16 x i1>, i32)

define <vscale x 16 x float> @vfptrunc_nxv16f32_nxv16f64(<vscale x 16 x double> %a, <vscale x 16 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vfptrunc_nxv16f32_nxv16f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a1, a1, 3
; CHECK-NEXT:    sub sp, sp, a1
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT:    vmv1r.v v7, v0
; CHECK-NEXT:    addi a1, sp, 16
; CHECK-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    srli a2, a1, 3
; CHECK-NEXT:    vsetvli a3, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vslidedown.vx v0, v0, a2
; CHECK-NEXT:    sub a2, a0, a1
; CHECK-NEXT:    sltu a3, a0, a2
; CHECK-NEXT:    addi a3, a3, -1
; CHECK-NEXT:    and a2, a3, a2
; CHECK-NEXT:    addi a3, sp, 16
; CHECK-NEXT:    vl8r.v v24, (a3) # Unknown-size Folded Reload
; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v20, v24, v0.t
; CHECK-NEXT:    bltu a0, a1, .LBB7_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    mv a0, a1
; CHECK-NEXT:  .LBB7_2:
; CHECK-NEXT:    vmv1r.v v0, v7
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v16, v8, v0.t
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x float> @llvm.vp.fptrunc.nxv16f64.nxv16f32(<vscale x 16 x double> %a, <vscale x 16 x i1> %m, i32 %vl)
  ret <vscale x 16 x float> %v
}
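
; nxv32f64 is split into four m4 conversions; only the low halves of %a arrive
; in v8/v16, the rest is passed indirectly and reloaded with vl8re64.v.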
declare <vscale x 32 x float> @llvm.vp.fptrunc.nxv32f64.nxv32f32(<vscale x 32 x double>, <vscale x 32 x i1>, i32)

define <vscale x 32 x float> @vfptrunc_nxv32f32_nxv32f64(<vscale x 32 x double> %a, <vscale x 32 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vfptrunc_nxv32f32_nxv32f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a1, a1, 4
; CHECK-NEXT:    sub sp, sp, a1
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
; CHECK-NEXT:    vmv1r.v v7, v0
; CHECK-NEXT:    addi a1, sp, 16
; CHECK-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a1, a1, 3
; CHECK-NEXT:    add a1, sp, a1
; CHECK-NEXT:    addi a1, a1, 16
; CHECK-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    srli a3, a1, 3
; CHECK-NEXT:    srli a4, a1, 2
; CHECK-NEXT:    vsetvli a5, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vslidedown.vx v16, v0, a4
; CHECK-NEXT:    slli a4, a1, 3
; CHECK-NEXT:    add a4, a0, a4
; CHECK-NEXT:    vl8re64.v v8, (a4)
; CHECK-NEXT:    slli a4, a1, 1
; CHECK-NEXT:    sub a5, a2, a4
; CHECK-NEXT:    sltu a6, a2, a5
; CHECK-NEXT:    addi a6, a6, -1
; CHECK-NEXT:    and a5, a6, a5
; CHECK-NEXT:    sub a6, a5, a1
; CHECK-NEXT:    sltu a7, a5, a6
; CHECK-NEXT:    addi a7, a7, -1
; CHECK-NEXT:    vl8re64.v v24, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vslidedown.vx v0, v16, a3
; CHECK-NEXT:    and a0, a7, a6
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v20, v8, v0.t
; CHECK-NEXT:    bltu a5, a1, .LBB8_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    mv a5, a1
; CHECK-NEXT:  .LBB8_2:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vslidedown.vx v6, v7, a3
; CHECK-NEXT:    vmv1r.v v0, v16
; CHECK-NEXT:    vsetvli zero, a5, e32, m4, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v16, v24, v0.t
; CHECK-NEXT:    bltu a2, a4, .LBB8_4
; CHECK-NEXT:  # %bb.3:
; CHECK-NEXT:    mv a2, a4
; CHECK-NEXT:  .LBB8_4:
; CHECK-NEXT:    sub a0, a2, a1
; CHECK-NEXT:    sltu a3, a2, a0
; CHECK-NEXT:    addi a3, a3, -1
; CHECK-NEXT:    and a0, a3, a0
; CHECK-NEXT:    vmv1r.v v0, v6
; CHECK-NEXT:    addi a3, sp, 16
; CHECK-NEXT:    vl8r.v v8, (a3) # Unknown-size Folded Reload
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v28, v8, v0.t
; CHECK-NEXT:    bltu a2, a1, .LBB8_6
; CHECK-NEXT:  # %bb.5:
; CHECK-NEXT:    mv a2, a1
; CHECK-NEXT:  .LBB8_6:
; CHECK-NEXT:    vmv1r.v v0, v7
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add a0, sp, a0
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v24, v8, v0.t
; CHECK-NEXT:    vmv8r.v v8, v24
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 4
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x float> @llvm.vp.fptrunc.nxv32f64.nxv32f32(<vscale x 32 x double> %a, <vscale x 32 x i1> %m, i32 %vl)
  ret <vscale x 32 x float> %v
}
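
; Truncation to bf16 uses vfncvtbf16.f.f.w from zvfbfmin.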
declare <vscale x 2 x bfloat> @llvm.vp.fptrunc.nxv2bf16.nxv2f32(<vscale x 2 x float>, <vscale x 2 x i1>, i32)

define <vscale x 2 x bfloat> @vfptrunc_nxv2bf16_nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vfptrunc_nxv2bf16_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfncvtbf16.f.f.w v9, v8, v0.t
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x bfloat> @llvm.vp.fptrunc.nxv2bf16.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> %m, i32 %vl)
  ret <vscale x 2 x bfloat> %v
}

define <vscale x 2 x bfloat> @vfptrunc_nxv2bf16_nxv2f32_unmasked(<vscale x 2 x float> %a, i32 zeroext %vl) {
; CHECK-LABEL: vfptrunc_nxv2bf16_nxv2f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfncvtbf16.f.f.w v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x bfloat> @llvm.vp.fptrunc.nxv2bf16.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> splat (i1 true), i32 %vl)
  ret <vscale x 2 x bfloat> %v
}
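
; As with f16, f64 -> bf16 goes through f32 with round-to-odd first.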
declare <vscale x 2 x bfloat> @llvm.vp.fptrunc.nxv2bf16.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)

define <vscale x 2 x bfloat> @vfptrunc_nxv2bf16_nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vfptrunc_nxv2bf16_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfncvt.rod.f.f.w v10, v8, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x bfloat> @llvm.vp.fptrunc.nxv2bf16.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x i1> %m, i32 %vl)
  ret <vscale x 2 x bfloat> %v
}

define <vscale x 2 x bfloat> @vfptrunc_nxv2bf16_nxv2f64_unmasked(<vscale x 2 x double> %a, i32 zeroext %vl) {
; CHECK-LABEL: vfptrunc_nxv2bf16_nxv2f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfncvt.rod.f.f.w v10, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x bfloat> @llvm.vp.fptrunc.nxv2bf16.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x i1> splat (i1 true), i32 %vl)
  ret <vscale x 2 x bfloat> %v
}