; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \
; RUN:     -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \
; RUN:     -verify-machineinstrs < %s | FileCheck %s
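
; The tests below all follow the same lowering pattern: RVV's FP-to-integer
; conversions (vfcvt/vfwcvt/vfncvt with rtz rounding) already saturate
; out-of-range values, so @llvm.fptosi.sat only needs vmfne.vv to detect NaN
; lanes and vmerge.vim to force them to zero.

; float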

declare <vscale x 2 x i32> @llvm.fptosi.sat.nxv2f32.nxv2i32(<vscale x 2 x float>)
declare <vscale x 4 x i32> @llvm.fptosi.sat.nxv4f32.nxv4i32(<vscale x 4 x float>)
declare <vscale x 8 x i32> @llvm.fptosi.sat.nxv8f32.nxv8i32(<vscale x 8 x float>)
declare <vscale x 4 x i16> @llvm.fptosi.sat.nxv4f32.nxv4i16(<vscale x 4 x float>)
declare <vscale x 8 x i16> @llvm.fptosi.sat.nxv8f32.nxv8i16(<vscale x 8 x float>)
declare <vscale x 2 x i64> @llvm.fptosi.sat.nxv2f32.nxv2i64(<vscale x 2 x float>)
declare <vscale x 4 x i64> @llvm.fptosi.sat.nxv4f32.nxv4i64(<vscale x 4 x float>)

define <vscale x 2 x i32> @test_signed_v2f32_v2i32(<vscale x 2 x float> %f) {
; CHECK-LABEL: test_signed_v2f32_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
  %x = call <vscale x 2 x i32> @llvm.fptosi.sat.nxv2f32.nxv2i32(<vscale x 2 x float> %f)
  ret <vscale x 2 x i32> %x
}

define <vscale x 4 x i32> @test_signed_v4f32_v4i32(<vscale x 4 x float> %f) {
; CHECK-LABEL: test_signed_v4f32_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
  %x = call <vscale x 4 x i32> @llvm.fptosi.sat.nxv4f32.nxv4i32(<vscale x 4 x float> %f)
  ret <vscale x 4 x i32> %x
}

define <vscale x 8 x i32> @test_signed_v8f32_v8i32(<vscale x 8 x float> %f) {
; CHECK-LABEL: test_signed_v8f32_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
  %x = call <vscale x 8 x i32> @llvm.fptosi.sat.nxv8f32.nxv8i32(<vscale x 8 x float> %f)
  ret <vscale x 8 x i32> %x
}

define <vscale x 4 x i16> @test_signed_v4f32_v4i16(<vscale x 4 x float> %f) {
; CHECK-LABEL: test_signed_v4f32_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT:    vfncvt.rtz.x.f.w v10, v8
; CHECK-NEXT:    vmerge.vim v8, v10, 0, v0
; CHECK-NEXT:    ret
  %x = call <vscale x 4 x i16> @llvm.fptosi.sat.nxv4f32.nxv4i16(<vscale x 4 x float> %f)
  ret <vscale x 4 x i16> %x
}

define <vscale x 8 x i16> @test_signed_v8f32_v8i16(<vscale x 8 x float> %f) {
; CHECK-LABEL: test_signed_v8f32_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT:    vfncvt.rtz.x.f.w v12, v8
; CHECK-NEXT:    vmerge.vim v8, v12, 0, v0
; CHECK-NEXT:    ret
  %x = call <vscale x 8 x i16> @llvm.fptosi.sat.nxv8f32.nxv8i16(<vscale x 8 x float> %f)
  ret <vscale x 8 x i16> %x
}

define <vscale x 2 x i64> @test_signed_v2f32_v2i64(<vscale x 2 x float> %f) {
; CHECK-LABEL: test_signed_v2f32_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    vfwcvt.rtz.x.f.v v10, v8
; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v10, 0, v0
; CHECK-NEXT:    ret
  %x = call <vscale x 2 x i64> @llvm.fptosi.sat.nxv2f32.nxv2i64(<vscale x 2 x float> %f)
  ret <vscale x 2 x i64> %x
}

define <vscale x 4 x i64> @test_signed_v4f32_v4i64(<vscale x 4 x float> %f) {
; CHECK-LABEL: test_signed_v4f32_v4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    vfwcvt.rtz.x.f.v v12, v8
; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v12, 0, v0
; CHECK-NEXT:    ret
  %x = call <vscale x 4 x i64> @llvm.fptosi.sat.nxv4f32.nxv4i64(<vscale x 4 x float> %f)
  ret <vscale x 4 x i64> %x
}
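
; double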

declare <vscale x 2 x i32> @llvm.fptosi.sat.nxv2f64.nxv2i32(<vscale x 2 x double>)
declare <vscale x 4 x i32> @llvm.fptosi.sat.nxv4f64.nxv4i32(<vscale x 4 x double>)
declare <vscale x 8 x i32> @llvm.fptosi.sat.nxv8f64.nxv8i32(<vscale x 8 x double>)
declare <vscale x 4 x i16> @llvm.fptosi.sat.nxv4f64.nxv4i16(<vscale x 4 x double>)
declare <vscale x 8 x i16> @llvm.fptosi.sat.nxv8f64.nxv8i16(<vscale x 8 x double>)
declare <vscale x 2 x i64> @llvm.fptosi.sat.nxv2f64.nxv2i64(<vscale x 2 x double>)
declare <vscale x 4 x i64> @llvm.fptosi.sat.nxv4f64.nxv4i64(<vscale x 4 x double>)

define <vscale x 2 x i32> @test_signed_v2f64_v2i32(<vscale x 2 x double> %f) {
; CHECK-LABEL: test_signed_v2f64_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfncvt.rtz.x.f.w v10, v8
; CHECK-NEXT:    vmerge.vim v8, v10, 0, v0
; CHECK-NEXT:    ret
  %x = call <vscale x 2 x i32> @llvm.fptosi.sat.nxv2f64.nxv2i32(<vscale x 2 x double> %f)
  ret <vscale x 2 x i32> %x
}

define <vscale x 4 x i32> @test_signed_v4f64_v4i32(<vscale x 4 x double> %f) {
; CHECK-LABEL: test_signed_v4f64_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT:    vfncvt.rtz.x.f.w v12, v8
; CHECK-NEXT:    vmerge.vim v8, v12, 0, v0
; CHECK-NEXT:    ret
  %x = call <vscale x 4 x i32> @llvm.fptosi.sat.nxv4f64.nxv4i32(<vscale x 4 x double> %f)
  ret <vscale x 4 x i32> %x
}

define <vscale x 8 x i32> @test_signed_v8f64_v8i32(<vscale x 8 x double> %f) {
; CHECK-LABEL: test_signed_v8f64_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT:    vfncvt.rtz.x.f.w v16, v8
; CHECK-NEXT:    vmerge.vim v8, v16, 0, v0
; CHECK-NEXT:    ret
  %x = call <vscale x 8 x i32> @llvm.fptosi.sat.nxv8f64.nxv8i32(<vscale x 8 x double> %f)
  ret <vscale x 8 x i32> %x
}
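
; The f64 -> i16 cases cannot narrow in a single conversion, so they go
; f64 -> i32 with vfncvt.rtz.x.f.w and then i32 -> i16 with vnclip.wi, whose
; signed saturation preserves the clamping semantics.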

define <vscale x 4 x i16> @test_signed_v4f64_v4i16(<vscale x 4 x double> %f) {
; CHECK-LABEL: test_signed_v4f64_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT:    vfncvt.rtz.x.f.w v12, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT:    vnclip.wi v8, v12, 0
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
  %x = call <vscale x 4 x i16> @llvm.fptosi.sat.nxv4f64.nxv4i16(<vscale x 4 x double> %f)
  ret <vscale x 4 x i16> %x
}

define <vscale x 8 x i16> @test_signed_v8f64_v8i16(<vscale x 8 x double> %f) {
; CHECK-LABEL: test_signed_v8f64_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT:    vfncvt.rtz.x.f.w v16, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT:    vnclip.wi v8, v16, 0
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
  %x = call <vscale x 8 x i16> @llvm.fptosi.sat.nxv8f64.nxv8i16(<vscale x 8 x double> %f)
  ret <vscale x 8 x i16> %x
}

define <vscale x 2 x i64> @test_signed_v2f64_v2i64(<vscale x 2 x double> %f) {
; CHECK-LABEL: test_signed_v2f64_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
  %x = call <vscale x 2 x i64> @llvm.fptosi.sat.nxv2f64.nxv2i64(<vscale x 2 x double> %f)
  ret <vscale x 2 x i64> %x
}

define <vscale x 4 x i64> @test_signed_v4f64_v4i64(<vscale x 4 x double> %f) {
; CHECK-LABEL: test_signed_v4f64_v4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
  %x = call <vscale x 4 x i64> @llvm.fptosi.sat.nxv4f64.nxv4i64(<vscale x 4 x double> %f)
  ret <vscale x 4 x i64> %x
}
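
; half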

declare <vscale x 2 x i32> @llvm.fptosi.sat.nxv2f16.nxv2i32(<vscale x 2 x half>)
declare <vscale x 4 x i32> @llvm.fptosi.sat.nxv4f16.nxv4i32(<vscale x 4 x half>)
declare <vscale x 8 x i32> @llvm.fptosi.sat.nxv8f16.nxv8i32(<vscale x 8 x half>)
declare <vscale x 4 x i16> @llvm.fptosi.sat.nxv4f16.nxv4i16(<vscale x 4 x half>)
declare <vscale x 8 x i16> @llvm.fptosi.sat.nxv8f16.nxv8i16(<vscale x 8 x half>)
declare <vscale x 2 x i64> @llvm.fptosi.sat.nxv2f16.nxv2i64(<vscale x 2 x half>)
declare <vscale x 4 x i64> @llvm.fptosi.sat.nxv4f16.nxv4i64(<vscale x 4 x half>)

define <vscale x 2 x i32> @test_signed_v2f16_v2i32(<vscale x 2 x half> %f) {
; CHECK-LABEL: test_signed_v2f16_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    vfwcvt.rtz.x.f.v v9, v8
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v9, 0, v0
; CHECK-NEXT:    ret
  %x = call <vscale x 2 x i32> @llvm.fptosi.sat.nxv2f16.nxv2i32(<vscale x 2 x half> %f)
  ret <vscale x 2 x i32> %x
}

define <vscale x 4 x i32> @test_signed_v4f16_v4i32(<vscale x 4 x half> %f) {
; CHECK-LABEL: test_signed_v4f16_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    vfwcvt.rtz.x.f.v v10, v8
; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v10, 0, v0
; CHECK-NEXT:    ret
  %x = call <vscale x 4 x i32> @llvm.fptosi.sat.nxv4f16.nxv4i32(<vscale x 4 x half> %f)
  ret <vscale x 4 x i32> %x
}

define <vscale x 8 x i32> @test_signed_v8f16_v8i32(<vscale x 8 x half> %f) {
; CHECK-LABEL: test_signed_v8f16_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    vfwcvt.rtz.x.f.v v12, v8
; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v12, 0, v0
; CHECK-NEXT:    ret
  %x = call <vscale x 8 x i32> @llvm.fptosi.sat.nxv8f16.nxv8i32(<vscale x 8 x half> %f)
  ret <vscale x 8 x i32> %x
}

define <vscale x 4 x i16> @test_signed_v4f16_v4i16(<vscale x 4 x half> %f) {
; CHECK-LABEL: test_signed_v4f16_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
  %x = call <vscale x 4 x i16> @llvm.fptosi.sat.nxv4f16.nxv4i16(<vscale x 4 x half> %f)
  ret <vscale x 4 x i16> %x
}

define <vscale x 8 x i16> @test_signed_v8f16_v8i16(<vscale x 8 x half> %f) {
; CHECK-LABEL: test_signed_v8f16_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
  %x = call <vscale x 8 x i16> @llvm.fptosi.sat.nxv8f16.nxv8i16(<vscale x 8 x half> %f)
  ret <vscale x 8 x i16> %x
}
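
; f16 -> i64 needs two widening steps: vfwcvt.f.f.v to f32 first, then
; vfwcvt.rtz.x.f.v from f32 to i64.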

define <vscale x 2 x i64> @test_signed_v2f16_v2i64(<vscale x 2 x half> %f) {
; CHECK-LABEL: test_signed_v2f16_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    vfwcvt.f.f.v v9, v8
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfwcvt.rtz.x.f.v v10, v9
; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v10, 0, v0
; CHECK-NEXT:    ret
  %x = call <vscale x 2 x i64> @llvm.fptosi.sat.nxv2f16.nxv2i64(<vscale x 2 x half> %f)
  ret <vscale x 2 x i64> %x
}

define <vscale x 4 x i64> @test_signed_v4f16_v4i64(<vscale x 4 x half> %f) {
; CHECK-LABEL: test_signed_v4f16_v4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmfne.vv v0, v8, v8
; CHECK-NEXT:    vfwcvt.f.f.v v10, v8
; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT:    vfwcvt.rtz.x.f.v v12, v10
; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v12, 0, v0
; CHECK-NEXT:    ret
  %x = call <vscale x 4 x i64> @llvm.fptosi.sat.nxv4f16.nxv4i64(<vscale x 4 x half> %f)
  ret <vscale x 4 x i64> %x
}