; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zvfh,+zvfbfmin,+f,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,ZVFH32
; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zvfh,+zvfbfmin,+f,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,ZVFH64
; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zvfhmin,+zvfbfmin,+f,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,ZVFHMIN32
; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zvfhmin,+zvfbfmin,+f,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,ZVFHMIN64
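
; The four RUN lines cover the cross product of XLEN (riscv32 vs. riscv64)
; and half-precision vector support (+zvfh, native FP16 arithmetic, vs.
; +zvfhmin, conversion-only FP16). Output common to all configurations is
; checked under CHECK; the ZVFH/ZVFHMIN and 32/64-suffixed prefixes pick up
; the cases where the feature set or the scalar XLEN changes the expansion
; (see the v3i15 tests below).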

define void @fp2si_v2f32_v2i32(ptr %x, ptr %y) {
; CHECK-LABEL: fp2si_v2f32_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
; CHECK-NEXT:    vse32.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <2 x float>, ptr %x
  %d = fptosi <2 x float> %a to <2 x i32>
  store <2 x i32> %d, ptr %y
  ret void
}

define void @fp2ui_v2f32_v2i32(ptr %x, ptr %y) {
; CHECK-LABEL: fp2ui_v2f32_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
; CHECK-NEXT:    vse32.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <2 x float>, ptr %x
  %d = fptoui <2 x float> %a to <2 x i32>
  store <2 x i32> %d, ptr %y
  ret void
}

define <2 x i1> @fp2si_v2f32_v2i1(<2 x float> %x) {
; CHECK-LABEL: fp2si_v2f32_v2i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8
; CHECK-NEXT:    vand.vi v8, v9, 1
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %z = fptosi <2 x float> %x to <2 x i1>
  ret <2 x i1> %z
}

define <2 x i15> @fp2si_v2f32_v2i15(<2 x float> %x) {
; CHECK-LABEL: fp2si_v2f32_v2i15:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %z = fptosi <2 x float> %x to <2 x i15>
  ret <2 x i15> %z
}

define <2 x i15> @fp2ui_v2f32_v2i15(<2 x float> %x) {
; CHECK-LABEL: fp2ui_v2f32_v2i15:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %z = fptoui <2 x float> %x to <2 x i15>
  ret <2 x i15> %z
}

define <2 x i1> @fp2ui_v2f32_v2i1(<2 x float> %x) {
; CHECK-LABEL: fp2ui_v2f32_v2i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vfncvt.rtz.xu.f.w v9, v8
; CHECK-NEXT:    vand.vi v8, v9, 1
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %z = fptoui <2 x float> %x to <2 x i1>
  ret <2 x i1> %z
}

define void @fp2si_v3f32_v3i32(ptr %x, ptr %y) {
; CHECK-LABEL: fp2si_v3f32_v3i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 3, e32, m1, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
; CHECK-NEXT:    vse32.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <3 x float>, ptr %x
  %d = fptosi <3 x float> %a to <3 x i32>
  store <3 x i32> %d, ptr %y
  ret void
}

define void @fp2ui_v3f32_v3i32(ptr %x, ptr %y) {
; CHECK-LABEL: fp2ui_v3f32_v3i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 3, e32, m1, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
; CHECK-NEXT:    vse32.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <3 x float>, ptr %x
  %d = fptoui <3 x float> %a to <3 x i32>
  store <3 x i32> %d, ptr %y
  ret void
}

define <3 x i1> @fp2si_v3f32_v3i1(<3 x float> %x) {
; CHECK-LABEL: fp2si_v3f32_v3i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8
; CHECK-NEXT:    vand.vi v8, v9, 1
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %z = fptosi <3 x float> %x to <3 x i1>
  ret <3 x i1> %z
}

; FIXME: This is expanded when it could be widened + promoted.
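; The expansion below moves each 15-bit lane to a scalar register, masks it
; with an slli/srli pair, and packs all three lanes into 45 bits, stored as a
; 32-bit word plus a 16-bit halfword. A widened + promoted lowering could
; instead keep the data in vectors, roughly (hypothetical IR, not what is
; currently generated):
;   %w = shufflevector <3 x float> %x, <3 x float> poison,
;                      <4 x i32> <i32 0, i32 1, i32 2, i32 poison>
;   %t = fptosi <4 x float> %w to <4 x i16>   ; a single vfncvt.rtz.x.f.w
; with the i15 lanes then repacked from the <4 x i16> result.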
define <3 x i15> @fp2si_v3f32_v3i15(<3 x float> %x) {
; ZVFH32-LABEL: fp2si_v3f32_v3i15:
; ZVFH32:       # %bb.0:
; ZVFH32-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; ZVFH32-NEXT:    vfncvt.rtz.x.f.w v9, v8
; ZVFH32-NEXT:    vslidedown.vi v8, v9, 2
; ZVFH32-NEXT:    vmv.x.s a1, v9
; ZVFH32-NEXT:    vslidedown.vi v9, v9, 1
; ZVFH32-NEXT:    vmv.x.s a2, v8
; ZVFH32-NEXT:    slli a1, a1, 17
; ZVFH32-NEXT:    srli a1, a1, 17
; ZVFH32-NEXT:    slli a3, a2, 30
; ZVFH32-NEXT:    or a1, a1, a3
; ZVFH32-NEXT:    vmv.x.s a3, v9
; ZVFH32-NEXT:    slli a2, a2, 17
; ZVFH32-NEXT:    slli a3, a3, 17
; ZVFH32-NEXT:    srli a2, a2, 19
; ZVFH32-NEXT:    srli a3, a3, 2
; ZVFH32-NEXT:    or a1, a1, a3
; ZVFH32-NEXT:    sw a1, 0(a0)
; ZVFH32-NEXT:    sh a2, 4(a0)
; ZVFH32-NEXT:    ret
;
; ZVFH64-LABEL: fp2si_v3f32_v3i15:
; ZVFH64:       # %bb.0:
; ZVFH64-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; ZVFH64-NEXT:    vfncvt.rtz.x.f.w v9, v8
; ZVFH64-NEXT:    vmv.x.s a1, v9
; ZVFH64-NEXT:    vslidedown.vi v8, v9, 1
; ZVFH64-NEXT:    vslidedown.vi v9, v9, 2
; ZVFH64-NEXT:    slli a1, a1, 49
; ZVFH64-NEXT:    vmv.x.s a2, v8
; ZVFH64-NEXT:    vmv.x.s a3, v9
; ZVFH64-NEXT:    srli a1, a1, 49
; ZVFH64-NEXT:    slli a2, a2, 49
; ZVFH64-NEXT:    slli a3, a3, 30
; ZVFH64-NEXT:    srli a2, a2, 34
; ZVFH64-NEXT:    or a1, a1, a3
; ZVFH64-NEXT:    or a1, a1, a2
; ZVFH64-NEXT:    slli a2, a1, 19
; ZVFH64-NEXT:    srli a2, a2, 51
; ZVFH64-NEXT:    sw a1, 0(a0)
; ZVFH64-NEXT:    sh a2, 4(a0)
; ZVFH64-NEXT:    ret
;
; ZVFHMIN32-LABEL: fp2si_v3f32_v3i15:
; ZVFHMIN32:       # %bb.0:
; ZVFHMIN32-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; ZVFHMIN32-NEXT:    vfncvt.rtz.x.f.w v9, v8
; ZVFHMIN32-NEXT:    vslidedown.vi v8, v9, 2
; ZVFHMIN32-NEXT:    vmv.x.s a1, v9
; ZVFHMIN32-NEXT:    vslidedown.vi v9, v9, 1
; ZVFHMIN32-NEXT:    vmv.x.s a2, v8
; ZVFHMIN32-NEXT:    slli a1, a1, 17
; ZVFHMIN32-NEXT:    srli a1, a1, 17
; ZVFHMIN32-NEXT:    slli a3, a2, 30
; ZVFHMIN32-NEXT:    or a1, a1, a3
; ZVFHMIN32-NEXT:    vmv.x.s a3, v9
; ZVFHMIN32-NEXT:    slli a2, a2, 17
; ZVFHMIN32-NEXT:    slli a3, a3, 17
; ZVFHMIN32-NEXT:    srli a2, a2, 19
; ZVFHMIN32-NEXT:    srli a3, a3, 2
; ZVFHMIN32-NEXT:    or a1, a1, a3
; ZVFHMIN32-NEXT:    sw a1, 0(a0)
; ZVFHMIN32-NEXT:    sh a2, 4(a0)
; ZVFHMIN32-NEXT:    ret
;
; ZVFHMIN64-LABEL: fp2si_v3f32_v3i15:
; ZVFHMIN64:       # %bb.0:
; ZVFHMIN64-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; ZVFHMIN64-NEXT:    vfncvt.rtz.x.f.w v9, v8
; ZVFHMIN64-NEXT:    vmv.x.s a1, v9
; ZVFHMIN64-NEXT:    vslidedown.vi v8, v9, 1
; ZVFHMIN64-NEXT:    vslidedown.vi v9, v9, 2
; ZVFHMIN64-NEXT:    slli a1, a1, 49
; ZVFHMIN64-NEXT:    vmv.x.s a2, v8
; ZVFHMIN64-NEXT:    vmv.x.s a3, v9
; ZVFHMIN64-NEXT:    srli a1, a1, 49
; ZVFHMIN64-NEXT:    slli a2, a2, 49
; ZVFHMIN64-NEXT:    slli a3, a3, 30
; ZVFHMIN64-NEXT:    srli a2, a2, 34
; ZVFHMIN64-NEXT:    or a1, a1, a3
; ZVFHMIN64-NEXT:    or a1, a1, a2
; ZVFHMIN64-NEXT:    slli a2, a1, 19
; ZVFHMIN64-NEXT:    srli a2, a2, 51
; ZVFHMIN64-NEXT:    sw a1, 0(a0)
; ZVFHMIN64-NEXT:    sh a2, 4(a0)
; ZVFHMIN64-NEXT:    ret
  %z = fptosi <3 x float> %x to <3 x i15>
  ret <3 x i15> %z
}

; FIXME: This is expanded when it could be widened + promoted.
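; Same scalar packing as the signed case above. Note the narrowing convert is
; still the signed vfncvt.rtz.x.f.w: after the result is promoted to i16, any
; input that does not fit in i15 is poison for fptoui, so the signed narrow
; convert already covers the whole defined range [0, 32767].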
define <3 x i15> @fp2ui_v3f32_v3i15(<3 x float> %x) {
; ZVFH32-LABEL: fp2ui_v3f32_v3i15:
; ZVFH32:       # %bb.0:
; ZVFH32-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; ZVFH32-NEXT:    vfncvt.rtz.x.f.w v9, v8
; ZVFH32-NEXT:    vslidedown.vi v8, v9, 2
; ZVFH32-NEXT:    vmv.x.s a1, v9
; ZVFH32-NEXT:    vslidedown.vi v9, v9, 1
; ZVFH32-NEXT:    vmv.x.s a2, v8
; ZVFH32-NEXT:    slli a1, a1, 16
; ZVFH32-NEXT:    srli a1, a1, 16
; ZVFH32-NEXT:    slli a3, a2, 30
; ZVFH32-NEXT:    or a1, a1, a3
; ZVFH32-NEXT:    vmv.x.s a3, v9
; ZVFH32-NEXT:    slli a2, a2, 17
; ZVFH32-NEXT:    slli a3, a3, 16
; ZVFH32-NEXT:    srli a2, a2, 19
; ZVFH32-NEXT:    srli a3, a3, 1
; ZVFH32-NEXT:    or a1, a1, a3
; ZVFH32-NEXT:    sw a1, 0(a0)
; ZVFH32-NEXT:    sh a2, 4(a0)
; ZVFH32-NEXT:    ret
;
; ZVFH64-LABEL: fp2ui_v3f32_v3i15:
; ZVFH64:       # %bb.0:
; ZVFH64-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; ZVFH64-NEXT:    vfncvt.rtz.x.f.w v9, v8
; ZVFH64-NEXT:    vmv.x.s a1, v9
; ZVFH64-NEXT:    vslidedown.vi v8, v9, 1
; ZVFH64-NEXT:    vslidedown.vi v9, v9, 2
; ZVFH64-NEXT:    slli a1, a1, 48
; ZVFH64-NEXT:    vmv.x.s a2, v8
; ZVFH64-NEXT:    vmv.x.s a3, v9
; ZVFH64-NEXT:    srli a1, a1, 48
; ZVFH64-NEXT:    slli a2, a2, 48
; ZVFH64-NEXT:    slli a3, a3, 30
; ZVFH64-NEXT:    srli a2, a2, 33
; ZVFH64-NEXT:    or a1, a1, a3
; ZVFH64-NEXT:    or a1, a1, a2
; ZVFH64-NEXT:    slli a2, a1, 19
; ZVFH64-NEXT:    srli a2, a2, 51
; ZVFH64-NEXT:    sw a1, 0(a0)
; ZVFH64-NEXT:    sh a2, 4(a0)
; ZVFH64-NEXT:    ret
;
; ZVFHMIN32-LABEL: fp2ui_v3f32_v3i15:
; ZVFHMIN32:       # %bb.0:
; ZVFHMIN32-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; ZVFHMIN32-NEXT:    vfncvt.rtz.x.f.w v9, v8
; ZVFHMIN32-NEXT:    vslidedown.vi v8, v9, 2
; ZVFHMIN32-NEXT:    vmv.x.s a1, v9
; ZVFHMIN32-NEXT:    vslidedown.vi v9, v9, 1
; ZVFHMIN32-NEXT:    vmv.x.s a2, v8
; ZVFHMIN32-NEXT:    slli a1, a1, 16
; ZVFHMIN32-NEXT:    srli a1, a1, 16
; ZVFHMIN32-NEXT:    slli a3, a2, 30
; ZVFHMIN32-NEXT:    or a1, a1, a3
; ZVFHMIN32-NEXT:    vmv.x.s a3, v9
; ZVFHMIN32-NEXT:    slli a2, a2, 17
; ZVFHMIN32-NEXT:    slli a3, a3, 16
; ZVFHMIN32-NEXT:    srli a2, a2, 19
; ZVFHMIN32-NEXT:    srli a3, a3, 1
; ZVFHMIN32-NEXT:    or a1, a1, a3
; ZVFHMIN32-NEXT:    sw a1, 0(a0)
; ZVFHMIN32-NEXT:    sh a2, 4(a0)
; ZVFHMIN32-NEXT:    ret
;
; ZVFHMIN64-LABEL: fp2ui_v3f32_v3i15:
; ZVFHMIN64:       # %bb.0:
; ZVFHMIN64-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; ZVFHMIN64-NEXT:    vfncvt.rtz.x.f.w v9, v8
; ZVFHMIN64-NEXT:    vmv.x.s a1, v9
; ZVFHMIN64-NEXT:    vslidedown.vi v8, v9, 1
; ZVFHMIN64-NEXT:    vslidedown.vi v9, v9, 2
; ZVFHMIN64-NEXT:    slli a1, a1, 48
; ZVFHMIN64-NEXT:    vmv.x.s a2, v8
; ZVFHMIN64-NEXT:    vmv.x.s a3, v9
; ZVFHMIN64-NEXT:    srli a1, a1, 48
; ZVFHMIN64-NEXT:    slli a2, a2, 48
; ZVFHMIN64-NEXT:    slli a3, a3, 30
; ZVFHMIN64-NEXT:    srli a2, a2, 33
; ZVFHMIN64-NEXT:    or a1, a1, a3
; ZVFHMIN64-NEXT:    or a1, a1, a2
; ZVFHMIN64-NEXT:    slli a2, a1, 19
; ZVFHMIN64-NEXT:    srli a2, a2, 51
; ZVFHMIN64-NEXT:    sw a1, 0(a0)
; ZVFHMIN64-NEXT:    sh a2, 4(a0)
; ZVFHMIN64-NEXT:    ret
  %z = fptoui <3 x float> %x to <3 x i15>
  ret <3 x i15> %z
}

define <3 x i1> @fp2ui_v3f32_v3i1(<3 x float> %x) {
; CHECK-LABEL: fp2ui_v3f32_v3i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vfncvt.rtz.xu.f.w v9, v8
; CHECK-NEXT:    vand.vi v8, v9, 1
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %z = fptoui <3 x float> %x to <3 x i1>
  ret <3 x i1> %z
}

define void @fp2si_v8f32_v8i32(ptr %x, ptr %y) {
; CHECK-LABEL: fp2si_v8f32_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
; CHECK-NEXT:    vse32.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <8 x float>, ptr %x
  %d = fptosi <8 x float> %a to <8 x i32>
  store <8 x i32> %d, ptr %y
  ret void
}

define void @fp2ui_v8f32_v8i32(ptr %x, ptr %y) {
; CHECK-LABEL: fp2ui_v8f32_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
; CHECK-NEXT:    vse32.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <8 x float>, ptr %x
  %d = fptoui <8 x float> %a to <8 x i32>
  store <8 x i32> %d, ptr %y
  ret void
}

define <8 x i1> @fp2si_v8f32_v8i1(<8 x float> %x) {
; CHECK-LABEL: fp2si_v8f32_v8i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vfncvt.rtz.x.f.w v10, v8
; CHECK-NEXT:    vand.vi v8, v10, 1
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %z = fptosi <8 x float> %x to <8 x i1>
  ret <8 x i1> %z
}

define <8 x i1> @fp2ui_v8f32_v8i1(<8 x float> %x) {
; CHECK-LABEL: fp2ui_v8f32_v8i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vfncvt.rtz.xu.f.w v10, v8
; CHECK-NEXT:    vand.vi v8, v10, 1
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %z = fptoui <8 x float> %x to <8 x i1>
  ret <8 x i1> %z
}

define void @fp2si_v2f32_v2i64(ptr %x, ptr %y) {
; CHECK-LABEL: fp2si_v2f32_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfwcvt.rtz.x.f.v v9, v8
; CHECK-NEXT:    vse64.v v9, (a1)
; CHECK-NEXT:    ret
  %a = load <2 x float>, ptr %x
  %d = fptosi <2 x float> %a to <2 x i64>
  store <2 x i64> %d, ptr %y
  ret void
}

define void @fp2ui_v2f32_v2i64(ptr %x, ptr %y) {
; CHECK-LABEL: fp2ui_v2f32_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v9, v8
; CHECK-NEXT:    vse64.v v9, (a1)
; CHECK-NEXT:    ret
  %a = load <2 x float>, ptr %x
  %d = fptoui <2 x float> %a to <2 x i64>
  store <2 x i64> %d, ptr %y
  ret void
}

define void @fp2si_v8f32_v8i64(ptr %x, ptr %y) {
; CHECK-LABEL: fp2si_v8f32_v8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfwcvt.rtz.x.f.v v12, v8
; CHECK-NEXT:    vse64.v v12, (a1)
; CHECK-NEXT:    ret
  %a = load <8 x float>, ptr %x
  %d = fptosi <8 x float> %a to <8 x i64>
  store <8 x i64> %d, ptr %y
  ret void
}

define void @fp2ui_v8f32_v8i64(ptr %x, ptr %y) {
; CHECK-LABEL: fp2ui_v8f32_v8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v12, v8
; CHECK-NEXT:    vse64.v v12, (a1)
; CHECK-NEXT:    ret
  %a = load <8 x float>, ptr %x
  %d = fptoui <8 x float> %a to <8 x i64>
  store <8 x i64> %d, ptr %y
  ret void
}

define void @fp2si_v2bf16_v2i64(ptr %x, ptr %y) {
; CHECK-LABEL: fp2si_v2bf16_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v9
; CHECK-NEXT:    vse64.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <2 x bfloat>, ptr %x
  %d = fptosi <2 x bfloat> %a to <2 x i64>
  store <2 x i64> %d, ptr %y
  ret void
}

define void @fp2ui_v2bf16_v2i64(ptr %x, ptr %y) {
; CHECK-LABEL: fp2ui_v2bf16_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v9
; CHECK-NEXT:    vse64.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <2 x bfloat>, ptr %x
  %d = fptoui <2 x bfloat> %a to <2 x i64>
  store <2 x i64> %d, ptr %y
  ret void
}

define <2 x i1> @fp2si_v2bf16_v2i1(<2 x bfloat> %x) {
; CHECK-LABEL: fp2si_v2bf16_v2i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9
; CHECK-NEXT:    vand.vi v8, v8, 1
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %z = fptosi <2 x bfloat> %x to <2 x i1>
  ret <2 x i1> %z
}

define <2 x i1> @fp2ui_v2bf16_v2i1(<2 x bfloat> %x) {
; CHECK-LABEL: fp2ui_v2bf16_v2i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v9
; CHECK-NEXT:    vand.vi v8, v8, 1
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %z = fptoui <2 x bfloat> %x to <2 x i1>
  ret <2 x i1> %z
}

define void @fp2si_v2f16_v2i64(ptr %x, ptr %y) {
; CHECK-LABEL: fp2si_v2f16_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vfwcvt.f.f.v v9, v8
; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v9
; CHECK-NEXT:    vse64.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <2 x half>, ptr %x
  %d = fptosi <2 x half> %a to <2 x i64>
  store <2 x i64> %d, ptr %y
  ret void
}

define void @fp2ui_v2f16_v2i64(ptr %x, ptr %y) {
; CHECK-LABEL: fp2ui_v2f16_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vfwcvt.f.f.v v9, v8
; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v9
; CHECK-NEXT:    vse64.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <2 x half>, ptr %x
  %d = fptoui <2 x half> %a to <2 x i64>
  store <2 x i64> %d, ptr %y
  ret void
}

define <2 x i1> @fp2si_v2f16_v2i1(<2 x half> %x) {
; ZVFH-LABEL: fp2si_v2f16_v2i1:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; ZVFH-NEXT:    vfncvt.rtz.x.f.w v9, v8
; ZVFH-NEXT:    vand.vi v8, v9, 1
; ZVFH-NEXT:    vmsne.vi v0, v8, 0
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: fp2si_v2f16_v2i1:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT:    vfncvt.rtz.x.f.w v8, v9
; ZVFHMIN-NEXT:    vand.vi v8, v8, 1
; ZVFHMIN-NEXT:    vmsne.vi v0, v8, 0
; ZVFHMIN-NEXT:    ret
  %z = fptosi <2 x half> %x to <2 x i1>
  ret <2 x i1> %z
}

define <2 x i1> @fp2ui_v2f16_v2i1(<2 x half> %x) {
; ZVFH-LABEL: fp2ui_v2f16_v2i1:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; ZVFH-NEXT:    vfncvt.rtz.xu.f.w v9, v8
; ZVFH-NEXT:    vand.vi v8, v9, 1
; ZVFH-NEXT:    vmsne.vi v0, v8, 0
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: fp2ui_v2f16_v2i1:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT:    vfncvt.rtz.xu.f.w v8, v9
; ZVFHMIN-NEXT:    vand.vi v8, v8, 1
; ZVFHMIN-NEXT:    vmsne.vi v0, v8, 0
; ZVFHMIN-NEXT:    ret
  %z = fptoui <2 x half> %x to <2 x i1>
  ret <2 x i1> %z
}

define void @fp2si_v2f64_v2i8(ptr %x, ptr %y) {
; CHECK-LABEL: fp2si_v2f64_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v9, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vse8.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <2 x double>, ptr %x
  %d = fptosi <2 x double> %a to <2 x i8>
  store <2 x i8> %d, ptr %y
  ret void
}

define void @fp2ui_v2f64_v2i8(ptr %x, ptr %y) {
; CHECK-LABEL: fp2ui_v2f64_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vfncvt.rtz.xu.f.w v9, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v9, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vse8.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <2 x double>, ptr %x
  %d = fptoui <2 x double> %a to <2 x i8>
  store <2 x i8> %d, ptr %y
  ret void
}

define <2 x i1> @fp2si_v2f64_v2i1(<2 x double> %x) {
; CHECK-LABEL: fp2si_v2f64_v2i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8
; CHECK-NEXT:    vand.vi v8, v9, 1
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %z = fptosi <2 x double> %x to <2 x i1>
  ret <2 x i1> %z
}

define <2 x i1> @fp2ui_v2f64_v2i1(<2 x double> %x) {
; CHECK-LABEL: fp2ui_v2f64_v2i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vfncvt.rtz.xu.f.w v9, v8
; CHECK-NEXT:    vand.vi v8, v9, 1
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %z = fptoui <2 x double> %x to <2 x i1>
  ret <2 x i1> %z
}

define void @fp2si_v8f64_v8i8(ptr %x, ptr %y) {
; CHECK-LABEL: fp2si_v8f64_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vfncvt.rtz.x.f.w v12, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v12, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vse8.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <8 x double>, ptr %x
  %d = fptosi <8 x double> %a to <8 x i8>
  store <8 x i8> %d, ptr %y
  ret void
}

define void @fp2ui_v8f64_v8i8(ptr %x, ptr %y) {
; CHECK-LABEL: fp2ui_v8f64_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vfncvt.rtz.xu.f.w v12, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v12, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vse8.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <8 x double>, ptr %x
  %d = fptoui <8 x double> %a to <8 x i8>
  store <8 x i8> %d, ptr %y
  ret void
}

define <8 x i1> @fp2si_v8f64_v8i1(<8 x double> %x) {
; CHECK-LABEL: fp2si_v8f64_v8i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vfncvt.rtz.x.f.w v12, v8
; CHECK-NEXT:    vand.vi v8, v12, 1
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %z = fptosi <8 x double> %x to <8 x i1>
  ret <8 x i1> %z
}

define <8 x i1> @fp2ui_v8f64_v8i1(<8 x double> %x) {
; CHECK-LABEL: fp2ui_v8f64_v8i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vfncvt.rtz.xu.f.w v12, v8
; CHECK-NEXT:    vand.vi v8, v12, 1
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %z = fptoui <8 x double> %x to <8 x i1>
  ret <8 x i1> %z
}