; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
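
; The tests below cover llvm.riscv.vfclass (unmasked) and
; llvm.riscv.vfclass.mask for every legal f16/f32/f64 element count from
; nxv1 through nxv32. Each unmasked form should lower to a single
; vsetvli + vfclass.v with a "ta, ma" policy; each masked form should use
; "tu, mu" (policy operand 0) and take the mask via v0.t.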

declare <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x half>,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vfclass_v_nxv1i16_nxv1f16(
; CHECK-LABEL: intrinsic_vfclass_v_nxv1i16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfclass.v v8, v8
; CHECK-NEXT:    ret
  <vscale x 1 x half> %0,
  iXLen %1) nounwind {
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1i16(
    <vscale x 1 x i16> undef,
    <vscale x 1 x half> %0,
    iXLen %1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i16> @intrinsic_vfclass_mask_v_nxv1i16_nxv1f16(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT:    vfclass.v v8, v9, v0.t
; CHECK-NEXT:    ret
  <vscale x 1 x i16> %0,
  <vscale x 1 x half> %1,
  <vscale x 1 x i1> %2,
  iXLen %3) nounwind {
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vfclass.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x half>,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vfclass_v_nxv2i16_nxv2f16(
; CHECK-LABEL: intrinsic_vfclass_v_nxv2i16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfclass.v v8, v8
; CHECK-NEXT:    ret
  <vscale x 2 x half> %0,
  iXLen %1) nounwind {
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vfclass.nxv2i16(
    <vscale x 2 x i16> undef,
    <vscale x 2 x half> %0,
    iXLen %1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x half>,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i16> @intrinsic_vfclass_mask_v_nxv2i16_nxv2f16(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT:    vfclass.v v8, v9, v0.t
; CHECK-NEXT:    ret
  <vscale x 2 x i16> %0,
  <vscale x 2 x half> %1,
  <vscale x 2 x i1> %2,
  iXLen %3) nounwind {
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x half> %1,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vfclass.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x half>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vfclass_v_nxv4i16_nxv4f16(
; CHECK-LABEL: intrinsic_vfclass_v_nxv4i16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vfclass.v v8, v8
; CHECK-NEXT:    ret
  <vscale x 4 x half> %0,
  iXLen %1) nounwind {
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vfclass.nxv4i16(
    <vscale x 4 x i16> undef,
    <vscale x 4 x half> %0,
    iXLen %1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x half>,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i16> @intrinsic_vfclass_mask_v_nxv4i16_nxv4f16(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv4i16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT:    vfclass.v v8, v9, v0.t
; CHECK-NEXT:    ret
  <vscale x 4 x i16> %0,
  <vscale x 4 x half> %1,
  <vscale x 4 x i1> %2,
  iXLen %3) nounwind {
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x half> %1,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vfclass.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x half>,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vfclass_v_nxv8i16_nxv8f16(
; CHECK-LABEL: intrinsic_vfclass_v_nxv8i16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vfclass.v v8, v8
; CHECK-NEXT:    ret
  <vscale x 8 x half> %0,
  iXLen %1) nounwind {
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vfclass.nxv8i16(
    <vscale x 8 x i16> undef,
    <vscale x 8 x half> %0,
    iXLen %1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x half>,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 8 x i16> @intrinsic_vfclass_mask_v_nxv8i16_nxv8f16(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
; CHECK-NEXT:    vfclass.v v8, v10, v0.t
; CHECK-NEXT:    ret
  <vscale x 8 x i16> %0,
  <vscale x 8 x half> %1,
  <vscale x 8 x i1> %2,
  iXLen %3) nounwind {
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x half> %1,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vfclass.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x half>,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vfclass_v_nxv16i16_nxv16f16(
; CHECK-LABEL: intrinsic_vfclass_v_nxv16i16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vfclass.v v8, v8
; CHECK-NEXT:    ret
  <vscale x 16 x half> %0,
  iXLen %1) nounwind {
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vfclass.nxv16i16(
    <vscale x 16 x i16> undef,
    <vscale x 16 x half> %0,
    iXLen %1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x half>,
  <vscale x 16 x i1>,
  iXLen, iXLen);

define <vscale x 16 x i16> @intrinsic_vfclass_mask_v_nxv16i16_nxv16f16(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv16i16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
; CHECK-NEXT:    vfclass.v v8, v12, v0.t
; CHECK-NEXT:    ret
  <vscale x 16 x i16> %0,
  <vscale x 16 x half> %1,
  <vscale x 16 x i1> %2,
  iXLen %3) nounwind {
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x half> %1,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vfclass.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x half>,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vfclass_v_nxv32i16_nxv32f16(
; CHECK-LABEL: intrinsic_vfclass_v_nxv32i16_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vfclass.v v8, v8
; CHECK-NEXT:    ret
  <vscale x 32 x half> %0,
  iXLen %1) nounwind {
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vfclass.nxv32i16(
    <vscale x 32 x i16> undef,
    <vscale x 32 x half> %0,
    iXLen %1)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x half>,
  <vscale x 32 x i1>,
  iXLen, iXLen);

define <vscale x 32 x i16> @intrinsic_vfclass_mask_v_nxv32i16_nxv32f16(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv32i16_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
; CHECK-NEXT:    vfclass.v v8, v16, v0.t
; CHECK-NEXT:    ret
  <vscale x 32 x i16> %0,
  <vscale x 32 x half> %1,
  <vscale x 32 x i1> %2,
  iXLen %3) nounwind {
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x half> %1,
    <vscale x 32 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 32 x i16> %a
}
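
; Same pattern for single-precision sources: f32 inputs classified into
; i32 results at e32.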

declare <vscale x 1 x i32> @llvm.riscv.vfclass.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x float>,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vfclass_v_nxv1i32_nxv1f32(
; CHECK-LABEL: intrinsic_vfclass_v_nxv1i32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfclass.v v8, v8
; CHECK-NEXT:    ret
  <vscale x 1 x float> %0,
  iXLen %1) nounwind {
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vfclass.nxv1i32(
    <vscale x 1 x i32> undef,
    <vscale x 1 x float> %0,
    iXLen %1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vfclass.mask.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x float>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i32> @intrinsic_vfclass_mask_v_nxv1i32_nxv1f32(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
; CHECK-NEXT:    vfclass.v v8, v9, v0.t
; CHECK-NEXT:    ret
  <vscale x 1 x i32> %0,
  <vscale x 1 x float> %1,
  <vscale x 1 x i1> %2,
  iXLen %3) nounwind {
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vfclass.mask.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vfclass.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x float>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vfclass_v_nxv2i32_nxv2f32(
; CHECK-LABEL: intrinsic_vfclass_v_nxv2i32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfclass.v v8, v8
; CHECK-NEXT:    ret
  <vscale x 2 x float> %0,
  iXLen %1) nounwind {
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vfclass.nxv2i32(
    <vscale x 2 x i32> undef,
    <vscale x 2 x float> %0,
    iXLen %1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vfclass.mask.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x float>,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i32> @intrinsic_vfclass_mask_v_nxv2i32_nxv2f32(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vfclass.v v8, v9, v0.t
; CHECK-NEXT:    ret
  <vscale x 2 x i32> %0,
  <vscale x 2 x float> %1,
  <vscale x 2 x i1> %2,
  iXLen %3) nounwind {
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vfclass.mask.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x float> %1,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vfclass.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x float>,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vfclass_v_nxv4i32_nxv4f32(
; CHECK-LABEL: intrinsic_vfclass_v_nxv4i32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfclass.v v8, v8
; CHECK-NEXT:    ret
  <vscale x 4 x float> %0,
  iXLen %1) nounwind {
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vfclass.nxv4i32(
    <vscale x 4 x i32> undef,
    <vscale x 4 x float> %0,
    iXLen %1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vfclass.mask.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x float>,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i32> @intrinsic_vfclass_mask_v_nxv4i32_nxv4f32(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv4i32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
; CHECK-NEXT:    vfclass.v v8, v10, v0.t
; CHECK-NEXT:    ret
  <vscale x 4 x i32> %0,
  <vscale x 4 x float> %1,
  <vscale x 4 x i1> %2,
  iXLen %3) nounwind {
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vfclass.mask.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x float> %1,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vfclass.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x float>,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vfclass_v_nxv8i32_nxv8f32(
; CHECK-LABEL: intrinsic_vfclass_v_nxv8i32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfclass.v v8, v8
; CHECK-NEXT:    ret
  <vscale x 8 x float> %0,
  iXLen %1) nounwind {
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vfclass.nxv8i32(
    <vscale x 8 x i32> undef,
    <vscale x 8 x float> %0,
    iXLen %1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vfclass.mask.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x float>,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 8 x i32> @intrinsic_vfclass_mask_v_nxv8i32_nxv8f32(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
; CHECK-NEXT:    vfclass.v v8, v12, v0.t
; CHECK-NEXT:    ret
  <vscale x 8 x i32> %0,
  <vscale x 8 x float> %1,
  <vscale x 8 x i1> %2,
  iXLen %3) nounwind {
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vfclass.mask.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x float> %1,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vfclass.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x float>,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vfclass_v_nxv16i32_nxv16f32(
; CHECK-LABEL: intrinsic_vfclass_v_nxv16i32_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vfclass.v v8, v8
; CHECK-NEXT:    ret
  <vscale x 16 x float> %0,
  iXLen %1) nounwind {
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vfclass.nxv16i32(
    <vscale x 16 x i32> undef,
    <vscale x 16 x float> %0,
    iXLen %1)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vfclass.mask.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x float>,
  <vscale x 16 x i1>,
  iXLen, iXLen);

define <vscale x 16 x i32> @intrinsic_vfclass_mask_v_nxv16i32_nxv16f32(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv16i32_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
; CHECK-NEXT:    vfclass.v v8, v16, v0.t
; CHECK-NEXT:    ret
  <vscale x 16 x i32> %0,
  <vscale x 16 x float> %1,
  <vscale x 16 x i1> %2,
  iXLen %3) nounwind {
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vfclass.mask.nxv16i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x float> %1,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 16 x i32> %a
}
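
; And for double-precision sources: f64 inputs classified into i64 results
; at e64.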

declare <vscale x 1 x i64> @llvm.riscv.vfclass.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x double>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vfclass_v_nxv1i64_nxv1f64(
; CHECK-LABEL: intrinsic_vfclass_v_nxv1i64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfclass.v v8, v8
; CHECK-NEXT:    ret
  <vscale x 1 x double> %0,
  iXLen %1) nounwind {
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vfclass.nxv1i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x double> %0,
    iXLen %1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vfclass.mask.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x double>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i64> @intrinsic_vfclass_mask_v_nxv1i64_nxv1f64(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
; CHECK-NEXT:    vfclass.v v8, v9, v0.t
; CHECK-NEXT:    ret
  <vscale x 1 x i64> %0,
  <vscale x 1 x double> %1,
  <vscale x 1 x i1> %2,
  iXLen %3) nounwind {
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vfclass.mask.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x double> %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vfclass.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x double>,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vfclass_v_nxv2i64_nxv2f64(
; CHECK-LABEL: intrinsic_vfclass_v_nxv2i64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfclass.v v8, v8
; CHECK-NEXT:    ret
  <vscale x 2 x double> %0,
  iXLen %1) nounwind {
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vfclass.nxv2i64(
    <vscale x 2 x i64> undef,
    <vscale x 2 x double> %0,
    iXLen %1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vfclass.mask.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x double>,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i64> @intrinsic_vfclass_mask_v_nxv2i64_nxv2f64(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
; CHECK-NEXT:    vfclass.v v8, v10, v0.t
; CHECK-NEXT:    ret
  <vscale x 2 x i64> %0,
  <vscale x 2 x double> %1,
  <vscale x 2 x i1> %2,
  iXLen %3) nounwind {
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vfclass.mask.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x double> %1,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vfclass.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x double>,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vfclass_v_nxv4i64_nxv4f64(
; CHECK-LABEL: intrinsic_vfclass_v_nxv4i64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vfclass.v v8, v8
; CHECK-NEXT:    ret
  <vscale x 4 x double> %0,
  iXLen %1) nounwind {
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vfclass.nxv4i64(
    <vscale x 4 x i64> undef,
    <vscale x 4 x double> %0,
    iXLen %1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vfclass.mask.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x double>,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i64> @intrinsic_vfclass_mask_v_nxv4i64_nxv4f64(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv4i64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
; CHECK-NEXT:    vfclass.v v8, v12, v0.t
; CHECK-NEXT:    ret
  <vscale x 4 x i64> %0,
  <vscale x 4 x double> %1,
  <vscale x 4 x i1> %2,
  iXLen %3) nounwind {
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vfclass.mask.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x double> %1,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vfclass.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x double>,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vfclass_v_nxv8i64_nxv8f64(
; CHECK-LABEL: intrinsic_vfclass_v_nxv8i64_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfclass.v v8, v8
; CHECK-NEXT:    ret
  <vscale x 8 x double> %0,
  iXLen %1) nounwind {
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vfclass.nxv8i64(
    <vscale x 8 x i64> undef,
    <vscale x 8 x double> %0,
    iXLen %1)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vfclass.mask.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x double>,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 8 x i64> @intrinsic_vfclass_mask_v_nxv8i64_nxv8f64(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i64_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
; CHECK-NEXT:    vfclass.v v8, v16, v0.t
; CHECK-NEXT:    ret
  <vscale x 8 x i64> %0,
  <vscale x 8 x double> %1,
  <vscale x 8 x i1> %2,
  iXLen %3) nounwind {
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vfclass.mask.nxv8i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x double> %1,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 0)

  ret <vscale x 8 x i64> %a
}