; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvknhb \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvknhb \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
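
; vsha2cl.vv (Zvknh[ab]) performs two rounds of the low half of the SHA-2
; compression function; SEW=32 covers SHA-256, and SEW=64 (Zvknhb only) covers
; SHA-512. Each test checks that the intrinsic selects vsha2cl.vv under a
; tail-undisturbed (tu) vsetvli, matching the policy operand of 2, since the
; destination register group also carries the incoming hash state.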
declare <vscale x 4 x i32> @llvm.riscv.vsha2cl.nxv4i32.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  iXLen,
  iXLen)

define <vscale x 4 x i32> @intrinsic_vsha2cl_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsha2cl_vv_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT:    vsha2cl.vv v8, v10, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vsha2cl.nxv4i32.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i32> %2,
    iXLen %3,
    iXLen 2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vsha2cl.nxv8i32.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  iXLen,
  iXLen)

define <vscale x 8 x i32> @intrinsic_vsha2cl_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsha2cl_vv_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
; CHECK-NEXT:    vsha2cl.vv v8, v12, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vsha2cl.nxv8i32.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i32> %2,
    iXLen %3,
    iXLen 2)

  ret <vscale x 8 x i32> %a
}
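
; At LMUL=8 (m8) the vector argument registers v8-v23 hold only two register
; groups, so the third operand is passed indirectly: its address arrives in a0
; and is loaded with vl8re32.v, and the VL therefore comes in a1 rather than a0.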
declare <vscale x 16 x i32> @llvm.riscv.vsha2cl.nxv16i32.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  iXLen,
  iXLen)

define <vscale x 16 x i32> @intrinsic_vsha2cl_vv_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsha2cl_vv_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re32.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, ma
; CHECK-NEXT:    vsha2cl.vv v8, v16, v24
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vsha2cl.nxv16i32.nxv16i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    <vscale x 16 x i32> %2,
    iXLen %3,
    iXLen 2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vsha2cl.nxv4i64.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  iXLen,
  iXLen)

define <vscale x 4 x i64> @intrinsic_vsha2cl_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsha2cl_vv_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
; CHECK-NEXT:    vsha2cl.vv v8, v12, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vsha2cl.nxv4i64.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    <vscale x 4 x i64> %2,
    iXLen %3,
    iXLen 2)

  ret <vscale x 4 x i64> %a
}
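
; Same register-pressure situation as the nxv16i32 case, but with SEW=64
; (SHA-512 state words), which is only available with Zvknhb.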
declare <vscale x 8 x i64> @llvm.riscv.vsha2cl.nxv8i64.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  iXLen,
  iXLen)

define <vscale x 8 x i64> @intrinsic_vsha2cl_vv_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsha2cl_vv_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re64.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, ma
; CHECK-NEXT:    vsha2cl.vv v8, v16, v24
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vsha2cl.nxv8i64.nxv8i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    <vscale x 8 x i64> %2,
    iXLen %3,
    iXLen 2)

  ret <vscale x 8 x i64> %a
}