; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvknhb \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvknhb \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
; RUN: sed 's/iXLen/i32/g' %s | not --crash llc -mtriple=riscv32 -mattr=+v,+zvknha 2>&1 \
; RUN:   | FileCheck --check-prefixes=CHECK-ERROR %s
; RUN: sed 's/iXLen/i64/g' %s | not --crash llc -mtriple=riscv64 -mattr=+v,+zvknha 2>&1 \
; RUN:   | FileCheck --check-prefixes=CHECK-ERROR %s
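
; Zvknhb supports both SEW=32 and SEW=64 for the vector SHA-2 instructions,
; so the first two RUN lines expect clean instruction selection. Zvknha only
; supports SEW=32, so the last two RUN lines expect llc to crash on the
; SEW=64 tests below with the diagnostic matched by CHECK-ERROR.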
; CHECK-ERROR: LLVM ERROR: SEW=64 needs Zvknhb to be enabled.
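
; Each vsha2cl intrinsic takes the destination vector (which is also a source
; operand of the SHA-2 compression), two operand vectors, the vector length,
; and a tail/mask policy. The policy value 2 (tail undisturbed, mask agnostic)
; matches the "tu, ma" in the vsetvli checks below.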
declare <vscale x 4 x i32> @llvm.riscv.vsha2cl.nxv4i32.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  iXLen,
  iXLen)

define <vscale x 4 x i32> @intrinsic_vsha2cl_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsha2cl_vv_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT:    vsha2cl.vv v8, v10, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vsha2cl.nxv4i32.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i32> %2,
    iXLen %3, iXLen 2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vsha2cl.nxv8i32.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  iXLen,
  iXLen)

define <vscale x 8 x i32> @intrinsic_vsha2cl_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsha2cl_vv_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
; CHECK-NEXT:    vsha2cl.vv v8, v12, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vsha2cl.nxv8i32.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i32> %2,
    iXLen %3, iXLen 2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vsha2cl.nxv16i32.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  iXLen,
  iXLen)

define <vscale x 16 x i32> @intrinsic_vsha2cl_vv_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsha2cl_vv_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re32.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, ma
; CHECK-NEXT:    vsha2cl.vv v8, v16, v24
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vsha2cl.nxv16i32.nxv16i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    <vscale x 16 x i32> %2,
    iXLen %3, iXLen 2)

  ret <vscale x 16 x i32> %a
}
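
; The remaining tests use SEW=64 and therefore require Zvknhb; with only
; Zvknha enabled they trigger the CHECK-ERROR diagnostic above.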
declare <vscale x 4 x i64> @llvm.riscv.vsha2cl.nxv4i64.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  iXLen,
  iXLen)

define <vscale x 4 x i64> @intrinsic_vsha2cl_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsha2cl_vv_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
; CHECK-NEXT:    vsha2cl.vv v8, v12, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vsha2cl.nxv4i64.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    <vscale x 4 x i64> %2,
    iXLen %3, iXLen 2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vsha2cl.nxv8i64.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  iXLen,
  iXLen)

define <vscale x 8 x i64> @intrinsic_vsha2cl_vv_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsha2cl_vv_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re64.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, ma
; CHECK-NEXT:    vsha2cl.vv v8, v16, v24
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vsha2cl.nxv8i64.nxv8i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    <vscale x 8 x i64> %2,
    iXLen %3, iXLen 2)

  ret <vscale x 8 x i64> %a
}