; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvkned \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvkned \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
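
; Codegen tests for the Zvkned AES final-round encryption intrinsics
; (vaesef.vv and vaesef.vs) at LMUL m2, m4, and m8 on RV32 and RV64.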

declare <vscale x 4 x i32> @llvm.riscv.vaesef.vv.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  iXLen, iXLen)

define <vscale x 4 x i32> @intrinsic_vaesef_vv_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vaesef_vv_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT:    vaesef.vv v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vaesef.vv.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    iXLen %2, iXLen 2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vaesef.vv.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  iXLen, iXLen)

define <vscale x 8 x i32> @intrinsic_vaesef_vv_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vaesef_vv_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
; CHECK-NEXT:    vaesef.vv v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vaesef.vv.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    iXLen %2, iXLen 2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vaesef.vv.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  iXLen, iXLen)

define <vscale x 16 x i32> @intrinsic_vaesef_vv_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vaesef_vv_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
; CHECK-NEXT:    vaesef.vv v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vaesef.vv.nxv16i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    iXLen %2, iXLen 2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vaesef.vs.nxv4i32.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  iXLen, iXLen)

define <vscale x 4 x i32> @intrinsic_vaesef_vs_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vaesef_vs_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT:    vaesef.vs v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vaesef.vs.nxv4i32.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    iXLen %2, iXLen 2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vaesef.vs.nxv8i32.nxv4i32(
  <vscale x 8 x i32>,
  <vscale x 4 x i32>,
  iXLen, iXLen)

define <vscale x 8 x i32> @intrinsic_vaesef_vs_nxv8i32(<vscale x 8 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vaesef_vs_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
; CHECK-NEXT:    vaesef.vs v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vaesef.vs.nxv8i32.nxv4i32(
    <vscale x 8 x i32> %0,
    <vscale x 4 x i32> %1,
    iXLen %2, iXLen 2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vaesef.vs.nxv16i32.nxv4i32(
  <vscale x 16 x i32>,
  <vscale x 4 x i32>,
  iXLen, iXLen)

define <vscale x 16 x i32> @intrinsic_vaesef_vs_nxv16i32(<vscale x 16 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vaesef_vs_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
; CHECK-NEXT:    vaesef.vs v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vaesef.vs.nxv16i32.nxv4i32(
    <vscale x 16 x i32> %0,
    <vscale x 4 x i32> %1,
    iXLen %2, iXLen 2)

  ret <vscale x 16 x i32> %a
}