// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \
// RUN:   -target-feature +zvbb \
// RUN:   -target-feature +zvbc \
// RUN:   -target-feature +zvkb \
// RUN:   -target-feature +zvkg \
// RUN:   -target-feature +zvkned \
// RUN:   -target-feature +zvknhb \
// RUN:   -target-feature +zvksed \
// RUN:   -target-feature +zvksh \
// RUN:   -disable-O0-optnone \
// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN:   FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>
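
// Exercise the vaesem (vector AES encrypt middle round, from the Zvkned
// extension) intrinsics in both vector-vector (.vv) and vector-scalar (.vs)
// forms across register group sizes mf2 through m8.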
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vaesem_vv_u32mf2
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaesem.vv.nxv1i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vaesem_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
  return __riscv_vaesem_vv(vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vaesem_vs_u32mf2_u32mf2
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaesem.vs.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vaesem_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
  return __riscv_vaesem_vs(vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vaesem_vs_u32mf2_u32m1
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaesem.vs.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vaesem_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) {
  return __riscv_vaesem_vs(vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vaesem_vs_u32mf2_u32m2
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaesem.vs.nxv4i32.nxv1i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vaesem_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) {
  return __riscv_vaesem_vs(vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vaesem_vs_u32mf2_u32m4
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaesem.vs.nxv8i32.nxv1i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vaesem_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) {
  return __riscv_vaesem_vs(vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vaesem_vs_u32mf2_u32m8
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaesem.vs.nxv16i32.nxv1i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vaesem_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) {
  return __riscv_vaesem_vs(vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vaesem_vv_u32m1
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaesem.vv.nxv2i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vaesem_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
  return __riscv_vaesem_vv(vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vaesem_vs_u32m1_u32m1
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaesem.vs.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vaesem_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
  return __riscv_vaesem_vs(vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vaesem_vs_u32m1_u32m2
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaesem.vs.nxv4i32.nxv2i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vaesem_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) {
  return __riscv_vaesem_vs(vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vaesem_vs_u32m1_u32m4
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaesem.vs.nxv8i32.nxv2i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vaesem_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) {
  return __riscv_vaesem_vs(vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vaesem_vs_u32m1_u32m8
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaesem.vs.nxv16i32.nxv2i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vaesem_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) {
  return __riscv_vaesem_vs(vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vaesem_vv_u32m2
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaesem.vv.nxv4i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vaesem_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
  return __riscv_vaesem_vv(vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vaesem_vs_u32m2_u32m2
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaesem.vs.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vaesem_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
  return __riscv_vaesem_vs(vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vaesem_vs_u32m2_u32m4
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaesem.vs.nxv8i32.nxv4i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vaesem_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) {
  return __riscv_vaesem_vs(vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vaesem_vs_u32m2_u32m8
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaesem.vs.nxv16i32.nxv4i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vaesem_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) {
  return __riscv_vaesem_vs(vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vaesem_vv_u32m4
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaesem.vv.nxv8i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vaesem_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
  return __riscv_vaesem_vv(vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vaesem_vs_u32m4_u32m4
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaesem.vs.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vaesem_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
  return __riscv_vaesem_vs(vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vaesem_vs_u32m4_u32m8
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaesem.vs.nxv16i32.nxv8i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vaesem_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) {
  return __riscv_vaesem_vs(vd, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vaesem_vv_u32m8
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaesem.vv.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vaesem_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
  return __riscv_vaesem_vv(vd, vs2, vl);
}