// clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vaeskf1.c
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \
// RUN:   -target-feature +zvbb \
// RUN:   -target-feature +zvbc \
// RUN:   -target-feature +zvkb \
// RUN:   -target-feature +zvkg \
// RUN:   -target-feature +zvkned \
// RUN:   -target-feature +zvknhb \
// RUN:   -target-feature +zvksed \
// RUN:   -target-feature +zvksh \
// RUN:   -disable-O0-optnone \
// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
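
// Pipeline note: clang emits unoptimized LLVM IR (-disable-O0-optnone keeps
// the functions eligible for later passes), mem2reg then promotes the allocas
// to SSA values, and FileCheck verifies the resulting calls to the
// llvm.riscv.vaeskf1 intrinsic.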

#include <riscv_vector.h>
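
// The cases below exercise the overloaded __riscv_vaeskf1 intrinsic (the
// AES-128 forward key-schedule instruction from the Zvkned extension) across
// every u32 LMUL, from u32mf2 through u32m8, with a round immediate of 0.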

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vaeskf1_vi_u32mf2
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaeskf1.nxv1i32.i64.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vaeskf1_vi_u32mf2(vuint32mf2_t vs2, size_t vl) {
  return __riscv_vaeskf1(vs2, 0, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vaeskf1_vi_u32m1
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaeskf1.nxv2i32.i64.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vaeskf1_vi_u32m1(vuint32m1_t vs2, size_t vl) {
  return __riscv_vaeskf1(vs2, 0, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vaeskf1_vi_u32m2
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaeskf1.nxv4i32.i64.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vaeskf1_vi_u32m2(vuint32m2_t vs2, size_t vl) {
  return __riscv_vaeskf1(vs2, 0, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vaeskf1_vi_u32m4
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaeskf1.nxv8i32.i64.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vaeskf1_vi_u32m4(vuint32m4_t vs2, size_t vl) {
  return __riscv_vaeskf1(vs2, 0, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vaeskf1_vi_u32m8
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaeskf1.nxv16i32.i64.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[VS2]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vaeskf1_vi_u32m8(vuint32m8_t vs2, size_t vl) {
  return __riscv_vaeskf1(vs2, 0, vl);
}
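
// ---------------------------------------------------------------------------
// Usage sketch (added for illustration; not part of the autogenerated checks
// above). A minimal, hypothetical example of one AES-128 key-expansion step:
// per the Zvkned specification the immediate selects the round number and
// must be a compile-time constant. The helper name and the choice of round 1
// are assumptions for illustration only; this test itself uses immediate 0.
static inline vuint32m1_t example_aes128_round1_key(vuint32m1_t key,
                                                    size_t vl) {
  // Derive the next round key from the current one (round immediate = 1).
  return __riscv_vaeskf1(key, 1, vl);
}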