// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \
// RUN:   -target-feature +experimental-zvbb \
// RUN:   -target-feature +experimental-zvbc \
// RUN:   -target-feature +experimental-zvkg \
// RUN:   -target-feature +experimental-zvkned \
// RUN:   -target-feature +experimental-zvknhb \
// RUN:   -target-feature +experimental-zvksed \
// RUN:   -target-feature +experimental-zvksh -disable-O0-optnone \
// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN:   FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>
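
// Tests the overloaded tail-undisturbed (_tu) form of the Zvksh vsm3c
// intrinsic across the unsigned 32-bit element types, u32mf2 through u32m8.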

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vsm3c_vi_u32mf2_tu
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsm3c.nxv1i32.i64.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
vuint32mf2_t test_vsm3c_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
  return __riscv_vsm3c_tu(vd, vs2, 0, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vsm3c_vi_u32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsm3c.nxv2i32.i64.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
vuint32m1_t test_vsm3c_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
  return __riscv_vsm3c_tu(vd, vs2, 0, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vsm3c_vi_u32m2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsm3c.nxv4i32.i64.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
vuint32m2_t test_vsm3c_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
  return __riscv_vsm3c_tu(vd, vs2, 0, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vsm3c_vi_u32m4_tu
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsm3c.nxv8i32.i64.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
vuint32m4_t test_vsm3c_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
  return __riscv_vsm3c_tu(vd, vs2, 0, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vsm3c_vi_u32m8_tu
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsm3c.nxv16i32.i64.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
vuint32m8_t test_vsm3c_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
  return __riscv_vsm3c_tu(vd, vs2, 0, vl);
}