// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \
// RUN:   -target-feature +experimental-zvbb \
// RUN:   -target-feature +experimental-zvbc \
// RUN:   -target-feature +experimental-zvkg \
// RUN:   -target-feature +experimental-zvkned \
// RUN:   -target-feature +experimental-zvknhb \
// RUN:   -target-feature +experimental-zvksed \
// RUN:   -target-feature +experimental-zvksh -disable-O0-optnone \
// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN:   FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>
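
// The functions below exercise the tail-undisturbed (_tu) overloads of the
// __riscv_vsha2ms intrinsic (vector SHA-2 message schedule, provided by the
// Zvknhb extension) for the u32mf2 through u32m8 and u64m1 through u64m8 types.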
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vsha2ms_vv_u32mf2_tu
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsha2ms.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vsha2ms_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
  return __riscv_vsha2ms_tu(vd, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vsha2ms_vv_u32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsha2ms.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vsha2ms_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
  return __riscv_vsha2ms_tu(vd, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vsha2ms_vv_u32m2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsha2ms.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vsha2ms_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
  return __riscv_vsha2ms_tu(vd, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vsha2ms_vv_u32m4_tu
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsha2ms.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vsha2ms_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
  return __riscv_vsha2ms_tu(vd, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vsha2ms_vv_u32m8_tu
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsha2ms.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vsha2ms_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
  return __riscv_vsha2ms_tu(vd, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vsha2ms_vv_u64m1_tu
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsha2ms.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vsha2ms_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
  return __riscv_vsha2ms_tu(vd, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vsha2ms_vv_u64m2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsha2ms.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vsha2ms_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
  return __riscv_vsha2ms_tu(vd, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vsha2ms_vv_u64m4_tu
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsha2ms.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vsha2ms_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
  return __riscv_vsha2ms_tu(vd, vs2, vs1, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vsha2ms_vv_u64m8_tu
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsha2ms.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vsha2ms_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
  return __riscv_vsha2ms_tu(vd, vs2, vs1, vl);
}