// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh -target-feature +xsfvcp -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
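
// This file exercises the SiFive vendor coprocessor (Xsfvcp) "xvv" intrinsics
// (opcode, vector vd, vector vs2, scalar rs1, vl) for u64 elements at LMUL m1
// through m8, and checks the LLVM IR each C-level intrinsic lowers to.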
#include <sifive_vector.h>

// p27_26 names the 2-bit opcode field in bits 27:26 of the instruction;
// 0b11 == 3 matches the leading "i64 3" operand in every CHECK line below.
#define p27_26 (0b11)

// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i64.nxv1i64.i64.i64(i64 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
void test_sf_vc_xvv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
  __riscv_sf_vc_xvv_se_u64m1(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i64.nxv2i64.i64.i64(i64 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
void test_sf_vc_xvv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
  __riscv_sf_vc_xvv_se_u64m2(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i64.nxv4i64.i64.i64(i64 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
void test_sf_vc_xvv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
  __riscv_sf_vc_xvv_se_u64m4(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvv.se.i64.nxv8i64.i64.i64(i64 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
void test_sf_vc_xvv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
  __riscv_sf_vc_xvv_se_u64m8(p27_26, vd, vs2, rs1, vl);
}

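// The sf_vc_v_* variants below return the result vector instead of being void;
// their ".se" (side effect) forms lower to the llvm.riscv.sf.vc.v.xvv.se.*
// intrinsics, which keep the coprocessor's side effects modeled in the IR.
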
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xvv.se.nxv1i64.i64.i64.i64(i64 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_sf_vc_v_xvv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
  return __riscv_sf_vc_v_xvv_se_u64m1(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xvv.se.nxv2i64.i64.i64.i64(i64 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_sf_vc_v_xvv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
  return __riscv_sf_vc_v_xvv_se_u64m2(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xvv.se.nxv4i64.i64.i64.i64(i64 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_sf_vc_v_xvv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
  return __riscv_sf_vc_v_xvv_se_u64m4(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xvv.se.nxv8i64.i64.i64.i64(i64 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_sf_vc_v_xvv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
  return __riscv_sf_vc_v_xvv_se_u64m8(p27_26, vd, vs2, rs1, vl);
}

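// The non-".se" forms below are identical at the C level but lower to the
// plain llvm.riscv.sf.vc.v.xvv.* intrinsics, i.e. without side effects modeled.
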
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xvv.nxv1i64.i64.i64.i64(i64 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_sf_vc_v_xvv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
  return __riscv_sf_vc_v_xvv_u64m1(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xvv.nxv2i64.i64.i64.i64(i64 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_sf_vc_v_xvv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
  return __riscv_sf_vc_v_xvv_u64m2(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xvv.nxv4i64.i64.i64.i64(i64 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_sf_vc_v_xvv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
  return __riscv_sf_vc_v_xvv_u64m4(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xvv.nxv8i64.i64.i64.i64(i64 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_sf_vc_v_xvv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
  return __riscv_sf_vc_v_xvv_u64m8(p27_26, vd, vs2, rs1, vl);
}