clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vssseg6e64.c

// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
// RUN:   -target-feature +zvfh -disable-O0-optnone \
// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN:   FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>
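
// Tests for the overloaded __riscv_vssseg6e64 intrinsics: strided stores of
// 6-field segment tuples with 64-bit elements, in unmasked and masked forms
// for floating-point, signed, and unsigned element types.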

// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e64_v_f64m1x6
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[V_TUPLE]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]], i64 6)
// CHECK-RV64-NEXT:    ret void
//
void test_vssseg6e64_v_f64m1x6(double *base, ptrdiff_t bstride, vfloat64m1x6_t v_tuple, size_t vl) {
  return __riscv_vssseg6e64(base, bstride, v_tuple, vl);
}

// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e64_v_i64m1x6
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[V_TUPLE]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]], i64 6)
// CHECK-RV64-NEXT:    ret void
//
void test_vssseg6e64_v_i64m1x6(int64_t *base, ptrdiff_t bstride, vint64m1x6_t v_tuple, size_t vl) {
  return __riscv_vssseg6e64(base, bstride, v_tuple, vl);
}

// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e64_v_u64m1x6
// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg6.triscv.vector.tuple_nxv8i8_6t.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[V_TUPLE]], ptr [[BASE]], i64 [[BSTRIDE]], i64 [[VL]], i64 6)
// CHECK-RV64-NEXT:    ret void
//
void test_vssseg6e64_v_u64m1x6(uint64_t *base, ptrdiff_t bstride, vuint64m1x6_t v_tuple, size_t vl) {
  return __riscv_vssseg6e64(base, bstride, v_tuple, vl);
}

// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e64_v_f64m1x6_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[V_TUPLE]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 6)
// CHECK-RV64-NEXT:    ret void
//
void test_vssseg6e64_v_f64m1x6_m(vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1x6_t v_tuple, size_t vl) {
  return __riscv_vssseg6e64(mask, base, bstride, v_tuple, vl);
}

// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e64_v_i64m1x6_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[V_TUPLE]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 6)
// CHECK-RV64-NEXT:    ret void
//
void test_vssseg6e64_v_i64m1x6_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1x6_t v_tuple, size_t vl) {
  return __riscv_vssseg6e64(mask, base, bstride, v_tuple, vl);
}

// CHECK-RV64-LABEL: define dso_local void @test_vssseg6e64_v_u64m1x6_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[BSTRIDE:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[V_TUPLE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vssseg6.mask.triscv.vector.tuple_nxv8i8_6t.i64.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[V_TUPLE]], ptr [[BASE]], i64 [[BSTRIDE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 6)
// CHECK-RV64-NEXT:    ret void
//
void test_vssseg6e64_v_u64m1x6_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1x6_t v_tuple, size_t vl) {
  return __riscv_vssseg6e64(mask, base, bstride, v_tuple, vl);
}