clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/vsseg4e16.c
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
// RUN:   -target-feature +zvfbfmin \
// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN:   FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>
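
// The tests below exercise the non-overloaded, non-policy __riscv_vsseg4e16
// segmented-store intrinsics for bfloat16 tuple types: one unmasked variant
// per tuple type (bf16mf4x4, bf16mf2x4, bf16m1x4, bf16m2x4), followed by the
// masked (_m) variants.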
// CHECK-RV64-LABEL: define dso_local void @test_vsseg4e16_v_bf16mf4x4(
// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv2i8_4t.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3]], ptr [[RS1]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT:    ret void
//
void test_vsseg4e16_v_bf16mf4x4(__bf16 *rs1, vbfloat16mf4x4_t vs3, size_t vl) {
  return __riscv_vsseg4e16_v_bf16mf4x4(rs1, vs3, vl);
}

// CHECK-RV64-LABEL: define dso_local void @test_vsseg4e16_v_bf16mf2x4(
// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv4i8_4t.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3]], ptr [[RS1]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT:    ret void
//
void test_vsseg4e16_v_bf16mf2x4(__bf16 *rs1, vbfloat16mf2x4_t vs3, size_t vl) {
  return __riscv_vsseg4e16_v_bf16mf2x4(rs1, vs3, vl);
}

// CHECK-RV64-LABEL: define dso_local void @test_vsseg4e16_v_bf16m1x4(
// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv8i8_4t.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3]], ptr [[RS1]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT:    ret void
//
void test_vsseg4e16_v_bf16m1x4(__bf16 *rs1, vbfloat16m1x4_t vs3, size_t vl) {
  return __riscv_vsseg4e16_v_bf16m1x4(rs1, vs3, vl);
}

// CHECK-RV64-LABEL: define dso_local void @test_vsseg4e16_v_bf16m2x4(
// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv16i8_4t.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3]], ptr [[RS1]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT:    ret void
//
void test_vsseg4e16_v_bf16m2x4(__bf16 *rs1, vbfloat16m2x4_t vs3, size_t vl) {
  return __riscv_vsseg4e16_v_bf16m2x4(rs1, vs3, vl);
}

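// Masked (_m) variants: same stores as above, but with an extra vboolN_t mask
// operand that is forwarded to the @llvm.riscv.vsseg4.mask intrinsics.
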
// CHECK-RV64-LABEL: define dso_local void @test_vsseg4e16_v_bf16mf4x4_m(
// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT:    ret void
//
void test_vsseg4e16_v_bf16mf4x4_m(vbool64_t vm, __bf16 *rs1,
                                  vbfloat16mf4x4_t vs3, size_t vl) {
  return __riscv_vsseg4e16_v_bf16mf4x4_m(vm, rs1, vs3, vl);
}

// CHECK-RV64-LABEL: define dso_local void @test_vsseg4e16_v_bf16mf2x4_m(
// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv4i8_4t.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT:    ret void
//
void test_vsseg4e16_v_bf16mf2x4_m(vbool32_t vm, __bf16 *rs1,
                                  vbfloat16mf2x4_t vs3, size_t vl) {
  return __riscv_vsseg4e16_v_bf16mf2x4_m(vm, rs1, vs3, vl);
}

// CHECK-RV64-LABEL: define dso_local void @test_vsseg4e16_v_bf16m1x4_m(
// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv8i8_4t.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT:    ret void
//
void test_vsseg4e16_v_bf16m1x4_m(vbool16_t vm, __bf16 *rs1, vbfloat16m1x4_t vs3,
                                 size_t vl) {
  return __riscv_vsseg4e16_v_bf16m1x4_m(vm, rs1, vs3, vl);
}

// CHECK-RV64-LABEL: define dso_local void @test_vsseg4e16_v_bf16m2x4_m(
// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv16i8_4t.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 4)
// CHECK-RV64-NEXT:    ret void
//
void test_vsseg4e16_v_bf16m2x4_m(vbool8_t vm, __bf16 *rs1, vbfloat16m2x4_t vs3,
                                 size_t vl) {
  return __riscv_vsseg4e16_v_bf16m2x4_m(vm, rs1, vs3, vl);
}
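
// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the autogenerated checks above):
// a round trip that loads four bf16 segments with the companion vlseg4e16
// intrinsic and writes them back with vsseg4e16. It assumes the same
// +zvfbfmin target features as the RUN lines above and that `src`/`dst` each
// point to at least 4 * vl __bf16 elements; the function name is hypothetical.
// ---------------------------------------------------------------------------
void copy_bf16_segments(__bf16 *dst, const __bf16 *src, size_t vl) {
  // Segmented load: de-interleaves src into a tuple of four bf16 m1 vectors.
  vbfloat16m1x4_t t = __riscv_vlseg4e16_v_bf16m1x4(src, vl);
  // Segmented store: interleaves the four segments back into dst.
  __riscv_vsseg4e16_v_bf16m1x4(dst, t, vl);
}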