// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
// RUN:   -target-feature +zvfh -disable-O0-optnone \
// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN:   FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>
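
// The tests below cover the overloaded policy variants of the vlseg5e64ff
// fault-only-first segment-load intrinsics: _tu (tail undisturbed), _tum
// (tail undisturbed, masked), _tumu (tail and mask undisturbed), and _mu
// (mask undisturbed), each for the f64m1x5, i64m1x5, and u64m1x5 tuple types.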

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vlseg5e64ff_v_f64m1x5_tu
// CHECK-RV64-SAME: (target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP1]]
//
vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const double *base, size_t *new_vl, size_t vl) {
  return __riscv_vlseg5e64ff_tu(maskedoff_tuple, base, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vlseg5e64ff_v_i64m1x5_tu
// CHECK-RV64-SAME: (target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP1]]
//
vint64m1x5_t test_vlseg5e64ff_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) {
  return __riscv_vlseg5e64ff_tu(maskedoff_tuple, base, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vlseg5e64ff_v_u64m1x5_tu
// CHECK-RV64-SAME: (target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } @llvm.riscv.vlseg5ff.triscv.vector.tuple_nxv8i8_5t.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], i64 [[VL]], i64 6)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP1]]
//
vuint64m1x5_t test_vlseg5e64ff_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) {
  return __riscv_vlseg5e64ff_tu(maskedoff_tuple, base, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vlseg5e64ff_v_f64m1x5_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2, i64 6)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP1]]
//
vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, size_t *new_vl, size_t vl) {
  return __riscv_vlseg5e64ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vlseg5e64ff_v_i64m1x5_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2, i64 6)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP1]]
//
vint64m1x5_t test_vlseg5e64ff_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) {
  return __riscv_vlseg5e64ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vlseg5e64ff_v_u64m1x5_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2, i64 6)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP1]]
//
vuint64m1x5_t test_vlseg5e64ff_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) {
  return __riscv_vlseg5e64ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vlseg5e64ff_v_f64m1x5_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0, i64 6)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP1]]
//
vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, size_t *new_vl, size_t vl) {
  return __riscv_vlseg5e64ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vlseg5e64ff_v_i64m1x5_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0, i64 6)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP1]]
//
vint64m1x5_t test_vlseg5e64ff_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) {
  return __riscv_vlseg5e64ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vlseg5e64ff_v_u64m1x5_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0, i64 6)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP1]]
//
vuint64m1x5_t test_vlseg5e64ff_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) {
  return __riscv_vlseg5e64ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vlseg5e64ff_v_f64m1x5_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1, i64 6)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP1]]
//
vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const double *base, size_t *new_vl, size_t vl) {
  return __riscv_vlseg5e64ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vlseg5e64ff_v_i64m1x5_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1, i64 6)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP1]]
//
vint64m1x5_t test_vlseg5e64ff_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) {
  return __riscv_vlseg5e64ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
}

// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vlseg5e64ff_v_u64m1x5_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[MASKEDOFF_TUPLE:%.*]], ptr noundef [[BASE:%.*]], ptr noundef [[NEW_VL:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } @llvm.riscv.vlseg5ff.mask.triscv.vector.tuple_nxv8i8_5t.i64.nxv1i1(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[MASKEDOFF_TUPLE]], ptr [[BASE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1, i64 6)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { target("riscv.vector.tuple", <vscale x 8 x i8>, 5), i64 } [[TMP0]], 1
// CHECK-RV64-NEXT: store i64 [[TMP2]], ptr [[NEW_VL]], align 8
// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP1]]
//
vuint64m1x5_t test_vlseg5e64ff_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) {
  return __riscv_vlseg5e64ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
}