// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \
// RUN:   -target-feature +experimental-zvbb \
// RUN:   -target-feature +experimental-zvbc \
// RUN:   -target-feature +experimental-zvkg \
// RUN:   -target-feature +experimental-zvkned \
// RUN:   -target-feature +experimental-zvknhb \
// RUN:   -target-feature +experimental-zvksed \
// RUN:   -target-feature +experimental-zvksh -disable-O0-optnone \
// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN:   FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>
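
// Editorial note (not part of the autogenerated checks): this file exercises
// the overloaded policy variants of the Zvbb vbrev intrinsic (bit-reverse
// within each element). The suffix selects the tail/mask policy, which the
// masked builtins encode in the trailing i64 policy operand of the generated
// @llvm.riscv.vbrev.mask.* calls:
//   _tu   - unmasked, tail undisturbed (no policy operand)
//   _tum  - masked, tail undisturbed, mask agnostic    (policy operand 2)
//   _tumu - masked, tail undisturbed, mask undisturbed (policy operand 0)
//   _mu   - masked, tail agnostic, mask undisturbed    (policy operand 1)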

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vbrev_v_u8mf8_tu
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vbrev.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vbrev_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) {
  return __riscv_vbrev_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vbrev_v_u8mf4_tu
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vbrev.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vbrev_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) {
  return __riscv_vbrev_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vbrev_v_u8mf2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vbrev.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vbrev_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) {
  return __riscv_vbrev_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vbrev_v_u8m1_tu
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vbrev.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vbrev_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
  return __riscv_vbrev_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vbrev_v_u8m2_tu
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vbrev.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vbrev_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) {
  return __riscv_vbrev_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vbrev_v_u8m4_tu
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vbrev.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vbrev_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) {
  return __riscv_vbrev_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vbrev_v_u8m8_tu
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vbrev.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vbrev_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
  return __riscv_vbrev_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vbrev_v_u16mf4_tu
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vbrev.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vbrev_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) {
  return __riscv_vbrev_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vbrev_v_u16mf2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vbrev.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vbrev_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
  return __riscv_vbrev_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vbrev_v_u16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vbrev.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vbrev_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) {
  return __riscv_vbrev_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vbrev_v_u16m2_tu
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vbrev.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vbrev_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
  return __riscv_vbrev_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vbrev_v_u16m4_tu
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vbrev.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vbrev_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) {
  return __riscv_vbrev_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vbrev_v_u16m8_tu
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vbrev.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vbrev_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) {
  return __riscv_vbrev_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vbrev_v_u32mf2_tu
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vbrev.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vbrev_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) {
  return __riscv_vbrev_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vbrev_v_u32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vbrev.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vbrev_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
  return __riscv_vbrev_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vbrev_v_u32m2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vbrev.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vbrev_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) {
  return __riscv_vbrev_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vbrev_v_u32m4_tu
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vbrev.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vbrev_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
  return __riscv_vbrev_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vbrev_v_u32m8_tu
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vbrev.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vbrev_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) {
  return __riscv_vbrev_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vbrev_v_u64m1_tu
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vbrev.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vbrev_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
  return __riscv_vbrev_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vbrev_v_u64m2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vbrev.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vbrev_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) {
  return __riscv_vbrev_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vbrev_v_u64m4_tu
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vbrev.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vbrev_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) {
  return __riscv_vbrev_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vbrev_v_u64m8_tu
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vbrev.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vbrev_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) {
  return __riscv_vbrev_tu(maskedoff, vs2, vl);
}
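
// Editorial note: the _tum variants below are masked with the
// tail-undisturbed, mask-agnostic policy, visible as the trailing "i64 2"
// policy operand on the @llvm.riscv.vbrev.mask.* calls.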

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vbrev_v_u8mf8_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vbrev.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vbrev_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) {
  return __riscv_vbrev_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vbrev_v_u8mf4_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vbrev.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vbrev_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) {
  return __riscv_vbrev_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vbrev_v_u8mf2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vbrev.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vbrev_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) {
  return __riscv_vbrev_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vbrev_v_u8m1_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vbrev.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vbrev_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
  return __riscv_vbrev_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vbrev_v_u8m2_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vbrev.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vbrev_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) {
  return __riscv_vbrev_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vbrev_v_u8m4_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vbrev.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vbrev_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) {
  return __riscv_vbrev_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vbrev_v_u8m8_tum
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vbrev.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vbrev_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
  return __riscv_vbrev_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vbrev_v_u16mf4_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vbrev.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vbrev_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) {
  return __riscv_vbrev_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vbrev_v_u16mf2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vbrev.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vbrev_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
  return __riscv_vbrev_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vbrev_v_u16m1_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vbrev.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vbrev_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) {
  return __riscv_vbrev_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vbrev_v_u16m2_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vbrev.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vbrev_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
  return __riscv_vbrev_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vbrev_v_u16m4_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vbrev.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vbrev_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) {
  return __riscv_vbrev_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vbrev_v_u16m8_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vbrev.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vbrev_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) {
  return __riscv_vbrev_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vbrev_v_u32mf2_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vbrev.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vbrev_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) {
  return __riscv_vbrev_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vbrev_v_u32m1_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vbrev.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vbrev_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
  return __riscv_vbrev_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vbrev_v_u32m2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vbrev.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vbrev_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) {
  return __riscv_vbrev_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vbrev_v_u32m4_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vbrev.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vbrev_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
  return __riscv_vbrev_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vbrev_v_u32m8_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vbrev.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vbrev_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) {
  return __riscv_vbrev_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vbrev_v_u64m1_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vbrev.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vbrev_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
  return __riscv_vbrev_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vbrev_v_u64m2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vbrev.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vbrev_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) {
  return __riscv_vbrev_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vbrev_v_u64m4_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vbrev.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vbrev_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) {
  return __riscv_vbrev_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vbrev_v_u64m8_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vbrev.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vbrev_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) {
  return __riscv_vbrev_tum(mask, maskedoff, vs2, vl);
}
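
// Editorial note: the _tumu variants below use the tail-undisturbed,
// mask-undisturbed policy ("i64 0" policy operand).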

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vbrev_v_u8mf8_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vbrev.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vbrev_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) {
  return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vbrev_v_u8mf4_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vbrev.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vbrev_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) {
  return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vbrev_v_u8mf2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vbrev.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vbrev_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) {
  return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vbrev_v_u8m1_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vbrev.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vbrev_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
  return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vbrev_v_u8m2_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vbrev.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vbrev_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) {
  return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vbrev_v_u8m4_tumu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vbrev.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vbrev_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) {
  return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vbrev_v_u8m8_tumu
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vbrev.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vbrev_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
  return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vbrev_v_u16mf4_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vbrev.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vbrev_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) {
  return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vbrev_v_u16mf2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vbrev.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vbrev_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
  return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vbrev_v_u16m1_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vbrev.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vbrev_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) {
  return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vbrev_v_u16m2_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vbrev.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vbrev_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
  return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vbrev_v_u16m4_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vbrev.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vbrev_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) {
  return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vbrev_v_u16m8_tumu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vbrev.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vbrev_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) {
  return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vbrev_v_u32mf2_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vbrev.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vbrev_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) {
  return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vbrev_v_u32m1_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vbrev.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vbrev_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
  return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vbrev_v_u32m2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vbrev.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vbrev_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) {
  return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vbrev_v_u32m4_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vbrev.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vbrev_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
  return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vbrev_v_u32m8_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vbrev.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vbrev_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) {
  return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vbrev_v_u64m1_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vbrev.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vbrev_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
  return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vbrev_v_u64m2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vbrev.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vbrev_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) {
  return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vbrev_v_u64m4_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vbrev.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vbrev_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) {
  return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vbrev_v_u64m8_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vbrev.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vbrev_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) {
  return __riscv_vbrev_tumu(mask, maskedoff, vs2, vl);
}
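
// Editorial note: the _mu variants below use the tail-agnostic,
// mask-undisturbed policy ("i64 1" policy operand).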

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vbrev_v_u8mf8_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vbrev.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vbrev_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) {
  return __riscv_vbrev_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vbrev_v_u8mf4_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vbrev.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vbrev_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) {
  return __riscv_vbrev_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vbrev_v_u8mf2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vbrev.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vbrev_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) {
  return __riscv_vbrev_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vbrev_v_u8m1_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vbrev.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vbrev_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
  return __riscv_vbrev_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vbrev_v_u8m2_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vbrev.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vbrev_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) {
  return __riscv_vbrev_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vbrev_v_u8m4_mu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vbrev.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vbrev_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) {
  return __riscv_vbrev_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vbrev_v_u8m8_mu
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vbrev.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vbrev_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
  return __riscv_vbrev_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vbrev_v_u16mf4_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vbrev.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vbrev_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) {
  return __riscv_vbrev_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vbrev_v_u16mf2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vbrev.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vbrev_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
  return __riscv_vbrev_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vbrev_v_u16m1_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vbrev.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vbrev_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) {
  return __riscv_vbrev_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vbrev_v_u16m2_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vbrev.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vbrev_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
  return __riscv_vbrev_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vbrev_v_u16m4_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vbrev.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vbrev_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) {
  return __riscv_vbrev_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vbrev_v_u16m8_mu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vbrev.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vbrev_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) {
  return __riscv_vbrev_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vbrev_v_u32mf2_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vbrev.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vbrev_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) {
  return __riscv_vbrev_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vbrev_v_u32m1_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vbrev.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vbrev_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
  return __riscv_vbrev_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vbrev_v_u32m2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vbrev.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vbrev_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) {
  return __riscv_vbrev_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vbrev_v_u32m4_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vbrev.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vbrev_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
  return __riscv_vbrev_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vbrev_v_u32m8_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vbrev.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vbrev_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) {
  return __riscv_vbrev_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vbrev_v_u64m1_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vbrev.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vbrev_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
  return __riscv_vbrev_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vbrev_v_u64m2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vbrev.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vbrev_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) {
  return __riscv_vbrev_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vbrev_v_u64m4_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vbrev.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vbrev_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) {
  return __riscv_vbrev_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vbrev_v_u64m8_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vbrev.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vbrev_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) {
  return __riscv_vbrev_mu(mask, maskedoff, vs2, vl);
}