// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \
// RUN:   -target-feature +experimental-zvbb \
// RUN:   -target-feature +experimental-zvbc \
// RUN:   -target-feature +experimental-zvkb \
// RUN:   -target-feature +experimental-zvkg \
// RUN:   -target-feature +experimental-zvkned \
// RUN:   -target-feature +experimental-zvknhb \
// RUN:   -target-feature +experimental-zvksed \
// RUN:   -target-feature +experimental-zvksh -disable-O0-optnone \
// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN:   FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>
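
// Overloaded policy variants of the vrev8 intrinsic, which reverses the byte
// order within each element (provided by the Zvbb/Zvkb vector bit-manipulation
// extensions). The suffix encodes the tail/mask policy: _tu (tail
// undisturbed), _tum (masked, tail undisturbed), _tumu (masked, tail and mask
// undisturbed), and _mu (masked, mask undisturbed). One test is emitted per
// unsigned element width and LMUL. The unmasked _tu variants below lower to
// the plain llvm.riscv.vrev8 intrinsic with maskedoff as the passthru operand.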
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vrev8_v_u8mf8_tu
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrev8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vrev8_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) {
  return __riscv_vrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vrev8_v_u8mf4_tu
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrev8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vrev8_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) {
  return __riscv_vrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vrev8_v_u8mf2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrev8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vrev8_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) {
  return __riscv_vrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vrev8_v_u8m1_tu
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrev8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vrev8_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
  return __riscv_vrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vrev8_v_u8m2_tu
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrev8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vrev8_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) {
  return __riscv_vrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vrev8_v_u8m4_tu
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrev8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vrev8_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) {
  return __riscv_vrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vrev8_v_u8m8_tu
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrev8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vrev8_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
  return __riscv_vrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vrev8_v_u16mf4_tu
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrev8.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vrev8_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) {
  return __riscv_vrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vrev8_v_u16mf2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrev8.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vrev8_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
  return __riscv_vrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vrev8_v_u16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrev8.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vrev8_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) {
  return __riscv_vrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vrev8_v_u16m2_tu
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrev8.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vrev8_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
  return __riscv_vrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vrev8_v_u16m4_tu
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrev8.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vrev8_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) {
  return __riscv_vrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vrev8_v_u16m8_tu
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrev8.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vrev8_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) {
  return __riscv_vrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vrev8_v_u32mf2_tu
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrev8.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vrev8_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) {
  return __riscv_vrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vrev8_v_u32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrev8.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vrev8_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
  return __riscv_vrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vrev8_v_u32m2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrev8.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vrev8_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) {
  return __riscv_vrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vrev8_v_u32m4_tu
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrev8.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vrev8_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
  return __riscv_vrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vrev8_v_u32m8_tu
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrev8.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vrev8_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) {
  return __riscv_vrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vrev8_v_u64m1_tu
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrev8.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vrev8_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
  return __riscv_vrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vrev8_v_u64m2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrev8.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vrev8_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) {
  return __riscv_vrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vrev8_v_u64m4_tu
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrev8.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vrev8_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) {
  return __riscv_vrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vrev8_v_u64m8_tu
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrev8.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vrev8_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) {
  return __riscv_vrev8_tu(maskedoff, vs2, vl);
}
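
// Masked _tum variants: these lower to the llvm.riscv.vrev8.mask intrinsic
// with a trailing policy operand of 2, i.e. tail undisturbed, mask agnostic
// (bit 0 = tail agnostic, bit 1 = mask agnostic in the IR policy encoding).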
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vrev8_v_u8mf8_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrev8.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vrev8_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) {
  return __riscv_vrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vrev8_v_u8mf4_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrev8.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vrev8_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) {
  return __riscv_vrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vrev8_v_u8mf2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrev8.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vrev8_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) {
  return __riscv_vrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vrev8_v_u8m1_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrev8.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vrev8_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
  return __riscv_vrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vrev8_v_u8m2_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrev8.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vrev8_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) {
  return __riscv_vrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vrev8_v_u8m4_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrev8.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vrev8_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) {
  return __riscv_vrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vrev8_v_u8m8_tum
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrev8.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vrev8_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
  return __riscv_vrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vrev8_v_u16mf4_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrev8.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vrev8_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) {
  return __riscv_vrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vrev8_v_u16mf2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrev8.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vrev8_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
  return __riscv_vrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vrev8_v_u16m1_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrev8.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vrev8_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) {
  return __riscv_vrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vrev8_v_u16m2_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrev8.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vrev8_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
  return __riscv_vrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vrev8_v_u16m4_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrev8.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vrev8_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) {
  return __riscv_vrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vrev8_v_u16m8_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrev8.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vrev8_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) {
  return __riscv_vrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vrev8_v_u32mf2_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrev8.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vrev8_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) {
  return __riscv_vrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vrev8_v_u32m1_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrev8.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vrev8_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
  return __riscv_vrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vrev8_v_u32m2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrev8.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vrev8_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) {
  return __riscv_vrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vrev8_v_u32m4_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrev8.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vrev8_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
  return __riscv_vrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vrev8_v_u32m8_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrev8.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vrev8_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) {
  return __riscv_vrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vrev8_v_u64m1_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrev8.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vrev8_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
  return __riscv_vrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vrev8_v_u64m2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrev8.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vrev8_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) {
  return __riscv_vrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vrev8_v_u64m4_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrev8.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vrev8_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) {
  return __riscv_vrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vrev8_v_u64m8_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrev8.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vrev8_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) {
  return __riscv_vrev8_tum(mask, maskedoff, vs2, vl);
}
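
// Masked _tumu variants: same .mask intrinsic, but with a policy operand of 0
// (tail undisturbed, mask undisturbed), so inactive and tail elements both
// keep the maskedoff values.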
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vrev8_v_u8mf8_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrev8.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vrev8_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) {
  return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vrev8_v_u8mf4_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrev8.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vrev8_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) {
  return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vrev8_v_u8mf2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrev8.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vrev8_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) {
  return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vrev8_v_u8m1_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrev8.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vrev8_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
  return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vrev8_v_u8m2_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrev8.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vrev8_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) {
  return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vrev8_v_u8m4_tumu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrev8.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vrev8_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) {
  return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vrev8_v_u8m8_tumu
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrev8.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vrev8_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
  return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vrev8_v_u16mf4_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrev8.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vrev8_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) {
  return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vrev8_v_u16mf2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrev8.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vrev8_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
  return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vrev8_v_u16m1_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrev8.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vrev8_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) {
  return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vrev8_v_u16m2_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrev8.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vrev8_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
  return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vrev8_v_u16m4_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrev8.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vrev8_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) {
  return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vrev8_v_u16m8_tumu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrev8.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vrev8_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) {
  return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vrev8_v_u32mf2_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrev8.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vrev8_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) {
  return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vrev8_v_u32m1_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrev8.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vrev8_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
  return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vrev8_v_u32m2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrev8.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vrev8_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) {
  return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vrev8_v_u32m4_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrev8.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vrev8_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
  return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vrev8_v_u32m8_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrev8.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vrev8_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) {
  return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vrev8_v_u64m1_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrev8.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vrev8_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
  return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vrev8_v_u64m2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrev8.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vrev8_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) {
  return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vrev8_v_u64m4_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrev8.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vrev8_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) {
  return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vrev8_v_u64m8_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrev8.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vrev8_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) {
  return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl);
}
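
// Masked _mu variants: policy operand 1 (tail agnostic, mask undisturbed),
// so only inactive elements keep the maskedoff values.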
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vrev8_v_u8mf8_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrev8.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vrev8_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) {
  return __riscv_vrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vrev8_v_u8mf4_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrev8.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vrev8_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) {
  return __riscv_vrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vrev8_v_u8mf2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrev8.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vrev8_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) {
  return __riscv_vrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vrev8_v_u8m1_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrev8.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vrev8_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
  return __riscv_vrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vrev8_v_u8m2_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrev8.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vrev8_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) {
  return __riscv_vrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vrev8_v_u8m4_mu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrev8.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vrev8_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) {
  return __riscv_vrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vrev8_v_u8m8_mu
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrev8.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vrev8_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
  return __riscv_vrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vrev8_v_u16mf4_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrev8.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vrev8_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) {
  return __riscv_vrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vrev8_v_u16mf2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrev8.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vrev8_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
  return __riscv_vrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vrev8_v_u16m1_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrev8.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vrev8_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) {
  return __riscv_vrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vrev8_v_u16m2_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrev8.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vrev8_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
  return __riscv_vrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vrev8_v_u16m4_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrev8.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vrev8_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) {
  return __riscv_vrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vrev8_v_u16m8_mu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrev8.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vrev8_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) {
  return __riscv_vrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vrev8_v_u32mf2_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrev8.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vrev8_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) {
  return __riscv_vrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vrev8_v_u32m1_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrev8.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vrev8_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
  return __riscv_vrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vrev8_v_u32m2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrev8.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vrev8_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) {
  return __riscv_vrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vrev8_v_u32m4_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrev8.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vrev8_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
  return __riscv_vrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vrev8_v_u32m8_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrev8.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vrev8_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) {
  return __riscv_vrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vrev8_v_u64m1_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrev8.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vrev8_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
  return __riscv_vrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vrev8_v_u64m2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrev8.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vrev8_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) {
  return __riscv_vrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vrev8_v_u64m4_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrev8.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vrev8_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) {
  return __riscv_vrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vrev8_v_u64m8_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrev8.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vrev8_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) {
  return __riscv_vrev8_mu(mask, maskedoff, vs2, vl);
}