// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \
// RUN:   -target-feature +zvbb \
// RUN:   -target-feature +zvbc \
// RUN:   -target-feature +zvkb \
// RUN:   -target-feature +zvkg \
// RUN:   -target-feature +zvkned \
// RUN:   -target-feature +zvknhb \
// RUN:   -target-feature +zvksed \
// RUN:   -target-feature +zvksh \
// RUN:   -disable-O0-optnone \
// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN:   FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>
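
// vrev8 reverses the bytes within each SEW-wide element of vs2 (a vector
// byte-swap, provided by the Zvbb/Zvkb extensions). The _tu, _tum, _tumu,
// and _mu overloads select the tail/mask policy; for the masked intrinsic
// this shows up as the trailing i64 policy operand checked below, where
// bit 0 set means tail agnostic and bit 1 set means mask agnostic (hence
// _tum passes 2, _tumu passes 0, and _mu passes 1).
//
// A minimal usage sketch, kept in a comment so it is not part of the
// checked test (the helper name bswap32_tu is hypothetical): byte-swap the
// first vl 32-bit elements of src while keeping the tail of dest
// undisturbed.
//
//   vuint32m1_t bswap32_tu(vuint32m1_t dest, vuint32m1_t src, size_t vl) {
//     return __riscv_vrev8_tu(dest, src, vl);
//   }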

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vrev8_v_u8mf8_tu
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrev8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vrev8_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) {
  return __riscv_vrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vrev8_v_u8mf4_tu
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrev8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vrev8_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) {
  return __riscv_vrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vrev8_v_u8mf2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrev8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vrev8_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) {
  return __riscv_vrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vrev8_v_u8m1_tu
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrev8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vrev8_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
  return __riscv_vrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vrev8_v_u8m2_tu
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrev8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vrev8_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) {
  return __riscv_vrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vrev8_v_u8m4_tu
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrev8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vrev8_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) {
  return __riscv_vrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vrev8_v_u8m8_tu
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrev8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vrev8_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
  return __riscv_vrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vrev8_v_u16mf4_tu
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrev8.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vrev8_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) {
  return __riscv_vrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vrev8_v_u16mf2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrev8.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vrev8_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
  return __riscv_vrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vrev8_v_u16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrev8.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vrev8_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) {
  return __riscv_vrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vrev8_v_u16m2_tu
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrev8.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vrev8_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
  return __riscv_vrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vrev8_v_u16m4_tu
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrev8.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vrev8_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) {
  return __riscv_vrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vrev8_v_u16m8_tu
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrev8.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vrev8_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) {
  return __riscv_vrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vrev8_v_u32mf2_tu
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrev8.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vrev8_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) {
  return __riscv_vrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vrev8_v_u32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrev8.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vrev8_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
  return __riscv_vrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vrev8_v_u32m2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrev8.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vrev8_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) {
  return __riscv_vrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vrev8_v_u32m4_tu
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrev8.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vrev8_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
  return __riscv_vrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vrev8_v_u32m8_tu
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrev8.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vrev8_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) {
  return __riscv_vrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vrev8_v_u64m1_tu
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrev8.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vrev8_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
  return __riscv_vrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vrev8_v_u64m2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrev8.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vrev8_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) {
  return __riscv_vrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vrev8_v_u64m4_tu
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrev8.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vrev8_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) {
  return __riscv_vrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vrev8_v_u64m8_tu
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrev8.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vrev8_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) {
  return __riscv_vrev8_tu(maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vrev8_v_u8mf8_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrev8.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vrev8_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) {
  return __riscv_vrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vrev8_v_u8mf4_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrev8.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vrev8_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) {
  return __riscv_vrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vrev8_v_u8mf2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrev8.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vrev8_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) {
  return __riscv_vrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vrev8_v_u8m1_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrev8.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vrev8_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
  return __riscv_vrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vrev8_v_u8m2_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrev8.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vrev8_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) {
  return __riscv_vrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vrev8_v_u8m4_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrev8.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vrev8_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) {
  return __riscv_vrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vrev8_v_u8m8_tum
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrev8.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vrev8_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
  return __riscv_vrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vrev8_v_u16mf4_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrev8.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vrev8_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) {
  return __riscv_vrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vrev8_v_u16mf2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrev8.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vrev8_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
  return __riscv_vrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vrev8_v_u16m1_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrev8.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vrev8_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) {
  return __riscv_vrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vrev8_v_u16m2_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrev8.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vrev8_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
  return __riscv_vrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vrev8_v_u16m4_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrev8.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vrev8_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) {
  return __riscv_vrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vrev8_v_u16m8_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrev8.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vrev8_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) {
  return __riscv_vrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vrev8_v_u32mf2_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrev8.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vrev8_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) {
  return __riscv_vrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vrev8_v_u32m1_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrev8.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vrev8_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
  return __riscv_vrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vrev8_v_u32m2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrev8.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vrev8_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) {
  return __riscv_vrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vrev8_v_u32m4_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrev8.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vrev8_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
  return __riscv_vrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vrev8_v_u32m8_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrev8.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vrev8_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) {
  return __riscv_vrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vrev8_v_u64m1_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrev8.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vrev8_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
  return __riscv_vrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vrev8_v_u64m2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrev8.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vrev8_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) {
  return __riscv_vrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vrev8_v_u64m4_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrev8.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vrev8_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) {
  return __riscv_vrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vrev8_v_u64m8_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrev8.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vrev8_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) {
  return __riscv_vrev8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vrev8_v_u8mf8_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrev8.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vrev8_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) {
  return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vrev8_v_u8mf4_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrev8.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vrev8_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) {
  return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vrev8_v_u8mf2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrev8.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vrev8_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) {
  return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vrev8_v_u8m1_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrev8.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vrev8_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
  return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vrev8_v_u8m2_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrev8.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vrev8_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) {
  return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vrev8_v_u8m4_tumu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrev8.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vrev8_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) {
  return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vrev8_v_u8m8_tumu
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrev8.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vrev8_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
  return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vrev8_v_u16mf4_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrev8.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vrev8_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) {
  return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vrev8_v_u16mf2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrev8.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vrev8_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
  return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vrev8_v_u16m1_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrev8.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vrev8_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) {
  return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vrev8_v_u16m2_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrev8.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vrev8_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
  return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vrev8_v_u16m4_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrev8.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vrev8_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) {
  return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vrev8_v_u16m8_tumu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrev8.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vrev8_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) {
  return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vrev8_v_u32mf2_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrev8.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vrev8_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) {
  return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vrev8_v_u32m1_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrev8.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vrev8_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
  return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vrev8_v_u32m2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrev8.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vrev8_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) {
  return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vrev8_v_u32m4_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrev8.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vrev8_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
  return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vrev8_v_u32m8_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrev8.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vrev8_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) {
  return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vrev8_v_u64m1_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrev8.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vrev8_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
  return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vrev8_v_u64m2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrev8.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vrev8_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) {
  return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vrev8_v_u64m4_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrev8.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vrev8_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) {
  return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vrev8_v_u64m8_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrev8.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vrev8_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) {
  return __riscv_vrev8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vrev8_v_u8mf8_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrev8.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vrev8_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) {
  return __riscv_vrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vrev8_v_u8mf4_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrev8.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vrev8_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) {
  return __riscv_vrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vrev8_v_u8mf2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrev8.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vrev8_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) {
  return __riscv_vrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vrev8_v_u8m1_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrev8.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vrev8_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
  return __riscv_vrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vrev8_v_u8m2_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrev8.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vrev8_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) {
  return __riscv_vrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vrev8_v_u8m4_mu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrev8.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vrev8_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) {
  return __riscv_vrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vrev8_v_u8m8_mu
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrev8.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vrev8_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
  return __riscv_vrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vrev8_v_u16mf4_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrev8.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vrev8_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) {
  return __riscv_vrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vrev8_v_u16mf2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrev8.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vrev8_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
  return __riscv_vrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vrev8_v_u16m1_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrev8.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vrev8_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) {
  return __riscv_vrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vrev8_v_u16m2_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrev8.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vrev8_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
  return __riscv_vrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vrev8_v_u16m4_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrev8.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vrev8_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) {
  return __riscv_vrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vrev8_v_u16m8_mu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrev8.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vrev8_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) {
  return __riscv_vrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vrev8_v_u32mf2_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrev8.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vrev8_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) {
  return __riscv_vrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vrev8_v_u32m1_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrev8.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vrev8_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
  return __riscv_vrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vrev8_v_u32m2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrev8.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vrev8_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) {
  return __riscv_vrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vrev8_v_u32m4_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrev8.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vrev8_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
  return __riscv_vrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vrev8_v_u32m8_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrev8.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vrev8_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) {
  return __riscv_vrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vrev8_v_u64m1_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrev8.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vrev8_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
  return __riscv_vrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vrev8_v_u64m2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrev8.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vrev8_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) {
  return __riscv_vrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vrev8_v_u64m4_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrev8.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vrev8_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) {
  return __riscv_vrev8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vrev8_v_u64m8_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrev8.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vrev8_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) {
  return __riscv_vrev8_mu(mask, maskedoff, vs2, vl);
}
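
// A typical way to run this test locally, sketched under the assumption of
// an LLVM build tree with clang enabled (the lit binary location varies by
// build configuration):
//   <build>/bin/llvm-lit -v \
//     clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vrev8.c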