// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \
// RUN:   -target-feature +experimental-zvbb \
// RUN:   -target-feature +experimental-zvbc \
// RUN:   -target-feature +experimental-zvkb \
// RUN:   -target-feature +experimental-zvkg \
// RUN:   -target-feature +experimental-zvkned \
// RUN:   -target-feature +experimental-zvknhb \
// RUN:   -target-feature +experimental-zvksed \
// RUN:   -target-feature +experimental-zvksh -disable-O0-optnone \
// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN:   FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>
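
// The tests below exercise the __riscv_vrev8 intrinsics (Zvbb vrev8.v, which
// reverses the byte order within each element) for every unsigned element
// width and LMUL, in both the unmasked and masked (_m) forms.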

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vrev8_v_u8mf8
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrev8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vrev8_v_u8mf8(vuint8mf8_t vs2, size_t vl) {
  return __riscv_vrev8_v_u8mf8(vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vrev8_v_u8mf4
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrev8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vrev8_v_u8mf4(vuint8mf4_t vs2, size_t vl) {
  return __riscv_vrev8_v_u8mf4(vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vrev8_v_u8mf2
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrev8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vrev8_v_u8mf2(vuint8mf2_t vs2, size_t vl) {
  return __riscv_vrev8_v_u8mf2(vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vrev8_v_u8m1
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrev8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vrev8_v_u8m1(vuint8m1_t vs2, size_t vl) {
  return __riscv_vrev8_v_u8m1(vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vrev8_v_u8m2
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrev8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vrev8_v_u8m2(vuint8m2_t vs2, size_t vl) {
  return __riscv_vrev8_v_u8m2(vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vrev8_v_u8m4
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrev8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vrev8_v_u8m4(vuint8m4_t vs2, size_t vl) {
  return __riscv_vrev8_v_u8m4(vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vrev8_v_u8m8
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrev8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vrev8_v_u8m8(vuint8m8_t vs2, size_t vl) {
  return __riscv_vrev8_v_u8m8(vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vrev8_v_u16mf4
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrev8.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vrev8_v_u16mf4(vuint16mf4_t vs2, size_t vl) {
  return __riscv_vrev8_v_u16mf4(vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vrev8_v_u16mf2
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrev8.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vrev8_v_u16mf2(vuint16mf2_t vs2, size_t vl) {
  return __riscv_vrev8_v_u16mf2(vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vrev8_v_u16m1
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrev8.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vrev8_v_u16m1(vuint16m1_t vs2, size_t vl) {
  return __riscv_vrev8_v_u16m1(vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vrev8_v_u16m2
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrev8.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vrev8_v_u16m2(vuint16m2_t vs2, size_t vl) {
  return __riscv_vrev8_v_u16m2(vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vrev8_v_u16m4
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrev8.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vrev8_v_u16m4(vuint16m4_t vs2, size_t vl) {
  return __riscv_vrev8_v_u16m4(vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vrev8_v_u16m8
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrev8.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vrev8_v_u16m8(vuint16m8_t vs2, size_t vl) {
  return __riscv_vrev8_v_u16m8(vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vrev8_v_u32mf2
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrev8.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vrev8_v_u32mf2(vuint32mf2_t vs2, size_t vl) {
  return __riscv_vrev8_v_u32mf2(vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vrev8_v_u32m1
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrev8.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vrev8_v_u32m1(vuint32m1_t vs2, size_t vl) {
  return __riscv_vrev8_v_u32m1(vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vrev8_v_u32m2
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrev8.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vrev8_v_u32m2(vuint32m2_t vs2, size_t vl) {
  return __riscv_vrev8_v_u32m2(vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vrev8_v_u32m4
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrev8.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vrev8_v_u32m4(vuint32m4_t vs2, size_t vl) {
  return __riscv_vrev8_v_u32m4(vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vrev8_v_u32m8
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrev8.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vrev8_v_u32m8(vuint32m8_t vs2, size_t vl) {
  return __riscv_vrev8_v_u32m8(vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vrev8_v_u64m1
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrev8.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vrev8_v_u64m1(vuint64m1_t vs2, size_t vl) {
  return __riscv_vrev8_v_u64m1(vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vrev8_v_u64m2
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrev8.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vrev8_v_u64m2(vuint64m2_t vs2, size_t vl) {
  return __riscv_vrev8_v_u64m2(vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vrev8_v_u64m4
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrev8.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vrev8_v_u64m4(vuint64m4_t vs2, size_t vl) {
  return __riscv_vrev8_v_u64m4(vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vrev8_v_u64m8
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrev8.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vrev8_v_u64m8(vuint64m8_t vs2, size_t vl) {
  return __riscv_vrev8_v_u64m8(vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vrev8_v_u8mf8_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrev8.mask.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vrev8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t vs2, size_t vl) {
  return __riscv_vrev8_v_u8mf8_m(mask, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vrev8_v_u8mf4_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrev8.mask.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vrev8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t vs2, size_t vl) {
  return __riscv_vrev8_v_u8mf4_m(mask, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vrev8_v_u8mf2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrev8.mask.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vrev8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t vs2, size_t vl) {
  return __riscv_vrev8_v_u8mf2_m(mask, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vrev8_v_u8m1_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrev8.mask.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vrev8_v_u8m1_m(vbool8_t mask, vuint8m1_t vs2, size_t vl) {
  return __riscv_vrev8_v_u8m1_m(mask, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vrev8_v_u8m2_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrev8.mask.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vrev8_v_u8m2_m(vbool4_t mask, vuint8m2_t vs2, size_t vl) {
  return __riscv_vrev8_v_u8m2_m(mask, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vrev8_v_u8m4_m
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrev8.mask.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vrev8_v_u8m4_m(vbool2_t mask, vuint8m4_t vs2, size_t vl) {
  return __riscv_vrev8_v_u8m4_m(mask, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vrev8_v_u8m8_m
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrev8.mask.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], <vscale x 64 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vrev8_v_u8m8_m(vbool1_t mask, vuint8m8_t vs2, size_t vl) {
  return __riscv_vrev8_v_u8m8_m(mask, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vrev8_v_u16mf4_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrev8.mask.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vrev8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t vs2, size_t vl) {
  return __riscv_vrev8_v_u16mf4_m(mask, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vrev8_v_u16mf2_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrev8.mask.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vrev8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t vs2, size_t vl) {
  return __riscv_vrev8_v_u16mf2_m(mask, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vrev8_v_u16m1_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrev8.mask.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vrev8_v_u16m1_m(vbool16_t mask, vuint16m1_t vs2, size_t vl) {
  return __riscv_vrev8_v_u16m1_m(mask, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vrev8_v_u16m2_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrev8.mask.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vrev8_v_u16m2_m(vbool8_t mask, vuint16m2_t vs2, size_t vl) {
  return __riscv_vrev8_v_u16m2_m(mask, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vrev8_v_u16m4_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrev8.mask.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vrev8_v_u16m4_m(vbool4_t mask, vuint16m4_t vs2, size_t vl) {
  return __riscv_vrev8_v_u16m4_m(mask, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vrev8_v_u16m8_m
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrev8.mask.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vrev8_v_u16m8_m(vbool2_t mask, vuint16m8_t vs2, size_t vl) {
  return __riscv_vrev8_v_u16m8_m(mask, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vrev8_v_u32mf2_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrev8.mask.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vrev8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t vs2, size_t vl) {
  return __riscv_vrev8_v_u32mf2_m(mask, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vrev8_v_u32m1_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrev8.mask.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vrev8_v_u32m1_m(vbool32_t mask, vuint32m1_t vs2, size_t vl) {
  return __riscv_vrev8_v_u32m1_m(mask, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vrev8_v_u32m2_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrev8.mask.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vrev8_v_u32m2_m(vbool16_t mask, vuint32m2_t vs2, size_t vl) {
  return __riscv_vrev8_v_u32m2_m(mask, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vrev8_v_u32m4_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrev8.mask.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vrev8_v_u32m4_m(vbool8_t mask, vuint32m4_t vs2, size_t vl) {
  return __riscv_vrev8_v_u32m4_m(mask, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vrev8_v_u32m8_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrev8.mask.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vrev8_v_u32m8_m(vbool4_t mask, vuint32m8_t vs2, size_t vl) {
  return __riscv_vrev8_v_u32m8_m(mask, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vrev8_v_u64m1_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrev8.mask.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vrev8_v_u64m1_m(vbool64_t mask, vuint64m1_t vs2, size_t vl) {
  return __riscv_vrev8_v_u64m1_m(mask, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vrev8_v_u64m2_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrev8.mask.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vrev8_v_u64m2_m(vbool32_t mask, vuint64m2_t vs2, size_t vl) {
  return __riscv_vrev8_v_u64m2_m(mask, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vrev8_v_u64m4_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrev8.mask.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vrev8_v_u64m4_m(vbool16_t mask, vuint64m4_t vs2, size_t vl) {
  return __riscv_vrev8_v_u64m4_m(mask, vs2, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vrev8_v_u64m8_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrev8.mask.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vrev8_v_u64m8_m(vbool8_t mask, vuint64m8_t vs2, size_t vl) {
  return __riscv_vrev8_v_u64m8_m(mask, vs2, vl);
}