// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \
// RUN: -target-feature +zvbb \
// RUN: -target-feature +zvbc \
// RUN: -target-feature +zvkb \
// RUN: -target-feature +zvkg \
// RUN: -target-feature +zvkned \
// RUN: -target-feature +zvknhb \
// RUN: -target-feature +zvksed \
// RUN: -target-feature +zvksh \
// RUN: -disable-O0-optnone \
// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN: FileCheck %s

#include <riscv_vector.h>
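
// Note on the policy suffixes exercised below: _tu is the unmasked
// tail-undisturbed form; the masked forms carry a trailing policy immediate
// in the generated intrinsic call: _tum uses 2 (tail undisturbed, mask
// agnostic), _tumu uses 0 (tail undisturbed, mask undisturbed), and _mu
// uses 1 (tail agnostic, mask undisturbed).
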
// CHECK-LABEL: @test_vclz_v_u8mf8_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vclz.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vclz_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) {
  return __riscv_vclz_v_u8mf8_tu(maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u8mf4_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vclz.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vclz_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) {
  return __riscv_vclz_v_u8mf4_tu(maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u8mf2_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vclz.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vclz_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) {
  return __riscv_vclz_v_u8mf2_tu(maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u8m1_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vclz.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vclz_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
  return __riscv_vclz_v_u8m1_tu(maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u8m2_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vclz.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vclz_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) {
  return __riscv_vclz_v_u8m2_tu(maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u8m4_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vclz.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vclz_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) {
  return __riscv_vclz_v_u8m4_tu(maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u8m8_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vclz.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vclz_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
  return __riscv_vclz_v_u8m8_tu(maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u16mf4_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vclz.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vclz_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) {
  return __riscv_vclz_v_u16mf4_tu(maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u16mf2_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vclz.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vclz_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
  return __riscv_vclz_v_u16mf2_tu(maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u16m1_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vclz.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vclz_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) {
  return __riscv_vclz_v_u16m1_tu(maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u16m2_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vclz.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vclz_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
  return __riscv_vclz_v_u16m2_tu(maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u16m4_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vclz.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vclz_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) {
  return __riscv_vclz_v_u16m4_tu(maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u16m8_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vclz.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vclz_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) {
  return __riscv_vclz_v_u16m8_tu(maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u32mf2_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vclz.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vclz_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) {
  return __riscv_vclz_v_u32mf2_tu(maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u32m1_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vclz.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vclz_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
  return __riscv_vclz_v_u32m1_tu(maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u32m2_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vclz.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vclz_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) {
  return __riscv_vclz_v_u32m2_tu(maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u32m4_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vclz.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vclz_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
  return __riscv_vclz_v_u32m4_tu(maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u32m8_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vclz.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vclz_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) {
  return __riscv_vclz_v_u32m8_tu(maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u64m1_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclz.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vclz_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
  return __riscv_vclz_v_u64m1_tu(maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u64m2_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclz.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vclz_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) {
  return __riscv_vclz_v_u64m2_tu(maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u64m4_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclz.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vclz_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) {
  return __riscv_vclz_v_u64m4_tu(maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u64m8_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclz.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vclz_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) {
  return __riscv_vclz_v_u64m8_tu(maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u8mf8_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vclz.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vclz_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) {
  return __riscv_vclz_v_u8mf8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u8mf4_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vclz.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vclz_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) {
  return __riscv_vclz_v_u8mf4_tum(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u8mf2_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vclz.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vclz_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) {
  return __riscv_vclz_v_u8mf2_tum(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u8m1_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vclz.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vclz_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
  return __riscv_vclz_v_u8m1_tum(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u8m2_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vclz.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vclz_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) {
  return __riscv_vclz_v_u8m2_tum(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u8m4_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vclz.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vclz_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) {
  return __riscv_vclz_v_u8m4_tum(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u8m8_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vclz.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vclz_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
  return __riscv_vclz_v_u8m8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u16mf4_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vclz.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vclz_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) {
  return __riscv_vclz_v_u16mf4_tum(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u16mf2_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vclz.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vclz_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
  return __riscv_vclz_v_u16mf2_tum(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u16m1_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vclz.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vclz_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) {
  return __riscv_vclz_v_u16m1_tum(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u16m2_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vclz.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vclz_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
  return __riscv_vclz_v_u16m2_tum(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u16m4_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vclz.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vclz_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) {
  return __riscv_vclz_v_u16m4_tum(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u16m8_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vclz.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vclz_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) {
  return __riscv_vclz_v_u16m8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u32mf2_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vclz.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vclz_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) {
  return __riscv_vclz_v_u32mf2_tum(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u32m1_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vclz.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vclz_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
  return __riscv_vclz_v_u32m1_tum(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u32m2_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vclz.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vclz_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) {
  return __riscv_vclz_v_u32m2_tum(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u32m4_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vclz.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vclz_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
  return __riscv_vclz_v_u32m4_tum(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u32m8_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vclz.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vclz_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) {
  return __riscv_vclz_v_u32m8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u64m1_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclz.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vclz_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
  return __riscv_vclz_v_u64m1_tum(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u64m2_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclz.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vclz_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) {
  return __riscv_vclz_v_u64m2_tum(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u64m4_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclz.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vclz_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) {
  return __riscv_vclz_v_u64m4_tum(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u64m8_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclz.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vclz_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) {
  return __riscv_vclz_v_u64m8_tum(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u8mf8_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vclz.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vclz_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) {
  return __riscv_vclz_v_u8mf8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u8mf4_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vclz.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vclz_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) {
  return __riscv_vclz_v_u8mf4_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u8mf2_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vclz.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vclz_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) {
  return __riscv_vclz_v_u8mf2_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u8m1_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vclz.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vclz_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
  return __riscv_vclz_v_u8m1_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u8m2_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vclz.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vclz_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) {
  return __riscv_vclz_v_u8m2_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u8m4_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vclz.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vclz_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) {
  return __riscv_vclz_v_u8m4_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u8m8_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vclz.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vclz_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
  return __riscv_vclz_v_u8m8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u16mf4_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vclz.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vclz_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) {
  return __riscv_vclz_v_u16mf4_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u16mf2_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vclz.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vclz_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
  return __riscv_vclz_v_u16mf2_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u16m1_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vclz.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vclz_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) {
  return __riscv_vclz_v_u16m1_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u16m2_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vclz.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vclz_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
  return __riscv_vclz_v_u16m2_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u16m4_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vclz.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vclz_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) {
  return __riscv_vclz_v_u16m4_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u16m8_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vclz.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vclz_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) {
  return __riscv_vclz_v_u16m8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u32mf2_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vclz.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vclz_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) {
  return __riscv_vclz_v_u32mf2_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u32m1_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vclz.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vclz_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
  return __riscv_vclz_v_u32m1_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u32m2_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vclz.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vclz_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) {
  return __riscv_vclz_v_u32m2_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u32m4_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vclz.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vclz_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
  return __riscv_vclz_v_u32m4_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u32m8_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vclz.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vclz_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) {
  return __riscv_vclz_v_u32m8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u64m1_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclz.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vclz_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
  return __riscv_vclz_v_u64m1_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u64m2_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclz.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vclz_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) {
  return __riscv_vclz_v_u64m2_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u64m4_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclz.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vclz_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) {
  return __riscv_vclz_v_u64m4_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u64m8_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclz.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vclz_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) {
  return __riscv_vclz_v_u64m8_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u8mf8_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vclz.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vclz_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) {
  return __riscv_vclz_v_u8mf8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u8mf4_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vclz.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vclz_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) {
  return __riscv_vclz_v_u8mf4_mu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u8mf2_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vclz.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vclz_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) {
  return __riscv_vclz_v_u8mf2_mu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u8m1_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vclz.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vclz_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
  return __riscv_vclz_v_u8m1_mu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u8m2_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vclz.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vclz_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) {
  return __riscv_vclz_v_u8m2_mu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u8m4_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vclz.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vclz_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) {
  return __riscv_vclz_v_u8m4_mu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u8m8_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vclz.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vclz_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
  return __riscv_vclz_v_u8m8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u16mf4_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vclz.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vclz_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) {
  return __riscv_vclz_v_u16mf4_mu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u16mf2_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vclz.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vclz_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
  return __riscv_vclz_v_u16mf2_mu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u16m1_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vclz.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vclz_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) {
  return __riscv_vclz_v_u16m1_mu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u16m2_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vclz.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vclz_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
  return __riscv_vclz_v_u16m2_mu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u16m4_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vclz.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vclz_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) {
  return __riscv_vclz_v_u16m4_mu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u16m8_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vclz.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vclz_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) {
  return __riscv_vclz_v_u16m8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u32mf2_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vclz.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vclz_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) {
  return __riscv_vclz_v_u32mf2_mu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u32m1_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vclz.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vclz_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
  return __riscv_vclz_v_u32m1_mu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u32m2_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vclz.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vclz_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) {
  return __riscv_vclz_v_u32m2_mu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u32m4_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vclz.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vclz_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
  return __riscv_vclz_v_u32m4_mu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u32m8_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vclz.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vclz_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) {
  return __riscv_vclz_v_u32m8_mu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u64m1_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclz.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vclz_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
  return __riscv_vclz_v_u64m1_mu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u64m2_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclz.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vclz_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) {
  return __riscv_vclz_v_u64m2_mu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u64m4_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclz.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vclz_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) {
  return __riscv_vclz_v_u64m4_mu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u64m8_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclz.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vclz_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) {
  return __riscv_vclz_v_u64m8_mu(mask, maskedoff, vs2, vl);
}