// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvbb -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck %s

#include <riscv_vector.h>

// CHECK-LABEL: @test_vclz_v_u8mf8_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vclz.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vclz_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) {
  return __riscv_vclz_tu(maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u8mf4_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vclz.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vclz_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) {
  return __riscv_vclz_tu(maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u8mf2_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vclz.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vclz_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) {
  return __riscv_vclz_tu(maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u8m1_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vclz.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vclz_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
  return __riscv_vclz_tu(maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u8m2_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vclz.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vclz_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) {
  return __riscv_vclz_tu(maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u8m4_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vclz.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vclz_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) {
  return __riscv_vclz_tu(maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u8m8_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vclz.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vclz_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
  return __riscv_vclz_tu(maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u16mf4_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vclz.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vclz_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) {
  return __riscv_vclz_tu(maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u16mf2_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vclz.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vclz_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
  return __riscv_vclz_tu(maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u16m1_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vclz.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vclz_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) {
  return __riscv_vclz_tu(maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u16m2_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vclz.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vclz_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
  return __riscv_vclz_tu(maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u16m4_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vclz.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vclz_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) {
  return __riscv_vclz_tu(maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u16m8_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vclz.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vclz_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) {
  return __riscv_vclz_tu(maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u32mf2_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vclz.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vclz_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) {
  return __riscv_vclz_tu(maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u32m1_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vclz.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vclz_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
  return __riscv_vclz_tu(maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u32m2_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vclz.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vclz_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) {
  return __riscv_vclz_tu(maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u32m4_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vclz.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vclz_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
  return __riscv_vclz_tu(maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u32m8_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vclz.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vclz_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) {
  return __riscv_vclz_tu(maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u64m1_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclz.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vclz_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
  return __riscv_vclz_tu(maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u64m2_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclz.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vclz_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) {
  return __riscv_vclz_tu(maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u64m4_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclz.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vclz_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) {
  return __riscv_vclz_tu(maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u64m8_tu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclz.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vclz_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) {
  return __riscv_vclz_tu(maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u8mf8_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vclz.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vclz_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) {
  return __riscv_vclz_tum(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u8mf4_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vclz.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vclz_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) {
  return __riscv_vclz_tum(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u8mf2_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vclz.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vclz_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) {
  return __riscv_vclz_tum(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u8m1_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vclz.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vclz_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
  return __riscv_vclz_tum(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u8m2_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vclz.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vclz_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) {
  return __riscv_vclz_tum(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u8m4_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vclz.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vclz_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) {
  return __riscv_vclz_tum(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u8m8_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vclz.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vclz_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
  return __riscv_vclz_tum(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u16mf4_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vclz.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vclz_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) {
  return __riscv_vclz_tum(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u16mf2_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vclz.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vclz_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
  return __riscv_vclz_tum(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u16m1_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vclz.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vclz_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) {
  return __riscv_vclz_tum(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u16m2_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vclz.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vclz_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
  return __riscv_vclz_tum(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u16m4_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vclz.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vclz_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) {
  return __riscv_vclz_tum(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u16m8_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vclz.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vclz_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) {
  return __riscv_vclz_tum(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u32mf2_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vclz.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vclz_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) {
  return __riscv_vclz_tum(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u32m1_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vclz.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vclz_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
  return __riscv_vclz_tum(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u32m2_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vclz.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vclz_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) {
  return __riscv_vclz_tum(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u32m4_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vclz.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vclz_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
  return __riscv_vclz_tum(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u32m8_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vclz.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vclz_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) {
  return __riscv_vclz_tum(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u64m1_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclz.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vclz_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
  return __riscv_vclz_tum(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u64m2_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclz.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vclz_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) {
  return __riscv_vclz_tum(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u64m4_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclz.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vclz_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) {
  return __riscv_vclz_tum(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u64m8_tum(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclz.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
// CHECK-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vclz_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) {
  return __riscv_vclz_tum(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u8mf8_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vclz.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vclz_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) {
  return __riscv_vclz_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u8mf4_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vclz.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vclz_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) {
  return __riscv_vclz_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u8mf2_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vclz.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vclz_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) {
  return __riscv_vclz_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u8m1_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vclz.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vclz_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
  return __riscv_vclz_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u8m2_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vclz.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vclz_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) {
  return __riscv_vclz_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u8m4_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vclz.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vclz_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) {
  return __riscv_vclz_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u8m8_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vclz.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vclz_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
  return __riscv_vclz_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u16mf4_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vclz.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vclz_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) {
  return __riscv_vclz_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u16mf2_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vclz.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vclz_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
  return __riscv_vclz_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u16m1_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vclz.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vclz_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) {
  return __riscv_vclz_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u16m2_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vclz.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vclz_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
  return __riscv_vclz_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u16m4_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vclz.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vclz_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) {
  return __riscv_vclz_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u16m8_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vclz.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vclz_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) {
  return __riscv_vclz_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u32mf2_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vclz.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vclz_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) {
  return __riscv_vclz_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u32m1_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vclz.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vclz_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
  return __riscv_vclz_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u32m2_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vclz.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vclz_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) {
  return __riscv_vclz_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u32m4_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vclz.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vclz_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
  return __riscv_vclz_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u32m8_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vclz.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vclz_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) {
  return __riscv_vclz_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u64m1_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclz.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vclz_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
  return __riscv_vclz_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u64m2_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclz.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vclz_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) {
  return __riscv_vclz_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u64m4_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclz.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vclz_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) {
  return __riscv_vclz_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u64m8_tumu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclz.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vclz_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) {
  return __riscv_vclz_tumu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u8mf8_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vclz.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vclz_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t vs2, size_t vl) {
  return __riscv_vclz_mu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u8mf4_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vclz.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vclz_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t vs2, size_t vl) {
  return __riscv_vclz_mu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u8mf2_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vclz.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vclz_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t vs2, size_t vl) {
  return __riscv_vclz_mu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u8m1_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vclz.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vclz_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vs2, size_t vl) {
  return __riscv_vclz_mu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u8m2_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vclz.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vclz_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t vs2, size_t vl) {
  return __riscv_vclz_mu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u8m4_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vclz.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vclz_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t vs2, size_t vl) {
  return __riscv_vclz_mu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u8m8_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vclz.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vclz_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t vs2, size_t vl) {
  return __riscv_vclz_mu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u16mf4_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vclz.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vclz_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t vs2, size_t vl) {
  return __riscv_vclz_mu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u16mf2_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vclz.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vclz_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t vs2, size_t vl) {
  return __riscv_vclz_mu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u16m1_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vclz.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vclz_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vs2, size_t vl) {
  return __riscv_vclz_mu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u16m2_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vclz.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vclz_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t vs2, size_t vl) {
  return __riscv_vclz_mu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u16m4_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vclz.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vclz_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t vs2, size_t vl) {
  return __riscv_vclz_mu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u16m8_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vclz.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vclz_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t vs2, size_t vl) {
  return __riscv_vclz_mu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u32mf2_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vclz.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vclz_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t vs2, size_t vl) {
  return __riscv_vclz_mu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u32m1_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vclz.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vclz_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vs2, size_t vl) {
  return __riscv_vclz_mu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u32m2_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vclz.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vclz_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t vs2, size_t vl) {
  return __riscv_vclz_mu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u32m4_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vclz.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vclz_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t vs2, size_t vl) {
  return __riscv_vclz_mu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u32m8_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vclz.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vclz_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t vs2, size_t vl) {
  return __riscv_vclz_mu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u64m1_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclz.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vclz_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vs2, size_t vl) {
  return __riscv_vclz_mu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u64m2_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclz.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vclz_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t vs2, size_t vl) {
  return __riscv_vclz_mu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u64m4_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclz.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vclz_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t vs2, size_t vl) {
  return __riscv_vclz_mu(mask, maskedoff, vs2, vl);
}

// CHECK-LABEL: @test_vclz_v_u64m8_mu(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclz.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vclz_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t vs2, size_t vl) {
  return __riscv_vclz_mu(mask, maskedoff, vs2, vl);
}