; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s

declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv1i8(
  <vscale x 8 x i8>,
  <vscale x 1 x i8>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv1i8(
    <vscale x 8 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 8 x i8> %2,
    iXLen %3)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8(
  <vscale x 8 x i8>,
  <vscale x 1 x i8>,
  <vscale x 8 x i8>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8(
    <vscale x 8 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 8 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv2i8(
  <vscale x 8 x i8>,
  <vscale x 2 x i8>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv2i8(
    <vscale x 8 x i8> %0,
    <vscale x 2 x i8> %1,
    <vscale x 8 x i8> %2,
    iXLen %3)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8(
  <vscale x 8 x i8>,
  <vscale x 2 x i8>,
  <vscale x 8 x i8>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8(
    <vscale x 8 x i8> %0,
    <vscale x 2 x i8> %1,
    <vscale x 8 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv4i8(
  <vscale x 8 x i8>,
  <vscale x 4 x i8>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv4i8(
    <vscale x 8 x i8> %0,
    <vscale x 4 x i8> %1,
    <vscale x 8 x i8> %2,
    iXLen %3)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8(
  <vscale x 8 x i8>,
  <vscale x 4 x i8>,
  <vscale x 8 x i8>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8(
    <vscale x 8 x i8> %0,
    <vscale x 4 x i8> %1,
    <vscale x 8 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i8> %2,
    iXLen %3)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv16i8(
  <vscale x 8 x i8>,
  <vscale x 16 x i8>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vredmaxu.vs v8, v10, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv16i8(
    <vscale x 8 x i8> %0,
    <vscale x 16 x i8> %1,
    <vscale x 8 x i8> %2,
    iXLen %3)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8(
  <vscale x 8 x i8>,
  <vscale x 16 x i8>,
  <vscale x 8 x i8>,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vredmaxu.vs v8, v10, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8(
    <vscale x 8 x i8> %0,
    <vscale x 16 x i8> %1,
    <vscale x 8 x i8> %2,
    <vscale x 16 x i1> %3,
    iXLen %4)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv32i8(
  <vscale x 8 x i8>,
  <vscale x 32 x i8>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vredmaxu.vs v8, v12, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv32i8(
    <vscale x 8 x i8> %0,
    <vscale x 32 x i8> %1,
    <vscale x 8 x i8> %2,
    iXLen %3)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8(
  <vscale x 8 x i8>,
  <vscale x 32 x i8>,
  <vscale x 8 x i8>,
  <vscale x 32 x i1>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vredmaxu.vs v8, v12, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8(
    <vscale x 8 x i8> %0,
    <vscale x 32 x i8> %1,
    <vscale x 8 x i8> %2,
    <vscale x 32 x i1> %3,
    iXLen %4)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv1i16(
  <vscale x 4 x i16>,
  <vscale x 1 x i16>,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv1i16(
    <vscale x 4 x i16> %0,
    <vscale x 1 x i16> %1,
    <vscale x 4 x i16> %2,
    iXLen %3)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16(
  <vscale x 4 x i16>,
  <vscale x 1 x i16>,
  <vscale x 4 x i16>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16(
    <vscale x 4 x i16> %0,
    <vscale x 1 x i16> %1,
    <vscale x 4 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv2i16(
  <vscale x 4 x i16>,
  <vscale x 2 x i16>,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv2i16(
    <vscale x 4 x i16> %0,
    <vscale x 2 x i16> %1,
    <vscale x 4 x i16> %2,
    iXLen %3)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16(
  <vscale x 4 x i16>,
  <vscale x 2 x i16>,
  <vscale x 4 x i16>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16(
    <vscale x 4 x i16> %0,
    <vscale x 2 x i16> %1,
    <vscale x 4 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i16> %2,
    iXLen %3)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv8i16(
  <vscale x 4 x i16>,
  <vscale x 8 x i16>,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vredmaxu.vs v8, v10, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv8i16(
    <vscale x 4 x i16> %0,
    <vscale x 8 x i16> %1,
    <vscale x 4 x i16> %2,
    iXLen %3)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16(
  <vscale x 4 x i16>,
  <vscale x 8 x i16>,
  <vscale x 4 x i16>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vredmaxu.vs v8, v10, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16(
    <vscale x 4 x i16> %0,
    <vscale x 8 x i16> %1,
    <vscale x 4 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv16i16(
  <vscale x 4 x i16>,
  <vscale x 16 x i16>,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vredmaxu.vs v8, v12, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv16i16(
    <vscale x 4 x i16> %0,
    <vscale x 16 x i16> %1,
    <vscale x 4 x i16> %2,
    iXLen %3)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16(
  <vscale x 4 x i16>,
  <vscale x 16 x i16>,
  <vscale x 4 x i16>,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vredmaxu.vs v8, v12, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16(
    <vscale x 4 x i16> %0,
    <vscale x 16 x i16> %1,
    <vscale x 4 x i16> %2,
    <vscale x 16 x i1> %3,
    iXLen %4)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv32i16(
  <vscale x 4 x i16>,
  <vscale x 32 x i16>,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vredmaxu.vs v8, v16, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv32i16(
    <vscale x 4 x i16> %0,
    <vscale x 32 x i16> %1,
    <vscale x 4 x i16> %2,
    iXLen %3)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16(
  <vscale x 4 x i16>,
  <vscale x 32 x i16>,
  <vscale x 4 x i16>,
  <vscale x 32 x i1>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vredmaxu.vs v8, v16, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16(
    <vscale x 4 x i16> %0,
    <vscale x 32 x i16> %1,
    <vscale x 4 x i16> %2,
    <vscale x 32 x i1> %3,
    iXLen %4)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv1i32(
  <vscale x 2 x i32>,
  <vscale x 1 x i32>,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv1i32(
    <vscale x 2 x i32> %0,
    <vscale x 1 x i32> %1,
    <vscale x 2 x i32> %2,
    iXLen %3)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32(
  <vscale x 2 x i32>,
  <vscale x 1 x i32>,
  <vscale x 2 x i32>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32(
    <vscale x 2 x i32> %0,
    <vscale x 1 x i32> %1,
    <vscale x 2 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i32> %2,
    iXLen %3)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv4i32(
  <vscale x 2 x i32>,
  <vscale x 4 x i32>,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vredmaxu.vs v8, v10, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv4i32(
    <vscale x 2 x i32> %0,
    <vscale x 4 x i32> %1,
    <vscale x 2 x i32> %2,
    iXLen %3)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32(
  <vscale x 2 x i32>,
  <vscale x 4 x i32>,
  <vscale x 2 x i32>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vredmaxu.vs v8, v10, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32(
    <vscale x 2 x i32> %0,
    <vscale x 4 x i32> %1,
    <vscale x 2 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv8i32(
  <vscale x 2 x i32>,
  <vscale x 8 x i32>,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vredmaxu.vs v8, v12, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv8i32(
    <vscale x 2 x i32> %0,
    <vscale x 8 x i32> %1,
    <vscale x 2 x i32> %2,
    iXLen %3)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32(
  <vscale x 2 x i32>,
  <vscale x 8 x i32>,
  <vscale x 2 x i32>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vredmaxu.vs v8, v12, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32(
    <vscale x 2 x i32> %0,
    <vscale x 8 x i32> %1,
    <vscale x 2 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv16i32(
  <vscale x 2 x i32>,
  <vscale x 16 x i32>,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vredmaxu.vs v8, v16, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv16i32(
    <vscale x 2 x i32> %0,
    <vscale x 16 x i32> %1,
    <vscale x 2 x i32> %2,
    iXLen %3)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32(
  <vscale x 2 x i32>,
  <vscale x 16 x i32>,
  <vscale x 2 x i32>,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vredmaxu.vs v8, v16, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32(
    <vscale x 2 x i32> %0,
    <vscale x 16 x i32> %1,
    <vscale x 2 x i32> %2,
    <vscale x 16 x i1> %3,
    iXLen %4)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vredmaxu_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vredmaxu.vs v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i64> %2,
    iXLen %3)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vredmaxu.vs v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv2i64(
  <vscale x 1 x i64>,
  <vscale x 2 x i64>,
  <vscale x 1 x i64>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vredmaxu_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vredmaxu.vs v8, v10, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv2i64(
    <vscale x 1 x i64> %0,
    <vscale x 2 x i64> %1,
    <vscale x 1 x i64> %2,
    iXLen %3)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64(
  <vscale x 1 x i64>,
  <vscale x 2 x i64>,
  <vscale x 1 x i64>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vredmaxu.vs v8, v10, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64(
    <vscale x 1 x i64> %0,
    <vscale x 2 x i64> %1,
    <vscale x 1 x i64> %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv4i64(
  <vscale x 1 x i64>,
  <vscale x 4 x i64>,
  <vscale x 1 x i64>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vredmaxu_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vredmaxu.vs v8, v12, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv4i64(
    <vscale x 1 x i64> %0,
    <vscale x 4 x i64> %1,
    <vscale x 1 x i64> %2,
    iXLen %3)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64(
  <vscale x 1 x i64>,
  <vscale x 4 x i64>,
  <vscale x 1 x i64>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vredmaxu.vs v8, v12, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64(
    <vscale x 1 x i64> %0,
    <vscale x 4 x i64> %1,
    <vscale x 1 x i64> %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv8i64(
  <vscale x 1 x i64>,
  <vscale x 8 x i64>,
  <vscale x 1 x i64>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vredmaxu_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vredmaxu.vs v8, v16, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv8i64(
    <vscale x 1 x i64> %0,
    <vscale x 8 x i64> %1,
    <vscale x 1 x i64> %2,
    iXLen %3)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64(
  <vscale x 1 x i64>,
  <vscale x 8 x i64>,
  <vscale x 1 x i64>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vredmaxu.vs v8, v16, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64(
    <vscale x 1 x i64> %0,
    <vscale x 8 x i64> %1,
    <vscale x 1 x i64> %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret <vscale x 1 x i64> %a
}