// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \
// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN:   FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vredand_vs_i8mf8_i8m1_tu
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[VECTOR]], <vscale x 8 x i8> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredand_vs_i8mf8_i8m1_tu(vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
  return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vredand_vs_i8mf4_i8m1_tu
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[VECTOR]], <vscale x 8 x i8> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredand_vs_i8mf4_i8m1_tu(vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
  return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vredand_vs_i8mf2_i8m1_tu
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[VECTOR]], <vscale x 8 x i8> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredand_vs_i8mf2_i8m1_tu(vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
  return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vredand_vs_i8m1_i8m1_tu
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[VECTOR]], <vscale x 8 x i8> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredand_vs_i8m1_i8m1_tu(vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
  return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vredand_vs_i8m2_i8m1_tu
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[VECTOR]], <vscale x 8 x i8> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredand_vs_i8m2_i8m1_tu(vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
  return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vredand_vs_i8m4_i8m1_tu
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[VECTOR]], <vscale x 8 x i8> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredand_vs_i8m4_i8m1_tu(vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
  return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vredand_vs_i8m8_i8m1_tu
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[VECTOR]], <vscale x 8 x i8> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredand_vs_i8m8_i8m1_tu(vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
  return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vredand_vs_i16mf4_i16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredand_vs_i16mf4_i16m1_tu(vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
  return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vredand_vs_i16mf2_i16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredand_vs_i16mf2_i16m1_tu(vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
  return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vredand_vs_i16m1_i16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredand_vs_i16m1_i16m1_tu(vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
  return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vredand_vs_i16m2_i16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredand_vs_i16m2_i16m1_tu(vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
  return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vredand_vs_i16m4_i16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredand_vs_i16m4_i16m1_tu(vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
  return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vredand_vs_i16m8_i16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredand_vs_i16m8_i16m1_tu(vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
  return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vredand_vs_i32mf2_i32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[VECTOR]], <vscale x 2 x i32> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredand_vs_i32mf2_i32m1_tu(vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
  return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vredand_vs_i32m1_i32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[VECTOR]], <vscale x 2 x i32> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredand_vs_i32m1_i32m1_tu(vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
  return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vredand_vs_i32m2_i32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[VECTOR]], <vscale x 2 x i32> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredand_vs_i32m2_i32m1_tu(vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
  return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vredand_vs_i32m4_i32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[VECTOR]], <vscale x 2 x i32> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredand_vs_i32m4_i32m1_tu(vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
  return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vredand_vs_i32m8_i32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[VECTOR]], <vscale x 2 x i32> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredand_vs_i32m8_i32m1_tu(vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
  return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vredand_vs_i64m1_i64m1_tu
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[VECTOR]], <vscale x 1 x i64> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredand_vs_i64m1_i64m1_tu(vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
  return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vredand_vs_i64m2_i64m1_tu
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[VECTOR]], <vscale x 1 x i64> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredand_vs_i64m2_i64m1_tu(vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
  return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vredand_vs_i64m4_i64m1_tu
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[VECTOR]], <vscale x 1 x i64> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredand_vs_i64m4_i64m1_tu(vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
  return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vredand_vs_i64m8_i64m1_tu
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[VECTOR]], <vscale x 1 x i64> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredand_vs_i64m8_i64m1_tu(vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
  return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vredand_vs_u8mf8_u8m1_tu
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[VECTOR]], <vscale x 8 x i8> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredand_vs_u8mf8_u8m1_tu(vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
  return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vredand_vs_u8mf4_u8m1_tu
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[VECTOR]], <vscale x 8 x i8> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredand_vs_u8mf4_u8m1_tu(vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
  return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vredand_vs_u8mf2_u8m1_tu
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[VECTOR]], <vscale x 8 x i8> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredand_vs_u8mf2_u8m1_tu(vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
  return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vredand_vs_u8m1_u8m1_tu
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[VECTOR]], <vscale x 8 x i8> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredand_vs_u8m1_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
  return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vredand_vs_u8m2_u8m1_tu
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[VECTOR]], <vscale x 8 x i8> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredand_vs_u8m2_u8m1_tu(vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
  return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vredand_vs_u8m4_u8m1_tu
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[VECTOR]], <vscale x 8 x i8> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredand_vs_u8m4_u8m1_tu(vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
  return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vredand_vs_u8m8_u8m1_tu
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[VECTOR]], <vscale x 8 x i8> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredand_vs_u8m8_u8m1_tu(vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
  return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vredand_vs_u16mf4_u16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredand_vs_u16mf4_u16m1_tu(vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
  return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vredand_vs_u16mf2_u16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredand_vs_u16mf2_u16m1_tu(vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
  return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vredand_vs_u16m1_u16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredand_vs_u16m1_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
  return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vredand_vs_u16m2_u16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredand_vs_u16m2_u16m1_tu(vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
  return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vredand_vs_u16m4_u16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredand_vs_u16m4_u16m1_tu(vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
  return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vredand_vs_u16m8_u16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredand_vs_u16m8_u16m1_tu(vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
  return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vredand_vs_u32mf2_u32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[VECTOR]], <vscale x 2 x i32> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredand_vs_u32mf2_u32m1_tu(vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
  return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vredand_vs_u32m1_u32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[VECTOR]], <vscale x 2 x i32> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredand_vs_u32m1_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
  return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vredand_vs_u32m2_u32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[VECTOR]], <vscale x 2 x i32> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredand_vs_u32m2_u32m1_tu(vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
  return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vredand_vs_u32m4_u32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[VECTOR]], <vscale x 2 x i32> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredand_vs_u32m4_u32m1_tu(vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
  return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vredand_vs_u32m8_u32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[VECTOR]], <vscale x 2 x i32> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredand_vs_u32m8_u32m1_tu(vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
  return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vredand_vs_u64m1_u64m1_tu
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[VECTOR]], <vscale x 1 x i64> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredand_vs_u64m1_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
  return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vredand_vs_u64m2_u64m1_tu
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[VECTOR]], <vscale x 1 x i64> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredand_vs_u64m2_u64m1_tu(vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
  return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vredand_vs_u64m4_u64m1_tu
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[VECTOR]], <vscale x 1 x i64> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredand_vs_u64m4_u64m1_tu(vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
  return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vredand_vs_u64m8_u64m1_tu
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[VECTOR]], <vscale x 1 x i64> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredand_vs_u64m8_u64m1_tu(vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
  return __riscv_vredand_tu(maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vredand_vs_i8mf8_i8m1_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[VECTOR]], <vscale x 8 x i8> [[SCALAR]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredand_vs_i8mf8_i8m1_tum(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
  return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vredand_vs_i8mf4_i8m1_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[VECTOR]], <vscale x 8 x i8> [[SCALAR]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredand_vs_i8mf4_i8m1_tum(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
  return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vredand_vs_i8mf2_i8m1_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[VECTOR]], <vscale x 8 x i8> [[SCALAR]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredand_vs_i8mf2_i8m1_tum(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
  return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vredand_vs_i8m1_i8m1_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[VECTOR]], <vscale x 8 x i8> [[SCALAR]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredand_vs_i8m1_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
  return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vredand_vs_i8m2_i8m1_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[VECTOR]], <vscale x 8 x i8> [[SCALAR]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredand_vs_i8m2_i8m1_tum(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
  return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vredand_vs_i8m4_i8m1_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[VECTOR]], <vscale x 8 x i8> [[SCALAR]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredand_vs_i8m4_i8m1_tum(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
  return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vredand_vs_i8m8_i8m1_tum
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[VECTOR]], <vscale x 8 x i8> [[SCALAR]], <vscale x 64 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredand_vs_i8m8_i8m1_tum(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
  return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vredand_vs_i16mf4_i16m1_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredand_vs_i16mf4_i16m1_tum(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
  return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vredand_vs_i16mf2_i16m1_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredand_vs_i16mf2_i16m1_tum(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
  return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vredand_vs_i16m1_i16m1_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredand_vs_i16m1_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
  return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vredand_vs_i16m2_i16m1_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredand_vs_i16m2_i16m1_tum(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
  return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vredand_vs_i16m4_i16m1_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredand_vs_i16m4_i16m1_tum(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
  return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vredand_vs_i16m8_i16m1_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredand_vs_i16m8_i16m1_tum(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
  return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vredand_vs_i32mf2_i32m1_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[VECTOR]], <vscale x 2 x i32> [[SCALAR]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredand_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
  return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vredand_vs_i32m1_i32m1_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[VECTOR]], <vscale x 2 x i32> [[SCALAR]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredand_vs_i32m1_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
  return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vredand_vs_i32m2_i32m1_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[VECTOR]], <vscale x 2 x i32> [[SCALAR]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredand_vs_i32m2_i32m1_tum(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
  return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vredand_vs_i32m4_i32m1_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[VECTOR]], <vscale x 2 x i32> [[SCALAR]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredand_vs_i32m4_i32m1_tum(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
  return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vredand_vs_i32m8_i32m1_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[VECTOR]], <vscale x 2 x i32> [[SCALAR]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredand_vs_i32m8_i32m1_tum(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
  return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vredand_vs_i64m1_i64m1_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[VECTOR]], <vscale x 1 x i64> [[SCALAR]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredand_vs_i64m1_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
  return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vredand_vs_i64m2_i64m1_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[VECTOR]], <vscale x 1 x i64> [[SCALAR]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredand_vs_i64m2_i64m1_tum(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
  return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vredand_vs_i64m4_i64m1_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[VECTOR]], <vscale x 1 x i64> [[SCALAR]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredand_vs_i64m4_i64m1_tum(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
  return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vredand_vs_i64m8_i64m1_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[VECTOR]], <vscale x 1 x i64> [[SCALAR]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredand_vs_i64m8_i64m1_tum(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
  return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vredand_vs_u8mf8_u8m1_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 1 x i8> [[VECTOR]], <vscale x 8 x i8> [[SCALAR]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredand_vs_u8mf8_u8m1_tum(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
  return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vredand_vs_u8mf4_u8m1_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 2 x i8> [[VECTOR]], <vscale x 8 x i8> [[SCALAR]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredand_vs_u8mf4_u8m1_tum(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
  return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vredand_vs_u8mf2_u8m1_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 4 x i8> [[VECTOR]], <vscale x 8 x i8> [[SCALAR]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredand_vs_u8mf2_u8m1_tum(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
  return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vredand_vs_u8m1_u8m1_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[VECTOR]], <vscale x 8 x i8> [[SCALAR]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredand_vs_u8m1_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
  return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vredand_vs_u8m2_u8m1_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[VECTOR]], <vscale x 8 x i8> [[SCALAR]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredand_vs_u8m2_u8m1_tum(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
  return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vredand_vs_u8m4_u8m1_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[VECTOR]], <vscale x 8 x i8> [[SCALAR]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredand_vs_u8m4_u8m1_tum(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
  return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vredand_vs_u8m8_u8m1_tum
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[VECTOR]], <vscale x 8 x i8> [[SCALAR]], <vscale x 64 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredand_vs_u8m8_u8m1_tum(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
  return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}

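// The SEW=16 cases below keep the result and scalar operand at LMUL=1
// (vuint16m1_t) while the vector operand spans LMUL=mf4..m8; the mask type
// follows the vector operand's SEW/LMUL ratio, from vbool64_t (u16mf4)
// down to vbool2_t (u16m8).
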
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vredand_vs_u16mf4_u16m1_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 1 x i16> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredand_vs_u16mf4_u16m1_tum(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
  return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vredand_vs_u16mf2_u16m1_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 2 x i16> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredand_vs_u16mf2_u16m1_tum(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
  return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vredand_vs_u16m1_u16m1_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredand_vs_u16m1_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
  return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vredand_vs_u16m2_u16m1_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredand_vs_u16m2_u16m1_tum(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
  return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vredand_vs_u16m4_u16m1_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredand_vs_u16m4_u16m1_tum(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
  return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vredand_vs_u16m8_u16m1_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 32 x i16> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredand_vs_u16m8_u16m1_tum(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
  return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}

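// SEW=32 cases: same shape, with a vuint32m1_t result and scalar, and masks
// from vbool64_t (u32mf2) down to vbool4_t (u32m8).
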
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vredand_vs_u32mf2_u32m1_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 1 x i32> [[VECTOR]], <vscale x 2 x i32> [[SCALAR]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredand_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
  return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vredand_vs_u32m1_u32m1_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x i32> [[VECTOR]], <vscale x 2 x i32> [[SCALAR]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredand_vs_u32m1_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
  return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vredand_vs_u32m2_u32m1_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 4 x i32> [[VECTOR]], <vscale x 2 x i32> [[SCALAR]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredand_vs_u32m2_u32m1_tum(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
  return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vredand_vs_u32m4_u32m1_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 8 x i32> [[VECTOR]], <vscale x 2 x i32> [[SCALAR]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredand_vs_u32m4_u32m1_tum(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
  return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vredand_vs_u32m8_u32m1_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 16 x i32> [[VECTOR]], <vscale x 2 x i32> [[SCALAR]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredand_vs_u32m8_u32m1_tum(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
  return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}

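// SEW=64 cases: no fractional-LMUL types exist for 64-bit elements, so the
// vector operand spans m1..m8 with masks from vbool64_t down to vbool8_t.
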
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vredand_vs_u64m1_u64m1_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x i64> [[VECTOR]], <vscale x 1 x i64> [[SCALAR]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredand_vs_u64m1_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
  return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vredand_vs_u64m2_u64m1_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 2 x i64> [[VECTOR]], <vscale x 1 x i64> [[SCALAR]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredand_vs_u64m2_u64m1_tum(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
  return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vredand_vs_u64m4_u64m1_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 4 x i64> [[VECTOR]], <vscale x 1 x i64> [[SCALAR]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredand_vs_u64m4_u64m1_tum(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
  return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vredand_vs_u64m8_u64m1_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 8 x i64> [[VECTOR]], <vscale x 1 x i64> [[SCALAR]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredand_vs_u64m8_u64m1_tum(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
  return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl);
}
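
// A minimal usage sketch (not part of the autogenerated checks; the function
// name, helper names, and loop structure are illustrative assumptions): fold
// a bitwise AND over the bytes of `src` that `sel` marks nonzero, strip-mining
// by vl. The tum policy keeps inactive and tail lanes of the destination equal
// to `acc`, so element 0 carries the running AND across iterations.
static inline uint8_t and_reduce_selected_u8(const uint8_t *src,
                                             const uint8_t *sel, size_t n) {
  size_t vlmax = __riscv_vsetvlmax_e8m1();
  // Seed the accumulator with the AND identity (all bits set).
  vuint8m1_t acc = __riscv_vmv_v_x_u8m1(0xFF, vlmax);
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e8m1(n - i);
    vuint8m1_t v = __riscv_vle8_v_u8m1(src + i, vl);
    vuint8m1_t s = __riscv_vle8_v_u8m1(sel + i, vl);
    // Active lanes are those where the selector byte is nonzero.
    vbool8_t mask = __riscv_vmsne_vx_u8m1_b8(s, 0, vl);
    // acc[0] = acc[0] AND (AND of all active lanes of v); other lanes keep acc.
    acc = __riscv_vredand_tum(mask, acc, v, acc, vl);
    i += vl;
  }
  return __riscv_vmv_x_s_u8m1_u8(acc);
}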