// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \
// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN:   FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>
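// Each test wraps a single __riscv_vredmax_vs_* call. The autogenerated CHECK
// lines verify lowering to the matching @llvm.riscv.vredmax(.mask) intrinsic
// with a poison passthru operand; the source operand ranges from fractional
// LMUL (mf8/mf4/mf2, where valid for the element width) up to m8, while the
// result is always an m1 vector.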
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vredmax_vs_i8mf8_i8m1
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> poison, <vscale x 1 x i8> [[VECTOR]], <vscale x 8 x i8> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmax_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
  return __riscv_vredmax_vs_i8mf8_i8m1(vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vredmax_vs_i8mf4_i8m1
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> poison, <vscale x 2 x i8> [[VECTOR]], <vscale x 8 x i8> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmax_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
  return __riscv_vredmax_vs_i8mf4_i8m1(vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vredmax_vs_i8mf2_i8m1
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> poison, <vscale x 4 x i8> [[VECTOR]], <vscale x 8 x i8> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmax_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
  return __riscv_vredmax_vs_i8mf2_i8m1(vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vredmax_vs_i8m1_i8m1
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VECTOR]], <vscale x 8 x i8> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmax_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) {
  return __riscv_vredmax_vs_i8m1_i8m1(vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vredmax_vs_i8m2_i8m1
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> poison, <vscale x 16 x i8> [[VECTOR]], <vscale x 8 x i8> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmax_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) {
  return __riscv_vredmax_vs_i8m2_i8m1(vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vredmax_vs_i8m4_i8m1
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> poison, <vscale x 32 x i8> [[VECTOR]], <vscale x 8 x i8> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmax_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) {
  return __riscv_vredmax_vs_i8m4_i8m1(vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vredmax_vs_i8m8_i8m1
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> poison, <vscale x 64 x i8> [[VECTOR]], <vscale x 8 x i8> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmax_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) {
  return __riscv_vredmax_vs_i8m8_i8m1(vector, scalar, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vredmax_vs_i16mf4_i16m1
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> poison, <vscale x 1 x i16> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmax_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
  return __riscv_vredmax_vs_i16mf4_i16m1(vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vredmax_vs_i16mf2_i16m1
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> poison, <vscale x 2 x i16> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmax_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
  return __riscv_vredmax_vs_i16mf2_i16m1(vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vredmax_vs_i16m1_i16m1
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmax_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) {
  return __riscv_vredmax_vs_i16m1_i16m1(vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vredmax_vs_i16m2_i16m1
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> poison, <vscale x 8 x i16> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmax_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) {
  return __riscv_vredmax_vs_i16m2_i16m1(vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vredmax_vs_i16m4_i16m1
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> poison, <vscale x 16 x i16> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmax_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) {
  return __riscv_vredmax_vs_i16m4_i16m1(vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vredmax_vs_i16m8_i16m1
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> poison, <vscale x 32 x i16> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmax_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) {
  return __riscv_vredmax_vs_i16m8_i16m1(vector, scalar, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vredmax_vs_i32mf2_i32m1
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR]], <vscale x 2 x i32> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredmax_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
  return __riscv_vredmax_vs_i32mf2_i32m1(vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vredmax_vs_i32m1_i32m1
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VECTOR]], <vscale x 2 x i32> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredmax_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) {
  return __riscv_vredmax_vs_i32m1_i32m1(vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vredmax_vs_i32m2_i32m1
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> poison, <vscale x 4 x i32> [[VECTOR]], <vscale x 2 x i32> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredmax_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) {
  return __riscv_vredmax_vs_i32m2_i32m1(vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vredmax_vs_i32m4_i32m1
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> poison, <vscale x 8 x i32> [[VECTOR]], <vscale x 2 x i32> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredmax_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) {
  return __riscv_vredmax_vs_i32m4_i32m1(vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vredmax_vs_i32m8_i32m1
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> poison, <vscale x 16 x i32> [[VECTOR]], <vscale x 2 x i32> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredmax_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) {
  return __riscv_vredmax_vs_i32m8_i32m1(vector, scalar, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vredmax_vs_i64m1_i64m1
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VECTOR]], <vscale x 1 x i64> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredmax_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) {
  return __riscv_vredmax_vs_i64m1_i64m1(vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vredmax_vs_i64m2_i64m1
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> poison, <vscale x 2 x i64> [[VECTOR]], <vscale x 1 x i64> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredmax_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) {
  return __riscv_vredmax_vs_i64m2_i64m1(vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vredmax_vs_i64m4_i64m1
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> poison, <vscale x 4 x i64> [[VECTOR]], <vscale x 1 x i64> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredmax_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) {
  return __riscv_vredmax_vs_i64m4_i64m1(vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vredmax_vs_i64m8_i64m1
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> poison, <vscale x 8 x i64> [[VECTOR]], <vscale x 1 x i64> [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredmax_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) {
  return __riscv_vredmax_vs_i64m8_i64m1(vector, scalar, vl);
}
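// Masked variants: each _m test takes a vbool*_t mask as its first argument,
// which is passed through as the extra <vscale x N x i1> operand of the
// corresponding @llvm.riscv.vredmax.mask.* intrinsic.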
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vredmax_vs_i8mf8_i8m1_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> poison, <vscale x 1 x i8> [[VECTOR]], <vscale x 8 x i8> [[SCALAR]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmax_vs_i8mf8_i8m1_m(vbool64_t mask, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
  return __riscv_vredmax_vs_i8mf8_i8m1_m(mask, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vredmax_vs_i8mf4_i8m1_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> poison, <vscale x 2 x i8> [[VECTOR]], <vscale x 8 x i8> [[SCALAR]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmax_vs_i8mf4_i8m1_m(vbool32_t mask, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
  return __riscv_vredmax_vs_i8mf4_i8m1_m(mask, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vredmax_vs_i8mf2_i8m1_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> poison, <vscale x 4 x i8> [[VECTOR]], <vscale x 8 x i8> [[SCALAR]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmax_vs_i8mf2_i8m1_m(vbool16_t mask, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
  return __riscv_vredmax_vs_i8mf2_i8m1_m(mask, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vredmax_vs_i8m1_i8m1_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VECTOR]], <vscale x 8 x i8> [[SCALAR]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmax_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
  return __riscv_vredmax_vs_i8m1_i8m1_m(mask, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vredmax_vs_i8m2_i8m1_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> poison, <vscale x 16 x i8> [[VECTOR]], <vscale x 8 x i8> [[SCALAR]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmax_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
  return __riscv_vredmax_vs_i8m2_i8m1_m(mask, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vredmax_vs_i8m4_i8m1_m
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> poison, <vscale x 32 x i8> [[VECTOR]], <vscale x 8 x i8> [[SCALAR]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmax_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
  return __riscv_vredmax_vs_i8m4_i8m1_m(mask, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vredmax_vs_i8m8_i8m1_m
// CHECK-RV64-SAME: (<vscale x 64 x i1> [[MASK:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> poison, <vscale x 64 x i8> [[VECTOR]], <vscale x 8 x i8> [[SCALAR]], <vscale x 64 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredmax_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
  return __riscv_vredmax_vs_i8m8_i8m1_m(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vredmax_vs_i16mf4_i16m1_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> poison, <vscale x 1 x i16> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmax_vs_i16mf4_i16m1_m(vbool64_t mask, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
  return __riscv_vredmax_vs_i16mf4_i16m1_m(mask, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vredmax_vs_i16mf2_i16m1_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> poison, <vscale x 2 x i16> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmax_vs_i16mf2_i16m1_m(vbool32_t mask, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
  return __riscv_vredmax_vs_i16mf2_i16m1_m(mask, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vredmax_vs_i16m1_i16m1_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmax_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
  return __riscv_vredmax_vs_i16m1_i16m1_m(mask, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vredmax_vs_i16m2_i16m1_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> poison, <vscale x 8 x i16> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmax_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
  return __riscv_vredmax_vs_i16m2_i16m1_m(mask, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vredmax_vs_i16m4_i16m1_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> poison, <vscale x 16 x i16> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmax_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
  return __riscv_vredmax_vs_i16m4_i16m1_m(mask, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vredmax_vs_i16m8_i16m1_m
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> poison, <vscale x 32 x i16> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredmax_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
  return __riscv_vredmax_vs_i16m8_i16m1_m(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vredmax_vs_i32mf2_i32m1_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR]], <vscale x 2 x i32> [[SCALAR]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredmax_vs_i32mf2_i32m1_m(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
  return __riscv_vredmax_vs_i32mf2_i32m1_m(mask, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vredmax_vs_i32m1_i32m1_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VECTOR]], <vscale x 2 x i32> [[SCALAR]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredmax_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
  return __riscv_vredmax_vs_i32m1_i32m1_m(mask, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vredmax_vs_i32m2_i32m1_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> poison, <vscale x 4 x i32> [[VECTOR]], <vscale x 2 x i32> [[SCALAR]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredmax_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
  return __riscv_vredmax_vs_i32m2_i32m1_m(mask, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vredmax_vs_i32m4_i32m1_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> poison, <vscale x 8 x i32> [[VECTOR]], <vscale x 2 x i32> [[SCALAR]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredmax_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
  return __riscv_vredmax_vs_i32m4_i32m1_m(mask, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vredmax_vs_i32m8_i32m1_m
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> poison, <vscale x 16 x i32> [[VECTOR]], <vscale x 2 x i32> [[SCALAR]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredmax_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
  return __riscv_vredmax_vs_i32m8_i32m1_m(mask, vector, scalar, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vredmax_vs_i64m1_i64m1_m
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VECTOR]], <vscale x 1 x i64> [[SCALAR]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredmax_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
  return __riscv_vredmax_vs_i64m1_i64m1_m(mask, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vredmax_vs_i64m2_i64m1_m
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> poison, <vscale x 2 x i64> [[VECTOR]], <vscale x 1 x i64> [[SCALAR]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredmax_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
  return __riscv_vredmax_vs_i64m2_i64m1_m(mask, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vredmax_vs_i64m4_i64m1_m
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> poison, <vscale x 4 x i64> [[VECTOR]], <vscale x 1 x i64> [[SCALAR]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredmax_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
  return __riscv_vredmax_vs_i64m4_i64m1_m(mask, vector, scalar, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vredmax_vs_i64m8_i64m1_m
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> poison, <vscale x 8 x i64> [[VECTOR]], <vscale x 1 x i64> [[SCALAR]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredmax_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
  return __riscv_vredmax_vs_i64m8_i64m1_m(mask, vector, scalar, vl);
}