// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +zve32x -disable-O0-optnone \
// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN:   FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>
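// vmulhsu returns the high SEW bits of the product of a signed multiplicand
// (op1) and an unsigned multiplier (op2); the _vv forms take a vector op2,
// the _vx forms a scalar op2.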
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmulhsu_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
  return __riscv_vmulhsu_vv_i8mf4(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmulhsu_vx_i8mf4(vint8mf4_t op1, uint8_t op2, size_t vl) {
  return __riscv_vmulhsu_vx_i8mf4(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmulhsu_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
  return __riscv_vmulhsu_vv_i8mf2(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmulhsu_vx_i8mf2(vint8mf2_t op1, uint8_t op2, size_t vl) {
  return __riscv_vmulhsu_vx_i8mf2(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmulhsu_vv_i8m1(vint8m1_t op1, vuint8m1_t op2, size_t vl) {
  return __riscv_vmulhsu_vv_i8m1(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmulhsu_vx_i8m1(vint8m1_t op1, uint8_t op2, size_t vl) {
  return __riscv_vmulhsu_vx_i8m1(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmulhsu_vv_i8m2(vint8m2_t op1, vuint8m2_t op2, size_t vl) {
  return __riscv_vmulhsu_vv_i8m2(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmulhsu_vx_i8m2(vint8m2_t op1, uint8_t op2, size_t vl) {
  return __riscv_vmulhsu_vx_i8m2(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmulhsu_vv_i8m4(vint8m4_t op1, vuint8m4_t op2, size_t vl) {
  return __riscv_vmulhsu_vv_i8m4(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmulhsu_vx_i8m4(vint8m4_t op1, uint8_t op2, size_t vl) {
  return __riscv_vmulhsu_vx_i8m4(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmulhsu_vv_i8m8(vint8m8_t op1, vuint8m8_t op2, size_t vl) {
  return __riscv_vmulhsu_vv_i8m8(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmulhsu_vx_i8m8(vint8m8_t op1, uint8_t op2, size_t vl) {
  return __riscv_vmulhsu_vx_i8m8(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmulhsu_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
  return __riscv_vmulhsu_vv_i16mf2(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmulhsu_vx_i16mf2(vint16mf2_t op1, uint16_t op2, size_t vl) {
  return __riscv_vmulhsu_vx_i16mf2(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmulhsu_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, size_t vl) {
  return __riscv_vmulhsu_vv_i16m1(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmulhsu_vx_i16m1(vint16m1_t op1, uint16_t op2, size_t vl) {
  return __riscv_vmulhsu_vx_i16m1(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmulhsu_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, size_t vl) {
  return __riscv_vmulhsu_vv_i16m2(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmulhsu_vx_i16m2(vint16m2_t op1, uint16_t op2, size_t vl) {
  return __riscv_vmulhsu_vx_i16m2(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmulhsu_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, size_t vl) {
  return __riscv_vmulhsu_vv_i16m4(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmulhsu_vx_i16m4(vint16m4_t op1, uint16_t op2, size_t vl) {
  return __riscv_vmulhsu_vx_i16m4(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmulhsu_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, size_t vl) {
  return __riscv_vmulhsu_vv_i16m8(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmulhsu_vx_i16m8(vint16m8_t op1, uint16_t op2, size_t vl) {
  return __riscv_vmulhsu_vx_i16m8(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmulhsu_vv_i32m1(vint32m1_t op1, vuint32m1_t op2, size_t vl) {
  return __riscv_vmulhsu_vv_i32m1(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmulhsu_vx_i32m1(vint32m1_t op1, uint32_t op2, size_t vl) {
  return __riscv_vmulhsu_vx_i32m1(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmulhsu_vv_i32m2(vint32m2_t op1, vuint32m2_t op2, size_t vl) {
  return __riscv_vmulhsu_vv_i32m2(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmulhsu_vx_i32m2(vint32m2_t op1, uint32_t op2, size_t vl) {
  return __riscv_vmulhsu_vx_i32m2(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmulhsu_vv_i32m4(vint32m4_t op1, vuint32m4_t op2, size_t vl) {
  return __riscv_vmulhsu_vv_i32m4(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmulhsu_vx_i32m4(vint32m4_t op1, uint32_t op2, size_t vl) {
  return __riscv_vmulhsu_vx_i32m4(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmulhsu_vv_i32m8(vint32m8_t op1, vuint32m8_t op2, size_t vl) {
  return __riscv_vmulhsu_vv_i32m8(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmulhsu_vx_i32m8(vint32m8_t op1, uint32_t op2, size_t vl) {
  return __riscv_vmulhsu_vx_i32m8(op1, op2, vl);
}

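// The _m variants below take a mask as their first argument. In the checked
// IR, the masked intrinsics carry a trailing policy operand; i64 3 encodes
// the tail-agnostic, mask-agnostic (TAMA) policy.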
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmulhsu_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
  return __riscv_vmulhsu_vv_i8mf4_m(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmulhsu_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, uint8_t op2, size_t vl) {
  return __riscv_vmulhsu_vx_i8mf4_m(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmulhsu_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
  return __riscv_vmulhsu_vv_i8mf2_m(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmulhsu_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, uint8_t op2, size_t vl) {
  return __riscv_vmulhsu_vx_i8mf2_m(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmulhsu_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t op2, size_t vl) {
  return __riscv_vmulhsu_vv_i8m1_m(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmulhsu_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, uint8_t op2, size_t vl) {
  return __riscv_vmulhsu_vx_i8m1_m(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmulhsu_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t op2, size_t vl) {
  return __riscv_vmulhsu_vv_i8m2_m(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmulhsu_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, uint8_t op2, size_t vl) {
  return __riscv_vmulhsu_vx_i8m2_m(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmulhsu_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t op2, size_t vl) {
  return __riscv_vmulhsu_vv_i8m4_m(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmulhsu_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, uint8_t op2, size_t vl) {
  return __riscv_vmulhsu_vx_i8m4_m(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmulhsu_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vuint8m8_t op2, size_t vl) {
  return __riscv_vmulhsu_vv_i8m8_m(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.mask.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmulhsu_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, uint8_t op2, size_t vl) {
  return __riscv_vmulhsu_vx_i8m8_m(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmulhsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
  return __riscv_vmulhsu_vv_i16mf2_m(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.mask.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmulhsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, uint16_t op2, size_t vl) {
  return __riscv_vmulhsu_vx_i16mf2_m(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmulhsu_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t op2, size_t vl) {
  return __riscv_vmulhsu_vv_i16m1_m(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.mask.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmulhsu_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, uint16_t op2, size_t vl) {
  return __riscv_vmulhsu_vx_i16m1_m(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmulhsu_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t op2, size_t vl) {
  return __riscv_vmulhsu_vv_i16m2_m(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.mask.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmulhsu_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, uint16_t op2, size_t vl) {
  return __riscv_vmulhsu_vx_i16m2_m(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmulhsu_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t op2, size_t vl) {
  return __riscv_vmulhsu_vv_i16m4_m(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.mask.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmulhsu_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, uint16_t op2, size_t vl) {
  return __riscv_vmulhsu_vx_i16m4_m(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmulhsu_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t op2, size_t vl) {
  return __riscv_vmulhsu_vv_i16m8_m(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.mask.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmulhsu_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, uint16_t op2, size_t vl) {
  return __riscv_vmulhsu_vx_i16m8_m(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmulhsu_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t op2, size_t vl) {
  return __riscv_vmulhsu_vv_i32m1_m(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.mask.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmulhsu_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, uint32_t op2, size_t vl) {
  return __riscv_vmulhsu_vx_i32m1_m(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmulhsu_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t op2, size_t vl) {
  return __riscv_vmulhsu_vv_i32m2_m(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.mask.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmulhsu_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, uint32_t op2, size_t vl) {
  return __riscv_vmulhsu_vx_i32m2_m(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmulhsu_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t op2, size_t vl) {
  return __riscv_vmulhsu_vv_i32m4_m(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.mask.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmulhsu_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, uint32_t op2, size_t vl) {
  return __riscv_vmulhsu_vx_i32m4_m(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmulhsu_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint32m8_t op2, size_t vl) {
  return __riscv_vmulhsu_vv_i32m8_m(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.mask.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmulhsu_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, uint32_t op2, size_t vl) {
  return __riscv_vmulhsu_vx_i32m8_m(mask, op1, op2, vl);
}