; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s --check-prefixes=CHECK,RV64
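
; Test that unmasked intrinsics whose passthru operand is undef, or whose
; policy operand requests a tail-agnostic result (iXLen 1), are selected
; with a tail-agnostic ("ta") vsetvli.
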
declare <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfmacc.vv v8, v10, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.nxv1f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x float> %2,
    iXLen 7, iXLen %3, iXLen 1)
  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfmadd.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.nxv1f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x float> %2,
    iXLen 7, iXLen %3, iXLen 1)
  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfmsac.vv v8, v10, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.nxv1f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x float> %2,
    iXLen 7, iXLen %3, iXLen 1)
  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfmsub.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.nxv1f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x float> %2,
    iXLen 7, iXLen %3, iXLen 1)
  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfnmacc.nxv1f32.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfnmacc.vv v8, v10, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfnmacc.nxv1f32.nxv1f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x float> %2,
    iXLen 7, iXLen %3, iXLen 1)
  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.nxv1f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x float> %2,
    iXLen 7, iXLen %3, iXLen 1)
  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfnmsac.vv v8, v10, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.nxv1f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x float> %2,
    iXLen 7, iXLen %3, iXLen 1)
  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfnmsub.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.nxv1f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x float> %2,
    iXLen 7, iXLen %3, iXLen 1)
  ret <vscale x 1 x float> %a
}

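; Widening floating-point fused multiply-accumulate intrinsics: f16 sources
; accumulating into an f32 destination.
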
declare <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1f16(
  <vscale x 1 x float>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfwmacc_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv1f32_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfwmacc.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1f16(
    <vscale x 1 x float> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    iXLen 7, iXLen %3, iXLen 1)
  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1f16(
  <vscale x 1 x float>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfwmsac_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv1f32_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfwmsac.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1f16(
    <vscale x 1 x float> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    iXLen 7, iXLen %3, iXLen 1)
  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16(
  <vscale x 1 x float>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfwnmacc.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16(
    <vscale x 1 x float> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    iXLen 7, iXLen %3, iXLen 1)
  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16(
  <vscale x 1 x float>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfwnmsac_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv1f32_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfwnmsac.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16(
    <vscale x 1 x float> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    iXLen 7, iXLen %3, iXLen 1)
  ret <vscale x 1 x float> %a
}

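; Integer multiply-add intrinsics. On RV32 the i64 scalar operand does not
; fit in a GPR, so it is stored to the stack and splatted with vlse64.v,
; turning the .vx form into .vv.
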
declare <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.i64(
  <vscale x 1 x i64>,
  i64,
  <vscale x 1 x i64>,
  iXLen,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vmacc_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vmacc_vx_nxv1i64_i64_nxv1i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vmacc.vv v8, v9, v10
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmacc_vx_nxv1i64_i64_nxv1i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vmacc.vx v8, a0, v9
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    i64 %1,
    <vscale x 1 x i64> %2,
    iXLen %3, iXLen 1)
  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.i64(
  <vscale x 1 x i64>,
  i64,
  <vscale x 1 x i64>,
  iXLen,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vmadd.vv v8, v10, v9
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vmadd.vx v8, a0, v9
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    i64 %1,
    <vscale x 1 x i64> %2,
    iXLen %3, iXLen 1)
  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.i64(
  <vscale x 1 x i64>,
  i64,
  <vscale x 1 x i64>,
  iXLen,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vnmsac_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vnmsac_vx_nxv1i64_i64_nxv1i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vnmsac.vv v8, v9, v10
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vnmsac_vx_nxv1i64_i64_nxv1i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vnmsac.vx v8, a0, v9
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    i64 %1,
    <vscale x 1 x i64> %2,
    iXLen %3, iXLen 1)
  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.i64(
  <vscale x 1 x i64>,
  i64,
  <vscale x 1 x i64>,
  iXLen,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vnmsub.vv v8, v10, v9
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vnmsub.vx v8, a0, v9
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    i64 %1,
    <vscale x 1 x i64> %2,
    iXLen %3, iXLen 1)
  ret <vscale x 1 x i64> %a
}

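; Widening integer multiply-accumulate intrinsics: i8 sources accumulating
; into an i16 destination.
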
declare <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vwmacc_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwmacc_vv_nxv1i16_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vwmacc.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.nxv1i8(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen %3, iXLen 1)
  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vwmaccsu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv1i16_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen %3, iXLen 1)
  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vwmaccu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv1i16_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vwmaccu.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.nxv1i8(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen %3, iXLen 1)
  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vwmaccus.nxv1i16.i8(
  <vscale x 1 x i16>,
  i8,
  <vscale x 1 x i8>,
  iXLen,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vwmaccus_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv1i16_i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vwmaccus.vx v8, a0, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccus.nxv1i16.i8(
    <vscale x 1 x i16> %0,
    i8 %1,
    <vscale x 1 x i8> %2,
    iXLen %3, iXLen 1)
  ret <vscale x 1 x i16> %a
}

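; Reduction intrinsics with an undef start-value (passthru) operand.
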
declare <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv1i8(
  <vscale x 8 x i8>,
  <vscale x 1 x i8>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 1 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vredsum.vs v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv1i8(
    <vscale x 8 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 8 x i8> %1,
    iXLen %2)
  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8(
  <vscale x 8 x i8>,
  <vscale x 1 x i8>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 1 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vredand.vs v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8(
    <vscale x 8 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 8 x i8> %1,
    iXLen %2)
  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv1i8(
  <vscale x 8 x i8>,
  <vscale x 1 x i8>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 1 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vredor.vs v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv1i8(
    <vscale x 8 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 8 x i8> %1,
    iXLen %2)
  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv1i8(
  <vscale x 8 x i8>,
  <vscale x 1 x i8>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 1 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vredxor.vs v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv1i8(
    <vscale x 8 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 8 x i8> %1,
    iXLen %2)
  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv1i8(
  <vscale x 8 x i8>,
  <vscale x 1 x i8>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 1 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vredminu.vs v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv1i8(
    <vscale x 8 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 8 x i8> %1,
    iXLen %2)
  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv1i8(
  <vscale x 8 x i8>,
  <vscale x 1 x i8>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 1 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vredmin.vs v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv1i8(
    <vscale x 8 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 8 x i8> %1,
    iXLen %2)
  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv1i8(
  <vscale x 8 x i8>,
  <vscale x 1 x i8>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 1 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vredmaxu.vs v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv1i8(
    <vscale x 8 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 8 x i8> %1,
    iXLen %2)
  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv1i8(
  <vscale x 8 x i8>,
  <vscale x 1 x i8>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 1 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vredmax.vs v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv1i8(
    <vscale x 8 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 8 x i8> %1,
    iXLen %2)
  ret <vscale x 8 x i8> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv1i8(
  <vscale x 4 x i16>,
  <vscale x 1 x i8>,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv1i8_nxv4i16(<vscale x 1 x i8> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv1i8_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vwredsumu.vs v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv1i8(
    <vscale x 4 x i16> undef,
    <vscale x 1 x i8> %0,
    <vscale x 4 x i16> %1,
    iXLen %2)
  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv1i8(
  <vscale x 4 x i16>,
  <vscale x 1 x i8>,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv1i8_nxv4i16(<vscale x 1 x i8> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv1i8_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vwredsum.vs v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv1i8(
    <vscale x 4 x i16> undef,
    <vscale x 1 x i8> %0,
    <vscale x 4 x i16> %1,
    iXLen %2)
  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv1f16(
  <vscale x 4 x half>,
  <vscale x 1 x half>,
  <vscale x 4 x half>,
  iXLen, iXLen);

define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 1 x half> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfredosum.vs v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv1f16(
    <vscale x 4 x half> undef,
    <vscale x 1 x half> %0,
    <vscale x 4 x half> %1,
    iXLen 7, iXLen %2)
  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv1f16(
  <vscale x 4 x half>,
  <vscale x 1 x half>,
  <vscale x 4 x half>,
  iXLen, iXLen);

define <vscale x 4 x half> @intrinsic_vfredusum_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 1 x half> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv1f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfredusum.vs v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv1f16(
    <vscale x 4 x half> undef,
    <vscale x 1 x half> %0,
    <vscale x 4 x half> %1,
    iXLen 7, iXLen %2)
  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv1f16(
  <vscale x 4 x half>,
  <vscale x 1 x half>,
  <vscale x 4 x half>,
  iXLen);

define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 1 x half> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfredmax.vs v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv1f16(
    <vscale x 4 x half> undef,
    <vscale x 1 x half> %0,
    <vscale x 4 x half> %1,
    iXLen %2)
  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv1f16(
  <vscale x 4 x half>,
  <vscale x 1 x half>,
  <vscale x 4 x half>,
  iXLen);

define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 1 x half> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfredmin.vs v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv1f16(
    <vscale x 4 x half> undef,
    <vscale x 1 x half> %0,
    <vscale x 4 x half> %1,
    iXLen %2)
  ret <vscale x 4 x half> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv1f16(
  <vscale x 2 x float>,
  <vscale x 1 x half>,
  <vscale x 2 x float>,
  iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv1f16_nxv2f32(<vscale x 1 x half> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv1f16_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfwredosum.vs v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv1f16(
    <vscale x 2 x float> undef,
    <vscale x 1 x half> %0,
    <vscale x 2 x float> %1,
    iXLen 7, iXLen %2)
  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv1f16(
  <vscale x 2 x float>,
  <vscale x 1 x half>,
  <vscale x 2 x float>,
  iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfwredusum_vs_nxv2f32_nxv1f16_nxv2f32(<vscale x 1 x half> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv1f16_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfwredusum.vs v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv1f16(
    <vscale x 2 x float> undef,
    <vscale x 1 x half> %0,
    <vscale x 2 x float> %1,
    iXLen 7, iXLen %2)
  ret <vscale x 2 x float> %a
}

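; Slide intrinsics: vslidedown takes an undef passthru; vslideup reuses its
; first operand as the destination.
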
declare <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vslidedown_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    iXLen %1,
    iXLen %2,
    iXLen 1)
  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vslideup_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    iXLen %2,
    iXLen %3,
    iXLen 1)
  ret <vscale x 1 x i8> %a
}

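; Scalar insert intrinsics (vmv.s.x / vfmv.s.f) with an undef passthru.
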
declare <vscale x 1 x i64> @llvm.riscv.vmv.s.x.nxv1i64(<vscale x 1 x i64>, i64, iXLen);

define <vscale x 1 x i64> @intrinsic_vmv.s.x_x_nxv1i64(i64 %0, iXLen %1) nounwind {
; RV32-LABEL: intrinsic_vmv.s.x_x_nxv1i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vlse64.v v8, (a0), zero
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmv.s.x_x_nxv1i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vmv.s.x v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vmv.s.x.nxv1i64(<vscale x 1 x i64> undef, i64 %0, iXLen %1)
  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfmv.s.f.nxv1f16(<vscale x 1 x half>, half, iXLen)

define <vscale x 1 x half> @intrinsic_vfmv.s.f_f_nxv1f16(half %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmv.s.f.nxv1f16(<vscale x 1 x half> undef, half %0, iXLen %1)
  ret <vscale x 1 x half> %a
}

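; vcompress with an undef passthru operand.
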
declare <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vcompress_um_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vcompress_um_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i1> %1,
    iXLen %2)
  ret <vscale x 1 x i8> %a
}