; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s

declare <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfsub.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.nxv1f16(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    iXLen 7, iXLen %2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfsub.mask.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfsub_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vfsub.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfsub.mask.nxv1f16.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    <vscale x 1 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfsub.nxv2f16.nxv2f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  iXLen, iXLen);

define <vscale x 2 x half> @intrinsic_vfsub_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vv_nxv2f16_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfsub.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfsub.nxv2f16.nxv2f16(
    <vscale x 2 x half> undef,
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    iXLen 7, iXLen %2)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfsub.mask.nxv2f16.nxv2f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x half> @intrinsic_vfsub_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f16_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vfsub.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfsub.mask.nxv2f16.nxv2f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    <vscale x 2 x half> %2,
    <vscale x 2 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfsub.nxv4f16.nxv4f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  iXLen, iXLen);

define <vscale x 4 x half> @intrinsic_vfsub_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vv_nxv4f16_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vfsub.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfsub.nxv4f16.nxv4f16(
    <vscale x 4 x half> undef,
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    iXLen 7, iXLen %2)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfsub.mask.nxv4f16.nxv4f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x half> @intrinsic_vfsub_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f16_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vfsub.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfsub.mask.nxv4f16.nxv4f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    <vscale x 4 x half> %2,
    <vscale x 4 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfsub.nxv8f16.nxv8f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  iXLen, iXLen);

define <vscale x 8 x half> @intrinsic_vfsub_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vv_nxv8f16_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vfsub.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfsub.nxv8f16.nxv8f16(
    <vscale x 8 x half> undef,
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    iXLen 7, iXLen %2)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfsub.mask.nxv8f16.nxv8f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x half> @intrinsic_vfsub_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f16_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vfsub.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfsub.mask.nxv8f16.nxv8f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    <vscale x 8 x half> %2,
    <vscale x 8 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfsub.nxv16f16.nxv16f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  iXLen, iXLen);

define <vscale x 16 x half> @intrinsic_vfsub_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vv_nxv16f16_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vfsub.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfsub.nxv16f16.nxv16f16(
    <vscale x 16 x half> undef,
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    iXLen 7, iXLen %2)

  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfsub.mask.nxv16f16.nxv16f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x half> @intrinsic_vfsub_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16f16_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vfsub.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfsub.mask.nxv16f16.nxv16f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    <vscale x 16 x half> %2,
    <vscale x 16 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 16 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vfsub.nxv32f16.nxv32f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  iXLen, iXLen);

define <vscale x 32 x half> @intrinsic_vfsub_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vv_nxv32f16_nxv32f16_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vfsub.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfsub.nxv32f16.nxv32f16(
    <vscale x 32 x half> undef,
    <vscale x 32 x half> %0,
    <vscale x 32 x half> %1,
    iXLen 7, iXLen %2)

  ret <vscale x 32 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vfsub.mask.nxv32f16.nxv32f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  <vscale x 32 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 32 x half> @intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re16.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vfsub.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfsub.mask.nxv32f16.nxv32f16(
    <vscale x 32 x half> %0,
    <vscale x 32 x half> %1,
    <vscale x 32 x half> %2,
    <vscale x 32 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 32 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfsub_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vv_nxv1f32_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfsub.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.nxv1f32(
    <vscale x 1 x float> undef,
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    iXLen 7, iXLen %2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfsub_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f32_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vfsub.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x float> %2,
    <vscale x 1 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.nxv2f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfsub_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vv_nxv2f32_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfsub.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.nxv2f32(
    <vscale x 2 x float> undef,
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    iXLen 7, iXLen %2)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfsub_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f32_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vfsub.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    <vscale x 2 x float> %2,
    <vscale x 2 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.nxv4f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  iXLen, iXLen);

define <vscale x 4 x float> @intrinsic_vfsub_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vv_nxv4f32_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfsub.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.nxv4f32(
    <vscale x 4 x float> undef,
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    iXLen 7, iXLen %2)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x float> @intrinsic_vfsub_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f32_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vfsub.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    <vscale x 4 x float> %2,
    <vscale x 4 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.nxv8f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  iXLen, iXLen);

define <vscale x 8 x float> @intrinsic_vfsub_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vv_nxv8f32_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfsub.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.nxv8f32(
    <vscale x 8 x float> undef,
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    iXLen 7, iXLen %2)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x float> @intrinsic_vfsub_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f32_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vfsub.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    <vscale x 8 x float> %2,
    <vscale x 8 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.nxv16f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  iXLen, iXLen);

define <vscale x 16 x float> @intrinsic_vfsub_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vv_nxv16f32_nxv16f32_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vfsub.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.nxv16f32(
    <vscale x 16 x float> undef,
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    iXLen 7, iXLen %2)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x float> @intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re32.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vfsub.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32(
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    <vscale x 16 x float> %2,
    <vscale x 16 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 16 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  iXLen, iXLen);

define <vscale x 1 x double> @intrinsic_vfsub_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vv_nxv1f64_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfsub.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    iXLen 7, iXLen %2)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.nxv1f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x double> @intrinsic_vfsub_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f64_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vfsub.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.nxv1f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    <vscale x 1 x double> %2,
    <vscale x 1 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.nxv2f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  iXLen, iXLen);

define <vscale x 2 x double> @intrinsic_vfsub_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vv_nxv2f64_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfsub.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.nxv2f64(
    <vscale x 2 x double> undef,
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    iXLen 7, iXLen %2)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.nxv2f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x double> @intrinsic_vfsub_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f64_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vfsub.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.nxv2f64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    <vscale x 2 x double> %2,
    <vscale x 2 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.nxv4f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  iXLen, iXLen);

define <vscale x 4 x double> @intrinsic_vfsub_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vv_nxv4f64_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vfsub.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.nxv4f64(
    <vscale x 4 x double> undef,
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    iXLen 7, iXLen %2)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfsub.mask.nxv4f64.nxv4f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x double> @intrinsic_vfsub_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f64_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vfsub.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfsub.mask.nxv4f64.nxv4f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    <vscale x 4 x double> %2,
    <vscale x 4 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.nxv8f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  iXLen, iXLen);

define <vscale x 8 x double> @intrinsic_vfsub_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vv_nxv8f64_nxv8f64_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfsub.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.nxv8f64(
    <vscale x 8 x double> undef,
    <vscale x 8 x double> %0,
    <vscale x 8 x double> %1,
    iXLen 7, iXLen %2)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfsub.mask.nxv8f64.nxv8f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x double> @intrinsic_vfsub_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f64_nxv8f64_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re64.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vfsub.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfsub.mask.nxv8f64.nxv8f64(
    <vscale x 8 x double> %0,
    <vscale x 8 x double> %1,
    <vscale x 8 x double> %2,
    <vscale x 8 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 8 x double> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  half,
  iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfsub_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vf_nxv1f16_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfsub.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.f16(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    half %1,
    iXLen 7, iXLen %2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfsub.mask.nxv1f16.f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  half,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfsub_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f16_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vfsub.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfsub.mask.nxv1f16.f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    half %2,
    <vscale x 1 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfsub.nxv2f16.f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  half,
  iXLen, iXLen);

define <vscale x 2 x half> @intrinsic_vfsub_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vf_nxv2f16_nxv2f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfsub.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfsub.nxv2f16.f16(
    <vscale x 2 x half> undef,
    <vscale x 2 x half> %0,
    half %1,
    iXLen 7, iXLen %2)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfsub.mask.nxv2f16.f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  half,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x half> @intrinsic_vfsub_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f16_nxv2f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vfsub.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfsub.mask.nxv2f16.f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    half %2,
    <vscale x 2 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfsub.nxv4f16.f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  half,
  iXLen, iXLen);

define <vscale x 4 x half> @intrinsic_vfsub_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vf_nxv4f16_nxv4f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vfsub.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfsub.nxv4f16.f16(
    <vscale x 4 x half> undef,
    <vscale x 4 x half> %0,
    half %1,
    iXLen 7, iXLen %2)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfsub.mask.nxv4f16.f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  half,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x half> @intrinsic_vfsub_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f16_nxv4f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vfsub.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfsub.mask.nxv4f16.f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    half %2,
    <vscale x 4 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfsub.nxv8f16.f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  half,
  iXLen, iXLen);

define <vscale x 8 x half> @intrinsic_vfsub_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vf_nxv8f16_nxv8f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vfsub.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfsub.nxv8f16.f16(
    <vscale x 8 x half> undef,
    <vscale x 8 x half> %0,
    half %1,
    iXLen 7, iXLen %2)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfsub.mask.nxv8f16.f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  half,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x half> @intrinsic_vfsub_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f16_nxv8f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vfsub.vf v8, v10, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfsub.mask.nxv8f16.f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    half %2,
    <vscale x 8 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfsub.nxv16f16.f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  half,
  iXLen, iXLen);

define <vscale x 16 x half> @intrinsic_vfsub_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vf_nxv16f16_nxv16f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vfsub.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfsub.nxv16f16.f16(
    <vscale x 16 x half> undef,
    <vscale x 16 x half> %0,
    half %1,
    iXLen 7, iXLen %2)

  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfsub.mask.nxv16f16.f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  half,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x half> @intrinsic_vfsub_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv16f16_nxv16f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vfsub.vf v8, v12, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfsub.mask.nxv16f16.f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    half %2,
    <vscale x 16 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 16 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vfsub.nxv32f16.f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  half,
  iXLen, iXLen);

define <vscale x 32 x half> @intrinsic_vfsub_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vf_nxv32f16_nxv32f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vfsub.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfsub.nxv32f16.f16(
    <vscale x 32 x half> undef,
    <vscale x 32 x half> %0,
    half %1,
    iXLen 7, iXLen %2)

  ret <vscale x 32 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vfsub.mask.nxv32f16.f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  half,
  <vscale x 32 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 32 x half> @intrinsic_vfsub_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv32f16_nxv32f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT:    vfsub.vf v8, v16, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfsub.mask.nxv32f16.f16(
    <vscale x 32 x half> %0,
    <vscale x 32 x half> %1,
    half %2,
    <vscale x 32 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 32 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  float,
  iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfsub_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vf_nxv1f32_nxv1f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfsub.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.f32(
    <vscale x 1 x float> undef,
    <vscale x 1 x float> %0,
    float %1,
    iXLen 7, iXLen %2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  float,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfsub_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f32_nxv1f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vfsub.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    float %2,
    <vscale x 1 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  float,
  iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfsub_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vf_nxv2f32_nxv2f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfsub.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.f32(
    <vscale x 2 x float> undef,
    <vscale x 2 x float> %0,
    float %1,
    iXLen 7, iXLen %2)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  float,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfsub_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f32_nxv2f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vfsub.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    float %2,
    <vscale x 2 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  float,
  iXLen, iXLen);

define <vscale x 4 x float> @intrinsic_vfsub_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vf_nxv4f32_nxv4f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfsub.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.f32(
    <vscale x 4 x float> undef,
    <vscale x 4 x float> %0,
    float %1,
    iXLen 7, iXLen %2)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  float,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x float> @intrinsic_vfsub_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f32_nxv4f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vfsub.vf v8, v10, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.f32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    float %2,
    <vscale x 4 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  float,
  iXLen, iXLen);

define <vscale x 8 x float> @intrinsic_vfsub_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vf_nxv8f32_nxv8f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfsub.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.f32(
    <vscale x 8 x float> undef,
    <vscale x 8 x float> %0,
    float %1,
    iXLen 7, iXLen %2)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  float,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x float> @intrinsic_vfsub_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f32_nxv8f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vfsub.vf v8, v12, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    float %2,
    <vscale x 8 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  float,
  iXLen, iXLen);

define <vscale x 16 x float> @intrinsic_vfsub_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vf_nxv16f32_nxv16f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vfsub.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.f32(
    <vscale x 16 x float> undef,
    <vscale x 16 x float> %0,
    float %1,
    iXLen 7, iXLen %2)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  float,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x float> @intrinsic_vfsub_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv16f32_nxv16f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT:    vfsub.vf v8, v16, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.f32(
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    float %2,
    <vscale x 16 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 16 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  double,
  iXLen, iXLen);

define <vscale x 1 x double> @intrinsic_vfsub_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vf_nxv1f64_nxv1f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfsub.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.f64(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %0,
    double %1,
    iXLen 7, iXLen %2)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  double,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x double> @intrinsic_vfsub_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f64_nxv1f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vfsub.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    double %2,
    <vscale x 1 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  double,
  iXLen, iXLen);

define <vscale x 2 x double> @intrinsic_vfsub_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vf_nxv2f64_nxv2f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfsub.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.f64(
    <vscale x 2 x double> undef,
    <vscale x 2 x double> %0,
    double %1,
    iXLen 7, iXLen %2)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  double,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x double> @intrinsic_vfsub_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f64_nxv2f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vfsub.vf v8, v10, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.f64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    double %2,
    <vscale x 2 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  double,
  iXLen, iXLen);

define <vscale x 4 x double> @intrinsic_vfsub_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vf_nxv4f64_nxv4f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vfsub.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.f64(
    <vscale x 4 x double> undef,
    <vscale x 4 x double> %0,
    double %1,
    iXLen 7, iXLen %2)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfsub.mask.nxv4f64.f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  double,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x double> @intrinsic_vfsub_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f64_nxv4f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vfsub.vf v8, v12, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfsub.mask.nxv4f64.f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    double %2,
    <vscale x 4 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  double,
  iXLen, iXLen);

define <vscale x 8 x double> @intrinsic_vfsub_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vf_nxv8f64_nxv8f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfsub.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.f64(
    <vscale x 8 x double> undef,
    <vscale x 8 x double> %0,
    double %1,
    iXLen 7, iXLen %2)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfsub.mask.nxv8f64.f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  double,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x double> @intrinsic_vfsub_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f64_nxv8f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT:    vfsub.vf v8, v16, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfsub.mask.nxv8f64.f64(
    <vscale x 8 x double> %0,
    <vscale x 8 x double> %1,
    double %2,
    <vscale x 8 x i1> %3,
    iXLen 7, iXLen %4, iXLen 1)

  ret <vscale x 8 x double> %a
}