; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+v -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s
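;
; The CHECK lines below can be regenerated with update_llc_test_checks.py; a
; typical invocation (assuming an LLVM checkout with a built llc on PATH;
; <path/to/this/test.ll> is a placeholder for this file) is:
;   llvm/utils/update_llc_test_checks.py --llc-binary llc <path/to/this/test.ll>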

declare <vscale x 1 x double> @llvm.vp.fmul.nxv1f64(<vscale x 1 x double> %x, <vscale x 1 x double> %y, <vscale x 1 x i1> %m, i32 %vl)
declare <vscale x 1 x double> @llvm.vp.fadd.nxv1f64(<vscale x 1 x double> %x, <vscale x 1 x double> %y, <vscale x 1 x i1> %m, i32 %vl)

; (fadd (fmul x, y), z) -> (fma x, y, z)
define <vscale x 1 x double> @fma(<vscale x 1 x double> %x, <vscale x 1 x double> %y, <vscale x 1 x double> %z, <vscale x 1 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: fma:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfmadd.vv v9, v8, v10, v0.t
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %1 = call fast <vscale x 1 x double> @llvm.vp.fmul.nxv1f64(<vscale x 1 x double> %x, <vscale x 1 x double> %y, <vscale x 1 x i1> %m, i32 %vl)
  %2 = call fast <vscale x 1 x double> @llvm.vp.fadd.nxv1f64(<vscale x 1 x double> %1, <vscale x 1 x double> %z, <vscale x 1 x i1> %m, i32 %vl)
  ret <vscale x 1 x double> %2
}

; (fadd z, (fmul x, y)) -> (fma x, y, z)
define <vscale x 1 x double> @fma_commute(<vscale x 1 x double> %x, <vscale x 1 x double> %y, <vscale x 1 x double> %z, <vscale x 1 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: fma_commute:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfmadd.vv v9, v8, v10, v0.t
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %1 = call fast <vscale x 1 x double> @llvm.vp.fmul.nxv1f64(<vscale x 1 x double> %x, <vscale x 1 x double> %y, <vscale x 1 x i1> %m, i32 %vl)
  %2 = call fast <vscale x 1 x double> @llvm.vp.fadd.nxv1f64(<vscale x 1 x double> %z, <vscale x 1 x double> %1, <vscale x 1 x i1> %m, i32 %vl)
  ret <vscale x 1 x double> %2
}

; Test an fmul operand with an all-true mask.
define <vscale x 1 x double> @fma_true(<vscale x 1 x double> %x, <vscale x 1 x double> %y, <vscale x 1 x double> %z, <vscale x 1 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: fma_true:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfmadd.vv v9, v8, v10, v0.t
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %1 = call fast <vscale x 1 x double> @llvm.vp.fmul.nxv1f64(<vscale x 1 x double> %x, <vscale x 1 x double> %y, <vscale x 1 x i1> splat (i1 true), i32 %vl)
  %2 = call fast <vscale x 1 x double> @llvm.vp.fadd.nxv1f64(<vscale x 1 x double> %1, <vscale x 1 x double> %z, <vscale x 1 x i1> %m, i32 %vl)
  ret <vscale x 1 x double> %2
}

; Test an fmul operand with a normal (non-VP) opcode.
define <vscale x 1 x double> @fma_nonvp(<vscale x 1 x double> %x, <vscale x 1 x double> %y, <vscale x 1 x double> %z, <vscale x 1 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: fma_nonvp:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfmadd.vv v9, v8, v10, v0.t
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %1 = fmul fast <vscale x 1 x double> %x, %y
  %2 = call fast <vscale x 1 x double> @llvm.vp.fadd.nxv1f64(<vscale x 1 x double> %1, <vscale x 1 x double> %z, <vscale x 1 x i1> %m, i32 %vl)
  ret <vscale x 1 x double> %2
}
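
; Test that reassociation folds two fmuls and two fadds into two fmas.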
define <vscale x 1 x double> @fma_reassociate(<vscale x 1 x double> %a, <vscale x 1 x double> %b, <vscale x 1 x double> %c, <vscale x 1 x double> %d, <vscale x 1 x double> %e, <vscale x 1 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: fma_reassociate:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfmadd.vv v11, v10, v12, v0.t
; CHECK-NEXT:    vfmadd.vv v9, v8, v11, v0.t
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %1 = call fast <vscale x 1 x double> @llvm.vp.fmul.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %b, <vscale x 1 x i1> %m, i32 %vl)
  %2 = call fast <vscale x 1 x double> @llvm.vp.fmul.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x double> %d, <vscale x 1 x i1> %m, i32 %vl)
  %3 = call fast <vscale x 1 x double> @llvm.vp.fadd.nxv1f64(<vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %m, i32 %vl)
  %4 = call fast <vscale x 1 x double> @llvm.vp.fadd.nxv1f64(<vscale x 1 x double> %3, <vscale x 1 x double> %e, <vscale x 1 x i1> %m, i32 %vl)
  ret <vscale x 1 x double> %4
}