; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+v -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s
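
; These tests check that chains of `fast` VP intrinsics (fmul/fsub/fneg) are
; combined into a single masked fused-multiply instruction. Each test below is
; preceded by a note on the semantics of the RVV instruction it expects.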
declare <vscale x 1 x double> @llvm.vp.fmul.nxv1f64(<vscale x 1 x double> %x, <vscale x 1 x double> %y, <vscale x 1 x i1> %m, i32 %vl)
declare <vscale x 1 x double> @llvm.vp.fsub.nxv1f64(<vscale x 1 x double> %x, <vscale x 1 x double> %y, <vscale x 1 x i1> %m, i32 %vl)
declare <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %x, <vscale x 1 x i1> %m, i32 %vl)
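
; Per the RVV spec, vfmsub.vv vd, vs1, vs2, v0.t computes
; vd[i] = (vd[i] * vs1[i]) - vs2[i] for active elements, so the fmul/fsub pair
; collapses into one masked instruction.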
; (fsub (fmul x, y), z) -> (fma x, y, (fneg z))
define <vscale x 1 x double> @test1(<vscale x 1 x double> %x, <vscale x 1 x double> %y, <vscale x 1 x double> %z, <vscale x 1 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: test1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfmsub.vv v9, v8, v10, v0.t
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %1 = call fast <vscale x 1 x double> @llvm.vp.fmul.nxv1f64(<vscale x 1 x double> %x, <vscale x 1 x double> %y, <vscale x 1 x i1> %m, i32 %vl)
  %2 = call fast <vscale x 1 x double> @llvm.vp.fsub.nxv1f64(<vscale x 1 x double> %1, <vscale x 1 x double> %z, <vscale x 1 x i1> %m, i32 %vl)
  ret <vscale x 1 x double> %2
}
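
; Per the RVV spec, vfnmsub.vv vd, vs1, vs2, v0.t computes
; vd[i] = -(vd[i] * vs1[i]) + vs2[i] for active elements, which implements
; z - (x * y) directly.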
; (fsub z, (fmul x, y)) -> (fma (fneg y), x, z)
define <vscale x 1 x double> @test2(<vscale x 1 x double> %x, <vscale x 1 x double> %y, <vscale x 1 x double> %z, <vscale x 1 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: test2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfnmsub.vv v9, v8, v10, v0.t
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %1 = call fast <vscale x 1 x double> @llvm.vp.fmul.nxv1f64(<vscale x 1 x double> %x, <vscale x 1 x double> %y, <vscale x 1 x i1> %m, i32 %vl)
  %2 = call fast <vscale x 1 x double> @llvm.vp.fsub.nxv1f64(<vscale x 1 x double> %z, <vscale x 1 x double> %1, <vscale x 1 x i1> %m, i32 %vl)
  ret <vscale x 1 x double> %2
}
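
; Per the RVV spec, vfnmadd.vv vd, vs1, vs2, v0.t computes
; vd[i] = -(vd[i] * vs1[i]) - vs2[i] for active elements, matching the
; negated-product-minus-z pattern below.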
; (fsub (fneg (fmul x, y)), z) -> (fma (fneg x), y, (fneg z))
define <vscale x 1 x double> @test3(<vscale x 1 x double> %x, <vscale x 1 x double> %y, <vscale x 1 x double> %z, <vscale x 1 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: test3:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfnmadd.vv v9, v8, v10, v0.t
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %1 = call fast <vscale x 1 x double> @llvm.vp.fmul.nxv1f64(<vscale x 1 x double> %x, <vscale x 1 x double> %y, <vscale x 1 x i1> %m, i32 %vl)
  %2 = call fast <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %1, <vscale x 1 x i1> %m, i32 %vl)
  %3 = call fast <vscale x 1 x double> @llvm.vp.fsub.nxv1f64(<vscale x 1 x double> %2, <vscale x 1 x double> %z, <vscale x 1 x i1> %m, i32 %vl)
  ret <vscale x 1 x double> %3
}