; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v,+m -target-abi=ilp32d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v,+m -target-abi=lp64d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s

declare <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x i1>, i32)
declare <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double>, <vscale x 1 x i1>, i32)
declare <vscale x 1 x double> @llvm.vp.fmul.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x i1>, i32)
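
; Each vp.* intrinsic takes a mask (%m) and an explicit vector length (%evl);
; lanes that are masked off or at positions >= %evl yield poison. Every test
; below threads the same %m and %evl through the whole expression, which lets
; the DAG combines fire on the VP nodes just as on the plain FP nodes.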

; (-N0 * -N1) + N2 --> (N0 * N1) + N2
define <vscale x 1 x double> @test1(<vscale x 1 x double> %a, <vscale x 1 x double> %b, <vscale x 1 x double> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: test1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfmadd.vv v9, v8, v10, v0.t
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %nega = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x i1> %m, i32 %evl)
  %negb = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %b, <vscale x 1 x i1> %m, i32 %evl)
  %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %nega, <vscale x 1 x double> %negb, <vscale x 1 x double> %c, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x double> %v
}
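
; A minimal scalar analogue of the fold above (an illustrative sketch;
; @test1_scalar is not covered by the autogenerated checks): the two fnegs
; cancel exactly, so this should lower to a plain fmadd.d.
declare double @llvm.fma.f64(double, double, double)

define double @test1_scalar(double %a, double %b, double %c) {
  %nega = fneg double %a
  %negb = fneg double %b
  ; (-a * -b) + c == (a * b) + c, so no fast-math flags are needed
  %v = call double @llvm.fma.f64(double %nega, double %negb, double %c)
  ret double %v
}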

; (fma x, c1, (fmul x, c2)) -> (fmul x, c1+c2)
define <vscale x 1 x double> @test2(<vscale x 1 x double> %a, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: test2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a1, %hi(.LCPI1_0)
; CHECK-NEXT:    addi a1, a1, %lo(.LCPI1_0)
; CHECK-NEXT:    vsetvli a2, zero, e64, m1, ta, ma
; CHECK-NEXT:    vlse64.v v9, (a1), zero
; CHECK-NEXT:    lui a1, %hi(.LCPI1_1)
; CHECK-NEXT:    fld fa5, %lo(.LCPI1_1)(a1)
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfadd.vf v9, v9, fa5, v0.t
; CHECK-NEXT:    vfmul.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %t = call <vscale x 1 x double> @llvm.vp.fmul.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> splat (double 2.0), <vscale x 1 x i1> %m, i32 %evl)
  %v = call fast <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> splat (double 4.0), <vscale x 1 x double> %t, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x double> %v
}
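
; Scalar sketch of the same reassociation (@test2_scalar is illustrative and
; not covered by the checks): under fast-math, x*4.0 + x*2.0 should fold to a
; single multiply by 6.0. Note that in the vector test above the c1+c2 addend
; is still computed at run time (vfadd.vf) rather than folded into one splat.
define double @test2_scalar(double %a) {
  %t = fmul fast double %a, 2.0
  %v = call fast double @llvm.fma.f64(double %a, double 4.0, double %t)
  ret double %v
}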

; (fma (fmul x, c1), c2, y) -> (fma x, c1*c2, y)
define <vscale x 1 x double> @test3(<vscale x 1 x double> %a, <vscale x 1 x double> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: test3:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a1, %hi(.LCPI2_0)
; CHECK-NEXT:    addi a1, a1, %lo(.LCPI2_0)
; CHECK-NEXT:    vsetvli a2, zero, e64, m1, ta, ma
; CHECK-NEXT:    vlse64.v v10, (a1), zero
; CHECK-NEXT:    lui a1, %hi(.LCPI2_1)
; CHECK-NEXT:    fld fa5, %lo(.LCPI2_1)(a1)
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfmul.vf v10, v10, fa5, v0.t
; CHECK-NEXT:    vfmadd.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %t = call <vscale x 1 x double> @llvm.vp.fmul.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> splat (double 2.0), <vscale x 1 x i1> %m, i32 %evl)
  %v = call fast <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %t, <vscale x 1 x double> splat (double 4.0), <vscale x 1 x double> %b, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x double> %v
}
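
; Scalar sketch of the multiplicand fold (@test3_scalar is illustrative and
; not covered by the checks): under fast-math, (x*2.0)*4.0 + y should become
; a single fma of x with 8.0.
define double @test3_scalar(double %a, double %b) {
  %t = fmul fast double %a, 2.0
  %v = call fast double @llvm.fma.f64(double %t, double 4.0, double %b)
  ret double %v
}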