; RUN: llc < %s -asm-verbose=false -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
; Scalar f32: fmuladd must lower to a single fused fmadd.
define float @test_f32(ptr %A, ptr %B, ptr %C) nounwind {
;CHECK-LABEL: test_f32:
;CHECK: fmadd
  %tmp1 = load float, ptr %A
  %tmp2 = load float, ptr %B
  %tmp3 = load float, ptr %C
  %tmp4 = call float @llvm.fmuladd.f32(float %tmp1, float %tmp2, float %tmp3)
  ret float %tmp4
}
; 64-bit vector: fmuladd must lower to fmla.2s (fused multiply-accumulate).
define <2 x float> @test_v2f32(ptr %A, ptr %B, ptr %C) nounwind {
;CHECK-LABEL: test_v2f32:
;CHECK: fmla.2s
  %tmp1 = load <2 x float>, ptr %A
  %tmp2 = load <2 x float>, ptr %B
  %tmp3 = load <2 x float>, ptr %C
  %tmp4 = call <2 x float> @llvm.fmuladd.v2f32(<2 x float> %tmp1, <2 x float> %tmp2, <2 x float> %tmp3)
  ret <2 x float> %tmp4
}
; 128-bit vector: fmuladd must lower to fmla.4s.
define <4 x float> @test_v4f32(ptr %A, ptr %B, ptr %C) nounwind {
;CHECK-LABEL: test_v4f32:
;CHECK: fmla.4s
  %tmp1 = load <4 x float>, ptr %A
  %tmp2 = load <4 x float>, ptr %B
  %tmp3 = load <4 x float>, ptr %C
  %tmp4 = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> %tmp1, <4 x float> %tmp2, <4 x float> %tmp3)
  ret <4 x float> %tmp4
}
; <8 x float> is split into two q-registers, so expect two fmla.4s.
define <8 x float> @test_v8f32(ptr %A, ptr %B, ptr %C) nounwind {
;CHECK-LABEL: test_v8f32:
;CHECK: fmla.4s
;CHECK: fmla.4s
  %tmp1 = load <8 x float>, ptr %A
  %tmp2 = load <8 x float>, ptr %B
  %tmp3 = load <8 x float>, ptr %C
  %tmp4 = call <8 x float> @llvm.fmuladd.v8f32(<8 x float> %tmp1, <8 x float> %tmp2, <8 x float> %tmp3)
  ret <8 x float> %tmp4
}
; Scalar f64: fmuladd must lower to a single fused fmadd.
define double @test_f64(ptr %A, ptr %B, ptr %C) nounwind {
;CHECK-LABEL: test_f64:
;CHECK: fmadd
  %tmp1 = load double, ptr %A
  %tmp2 = load double, ptr %B
  %tmp3 = load double, ptr %C
  %tmp4 = call double @llvm.fmuladd.f64(double %tmp1, double %tmp2, double %tmp3)
  ret double %tmp4
}
; 128-bit double vector: fmuladd must lower to fmla.2d.
define <2 x double> @test_v2f64(ptr %A, ptr %B, ptr %C) nounwind {
;CHECK-LABEL: test_v2f64:
;CHECK: fmla.2d
  %tmp1 = load <2 x double>, ptr %A
  %tmp2 = load <2 x double>, ptr %B
  %tmp3 = load <2 x double>, ptr %C
  %tmp4 = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> %tmp1, <2 x double> %tmp2, <2 x double> %tmp3)
  ret <2 x double> %tmp4
}
; <4 x double> is split into two q-registers, so expect two fmla.2d.
define <4 x double> @test_v4f64(ptr %A, ptr %B, ptr %C) nounwind {
;CHECK-LABEL: test_v4f64:
;CHECK: fmla.2d
;CHECK: fmla.2d
  %tmp1 = load <4 x double>, ptr %A
  %tmp2 = load <4 x double>, ptr %B
  %tmp3 = load <4 x double>, ptr %C
  %tmp4 = call <4 x double> @llvm.fmuladd.v4f64(<4 x double> %tmp1, <4 x double> %tmp2, <4 x double> %tmp3)
  ret <4 x double> %tmp4
}
; Intrinsic declarations for every fmuladd width exercised above.
declare float @llvm.fmuladd.f32(float, float, float) nounwind readnone
declare <2 x float> @llvm.fmuladd.v2f32(<2 x float>, <2 x float>, <2 x float>) nounwind readnone
declare <4 x float> @llvm.fmuladd.v4f32(<4 x float>, <4 x float>, <4 x float>) nounwind readnone
declare <8 x float> @llvm.fmuladd.v8f32(<8 x float>, <8 x float>, <8 x float>) nounwind readnone
declare double @llvm.fmuladd.f64(double, double, double) nounwind readnone
declare <2 x double> @llvm.fmuladd.v2f64(<2 x double>, <2 x double>, <2 x double>) nounwind readnone
declare <4 x double> @llvm.fmuladd.v4f64(<4 x double>, <4 x double>, <4 x double>) nounwind readnone