1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=avx512f | FileCheck %s
4 declare <4 x float> @llvm.x86.avx512.mask3.vfmadd.ss(<4 x float>, <4 x float>, <4 x float>, i8, i32)
5 declare <2 x double> @llvm.x86.avx512.mask3.vfmadd.sd(<2 x double>, <2 x double>, <2 x double>, i8, i32)
6 declare <4 x float> @llvm.x86.avx512.mask3.vfmsub.ss(<4 x float>, <4 x float>, <4 x float>, i8, i32)
7 declare <2 x double> @llvm.x86.avx512.mask3.vfmsub.sd(<2 x double>, <2 x double>, <2 x double>, i8, i32)
; Unmasked (i8 -1) mask3 vfmadd.ss with the multiplicand loaded from memory;
; folds into vfmadd231ss with a memory operand.
define <4 x float> @test_int_x86_avx512_mask3_vfmadd_ss_load0(ptr %x0ptr, <4 x float> %x1, <4 x float> %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmadd_ss_load0:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vfmadd231ss {{.*#+}} xmm1 = (xmm0 * mem) + xmm1
; CHECK-NEXT:    vmovaps %xmm1, %xmm0
; CHECK-NEXT:    retq
  %x0 = load <4 x float>, ptr %x0ptr
  %res = call <4 x float> @llvm.x86.avx512.mask3.vfmadd.ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1, i32 4)
  ret <4 x float> %res
}
; Unmasked mask3 vfmadd.ss with the second operand loaded from memory.
define <4 x float> @test_int_x86_avx512_mask3_vfmadd_ss_load1(<4 x float> %x0, ptr %x1ptr, <4 x float> %x2){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmadd_ss_load1:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vfmadd231ss {{.*#+}} xmm1 = (xmm0 * mem) + xmm1
; CHECK-NEXT:    vmovaps %xmm1, %xmm0
; CHECK-NEXT:    retq
  %x1 = load <4 x float>, ptr %x1ptr
  %res = call <4 x float> @llvm.x86.avx512.mask3.vfmadd.ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1, i32 4)
  ret <4 x float> %res
}
; Unmasked mask3 vfmadd.sd with the multiplicand loaded from memory.
define <2 x double> @test_int_x86_avx512_mask3_vfmadd_sd_load0(ptr %x0ptr, <2 x double> %x1, <2 x double> %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmadd_sd_load0:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vfmadd231sd {{.*#+}} xmm1 = (xmm0 * mem) + xmm1
; CHECK-NEXT:    vmovapd %xmm1, %xmm0
; CHECK-NEXT:    retq
  %x0 = load <2 x double>, ptr %x0ptr
  %res = call <2 x double> @llvm.x86.avx512.mask3.vfmadd.sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 -1, i32 4)
  ret <2 x double> %res
}
; Unmasked mask3 vfmadd.sd with the second operand loaded from memory.
define <2 x double> @test_int_x86_avx512_mask3_vfmadd_sd_load1(<2 x double> %x0, ptr %x1ptr, <2 x double> %x2){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmadd_sd_load1:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vfmadd231sd {{.*#+}} xmm1 = (xmm0 * mem) + xmm1
; CHECK-NEXT:    vmovapd %xmm1, %xmm0
; CHECK-NEXT:    retq
  %x1 = load <2 x double>, ptr %x1ptr
  %res = call <2 x double> @llvm.x86.avx512.mask3.vfmadd.sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 -1, i32 4)
  ret <2 x double> %res
}
; Unmasked mask3 vfmsub.ss with the multiplicand loaded from memory.
define <4 x float> @test_int_x86_avx512_mask3_vfmsub_ss_load0(ptr %x0ptr, <4 x float> %x1, <4 x float> %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsub_ss_load0:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vfmsub231ss {{.*#+}} xmm1 = (xmm0 * mem) - xmm1
; CHECK-NEXT:    vmovaps %xmm1, %xmm0
; CHECK-NEXT:    retq
  %x0 = load <4 x float>, ptr %x0ptr
  %res = call <4 x float> @llvm.x86.avx512.mask3.vfmsub.ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1, i32 4)
  ret <4 x float> %res
}
; Unmasked mask3 vfmsub.ss with the second operand loaded from memory.
define <4 x float> @test_int_x86_avx512_mask3_vfmsub_ss_load1(<4 x float> %x0, ptr %x1ptr, <4 x float> %x2){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsub_ss_load1:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vfmsub231ss {{.*#+}} xmm1 = (xmm0 * mem) - xmm1
; CHECK-NEXT:    vmovaps %xmm1, %xmm0
; CHECK-NEXT:    retq
  %x1 = load <4 x float>, ptr %x1ptr
  %res = call <4 x float> @llvm.x86.avx512.mask3.vfmsub.ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1, i32 4)
  ret <4 x float> %res
}
; Unmasked mask3 vfmsub.sd with the multiplicand loaded from memory.
define <2 x double> @test_int_x86_avx512_mask3_vfmsub_sd_load0(ptr %x0ptr, <2 x double> %x1, <2 x double> %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsub_sd_load0:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vfmsub231sd {{.*#+}} xmm1 = (xmm0 * mem) - xmm1
; CHECK-NEXT:    vmovapd %xmm1, %xmm0
; CHECK-NEXT:    retq
  %x0 = load <2 x double>, ptr %x0ptr
  %res = call <2 x double> @llvm.x86.avx512.mask3.vfmsub.sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 -1, i32 4)
  ret <2 x double> %res
}
; Unmasked mask3 vfmsub.sd with the second operand loaded from memory.
define <2 x double> @test_int_x86_avx512_mask3_vfmsub_sd_load1(<2 x double> %x0, ptr %x1ptr, <2 x double> %x2){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsub_sd_load1:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vfmsub231sd {{.*#+}} xmm1 = (xmm0 * mem) - xmm1
; CHECK-NEXT:    vmovapd %xmm1, %xmm0
; CHECK-NEXT:    retq
  %x1 = load <2 x double>, ptr %x1ptr
  %res = call <2 x double> @llvm.x86.avx512.mask3.vfmsub.sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 -1, i32 4)
  ret <2 x double> %res
}