; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512fp16 -mattr=+avx512vl -O3 | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512fp16 -mattr=+avx512vl -O3 | FileCheck %s
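
; These tests exercise strict-FP lowering of the constrained FP intrinsics to 512-bit
; AVX512FP16 instructions. "round.dynamic" and "fpexcept.strict" (together with the
; strictfp attribute at the bottom of the file) keep -O3 from relaxing the operations.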
declare <32 x half> @llvm.experimental.constrained.fadd.v32f16(<32 x half>, <32 x half>, metadata, metadata)
declare <32 x half> @llvm.experimental.constrained.fsub.v32f16(<32 x half>, <32 x half>, metadata, metadata)
declare <32 x half> @llvm.experimental.constrained.fmul.v32f16(<32 x half>, <32 x half>, metadata, metadata)
declare <32 x half> @llvm.experimental.constrained.fdiv.v32f16(<32 x half>, <32 x half>, metadata, metadata)
declare <32 x half> @llvm.experimental.constrained.sqrt.v32f16(<32 x half>, metadata, metadata)
declare <8 x double> @llvm.experimental.constrained.fpext.v8f64.v8f16(<8 x half>, metadata)
declare <16 x float> @llvm.experimental.constrained.fpext.v16f32.v16f16(<16 x half>, metadata)
declare <8 x half> @llvm.experimental.constrained.fptrunc.v8f16.v8f64(<8 x double>, metadata, metadata)
declare <16 x half> @llvm.experimental.constrained.fptrunc.v16f16.v16f32(<16 x float>, metadata, metadata)
declare <32 x half> @llvm.experimental.constrained.ceil.v32f16(<32 x half>, metadata)
declare <32 x half> @llvm.experimental.constrained.floor.v32f16(<32 x half>, metadata)
declare <32 x half> @llvm.experimental.constrained.trunc.v32f16(<32 x half>, metadata)
declare <32 x half> @llvm.experimental.constrained.rint.v32f16(<32 x half>, metadata, metadata)
declare <32 x half> @llvm.experimental.constrained.nearbyint.v32f16(<32 x half>, metadata, metadata)

define <32 x half> @f2(<32 x half> %a, <32 x half> %b) #0 {
; CHECK-LABEL: f2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vaddph %zmm1, %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <32 x half> @llvm.experimental.constrained.fadd.v32f16(<32 x half> %a, <32 x half> %b,
                                                                     metadata !"round.dynamic",
                                                                     metadata !"fpexcept.strict") #0
  ret <32 x half> %ret
}

define <32 x half> @f4(<32 x half> %a, <32 x half> %b) #0 {
; CHECK-LABEL: f4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsubph %zmm1, %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <32 x half> @llvm.experimental.constrained.fsub.v32f16(<32 x half> %a, <32 x half> %b,
                                                                     metadata !"round.dynamic",
                                                                     metadata !"fpexcept.strict") #0
  ret <32 x half> %ret
}

define <32 x half> @f6(<32 x half> %a, <32 x half> %b) #0 {
; CHECK-LABEL: f6:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmulph %zmm1, %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <32 x half> @llvm.experimental.constrained.fmul.v32f16(<32 x half> %a, <32 x half> %b,
                                                                     metadata !"round.dynamic",
                                                                     metadata !"fpexcept.strict") #0
  ret <32 x half> %ret
}

define <32 x half> @f8(<32 x half> %a, <32 x half> %b) #0 {
; CHECK-LABEL: f8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vdivph %zmm1, %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <32 x half> @llvm.experimental.constrained.fdiv.v32f16(<32 x half> %a, <32 x half> %b,
                                                                     metadata !"round.dynamic",
                                                                     metadata !"fpexcept.strict") #0
  ret <32 x half> %ret
}

define <32 x half> @f10(<32 x half> %a) #0 {
; CHECK-LABEL: f10:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsqrtph %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <32 x half> @llvm.experimental.constrained.sqrt.v32f16(<32 x half> %a,
                                                                     metadata !"round.dynamic",
                                                                     metadata !"fpexcept.strict") #0
  ret <32 x half> %ret
}

define <8 x double> @f11(<8 x half> %a) #0 {
; CHECK-LABEL: f11:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vcvtph2pd %xmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <8 x double> @llvm.experimental.constrained.fpext.v8f64.v8f16(<8 x half> %a,
                                                                            metadata !"fpexcept.strict") #0
  ret <8 x double> %ret
}

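; Narrowing v8f64 to v8f16 leaves only an XMM value live at the return, so the
; backend emits VZEROUPPER to avoid AVX/SSE transition penalties.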
define <8 x half> @f12(<8 x double> %a) #0 {
; CHECK-LABEL: f12:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vcvtpd2ph %zmm0, %xmm0
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <8 x half> @llvm.experimental.constrained.fptrunc.v8f16.v8f64(<8 x double> %a,
                                                                            metadata !"round.dynamic",
                                                                            metadata !"fpexcept.strict") #0
  ret <8 x half> %ret
}

define <16 x float> @f14(<16 x half> %a) #0 {
; CHECK-LABEL: f14:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vcvtph2psx %ymm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <16 x float> @llvm.experimental.constrained.fpext.v16f32.v16f16(<16 x half> %a,
                                                                              metadata !"fpexcept.strict") #0
  ret <16 x float> %ret
}

define <16 x half> @f15(<16 x float> %a) #0 {
; CHECK-LABEL: f15:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vcvtps2phx %zmm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <16 x half> @llvm.experimental.constrained.fptrunc.v16f16.v16f32(<16 x float> %a,
                                                                               metadata !"round.dynamic",
                                                                               metadata !"fpexcept.strict") #0
  ret <16 x half> %ret
}

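; The rounding tests below all lower to VRNDSCALEPH. In the immediate, bits 1:0 give
; the rounding mode (01 = down, 10 = up, 11 = truncate), bit 2 selects the MXCSR
; rounding mode instead, and bit 3 suppresses the precision (inexact) exception:
; hence $10 for ceil, $9 for floor, $11 for trunc, $4 for rint (MXCSR rounding,
; inexact allowed) and $12 for nearbyint (MXCSR rounding, inexact suppressed).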
define <32 x half> @strict_vector_fceil_v32f16(<32 x half> %f) #0 {
; CHECK-LABEL: strict_vector_fceil_v32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vrndscaleph $10, %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <32 x half> @llvm.experimental.constrained.ceil.v32f16(<32 x half> %f, metadata !"fpexcept.strict") #0
  ret <32 x half> %res
}

define <32 x half> @strict_vector_ffloor_v32f16(<32 x half> %f) #0 {
; CHECK-LABEL: strict_vector_ffloor_v32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vrndscaleph $9, %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <32 x half> @llvm.experimental.constrained.floor.v32f16(<32 x half> %f, metadata !"fpexcept.strict") #0
  ret <32 x half> %res
}

define <32 x half> @strict_vector_ftrunc_v32f16(<32 x half> %f) #0 {
; CHECK-LABEL: strict_vector_ftrunc_v32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vrndscaleph $11, %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <32 x half> @llvm.experimental.constrained.trunc.v32f16(<32 x half> %f, metadata !"fpexcept.strict") #0
  ret <32 x half> %res
}

define <32 x half> @strict_vector_frint_v32f16(<32 x half> %f) #0 {
; CHECK-LABEL: strict_vector_frint_v32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vrndscaleph $4, %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <32 x half> @llvm.experimental.constrained.rint.v32f16(<32 x half> %f,
                                                                     metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret <32 x half> %res
}

define <32 x half> @strict_vector_fnearbyint_v32f16(<32 x half> %f) #0 {
; CHECK-LABEL: strict_vector_fnearbyint_v32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vrndscaleph $12, %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <32 x half> @llvm.experimental.constrained.nearbyint.v32f16(<32 x half> %f,
                                                                          metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret <32 x half> %res
}

attributes #0 = { strictfp }