; RUN: llc -march=amdgcn -mcpu=tahiti -denormal-fp-math-f32=preserve-sign -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -denormal-fp-math-f32=preserve-sign -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GFX8_9_10 %s
; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=-flat-for-global -denormal-fp-math-f32=preserve-sign -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GFX8_9_10 %s
; RUN: llc -march=amdgcn -mcpu=gfx1010 -mattr=-flat-for-global -denormal-fp-math-f32=preserve-sign -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GFX8_9_10 %s

; Make sure fdiv is promoted to f32.
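; The GFX8_9_10 checks for the plain case below correspond, in effect, to this
; expansion (an illustrative IR sketch assuming the overloaded llvm.amdgcn.rcp
; and llvm.amdgcn.div.fixup intrinsics; it is not literal compiler output):
;   %lhs.f32 = fpext half %lhs to float
;   %rhs.f32 = fpext half %rhs to float
;   %rcp     = call float @llvm.amdgcn.rcp.f32(float %rhs.f32)
;   %mul     = fmul float %lhs.f32, %rcp
;   %cvt     = fptrunc float %mul to half
;   %res     = call half @llvm.amdgcn.div.fixup.f16(half %cvt, half %rhs, half %lhs)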

; GCN-LABEL: {{^}}v_fdiv_f16:
; SI-DAG: v_div_scale_f32

; GFX8_9_10: {{flat|global}}_load_ushort [[LHS:v[0-9]+]]
; GFX8_9_10: {{flat|global}}_load_ushort [[RHS:v[0-9]+]]

; GFX8_9_10-DAG: v_cvt_f32_f16_e32 [[CVT_LHS:v[0-9]+]], [[LHS]]
; GFX8_9_10-DAG: v_cvt_f32_f16_e32 [[CVT_RHS:v[0-9]+]], [[RHS]]

; GFX8_9_10-DAG: v_rcp_f32_e32 [[RCP_RHS:v[0-9]+]], [[CVT_RHS]]
; GFX8_9_10: v_mul_f32_e32 [[MUL:v[0-9]+]], [[CVT_LHS]], [[RCP_RHS]]
; GFX8_9_10: v_cvt_f16_f32_e32 [[CVT_BACK:v[0-9]+]], [[MUL]]
; GFX8_9_10: v_div_fixup_f16 [[RESULT:v[0-9]+]], [[CVT_BACK]], [[RHS]], [[LHS]]
; GFX8_9_10: {{flat|global}}_store_short v{{.+}}, [[RESULT]]
define amdgpu_kernel void @v_fdiv_f16(
    half addrspace(1)* %r,
    half addrspace(1)* %a,
    half addrspace(1)* %b) #0 {
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %gep.a = getelementptr inbounds half, half addrspace(1)* %a, i64 %tid.ext
  %gep.b = getelementptr inbounds half, half addrspace(1)* %b, i64 %tid.ext
  %gep.r = getelementptr inbounds half, half addrspace(1)* %r, i64 %tid.ext
  %a.val = load volatile half, half addrspace(1)* %gep.a
  %b.val = load volatile half, half addrspace(1)* %gep.b
  %r.val = fdiv half %a.val, %b.val
  store half %r.val, half addrspace(1)* %gep.r
  ret void
}

; GCN-LABEL: {{^}}v_rcp_f16:
; GFX8_9_10: {{flat|global}}_load_ushort [[VAL:v[0-9]+]]
; GFX8_9_10-NOT: [[VAL]]
; GFX8_9_10: v_rcp_f16_e32 [[RESULT:v[0-9]+]], [[VAL]]
; GFX8_9_10-NOT: [[RESULT]]
; GFX8_9_10: {{flat|global}}_store_short v{{.+}}, [[RESULT]]
define amdgpu_kernel void @v_rcp_f16(half addrspace(1)* %r, half addrspace(1)* %b) #0 {
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %gep.b = getelementptr inbounds half, half addrspace(1)* %b, i64 %tid.ext
  %gep.r = getelementptr inbounds half, half addrspace(1)* %r, i64 %tid.ext
  %b.val = load volatile half, half addrspace(1)* %gep.b
  %r.val = fdiv half 1.0, %b.val, !fpmath !0
  store half %r.val, half addrspace(1)* %gep.r
  ret void
}
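
; fabs/fneg of the source fold into VOP3 source modifiers (|v| and -v), so the
; _abs and _neg variants below still lower to a single v_rcp_f16_e64.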

; GCN-LABEL: {{^}}v_rcp_f16_abs:
; GFX8_9_10: {{flat|global}}_load_ushort [[VAL:v[0-9]+]]
; GFX8_9_10-NOT: [[VAL]]
; GFX8_9_10: v_rcp_f16_e64 [[RESULT:v[0-9]+]], |[[VAL]]|
; GFX8_9_10-NOT: [[RESULT]]
; GFX8_9_10: {{flat|global}}_store_short v{{.+}}, [[RESULT]]
define amdgpu_kernel void @v_rcp_f16_abs(half addrspace(1)* %r, half addrspace(1)* %b) #0 {
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %gep.b = getelementptr inbounds half, half addrspace(1)* %b, i64 %tid.ext
  %gep.r = getelementptr inbounds half, half addrspace(1)* %r, i64 %tid.ext
  %b.val = load volatile half, half addrspace(1)* %gep.b
  %b.abs = call half @llvm.fabs.f16(half %b.val)
  %r.val = fdiv half 1.0, %b.abs, !fpmath !0
  store half %r.val, half addrspace(1)* %gep.r
  ret void
}

; We cannot do 1/b -> rcp_f16(b) when the accuracy required by !fpmath is
; tighter than 1 ulp; with no !fpmath at all, the fixup expansion is used.
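; For contrast (illustrative only, not checked output):
;   %fast = fdiv half 1.0, %b, !fpmath !0   ; 2.5 ulp allowed -> v_rcp_f16
;   %slow = fdiv half 1.0, %b               ; default accuracy -> v_div_fixup_f16 path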

; GCN-LABEL: {{^}}reciprocal_f16_rounded:
; GFX8_9_10: {{flat|global}}_load_ushort [[VAL16:v[0-9]+]], v{{.+}}
; GFX8_9_10: v_cvt_f32_f16_e32 [[CVT_TO32:v[0-9]+]], [[VAL16]]
; GFX8_9_10: v_rcp_f32_e32 [[RCP32:v[0-9]+]], [[CVT_TO32]]
; GFX8_9_10: v_cvt_f16_f32_e32 [[CVT_BACK16:v[0-9]+]], [[RCP32]]
; GFX8_9_10: v_div_fixup_f16 [[RESULT:v[0-9]+]], [[CVT_BACK16]], [[VAL16]], 1.0
; GFX8_9_10: {{flat|global}}_store_short v{{.+}}, [[RESULT]]
define amdgpu_kernel void @reciprocal_f16_rounded(half addrspace(1)* %r, half addrspace(1)* %b) #0 {
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %gep.b = getelementptr inbounds half, half addrspace(1)* %b, i64 %tid.ext
  %gep.r = getelementptr inbounds half, half addrspace(1)* %r, i64 %tid.ext
  %b.val = load volatile half, half addrspace(1)* %gep.b
  %r.val = fdiv half 1.0, %b.val
  store half %r.val, half addrspace(1)* %gep.r
  ret void
}

; GCN-LABEL: {{^}}v_rcp_f16_afn:
; GFX8_9_10: {{flat|global}}_load_ushort [[VAL:v[0-9]+]]
; GFX8_9_10-NOT: [[VAL]]
; GFX8_9_10: v_rcp_f16_e32 [[RESULT:v[0-9]+]], [[VAL]]
; GFX8_9_10-NOT: [[RESULT]]
; GFX8_9_10: {{flat|global}}_store_short v{{.+}}, [[RESULT]]
define amdgpu_kernel void @v_rcp_f16_afn(half addrspace(1)* %r, half addrspace(1)* %b) #0 {
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %gep.b = getelementptr inbounds half, half addrspace(1)* %b, i64 %tid.ext
  %gep.r = getelementptr inbounds half, half addrspace(1)* %r, i64 %tid.ext
  %b.val = load volatile half, half addrspace(1)* %gep.b
  %r.val = fdiv afn half 1.0, %b.val, !fpmath !0
  store half %r.val, half addrspace(1)* %gep.r
  ret void
}

; GCN-LABEL: {{^}}v_rcp_f16_neg:
; GFX8_9_10: {{flat|global}}_load_ushort [[VAL:v[0-9]+]]
; GFX8_9_10-NOT: [[VAL]]
; GFX8_9_10: v_rcp_f16_e64 [[RESULT:v[0-9]+]], -[[VAL]]
; GFX8_9_10-NOT: [[RESULT]]
; GFX8_9_10: {{flat|global}}_store_short v{{.+}}, [[RESULT]]
define amdgpu_kernel void @v_rcp_f16_neg(half addrspace(1)* %r, half addrspace(1)* %b) #0 {
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %gep.b = getelementptr inbounds half, half addrspace(1)* %b, i64 %tid.ext
  %gep.r = getelementptr inbounds half, half addrspace(1)* %r, i64 %tid.ext
  %b.val = load volatile half, half addrspace(1)* %gep.b
  %r.val = fdiv half -1.0, %b.val, !fpmath !0
  store half %r.val, half addrspace(1)* %gep.r
  ret void
}

; GCN-LABEL: {{^}}v_rsq_f16:
; GFX8_9_10: {{flat|global}}_load_ushort [[VAL:v[0-9]+]]
; GFX8_9_10-NOT: [[VAL]]
; GFX8_9_10: v_rsq_f16_e32 [[RESULT:v[0-9]+]], [[VAL]]
; GFX8_9_10-NOT: [[RESULT]]
; GFX8_9_10: {{flat|global}}_store_short v{{.+}}, [[RESULT]]
define amdgpu_kernel void @v_rsq_f16(half addrspace(1)* %r, half addrspace(1)* %b) #0 {
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %gep.b = getelementptr inbounds half, half addrspace(1)* %b, i64 %tid.ext
  %gep.r = getelementptr inbounds half, half addrspace(1)* %r, i64 %tid.ext
  %b.val = load volatile half, half addrspace(1)* %gep.b
  %b.sqrt = call half @llvm.sqrt.f16(half %b.val)
  %r.val = fdiv half 1.0, %b.sqrt, !fpmath !0
  store half %r.val, half addrspace(1)* %gep.r
  ret void
}
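
; With !fpmath, 1.0/sqrt(b) above folds to a single v_rsq_f16. A negated
; numerator (below) blocks that fold; the negation is instead absorbed into
; the rcp source modifier: v_sqrt_f16 followed by v_rcp_f16_e64 on -[src].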

; GCN-LABEL: {{^}}v_rsq_f16_neg:
; GFX8_9_10: {{flat|global}}_load_ushort [[VAL:v[0-9]+]]
; GFX8_9_10-NOT: [[VAL]]
; GFX8_9_10: v_sqrt_f16_e32 [[SQRT:v[0-9]+]], [[VAL]]
; GFX8_9_10-NEXT: v_rcp_f16_e64 [[RESULT:v[0-9]+]], -[[SQRT]]
; GFX8_9_10-NOT: [[RESULT]]
; GFX8_9_10: {{flat|global}}_store_short v{{.+}}, [[RESULT]]
define amdgpu_kernel void @v_rsq_f16_neg(half addrspace(1)* %r, half addrspace(1)* %b) #0 {
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %gep.b = getelementptr inbounds half, half addrspace(1)* %b, i64 %tid.ext
  %gep.r = getelementptr inbounds half, half addrspace(1)* %r, i64 %tid.ext
  %b.val = load volatile half, half addrspace(1)* %gep.b
  %b.sqrt = call half @llvm.sqrt.f16(half %b.val)
  %r.val = fdiv half -1.0, %b.sqrt, !fpmath !0
  store half %r.val, half addrspace(1)* %gep.r
  ret void
}
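
; Under 'afn' (and under "unsafe-fp-math", tested further below), an f16 a/b
; is emitted entirely in f16 as a * rcp(b) with no fixup; roughly (a sketch
; assuming the overloaded llvm.amdgcn.rcp intrinsic, not checked output):
;   %rcp = call half @llvm.amdgcn.rcp.f16(half %b.val)
;   %r.val = fmul half %a.val, %rcp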

; GCN-LABEL: {{^}}v_fdiv_f16_afn:
; GFX8_9_10: {{flat|global}}_load_ushort [[LHS:v[0-9]+]]
; GFX8_9_10: {{flat|global}}_load_ushort [[RHS:v[0-9]+]]

; GFX8_9_10: v_rcp_f16_e32 [[RCP:v[0-9]+]], [[RHS]]
; GFX8_9_10: v_mul_f16_e32 [[RESULT:v[0-9]+]], [[LHS]], [[RCP]]

; GFX8_9_10: {{flat|global}}_store_short v{{.+}}, [[RESULT]]
define amdgpu_kernel void @v_fdiv_f16_afn(half addrspace(1)* %r, half addrspace(1)* %a, half addrspace(1)* %b) #0 {
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %gep.a = getelementptr inbounds half, half addrspace(1)* %a, i64 %tid.ext
  %gep.b = getelementptr inbounds half, half addrspace(1)* %b, i64 %tid.ext
  %gep.r = getelementptr inbounds half, half addrspace(1)* %r, i64 %tid.ext
  %a.val = load volatile half, half addrspace(1)* %gep.a
  %b.val = load volatile half, half addrspace(1)* %gep.b
  %r.val = fdiv afn half %a.val, %b.val
  store half %r.val, half addrspace(1)* %gep.r
  ret void
}

; GCN-LABEL: {{^}}v_fdiv_f16_unsafe:
; GFX8_9_10: {{flat|global}}_load_ushort [[LHS:v[0-9]+]]
; GFX8_9_10: {{flat|global}}_load_ushort [[RHS:v[0-9]+]]

; GFX8_9_10: v_rcp_f16_e32 [[RCP:v[0-9]+]], [[RHS]]
; GFX8_9_10: v_mul_f16_e32 [[RESULT:v[0-9]+]], [[LHS]], [[RCP]]

; GFX8_9_10: {{flat|global}}_store_short v{{.+}}, [[RESULT]]
define amdgpu_kernel void @v_fdiv_f16_unsafe(half addrspace(1)* %r, half addrspace(1)* %a, half addrspace(1)* %b) #2 {
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %gep.a = getelementptr inbounds half, half addrspace(1)* %a, i64 %tid.ext
  %gep.b = getelementptr inbounds half, half addrspace(1)* %b, i64 %tid.ext
  %gep.r = getelementptr inbounds half, half addrspace(1)* %r, i64 %tid.ext
  %a.val = load volatile half, half addrspace(1)* %gep.a
  %b.val = load volatile half, half addrspace(1)* %gep.b
  %r.val = fdiv half %a.val, %b.val
  store half %r.val, half addrspace(1)* %gep.r
  ret void
}

; SI-LABEL: {{^}}div_afn_2_x_pat_f16:
; SI: v_mul_f32_e32 v{{[0-9]+}}, 0.5, v{{[0-9]+}}

; GFX8_9_10: v_mul_f16_e32 [[MUL:v[0-9]+]], 0.5, v{{[0-9]+}}
; GFX8_9_10: buffer_store_short [[MUL]]
define amdgpu_kernel void @div_afn_2_x_pat_f16(half addrspace(1)* %out) #0 {
  %x = load half, half addrspace(1)* undef
  %rcp = fdiv afn half %x, 2.0
  store half %rcp, half addrspace(1)* %out, align 4
  ret void
}
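
; x/k folds to x * (1/k) under 'afn'. In the checks below, 0x3dcccccd is
; float(0.1) (SI promotes f16 to f32) and 0x2e66 is half(0.1); 0xbdcccccd and
; 0xae66 are the corresponding encodings of -0.1.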

; SI-LABEL: {{^}}div_afn_k_x_pat_f16:
; SI: v_mul_f32_e32 v{{[0-9]+}}, 0x3dcccccd, v{{[0-9]+}}

; GFX8_9_10: v_mul_f16_e32 [[MUL:v[0-9]+]], 0x2e66, v{{[0-9]+}}
; GFX8_9_10: buffer_store_short [[MUL]]
define amdgpu_kernel void @div_afn_k_x_pat_f16(half addrspace(1)* %out) #0 {
  %x = load half, half addrspace(1)* undef
  %rcp = fdiv afn half %x, 10.0
  store half %rcp, half addrspace(1)* %out, align 4
  ret void
}

; SI-LABEL: {{^}}div_afn_neg_k_x_pat_f16:
; SI: v_mul_f32_e32 v{{[0-9]+}}, 0xbdcccccd, v{{[0-9]+}}

; GFX8_9_10: v_mul_f16_e32 [[MUL:v[0-9]+]], 0xae66, v{{[0-9]+}}
; GFX8_9_10: buffer_store_short [[MUL]]
define amdgpu_kernel void @div_afn_neg_k_x_pat_f16(half addrspace(1)* %out) #0 {
  %x = load half, half addrspace(1)* undef
  %rcp = fdiv afn half %x, -10.0
  store half %rcp, half addrspace(1)* %out, align 4
  ret void
}

declare i32 @llvm.amdgcn.workitem.id.x() #1
declare half @llvm.sqrt.f16(half) #1
declare half @llvm.fabs.f16(half) #1

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
attributes #2 = { nounwind "unsafe-fp-math"="true" }
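
; !0 permits 2.5 ulp of error on the annotated fdiv, which is what allows the
; 1.0/x cases above to select v_rcp_f16 instead of the full fixup expansion.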
!0 = !{float 2.500000e+00}