1 ; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
2 ; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -mattr=+fp16-denormals -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GFX8_9_10 %s
3 ; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -mattr=-fp16-denormals -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GFX8_9_10 %s
4 ; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GFX8_9_10 %s
5 ; RUN: llc -march=amdgcn -mcpu=gfx1010 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GFX8_9_10 %s
7 ; Make sure fdiv is promoted to f32.
9 ; GCN-LABEL: {{^}}v_fdiv_f16:
13 ; SI-DAG: v_div_scale_f32
25 ; GFX8_9_10: {{flat|global}}_load_ushort [[LHS:v[0-9]+]]
26 ; GFX8_9_10: {{flat|global}}_load_ushort [[RHS:v[0-9]+]]
28 ; GFX8_9_10-DAG: v_cvt_f32_f16_e32 [[CVT_LHS:v[0-9]+]], [[LHS]]
29 ; GFX8_9_10-DAG: v_cvt_f32_f16_e32 [[CVT_RHS:v[0-9]+]], [[RHS]]
31 ; GFX8_9_10-DAG: v_rcp_f32_e32 [[RCP_RHS:v[0-9]+]], [[CVT_RHS]]
32 ; GFX8_9_10: v_mul_f32_e32 [[MUL:v[0-9]+]], [[CVT_LHS]], [[RCP_RHS]]
33 ; GFX8_9_10: v_cvt_f16_f32_e32 [[CVT_BACK:v[0-9]+]], [[MUL]]
34 ; GFX8_9_10: v_div_fixup_f16 [[RESULT:v[0-9]+]], [[CVT_BACK]], [[RHS]], [[LHS]]
35 ; GFX8_9_10: {{flat|global}}_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
36 define amdgpu_kernel void @v_fdiv_f16(
37 half addrspace(1)* %r,
38 half addrspace(1)* %a,
39 half addrspace(1)* %b) #0 {
41 %tid = call i32 @llvm.amdgcn.workitem.id.x()
42 %tid.ext = sext i32 %tid to i64
43 %gep.a = getelementptr inbounds half, half addrspace(1)* %a, i64 %tid.ext
44 %gep.b = getelementptr inbounds half, half addrspace(1)* %b, i64 %tid.ext
45 %gep.r = getelementptr inbounds half, half addrspace(1)* %r, i64 %tid.ext
46 %a.val = load volatile half, half addrspace(1)* %gep.a
47 %b.val = load volatile half, half addrspace(1)* %gep.b
48 %r.val = fdiv half %a.val, %b.val
49 store half %r.val, half addrspace(1)* %gep.r
53 ; GCN-LABEL: {{^}}v_rcp_f16:
54 ; GFX8_9_10: {{flat|global}}_load_ushort [[VAL:v[0-9]+]]
55 ; GFX8_9_10-NOT: [[VAL]]
56 ; GFX8_9_10: v_rcp_f16_e32 [[RESULT:v[0-9]+]], [[VAL]]
57 ; GFX8_9_10-NOT: [[RESULT]]
58 ; GFX8_9_10: {{flat|global}}_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
59 define amdgpu_kernel void @v_rcp_f16(half addrspace(1)* %r, half addrspace(1)* %b) #0 {
61 %tid = call i32 @llvm.amdgcn.workitem.id.x()
62 %tid.ext = sext i32 %tid to i64
63 %gep.b = getelementptr inbounds half, half addrspace(1)* %b, i64 %tid.ext
64 %gep.r = getelementptr inbounds half, half addrspace(1)* %r, i64 %tid.ext
65 %b.val = load volatile half, half addrspace(1)* %gep.b
66 %r.val = fdiv half 1.0, %b.val
67 store half %r.val, half addrspace(1)* %gep.r
71 ; GCN-LABEL: {{^}}v_rcp_f16_abs:
72 ; GFX8_9_10: {{flat|global}}_load_ushort [[VAL:v[0-9]+]]
73 ; GFX8_9_10-NOT: [[VAL]]
74 ; GFX8_9_10: v_rcp_f16_e64 [[RESULT:v[0-9]+]], |[[VAL]]|
75 ; GFX8_9_10-NOT: [[RESULT]]
76 ; GFX8_9_10: {{flat|global}}_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
77 define amdgpu_kernel void @v_rcp_f16_abs(half addrspace(1)* %r, half addrspace(1)* %b) #0 {
79 %tid = call i32 @llvm.amdgcn.workitem.id.x()
80 %tid.ext = sext i32 %tid to i64
81 %gep.b = getelementptr inbounds half, half addrspace(1)* %b, i64 %tid.ext
82 %gep.r = getelementptr inbounds half, half addrspace(1)* %r, i64 %tid.ext
83 %b.val = load volatile half, half addrspace(1)* %gep.b
84 %b.abs = call half @llvm.fabs.f16(half %b.val)
85 %r.val = fdiv half 1.0, %b.abs
86 store half %r.val, half addrspace(1)* %gep.r
90 ; GCN-LABEL: {{^}}v_rcp_f16_arcp:
91 ; GFX8_9_10: {{flat|global}}_load_ushort [[VAL:v[0-9]+]]
92 ; GFX8_9_10-NOT: [[VAL]]
93 ; GFX8_9_10: v_rcp_f16_e32 [[RESULT:v[0-9]+]], [[VAL]]
94 ; GFX8_9_10-NOT: [[RESULT]]
95 ; GFX8_9_10: {{flat|global}}_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
96 define amdgpu_kernel void @v_rcp_f16_arcp(half addrspace(1)* %r, half addrspace(1)* %b) #0 {
98 %tid = call i32 @llvm.amdgcn.workitem.id.x()
99 %tid.ext = sext i32 %tid to i64
100 %gep.b = getelementptr inbounds half, half addrspace(1)* %b, i64 %tid.ext
101 %gep.r = getelementptr inbounds half, half addrspace(1)* %r, i64 %tid.ext
102 %b.val = load volatile half, half addrspace(1)* %gep.b
103 %r.val = fdiv arcp half 1.0, %b.val
104 store half %r.val, half addrspace(1)* %gep.r
108 ; GCN-LABEL: {{^}}v_rcp_f16_neg:
109 ; GFX8_9_10: {{flat|global}}_load_ushort [[VAL:v[0-9]+]]
110 ; GFX8_9_10-NOT: [[VAL]]
111 ; GFX8_9_10: v_rcp_f16_e64 [[RESULT:v[0-9]+]], -[[VAL]]
112 ; GFX8_9_10-NOT: [[RESULT]]
113 ; GFX8_9_10: {{flat|global}}_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
114 define amdgpu_kernel void @v_rcp_f16_neg(half addrspace(1)* %r, half addrspace(1)* %b) #0 {
116 %tid = call i32 @llvm.amdgcn.workitem.id.x()
117 %tid.ext = sext i32 %tid to i64
118 %gep.b = getelementptr inbounds half, half addrspace(1)* %b, i64 %tid.ext
119 %gep.r = getelementptr inbounds half, half addrspace(1)* %r, i64 %tid.ext
120 %b.val = load volatile half, half addrspace(1)* %gep.b
121 %r.val = fdiv half -1.0, %b.val
122 store half %r.val, half addrspace(1)* %gep.r
126 ; GCN-LABEL: {{^}}v_rsq_f16:
127 ; GFX8_9_10: {{flat|global}}_load_ushort [[VAL:v[0-9]+]]
128 ; GFX8_9_10-NOT: [[VAL]]
129 ; GFX8_9_10: v_rsq_f16_e32 [[RESULT:v[0-9]+]], [[VAL]]
130 ; GFX8_9_10-NOT: [[RESULT]]
131 ; GFX8_9_10: {{flat|global}}_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
132 define amdgpu_kernel void @v_rsq_f16(half addrspace(1)* %r, half addrspace(1)* %b) #0 {
134 %tid = call i32 @llvm.amdgcn.workitem.id.x()
135 %tid.ext = sext i32 %tid to i64
136 %gep.b = getelementptr inbounds half, half addrspace(1)* %b, i64 %tid.ext
137 %gep.r = getelementptr inbounds half, half addrspace(1)* %r, i64 %tid.ext
138 %b.val = load volatile half, half addrspace(1)* %gep.b
139 %b.sqrt = call half @llvm.sqrt.f16(half %b.val)
140 %r.val = fdiv half 1.0, %b.sqrt
141 store half %r.val, half addrspace(1)* %gep.r
145 ; GCN-LABEL: {{^}}v_rsq_f16_neg:
146 ; GFX8_9_10: {{flat|global}}_load_ushort [[VAL:v[0-9]+]]
147 ; GFX8_9_10-NOT: [[VAL]]
148 ; GFX8_9_10: v_sqrt_f16_e32 [[SQRT:v[0-9]+]], [[VAL]]
149 ; GFX8_9_10-NEXT: v_rcp_f16_e64 [[RESULT:v[0-9]+]], -[[SQRT]]
150 ; GFX8_9_10-NOT: [[RESULT]]
151 ; GFX8_9_10: {{flat|global}}_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
152 define amdgpu_kernel void @v_rsq_f16_neg(half addrspace(1)* %r, half addrspace(1)* %b) #0 {
154 %tid = call i32 @llvm.amdgcn.workitem.id.x()
155 %tid.ext = sext i32 %tid to i64
156 %gep.b = getelementptr inbounds half, half addrspace(1)* %b, i64 %tid.ext
157 %gep.r = getelementptr inbounds half, half addrspace(1)* %r, i64 %tid.ext
158 %b.val = load volatile half, half addrspace(1)* %gep.b
159 %b.sqrt = call half @llvm.sqrt.f16(half %b.val)
160 %r.val = fdiv half -1.0, %b.sqrt
161 store half %r.val, half addrspace(1)* %gep.r
165 ; GCN-LABEL: {{^}}v_fdiv_f16_arcp:
166 ; GFX8_9_10: {{flat|global}}_load_ushort [[LHS:v[0-9]+]]
167 ; GFX8_9_10: {{flat|global}}_load_ushort [[RHS:v[0-9]+]]
169 ; GFX8_9_10: v_rcp_f16_e32 [[RCP:v[0-9]+]], [[RHS]]
170 ; GFX8_9_10: v_mul_f16_e32 [[RESULT:v[0-9]+]], [[LHS]], [[RCP]]
172 ; GFX8_9_10: {{flat|global}}_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
173 define amdgpu_kernel void @v_fdiv_f16_arcp(half addrspace(1)* %r, half addrspace(1)* %a, half addrspace(1)* %b) #0 {
175 %tid = call i32 @llvm.amdgcn.workitem.id.x()
176 %tid.ext = sext i32 %tid to i64
177 %gep.a = getelementptr inbounds half, half addrspace(1)* %a, i64 %tid.ext
178 %gep.b = getelementptr inbounds half, half addrspace(1)* %b, i64 %tid.ext
179 %gep.r = getelementptr inbounds half, half addrspace(1)* %r, i64 %tid.ext
180 %a.val = load volatile half, half addrspace(1)* %gep.a
181 %b.val = load volatile half, half addrspace(1)* %gep.b
182 %r.val = fdiv arcp half %a.val, %b.val
183 store half %r.val, half addrspace(1)* %gep.r
187 ; GCN-LABEL: {{^}}v_fdiv_f16_unsafe:
188 ; GFX8_9_10: {{flat|global}}_load_ushort [[LHS:v[0-9]+]]
189 ; GFX8_9_10: {{flat|global}}_load_ushort [[RHS:v[0-9]+]]
191 ; GFX8_9_10: v_rcp_f16_e32 [[RCP:v[0-9]+]], [[RHS]]
192 ; GFX8_9_10: v_mul_f16_e32 [[RESULT:v[0-9]+]], [[LHS]], [[RCP]]
194 ; GFX8_9_10: {{flat|global}}_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
195 define amdgpu_kernel void @v_fdiv_f16_unsafe(half addrspace(1)* %r, half addrspace(1)* %a, half addrspace(1)* %b) #2 {
197 %tid = call i32 @llvm.amdgcn.workitem.id.x()
198 %tid.ext = sext i32 %tid to i64
199 %gep.a = getelementptr inbounds half, half addrspace(1)* %a, i64 %tid.ext
200 %gep.b = getelementptr inbounds half, half addrspace(1)* %b, i64 %tid.ext
201 %gep.r = getelementptr inbounds half, half addrspace(1)* %r, i64 %tid.ext
202 %a.val = load volatile half, half addrspace(1)* %gep.a
203 %b.val = load volatile half, half addrspace(1)* %gep.b
204 %r.val = fdiv half %a.val, %b.val
205 store half %r.val, half addrspace(1)* %gep.r
209 ; GCN-LABEL: {{^}}div_arcp_2_x_pat_f16:
210 ; SI: v_mul_f32_e32 v{{[0-9]+}}, 0.5, v{{[0-9]+}}
212 ; GFX8_9_10: v_mul_f16_e32 [[MUL:v[0-9]+]], 0.5, v{{[0-9]+}}
213 ; GFX8_9_10: buffer_store_short [[MUL]]
214 define amdgpu_kernel void @div_arcp_2_x_pat_f16(half addrspace(1)* %out) #0 {
215 %x = load half, half addrspace(1)* undef
216 %rcp = fdiv arcp half %x, 2.0
217 store half %rcp, half addrspace(1)* %out, align 4
221 ; GCN-LABEL: {{^}}div_arcp_k_x_pat_f16:
222 ; SI: v_mul_f32_e32 v{{[0-9]+}}, 0x3dccc000, v{{[0-9]+}}
224 ; GFX8_9_10: v_mul_f16_e32 [[MUL:v[0-9]+]], 0x2e66, v{{[0-9]+}}
225 ; GFX8_9_10: buffer_store_short [[MUL]]
226 define amdgpu_kernel void @div_arcp_k_x_pat_f16(half addrspace(1)* %out) #0 {
227 %x = load half, half addrspace(1)* undef
228 %rcp = fdiv arcp half %x, 10.0
229 store half %rcp, half addrspace(1)* %out, align 4
233 ; GCN-LABEL: {{^}}div_arcp_neg_k_x_pat_f16:
234 ; SI: v_mul_f32_e32 v{{[0-9]+}}, 0xbdccc000, v{{[0-9]+}}
236 ; GFX8_9_10: v_mul_f16_e32 [[MUL:v[0-9]+]], 0xae66, v{{[0-9]+}}
237 ; GFX8_9_10: buffer_store_short [[MUL]]
238 define amdgpu_kernel void @div_arcp_neg_k_x_pat_f16(half addrspace(1)* %out) #0 {
239 %x = load half, half addrspace(1)* undef
240 %rcp = fdiv arcp half %x, -10.0
241 store half %rcp, half addrspace(1)* %out, align 4
245 declare i32 @llvm.amdgcn.workitem.id.x() #1
246 declare half @llvm.sqrt.f16(half) #1
247 declare half @llvm.fabs.f16(half) #1
249 attributes #0 = { nounwind }
250 attributes #1 = { nounwind readnone }
251 attributes #2 = { nounwind "unsafe-fp-math"="true" }