; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: opt -S -passes='require<profile-summary>,function(codegenprepare)' -mtriple=amdgcn-amd-amdhsa -mcpu=gfx908 < %s | FileCheck -check-prefix=OPT %s
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx908 -amdgpu-atomic-optimizer-strategy=None < %s | FileCheck -check-prefix=GCN %s

; Make sure we match the addressing mode offset of global.atomic.fadd intrinsics across blocks.
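;
; Background on the offsets checked below: gfx9 global instructions take a
; signed 13-bit immediate offset, so the small %in GEP (7 floats x 4 bytes
; = 28) should sink into the "if" block and fold into the atomic as
; "offset:28", while the large %out offset (999999 x 4 = 3999996 bytes)
; does not fit and is split into a materialized base (0x3d0000 = 3997696)
; plus "offset:2300".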
define amdgpu_kernel void @test_sink_small_offset_global_atomic_fadd_f32(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; OPT-LABEL: define amdgpu_kernel void @test_sink_small_offset_global_atomic_fadd_f32(
; OPT-SAME: ptr addrspace(1) [[OUT:%.*]], ptr addrspace(1) [[IN:%.*]]) #[[ATTR0:[0-9]+]] {
; OPT-NEXT:  [[ENTRY:.*]]:
; OPT-NEXT:    [[TID:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0) #[[ATTR2:[0-9]+]]
; OPT-NEXT:    [[CMP:%.*]] = icmp eq i32 [[TID]], 0
; OPT-NEXT:    br i1 [[CMP]], label %[[ENDIF:.*]], label %[[IF:.*]]
; OPT:       [[IF]]:
; OPT-NEXT:    [[IN_GEP:%.*]] = getelementptr float, ptr addrspace(1) [[IN]], i32 7
; OPT-NEXT:    [[FADD2:%.*]] = atomicrmw fadd ptr addrspace(1) [[IN_GEP]], float 2.000000e+00 syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0:![0-9]+]], !amdgpu.ignore.denormal.mode [[META0]]
; OPT-NEXT:    [[VAL:%.*]] = load volatile float, ptr addrspace(1) undef, align 4
; OPT-NEXT:    br label %[[ENDIF]]
; OPT:       [[ENDIF]]:
; OPT-NEXT:    [[X:%.*]] = phi float [ [[VAL]], %[[IF]] ], [ 0.000000e+00, %[[ENTRY]] ]
; OPT-NEXT:    [[OUT_GEP:%.*]] = getelementptr float, ptr addrspace(1) [[OUT]], i32 999999
; OPT-NEXT:    store float [[X]], ptr addrspace(1) [[OUT_GEP]], align 4
; OPT-NEXT:    br label %[[DONE:.*]]
; OPT:       [[DONE]]:
; OPT-NEXT:    ret void
;
; GCN-LABEL: test_sink_small_offset_global_atomic_fadd_f32:
; GCN:       ; %bb.0: ; %entry
; GCN-NEXT:    s_load_dwordx4 s[0:3], s[8:9], 0x0
; GCN-NEXT:    v_mbcnt_lo_u32_b32 v0, -1, 0
; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
; GCN-NEXT:    v_mov_b32_e32 v0, 0
; GCN-NEXT:    s_and_saveexec_b64 s[4:5], vcc
; GCN-NEXT:    s_cbranch_execz .LBB0_2
; GCN-NEXT:  ; %bb.1: ; %if
; GCN-NEXT:    v_mov_b32_e32 v0, 0
; GCN-NEXT:    v_mov_b32_e32 v1, 2.0
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    global_atomic_add_f32 v0, v1, s[2:3] offset:28
; GCN-NEXT:    s_waitcnt vmcnt(0)
; GCN-NEXT:    buffer_wbinvl1_vol
; GCN-NEXT:    global_load_dword v0, v[0:1], off glc
; GCN-NEXT:    s_waitcnt vmcnt(0)
; GCN-NEXT:  .LBB0_2: ; %endif
; GCN-NEXT:    s_or_b64 exec, exec, s[4:5]
; GCN-NEXT:    v_mov_b32_e32 v1, 0x3d0000
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    global_store_dword v1, v0, s[0:1] offset:2300
; GCN-NEXT:    s_endpgm
entry:
  %tid = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0) #0
  %cmp = icmp eq i32 %tid, 0
  br i1 %cmp, label %endif, label %if

if:
  %in.gep = getelementptr float, ptr addrspace(1) %in, i32 7
  %fadd2 = atomicrmw fadd ptr addrspace(1) %in.gep, float 2.000000e+00 syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory !0, !amdgpu.ignore.denormal.mode !0
  %val = load volatile float, ptr addrspace(1) undef
  br label %endif

endif:
  %x = phi float [ %val, %if ], [ 0.0, %entry ]
  %out.gep = getelementptr float, ptr addrspace(1) %out, i32 999999
  store float %x, ptr addrspace(1) %out.gep
  br label %done

done:
  ret void
}
declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32) #1
declare float @llvm.amdgcn.global.atomic.fadd.f32.p1.f32(ptr addrspace(1) nocapture, float) #2
attributes #0 = { argmemonly nounwind }
attributes #1 = { nounwind readnone willreturn }
attributes #2 = { argmemonly nounwind willreturn }
!0 = !{}
;.
; OPT: [[META0]] = !{}
;.