1 # RUN: llc -march=amdgcn -mcpu=gfx803 -run-pass si-memory-legalizer %s -o - | FileCheck %s
; Reference LLVM IR for the MIR function tested below: an amdgpu kernel that
; loads a per-lane pointer from %in and, under divergent control flow, performs
; an atomicrmw max whose result is unused (the "noret" case).
; NOTE(review): excerpt only — the baked-in original line numbers skip, so some
; lines (e.g. the %y argument declaration, 'ret void', and the closing brace)
; are not visible here; confirm against the full file before structural edits.
4 declare i32 @llvm.amdgcn.workitem.id.x() #0  ; per-lane workitem id
6 define amdgpu_kernel void @atomic_max_i32_noret(
7 i32 addrspace(1)* %out,
8 i32 addrspace(1)* addrspace(1)* %in,
11 %tid = call i32 @llvm.amdgcn.workitem.id.x()  ; this lane's id
12 %idxprom = sext i32 %tid to i64
13 %tid.gep = getelementptr i32 addrspace(1)*, i32 addrspace(1)* addrspace(1)* %in, i64 %idxprom
14 %ptr = load volatile i32 addrspace(1)*, i32 addrspace(1)* addrspace(1)* %tid.gep  ; per-lane pointer
15 %xor = xor i32 %tid, 1
16 %cmp = icmp ne i32 %xor, 0  ; true for every lane with tid != 1
17 %1 = call { i1, i64 } @llvm.amdgcn.if(i1 %cmp)  ; structured divergent branch
18 %2 = extractvalue { i1, i64 } %1, 0  ; branch condition
19 %3 = extractvalue { i1, i64 } %1, 1  ; saved exec mask, consumed by end.cf at the join
20 br i1 %2, label %atomic, label %exit
23 %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 100  ; +400 bytes
24 %ret = atomicrmw max i32 addrspace(1)* %gep, i32 %y seq_cst  ; result unused -> "noret"
27 exit: ; preds = %atomic, %0
28 call void @llvm.amdgcn.end.cf(i64 %3)  ; rejoin: restore exec from %3
; Declarations and attribute groups referenced by the kernel above.
32 declare { i1, i64 } @llvm.amdgcn.if(i1)  ; structured-CFG "if" intrinsic
34 declare void @llvm.amdgcn.end.cf(i64)  ; structured-CFG join intrinsic
36 ; Function Attrs: nounwind
37 declare void @llvm.stackprotector(i8*, i8**) #3
; NOTE(review): the RUN line passes -mcpu=gfx803 while these attributes pin
; "target-cpu"="tahiti"; a per-function target-cpu attribute normally takes
; precedence over the command line — confirm this mismatch is intentional.
39 attributes #0 = { nounwind readnone "target-cpu"="tahiti" }
40 attributes #1 = { nounwind "target-cpu"="tahiti" }
41 attributes #2 = { readnone }
42 attributes #3 = { nounwind }
47 # CHECK-LABEL: name: atomic_max_i32_noret
49 # CHECK-LABEL: bb.1.atomic:
50 # CHECK: BUFFER_ATOMIC_SMAX_ADDR64
# 3952 = 0xf70: vmcnt(0) with expcnt/lgkmcnt left at "no wait" on gfx8, i.e.
# wait for the atomic's memory op to complete before invalidating — TODO confirm encoding
51 # CHECK-NEXT: S_WAITCNT 3952
52 # CHECK-NEXT: BUFFER_WBINVL1_VOL
# MIR for @atomic_max_i32_noret, fed to si-memory-legalizer. The CHECK lines
# above expect the pass to insert an S_WAITCNT and a BUFFER_WBINVL1_VOL right
# after the seq_cst BUFFER_ATOMIC_SMAX_ADDR64 in bb.1.atomic.
# NOTE(review): excerpt — the baked-in original line numbers skip, so some
# YAML fields and block headers (e.g. '---', 'body: |', 'bb.N' labels) are
# not visible here; do not restructure without the full file.
54 name: atomic_max_i32_noret
56 exposesReturnsTwice: false
58 regBankSelected: false
60 tracksRegLiveness: true
# Kernel argument pointer is preloaded into $sgpr0_sgpr1.
62 - { reg: '$sgpr0_sgpr1' }
65 isFrameAddressTaken: false
66 isReturnAddressTaken: false
75 hasOpaqueSPAdjustment: false
77 hasMustTailInVarArgFunc: false
80 successors: %bb.1.atomic(0x40000000), %bb.2.exit(0x40000000)
81 liveins: $vgpr0, $sgpr0_sgpr1
83 $sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM $sgpr0_sgpr1, 11, 0, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(4)* undef`) ; kernarg load — presumably the %in pointer, verify offset
84 $vgpr1 = V_ASHRREV_I32_e32 31, $vgpr0, implicit $exec ; sign-extend tid to 64 bits
85 $vgpr1_vgpr2 = V_LSHL_B64 $vgpr0_vgpr1, 3, implicit $exec ; byte offset = tid * 8
86 $sgpr7 = S_MOV_B32 61440 ; 0xf000 — presumably buffer-resource descriptor word, confirm
89 $vgpr1_vgpr2 = BUFFER_LOAD_DWORDX2_ADDR64 killed $vgpr1_vgpr2, $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 8 from %ir.tid.gep) ; load %ptr
90 $vgpr0 = V_XOR_B32_e32 1, killed $vgpr0, implicit $exec ; %xor
91 V_CMP_NE_U32_e32 0, killed $vgpr0, implicit-def $vcc, implicit $exec ; %cmp
92 $sgpr2_sgpr3 = S_AND_SAVEEXEC_B64 killed $vcc, implicit-def $exec, implicit-def $scc, implicit $exec ; llvm.amdgcn.if: mask exec to taken lanes
93 $sgpr2_sgpr3 = S_XOR_B64 $exec, killed $sgpr2_sgpr3, implicit-def dead $scc ; saved exec, restored in bb.2.exit
94 SI_MASK_BRANCH %bb.2.exit, implicit $exec ; skip atomic block when no lanes are active
97 successors: %bb.2.exit(0x80000000)
98 liveins: $sgpr4_sgpr5_sgpr6_sgpr7:0x0000000C, $sgpr0_sgpr1, $sgpr2_sgpr3, $vgpr1_vgpr2_vgpr3_vgpr4:0x00000003
100 $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 15, 0, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(4)* undef`) ; kernarg load — presumably %y, verify offset
101 dead $vgpr0 = V_MOV_B32_e32 -1, implicit $exec
102 dead $vgpr0 = V_MOV_B32_e32 61440, implicit $exec
103 $sgpr4_sgpr5 = S_MOV_B64 0 ; zero SRD base; the 64-bit address comes from vaddr
105 $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec ; move %y into a vgpr for the atomic
107 BUFFER_ATOMIC_SMAX_ADDR64 killed $vgpr0, killed $vgpr1_vgpr2, killed $sgpr4_sgpr5_sgpr6_sgpr7, 0, 400, 0, implicit $exec :: (volatile load syncscope("one-as") seq_cst 4 from %ir.gep) ; offset 400 = %ir.gep (+100 i32s); the seq_cst op the legalizer must fence
110 liveins: $sgpr2_sgpr3
112 $exec = S_OR_B64 $exec, killed $sgpr2_sgpr3, implicit-def $scc ; end.cf: restore exec