; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
; XUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck --check-prefixes=GCN,VI %s

; FIXME: broken on VI because flat instructions need to be emitted
; instead of the addr64 equivalents of the _OFFSET variants.

; Check that moving the pointer out of the resource descriptor to
; vaddr works for atomics.

declare i32 @llvm.amdgcn.workitem.id.x() #1
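
; The first test exercises the form that uses the atomic's return value: the
; result is stored to %out, so the buffer_atomic_smax is expected to carry the
; glc bit, and offset:400 corresponds to the i32 GEP index of 100
; (100 x 4 bytes).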
define amdgpu_kernel void @atomic_max_i32(ptr addrspace(1) %out, ptr addrspace(1) %in, ptr addrspace(1) %x, i32 %y) #0 {
; GCN-LABEL: atomic_max_i32:
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; GCN-NEXT: s_mov_b32 s11, 0xf000
; GCN-NEXT: s_mov_b32 s10, 0
; GCN-NEXT: v_lshlrev_b32_e32 v1, 3, v0
; GCN-NEXT: v_mov_b32_e32 v2, 0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_mov_b64 s[8:9], s[6:7]
; GCN-NEXT: buffer_load_dwordx2 v[1:2], v[1:2], s[8:11], 0 addr64 glc
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_cmp_ne_u32_e32 vcc, 1, v0
; GCN-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GCN-NEXT: s_cbranch_execz .LBB0_2
; GCN-NEXT: ; %bb.1: ; %atomic
; GCN-NEXT: s_load_dword s0, s[0:1], 0xf
; GCN-NEXT: s_mov_b32 s8, s10
; GCN-NEXT: s_mov_b32 s9, s10
; GCN-NEXT: s_mov_b32 s6, -1
; GCN-NEXT: s_mov_b32 s7, s11
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v0, s0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: buffer_atomic_smax v0, v[1:2], s[8:11], 0 addr64 offset:400 glc
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0
; GCN-NEXT: .LBB0_2: ; %exit
; GCN-NEXT: s_endpgm
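; The pointer below is loaded per lane, so it lives in VGPRs: the selected
; atomic has to take its address in vaddr via addr64 (v[1:2] above) rather
; than folding it into the SGPR resource descriptor.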
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.gep = getelementptr ptr addrspace(1), ptr addrspace(1) %in, i32 %tid
  %ptr = load volatile ptr addrspace(1), ptr addrspace(1) %tid.gep
  %xor = xor i32 %tid, 1
  %cmp = icmp ne i32 %xor, 0
  br i1 %cmp, label %atomic, label %exit

atomic:
  %gep = getelementptr i32, ptr addrspace(1) %ptr, i32 100
  %ret = atomicrmw max ptr addrspace(1) %gep, i32 %y syncscope("workgroup") seq_cst
  store i32 %ret, ptr addrspace(1) %out
  br label %exit

exit:
  ret void
}
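
; Same pattern, but here the atomic's result is unused, so the noret selection
; is expected to drop the glc bit from the buffer_atomic_smax.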
define amdgpu_kernel void @atomic_max_i32_noret(ptr addrspace(1) %out, ptr addrspace(1) %in, ptr addrspace(1) %x, i32 %y) #0 {
; GCN-LABEL: atomic_max_i32_noret:
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xb
; GCN-NEXT: s_mov_b32 s7, 0xf000
; GCN-NEXT: s_mov_b32 s6, 0
; GCN-NEXT: v_lshlrev_b32_e32 v1, 3, v0
; GCN-NEXT: v_mov_b32_e32 v2, 0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: buffer_load_dwordx2 v[1:2], v[1:2], s[4:7], 0 addr64 glc
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_cmp_ne_u32_e32 vcc, 1, v0
; GCN-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GCN-NEXT: s_cbranch_execz .LBB1_2
; GCN-NEXT: ; %bb.1: ; %atomic
; GCN-NEXT: s_load_dword s0, s[0:1], 0xf
; GCN-NEXT: s_mov_b32 s4, s6
; GCN-NEXT: s_mov_b32 s5, s6
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v0, s0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: buffer_atomic_smax v0, v[1:2], s[4:7], 0 addr64 offset:400
; GCN-NEXT: .LBB1_2: ; %exit
; GCN-NEXT: s_endpgm
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.gep = getelementptr ptr addrspace(1), ptr addrspace(1) %in, i32 %tid
  %ptr = load volatile ptr addrspace(1), ptr addrspace(1) %tid.gep
  %xor = xor i32 %tid, 1
  %cmp = icmp ne i32 %xor, 0
  br i1 %cmp, label %atomic, label %exit

atomic:
  %gep = getelementptr i32, ptr addrspace(1) %ptr, i32 100
  %ret = atomicrmw max ptr addrspace(1) %gep, i32 %y syncscope("workgroup") seq_cst
  br label %exit

exit:
  ret void
}

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }