; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -global-isel -march=amdgcn -mcpu=fiji -O0 -stop-after=irtranslator -o - %s | FileCheck %s

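; atomicrmw fadd on an LDS (addrspace(3)) pointer is translated directly to a
; single G_ATOMICRMW_FADD, with the seq_cst ordering kept on the memory operand.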
define float @test_atomicrmw_fadd(ptr addrspace(3) %addr) {
  ; CHECK-LABEL: name: test_atomicrmw_fadd
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   liveins: $vgpr0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
  ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
  ; CHECK-NEXT:   [[ATOMICRMW_FADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_FADD [[COPY]](p3), [[C]] :: (load store seq_cst (s32) on %ir.addr, addrspace 3)
  ; CHECK-NEXT:   $vgpr0 = COPY [[ATOMICRMW_FADD]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
  %oldval = atomicrmw fadd ptr addrspace(3) %addr, float 1.0 seq_cst
  ret float %oldval
}

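; atomicrmw fsub has no direct generic atomic opcode here, so it is expanded
; into a compare-exchange loop before the IRTranslator runs: bb.2 retries the
; G_ATOMIC_CMPXCHG_WITH_SUCCESS until it succeeds, using the llvm.amdgcn.if.break /
; llvm.amdgcn.loop / llvm.amdgcn.end.cf intrinsics for wave-level loop control.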
define float @test_atomicrmw_fsub(ptr addrspace(3) %addr) {
  ; CHECK-LABEL: name: test_atomicrmw_fsub
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   successors: %bb.2(0x80000000)
  ; CHECK-NEXT:   liveins: $vgpr0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
  ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
  ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32) from %ir.addr, addrspace 3)
  ; CHECK-NEXT:   G_BR %bb.2
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.2.atomicrmw.start:
  ; CHECK-NEXT:   successors: %bb.3(0x40000000), %bb.2(0x40000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[PHI:%[0-9]+]]:_(s64) = G_PHI %16(s64), %bb.2, [[C1]](s64), %bb.1
  ; CHECK-NEXT:   [[PHI1:%[0-9]+]]:_(s32) = G_PHI [[LOAD]](s32), %bb.1, %14(s32), %bb.2
  ; CHECK-NEXT:   [[FSUB:%[0-9]+]]:_(s32) = G_FSUB [[PHI1]], [[C]]
  ; CHECK-NEXT:   [[ATOMIC_CMPXCHG_WITH_SUCCESS:%[0-9]+]]:_(s32), [[ATOMIC_CMPXCHG_WITH_SUCCESS1:%[0-9]+]]:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS [[COPY]](p3), [[PHI1]], [[FSUB]] :: (load store seq_cst seq_cst (s32) on %ir.addr, addrspace 3)
  ; CHECK-NEXT:   [[INTRINSIC_CONVERGENT:%[0-9]+]]:_(s64) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.if.break), [[ATOMIC_CMPXCHG_WITH_SUCCESS1]](s1), [[PHI]](s64)
  ; CHECK-NEXT:   [[INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:%[0-9]+]]:_(s1) = G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.loop), [[INTRINSIC_CONVERGENT]](s64)
  ; CHECK-NEXT:   G_BRCOND [[INTRINSIC_CONVERGENT_W_SIDE_EFFECTS]](s1), %bb.3
  ; CHECK-NEXT:   G_BR %bb.2
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.3.atomicrmw.end:
  ; CHECK-NEXT:   [[PHI2:%[0-9]+]]:_(s32) = G_PHI [[ATOMIC_CMPXCHG_WITH_SUCCESS]](s32), %bb.2
  ; CHECK-NEXT:   [[PHI3:%[0-9]+]]:_(s64) = G_PHI [[INTRINSIC_CONVERGENT]](s64), %bb.2
  ; CHECK-NEXT:   G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[PHI3]](s64)
  ; CHECK-NEXT:   $vgpr0 = COPY [[PHI2]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
  %oldval = atomicrmw fsub ptr addrspace(3) %addr, float 1.0 seq_cst
  ret float %oldval
}