; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx908 < %s | FileCheck --check-prefix=GCN %s

; Test using saddr addressing mode of global_* flat atomic instructions.
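; The saddr form encodes the 64-bit base address in an SGPR pair, takes a
; 32-bit offset in a VGPR, and can fold a small signed immediate offset into
; the instruction (exercised by the *_neg128 tests below).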

; --------------------------------------------------------------------------------
; amdgcn global atomic fadd
; --------------------------------------------------------------------------------

define amdgpu_ps void @global_fadd_saddr_f32_nortn(i8 addrspace(1)* inreg %sbase, i32 %voffset, float %data) {
; GCN-LABEL: global_fadd_saddr_f32_nortn:
; GCN:       ; %bb.0:
; GCN-NEXT:    global_atomic_add_f32 v0, v1, s[2:3]
; GCN-NEXT:    s_endpgm
  %zext.offset = zext i32 %voffset to i64
  %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
  %cast.gep0 = bitcast i8 addrspace(1)* %gep0 to float addrspace(1)*
  %ret = call float @llvm.amdgcn.global.atomic.fadd.f32.p1f32(float addrspace(1)* %cast.gep0, float %data)
  ret void
}

define amdgpu_ps void @global_fadd_saddr_f32_nortn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, float %data) {
; GCN-LABEL: global_fadd_saddr_f32_nortn_neg128:
; GCN:       ; %bb.0:
; GCN-NEXT:    global_atomic_add_f32 v0, v1, s[2:3] offset:-128
; GCN-NEXT:    s_endpgm
  %zext.offset = zext i32 %voffset to i64
  %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
  %gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128
  %cast.gep1 = bitcast i8 addrspace(1)* %gep1 to float addrspace(1)*
  %ret = call float @llvm.amdgcn.global.atomic.fadd.f32.p1f32(float addrspace(1)* %cast.gep1, float %data)
  ret void
}

define amdgpu_ps void @global_fadd_saddr_v2f16_nortn(i8 addrspace(1)* inreg %sbase, i32 %voffset, <2 x half> %data) {
; GCN-LABEL: global_fadd_saddr_v2f16_nortn:
; GCN:       ; %bb.0:
; GCN-NEXT:    global_atomic_pk_add_f16 v0, v1, s[2:3]
; GCN-NEXT:    s_endpgm
  %zext.offset = zext i32 %voffset to i64
  %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
  %cast.gep0 = bitcast i8 addrspace(1)* %gep0 to <2 x half> addrspace(1)*
  %ret = call <2 x half> @llvm.amdgcn.global.atomic.fadd.v2f16.p1v2f16(<2 x half> addrspace(1)* %cast.gep0, <2 x half> %data)
  ret void
}

define amdgpu_ps void @global_fadd_saddr_v2f16_nortn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, <2 x half> %data) {
; GCN-LABEL: global_fadd_saddr_v2f16_nortn_neg128:
; GCN:       ; %bb.0:
; GCN-NEXT:    global_atomic_pk_add_f16 v0, v1, s[2:3] offset:-128
; GCN-NEXT:    s_endpgm
  %zext.offset = zext i32 %voffset to i64
  %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
  %gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128
  %cast.gep1 = bitcast i8 addrspace(1)* %gep1 to <2 x half> addrspace(1)*
  %ret = call <2 x half> @llvm.amdgcn.global.atomic.fadd.v2f16.p1v2f16(<2 x half> addrspace(1)* %cast.gep1, <2 x half> %data)
  ret void
}

declare float @llvm.amdgcn.global.atomic.fadd.f32.p1f32(float addrspace(1)* nocapture, float) #0
declare <2 x half> @llvm.amdgcn.global.atomic.fadd.v2f16.p1v2f16(<2 x half> addrspace(1)* nocapture, <2 x half>) #0

attributes #0 = { argmemonly nounwind willreturn }