; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx908 -amdgpu-atomic-optimizer-strategy=None -verify-machineinstrs < %s | FileCheck %s -check-prefix=CHECK
; fadd.f32, no return value used; cachepolicy 24 exercises non-default flag bits.
define amdgpu_ps void @raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset(float %val, ptr addrspace(8) inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
; CHECK-LABEL: raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    s_mov_b32 s11, s5
; CHECK-NEXT:    s_mov_b32 s10, s4
; CHECK-NEXT:    s_mov_b32 s9, s3
; CHECK-NEXT:    s_mov_b32 s8, s2
; CHECK-NEXT:    buffer_atomic_add_f32 v0, v1, s[8:11], s6 offen
; CHECK-NEXT:    s_endpgm
  %ret = call float @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 %voffset, i32 %soffset, i32 24)
  ret void
}
; fadd.f32 with a constant-zero voffset: selects the "off" addressing form.
define amdgpu_ps void @raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset(float %val, ptr addrspace(8) inreg %rsrc, i32 inreg %soffset) {
; CHECK-LABEL: raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    s_mov_b32 s11, s5
; CHECK-NEXT:    s_mov_b32 s10, s4
; CHECK-NEXT:    s_mov_b32 s9, s3
; CHECK-NEXT:    s_mov_b32 s8, s2
; CHECK-NEXT:    buffer_atomic_add_f32 v0, off, s[8:11], s6
; CHECK-NEXT:    s_endpgm
  %ret = call float @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 0, i32 %soffset, i32 0)
  ret void
}
; Packed-half variant: fadd.v2f16 selects buffer_atomic_pk_add_f16.
define amdgpu_ps void @raw_ptr_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset(<2 x half> %val, ptr addrspace(8) inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
; CHECK-LABEL: raw_ptr_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    s_mov_b32 s11, s5
; CHECK-NEXT:    s_mov_b32 s10, s4
; CHECK-NEXT:    s_mov_b32 s9, s3
; CHECK-NEXT:    s_mov_b32 s8, s2
; CHECK-NEXT:    buffer_atomic_pk_add_f16 v0, v1, s[8:11], s6 offen
; CHECK-NEXT:    s_endpgm
  %ret = call <2 x half> @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2f16(<2 x half> %val, ptr addrspace(8) %rsrc, i32 %voffset, i32 %soffset, i32 0)
  ret void
}
; Constant voffset (92) folds into the immediate offset field (offset:92, "off" addressing).
define amdgpu_ps void @raw_ptr_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset(<2 x half> %val, ptr addrspace(8) inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
; CHECK-LABEL: raw_ptr_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    s_mov_b32 s11, s5
; CHECK-NEXT:    s_mov_b32 s10, s4
; CHECK-NEXT:    s_mov_b32 s9, s3
; CHECK-NEXT:    s_mov_b32 s8, s2
; CHECK-NEXT:    buffer_atomic_pk_add_f16 v0, off, s[8:11], s6 offset:92
; CHECK-NEXT:    s_endpgm
  %ret = call <2 x half> @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2f16(<2 x half> %val, ptr addrspace(8) %rsrc, i32 92, i32 %soffset, i32 0)
  ret void
}
; cachepolicy bit 1 (value 2) selects the slc modifier on the MUBUF instruction.
define amdgpu_ps void @raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc(float %val, ptr addrspace(8) inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
; CHECK-LABEL: raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    s_mov_b32 s11, s5
; CHECK-NEXT:    s_mov_b32 s10, s4
; CHECK-NEXT:    s_mov_b32 s9, s3
; CHECK-NEXT:    s_mov_b32 s8, s2
; CHECK-NEXT:    buffer_atomic_add_f32 v0, v1, s[8:11], s6 offen slc
; CHECK-NEXT:    s_endpgm
  %ret = call float @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 %voffset, i32 %soffset, i32 2)
  ret void
}
declare float @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.f32(float, ptr addrspace(8), i32, i32, i32 immarg) #0
declare <2 x half> @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2f16(<2 x half>, ptr addrspace(8), i32, i32, i32 immarg) #0

attributes #0 = { nounwind }