; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx803 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN %s
; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN %s

; The buffer_loads and buffer_stores all access the same location. Check they do
; not get reordered by the scheduler.
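; Every load and store below uses a descriptor loaded from the same pointer and
; the same byte offset (%tmp11), so the scheduler must preserve the memory
; dependency between each buffer_load and the buffer_store that follows it.
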
; GCN-LABEL: {{^}}_amdgpu_cs_main:
; GCN: buffer_load_dword
; GCN: buffer_store_dword
; GCN: buffer_load_dword
; GCN: buffer_store_dword
; GCN: buffer_load_dword
; GCN: buffer_store_dword
; GCN: buffer_load_dword
; GCN: buffer_store_dword
; Function Attrs: nounwind
define amdgpu_cs void @_amdgpu_cs_main(<3 x i32> inreg %arg3, <3 x i32> %arg5) #0 {
  %tmp9 = add <3 x i32> %arg3, %arg5
  %tmp10 = extractelement <3 x i32> %tmp9, i32 0
  %tmp11 = shl i32 %tmp10, 2
  %tmp12 = inttoptr i64 undef to <4 x i32> addrspace(4)*
  %tmp13 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp12, align 16
  %tmp14 = call float @llvm.amdgcn.buffer.load.f32(<4 x i32> %tmp13, i32 0, i32 %tmp11, i1 false, i1 false) #0
  %tmp17 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp12, align 16
  call void @llvm.amdgcn.buffer.store.f32(float %tmp14, <4 x i32> %tmp17, i32 0, i32 %tmp11, i1 false, i1 false) #0
  %tmp20 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp12, align 16
  %tmp21 = call float @llvm.amdgcn.buffer.load.f32(<4 x i32> %tmp20, i32 0, i32 %tmp11, i1 false, i1 false) #0
  %tmp22 = fadd reassoc nnan arcp contract float %tmp21, 1.000000e+00
  call void @llvm.amdgcn.buffer.store.f32(float %tmp22, <4 x i32> %tmp20, i32 0, i32 %tmp11, i1 false, i1 false) #0
  %tmp25 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp12, align 16
  %tmp26 = call float @llvm.amdgcn.buffer.load.f32(<4 x i32> %tmp25, i32 0, i32 %tmp11, i1 false, i1 false) #0
  %tmp27 = fadd reassoc nnan arcp contract float %tmp26, 1.000000e+00
  call void @llvm.amdgcn.buffer.store.f32(float %tmp27, <4 x i32> %tmp25, i32 0, i32 %tmp11, i1 false, i1 false) #0
  %tmp30 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp12, align 16
  %tmp31 = call float @llvm.amdgcn.buffer.load.f32(<4 x i32> %tmp30, i32 0, i32 %tmp11, i1 false, i1 false) #0
  %tmp32 = fadd reassoc nnan arcp contract float %tmp31, 1.000000e+00
  call void @llvm.amdgcn.buffer.store.f32(float %tmp32, <4 x i32> %tmp30, i32 0, i32 %tmp11, i1 false, i1 false) #0
  %tmp35 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp12, align 16
  %tmp36 = call float @llvm.amdgcn.buffer.load.f32(<4 x i32> %tmp35, i32 0, i32 %tmp11, i1 false, i1 false) #0
  %tmp37 = fadd reassoc nnan arcp contract float %tmp36, 1.000000e+00
  call void @llvm.amdgcn.buffer.store.f32(float %tmp37, <4 x i32> %tmp35, i32 0, i32 %tmp11, i1 false, i1 false) #0
  ret void
}

declare float @llvm.amdgcn.buffer.load.f32(<4 x i32>, i32, i32, i1, i1) #2

declare void @llvm.amdgcn.buffer.store.f32(float, <4 x i32>, i32, i32, i1, i1) #3

attributes #0 = { nounwind }
attributes #2 = { nounwind readonly }
attributes #3 = { nounwind writeonly }