; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx803 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN %s
; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN %s
; The buffer_loads and buffer_stores all access the same location. Check they do
; not get reordered by the scheduler.
; GCN-LABEL: {{^}}_amdgpu_cs_main:
; GCN: buffer_load_dword
; GCN: buffer_store_dword
; GCN: buffer_load_dword
; GCN: buffer_store_dword
; GCN: buffer_load_dword
; GCN: buffer_store_dword
; GCN: buffer_load_dword
; GCN: buffer_store_dword
; Function Attrs: nounwind
define amdgpu_cs void @_amdgpu_cs_main(<3 x i32> inreg %arg3, <3 x i32> %arg5) {
  ; Each load/store pair below targets the same buffer offset (%tmp11), so the
  ; scheduler must keep them in program order (checked by the GCN lines above).
  %tmp9 = add <3 x i32> %arg3, %arg5
  %tmp10 = extractelement <3 x i32> %tmp9, i32 0
  %tmp11 = shl i32 %tmp10, 2
  %tmp12 = inttoptr i64 undef to ptr addrspace(4)
  %tmp13 = load <4 x i32>, ptr addrspace(4) %tmp12, align 16
  %tmp14 = call float @llvm.amdgcn.buffer.load.f32(<4 x i32> %tmp13, i32 0, i32 %tmp11, i1 false, i1 false) #0
  %tmp17 = load <4 x i32>, ptr addrspace(4) %tmp12, align 16
  call void @llvm.amdgcn.buffer.store.f32(float %tmp14, <4 x i32> %tmp17, i32 0, i32 %tmp11, i1 false, i1 false) #0
  %tmp20 = load <4 x i32>, ptr addrspace(4) %tmp12, align 16
  %tmp21 = call float @llvm.amdgcn.buffer.load.f32(<4 x i32> %tmp20, i32 0, i32 %tmp11, i1 false, i1 false) #0
  %tmp22 = fadd reassoc nnan arcp contract float %tmp21, 1.000000e+00
  call void @llvm.amdgcn.buffer.store.f32(float %tmp22, <4 x i32> %tmp20, i32 0, i32 %tmp11, i1 false, i1 false) #0
  %tmp25 = load <4 x i32>, ptr addrspace(4) %tmp12, align 16
  %tmp26 = call float @llvm.amdgcn.buffer.load.f32(<4 x i32> %tmp25, i32 0, i32 %tmp11, i1 false, i1 false) #0
  %tmp27 = fadd reassoc nnan arcp contract float %tmp26, 1.000000e+00
  call void @llvm.amdgcn.buffer.store.f32(float %tmp27, <4 x i32> %tmp25, i32 0, i32 %tmp11, i1 false, i1 false) #0
  %tmp30 = load <4 x i32>, ptr addrspace(4) %tmp12, align 16
  %tmp31 = call float @llvm.amdgcn.buffer.load.f32(<4 x i32> %tmp30, i32 0, i32 %tmp11, i1 false, i1 false) #0
  %tmp32 = fadd reassoc nnan arcp contract float %tmp31, 1.000000e+00
  call void @llvm.amdgcn.buffer.store.f32(float %tmp32, <4 x i32> %tmp30, i32 0, i32 %tmp11, i1 false, i1 false) #0
  %tmp35 = load <4 x i32>, ptr addrspace(4) %tmp12, align 16
  %tmp36 = call float @llvm.amdgcn.buffer.load.f32(<4 x i32> %tmp35, i32 0, i32 %tmp11, i1 false, i1 false) #0
  %tmp37 = fadd reassoc nnan arcp contract float %tmp36, 1.000000e+00
  call void @llvm.amdgcn.buffer.store.f32(float %tmp37, <4 x i32> %tmp35, i32 0, i32 %tmp11, i1 false, i1 false) #0
  ret void
}
; GCN-LABEL: {{^}}test1:
; GCN: buffer_store_dword
; GCN: buffer_load_dword
; GCN: buffer_store_dword
; The load at %off may alias the store at constant offset 8, so the three
; buffer operations must stay in order.
define amdgpu_cs void @test1(<4 x i32> inreg %buf, i32 %off) {
  call void @llvm.amdgcn.raw.buffer.store.i32(i32 0, <4 x i32> %buf, i32 8, i32 0, i32 0)
  %val = call i32 @llvm.amdgcn.raw.buffer.load.i32(<4 x i32> %buf, i32 %off, i32 0, i32 0)
  call void @llvm.amdgcn.raw.buffer.store.i32(i32 %val, <4 x i32> %buf, i32 0, i32 0, i32 0)
  ret void
}
; GCN-LABEL: {{^}}test1_ptrs:
; GCN: buffer_store_dword
; GCN: buffer_load_dword
; GCN: buffer_store_dword
; Same as @test1 but using the ptr addrspace(8) buffer-resource intrinsics.
define amdgpu_cs void @test1_ptrs(ptr addrspace(8) inreg %buf, i32 %off) {
  call void @llvm.amdgcn.raw.ptr.buffer.store.i32(i32 0, ptr addrspace(8) %buf, i32 8, i32 0, i32 0)
  %val = call i32 @llvm.amdgcn.raw.ptr.buffer.load.i32(ptr addrspace(8) %buf, i32 %off, i32 0, i32 0)
  call void @llvm.amdgcn.raw.ptr.buffer.store.i32(i32 %val, ptr addrspace(8) %buf, i32 0, i32 0, i32 0)
  ret void
}
;; In the future, the stores should be reorderable because they'd be known to be
;; at distinct offsets.
; GCN-LABEL: {{^}}test1_ptrs_reorderable:
; GCN: buffer_store_dword
; GCN: buffer_load_dword
; GCN: buffer_store_dword
; The load offset is a multiple of 16 and so is provably distinct from the
; store at offset 8, but the current checks still expect program order.
define amdgpu_cs void @test1_ptrs_reorderable(ptr addrspace(8) inreg %buf, i32 %off) {
  %shifted.off = shl i32 %off, 4
  call void @llvm.amdgcn.raw.ptr.buffer.store.i32(i32 0, ptr addrspace(8) %buf, i32 8, i32 0, i32 0)
  %val = call i32 @llvm.amdgcn.raw.ptr.buffer.load.i32(ptr addrspace(8) %buf, i32 %shifted.off, i32 0, i32 0)
  call void @llvm.amdgcn.raw.ptr.buffer.store.i32(i32 %val, ptr addrspace(8) %buf, i32 0, i32 0, i32 0)
  ret void
}
declare float @llvm.amdgcn.buffer.load.f32(<4 x i32>, i32, i32, i1, i1) #2

declare void @llvm.amdgcn.buffer.store.f32(float, <4 x i32>, i32, i32, i1, i1) #3

declare i32 @llvm.amdgcn.raw.buffer.load.i32(<4 x i32>, i32, i32, i32) #2

declare void @llvm.amdgcn.raw.buffer.store.i32(i32, <4 x i32>, i32, i32, i32) #3

declare i32 @llvm.amdgcn.raw.ptr.buffer.load.i32(ptr addrspace(8) nocapture, i32, i32, i32) #4

declare void @llvm.amdgcn.raw.ptr.buffer.store.i32(i32, ptr addrspace(8) nocapture, i32, i32, i32) #5

; #0 is referenced by the call sites in @_amdgpu_cs_main (see the
; "Function Attrs: nounwind" comment there); it was missing, which makes the
; IR fail to parse.
attributes #0 = { nounwind }
attributes #2 = { nounwind readonly }
attributes #3 = { nounwind writeonly }
attributes #4 = { nounwind memory(argmem: read) }
attributes #5 = { nounwind memory(argmem: write) }