; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs -amdgpu-enable-global-sgpr-addr < %s | FileCheck -check-prefix=GFX9 %s

; Test for a conv2d like sequence of loads.

; GFX9: global_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}} offset:16{{$}}
; GFX9: global_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}{{$}}
; GFX9: global_load_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}} offset:32{{$}}
; GFX9: global_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}} offset:-16{{$}}
; GFX9: global_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}} offset:-32{{$}}
; GFX9: global_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}} offset:8{{$}}
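
; With -amdgpu-enable-global-sgpr-addr the uniform base address is expected in an SGPR
; pair (the saddr operand checked above) with only the per-lane index in VGPRs, so the
; neighbouring conv2d taps become immediate offsets on the same base.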

define hidden amdgpu_kernel void @simpleSaddrs(i64 addrspace(1)* %dst_image, i64 addrspace(1)* %src_image) {
entry:
  %id = call i32 @llvm.amdgcn.workitem.id.x()
  %idx = zext i32 %id to i64
  %gep = getelementptr i64, i64 addrspace(1)* %src_image, i64 %idx
  %ptr0 = getelementptr inbounds i64, i64 addrspace(1)* %gep, i64 1
  %load0 = load i64, i64 addrspace(1)* %ptr0
  %ptr1 = getelementptr inbounds i64, i64 addrspace(1)* %gep, i64 2
  %load1 = load i64, i64 addrspace(1)* %ptr1
  %ptr2 = getelementptr inbounds i64, i64 addrspace(1)* %gep, i64 3
  %load2 = load i64, i64 addrspace(1)* %ptr2
  %ptr3 = getelementptr inbounds i64, i64 addrspace(1)* %gep, i64 4
  %load3 = load i64, i64 addrspace(1)* %ptr3
  %ptr4 = getelementptr inbounds i64, i64 addrspace(1)* %gep, i64 -4
  %load4 = load i64, i64 addrspace(1)* %ptr4
  %ptr5 = getelementptr inbounds i64, i64 addrspace(1)* %gep, i64 -3
  %load5 = load i64, i64 addrspace(1)* %ptr5
  %ptr6 = getelementptr inbounds i64, i64 addrspace(1)* %gep, i64 -2
  %load6 = load i64, i64 addrspace(1)* %ptr6
  %ptr7 = getelementptr inbounds i64, i64 addrspace(1)* %gep, i64 -1
  %load7 = load i64, i64 addrspace(1)* %ptr7
  %ptr8 = getelementptr inbounds i64, i64 addrspace(1)* %gep, i64 0
  %load8 = load i64, i64 addrspace(1)* %ptr8
  %add0 = add i64 %load1, %load0
  %add1 = add i64 %load3, %load2
  %add2 = add i64 %load5, %load4
  %add3 = add i64 %load7, %load6
  %add4 = add i64 %add0, %load8
  %add5 = add i64 %add2, %add1
  %add6 = add i64 %add4, %add3
  %add7 = add i64 %add6, %add5
  %gep9 = getelementptr i64, i64 addrspace(1)* %dst_image, i64 %idx
  %ptr9 = getelementptr inbounds i64, i64 addrspace(1)* %gep9, i64 1
  store volatile i64 %add7, i64 addrspace(1)* %ptr9

; Test various offset boundaries. A displacement of 511*8 = 4088 bytes still fits in
; the signed immediate offset field, while 1023*8 = 8184 does not, so that load is
; checked to fall back to plain VGPR addressing (off) with no immediate offset.
; GFX9: global_load_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s[{{[0-9]+}}:{{[0-9]+}}] offset:4088{{$}}
; GFX9: global_load_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, off{{$}}
; GFX9: global_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s[{{[0-9]+}}:{{[0-9]+}}] offset:2040{{$}}
  %gep11 = getelementptr inbounds i64, i64 addrspace(1)* %gep, i64 511
  %load11 = load i64, i64 addrspace(1)* %gep11
  %gep12 = getelementptr inbounds i64, i64 addrspace(1)* %gep, i64 1023
  %load12 = load i64, i64 addrspace(1)* %gep12
  %gep13 = getelementptr inbounds i64, i64 addrspace(1)* %gep, i64 255
  %load13 = load i64, i64 addrspace(1)* %gep13
  %add11 = add i64 %load11, %load12
  %add12 = add i64 %add11, %load13
  store volatile i64 %add12, i64 addrspace(1)* undef
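
; Negative boundaries: -512*8 = -4096 still fits the immediate offset field, while
; -1024*8 = -8192 and -2048*8 = -16384 do not, so those two loads are checked to fall
; back to plain VGPR addressing (off) with no immediate offset.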
; GFX9: global_load_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, off{{$}}
; GFX9: global_load_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, off{{$}}
; GFX9: global_load_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s[{{[0-9]+}}:{{[0-9]+}}] offset:-4096{{$}}
  %gep21 = getelementptr inbounds i64, i64 addrspace(1)* %gep, i64 -1024
  %load21 = load i64, i64 addrspace(1)* %gep21
  %gep22 = getelementptr inbounds i64, i64 addrspace(1)* %gep, i64 -2048
  %load22 = load i64, i64 addrspace(1)* %gep22
  %gep23 = getelementptr inbounds i64, i64 addrspace(1)* %gep, i64 -512
  %load23 = load i64, i64 addrspace(1)* %gep23
  %add21 = add i64 %load22, %load21
  %add22 = add i64 %add21, %load23
  store volatile i64 %add22, i64 addrspace(1)* undef
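
; Elements 255, 256 and 257 are contiguous, so these loads can share the SGPR base
; with small immediate offsets (255*8 = 2040 being the lowest).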
; GFX9: global_load_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s[{{[0-9]+}}:{{[0-9]+}}] offset:2040{{$}}
  %gep31 = getelementptr inbounds i64, i64 addrspace(1)* %gep, i64 257
  %load31 = load i64, i64 addrspace(1)* %gep31
  %gep32 = getelementptr inbounds i64, i64 addrspace(1)* %gep, i64 256
  %load32 = load i64, i64 addrspace(1)* %gep32
  %gep33 = getelementptr inbounds i64, i64 addrspace(1)* %gep, i64 255
  %load33 = load i64, i64 addrspace(1)* %gep33
  %add34 = add i64 %load32, %load31
  %add35 = add i64 %add34, %load33
  store volatile i64 %add35, i64 addrspace(1)* undef
  ret void
}
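
; The 32-byte <4 x i64> load below is expected to split into two dwordx4 accesses
; (offsets 0 and 16) rather than being scalarized into individual dword loads.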
; GFX9: global_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, off{{$}}
; GFX9: global_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, off offset:16{{$}}
; GFX9-NEXT: s_waitcnt
; NGFX9-NOT: global_load_dword

define amdgpu_cs void @_amdgpu_cs_main(i64 inreg %arg) {
.entry:
  %tmp1 = inttoptr i64 %arg to <4 x i64> addrspace(1)*
  %tmp2 = load <4 x i64>, <4 x i64> addrspace(1)* %tmp1, align 16
  store volatile <4 x i64> %tmp2, <4 x i64> addrspace(1)* undef
  ret void
}

declare i32 @llvm.amdgcn.workitem.id.x() #1
attributes #0 = { convergent nounwind }
attributes #1 = { nounwind readnone speculatable }