1 ; RUN: llc -mtriple=amdgcn -mcpu=gfx803 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX8 %s
2 ; RUN: llc -mtriple=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX9 %s
4 declare i64 @_Z13get_global_idj(i32)
; Sums eight i64 loads spaced 256 elements (2048 bytes) apart from a base
; address derived from the global work-item id, then stores the sum back to
; the base. The GFX9 checks expect the 2048-byte strides to be folded into
; the global_load_dwordx2 immediate offset field (note the -4096 rebase on
; the third load); GFX8 flat loads carry no immediate offset.
6 define amdgpu_kernel void @clmem_read_simplified(i8 addrspace(1)* %buffer) {
7 ; GCN-LABEL: clmem_read_simplified:
8 ; GFX8: flat_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]
9 ; GFX8: flat_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]
10 ; GFX8: flat_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]
11 ; GFX8: flat_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]
12 ; GFX8: flat_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]
13 ; GFX8: flat_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]
14 ; GFX8: flat_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]
15 ; GFX8: flat_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]
17 ; GFX9: global_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], off{{$}}
18 ; GFX9: global_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], off offset:2048
19 ; GFX9: global_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], off offset:-4096
20 ; GFX9: global_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], off{{$}}
21 ; GFX9: global_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], off offset:2048
22 ; GFX9: global_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], off offset:2048
23 ; GFX9: global_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], off{{$}}
24 ; GFX9: global_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], off offset:2048
; Base address: %conv = id & 255 is the per-lane element index; %idx.ext11
; masks (id << 7) to a 128-byte-aligned block offset into %buffer.
27 %call = tail call i64 @_Z13get_global_idj(i32 0)
28 %conv = and i64 %call, 255
29 %a0 = shl i64 %call, 7
30 %idx.ext11 = and i64 %a0, 4294934528
31 %add.ptr12 = getelementptr inbounds i8, i8 addrspace(1)* %buffer, i64 %idx.ext11
32 %saddr = bitcast i8 addrspace(1)* %add.ptr12 to i64 addrspace(1)*
; Eight loads at element indices 0, 256, 512, ..., 1792 relative to %addr1
; (each step is 256 * 8 = 2048 bytes), accumulated into %add.7.
34 %addr1 = getelementptr inbounds i64, i64 addrspace(1)* %saddr, i64 %conv
35 %load1 = load i64, i64 addrspace(1)* %addr1, align 8
36 %addr2 = getelementptr inbounds i64, i64 addrspace(1)* %addr1, i64 256
37 %load2 = load i64, i64 addrspace(1)* %addr2, align 8
38 %add.1 = add i64 %load2, %load1
40 %add.ptr8.2 = getelementptr inbounds i64, i64 addrspace(1)* %addr1, i64 512
41 %load3 = load i64, i64 addrspace(1)* %add.ptr8.2, align 8
42 %add.2 = add i64 %load3, %add.1
43 %add.ptr8.3 = getelementptr inbounds i64, i64 addrspace(1)* %addr1, i64 768
44 %load4 = load i64, i64 addrspace(1)* %add.ptr8.3, align 8
45 %add.3 = add i64 %load4, %add.2
47 %add.ptr8.4 = getelementptr inbounds i64, i64 addrspace(1)* %addr1, i64 1024
48 %load5 = load i64, i64 addrspace(1)* %add.ptr8.4, align 8
49 %add.4 = add i64 %load5, %add.3
50 %add.ptr8.5 = getelementptr inbounds i64, i64 addrspace(1)* %addr1, i64 1280
51 %load6 = load i64, i64 addrspace(1)* %add.ptr8.5, align 8
52 %add.5 = add i64 %load6, %add.4
54 %add.ptr8.6 = getelementptr inbounds i64, i64 addrspace(1)* %addr1, i64 1536
55 %load7 = load i64, i64 addrspace(1)* %add.ptr8.6, align 8
56 %add.6 = add i64 %load7, %add.5
57 %add.ptr8.7 = getelementptr inbounds i64, i64 addrspace(1)* %addr1, i64 1792
58 %load8 = load i64, i64 addrspace(1)* %add.ptr8.7, align 8
59 %add.7 = add i64 %load8, %add.6
61 store i64 %add.7, i64 addrspace(1)* %saddr, align 8
; Loop version of the strided-read pattern: each inner-loop iteration issues
; eleven i64 loads at element offsets block|0, block|256, ..., block|2560
; (2048-byte strides) and accumulates them. The GFX9 checks expect the
; common base to be rebased so the strides become immediate offsets
; (-4096/-2048/0/2048) on global_load_dwordx2; GFX8 uses offset-less flat
; loads throughout.
65 define hidden amdgpu_kernel void @clmem_read(i8 addrspace(1)* %buffer) {
66 ; GCN-LABEL: clmem_read:
67 ; GFX8: flat_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]
68 ; GFX8: flat_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]
69 ; GFX8: flat_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]
70 ; GFX8: flat_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]
71 ; GFX8: flat_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]
72 ; GFX8: flat_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]
73 ; GFX8: flat_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]
74 ; GFX8: flat_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]
75 ; GFX8: flat_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]
76 ; GFX8: flat_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]
77 ; GFX8: flat_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]
79 ; GFX9: global_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], off offset:-4096
80 ; GFX9: global_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], off offset:-2048
81 ; GFX9: global_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], off{{$}}
82 ; GFX9: global_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], off offset:2048
83 ; GFX9: global_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], off offset:-4096
84 ; GFX9: global_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], off offset:-2048
85 ; GFX9: global_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], off{{$}}
86 ; GFX9: global_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], off offset:2048
87 ; GFX9: global_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], off
88 ; GFX9: global_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], off offset:-4096
89 ; GFX9: global_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], off
; Per-work-item base: lane index (%conv = id & 255) plus a masked block
; offset derived from id << 17.
91 %call = tail call i64 @_Z13get_global_idj(i32 0)
92 %conv = and i64 %call, 255
93 %a0 = shl i64 %call, 17
94 %idx.ext11 = and i64 %a0, 4261412864
95 %add.ptr12 = getelementptr inbounds i8, i8 addrspace(1)* %buffer, i64 %idx.ext11
96 %a1 = bitcast i8 addrspace(1)* %add.ptr12 to i64 addrspace(1)*
97 %add.ptr6 = getelementptr inbounds i64, i64 addrspace(1)* %a1, i64 %conv
98 br label %for.cond.preheader
; Outer-loop latch: counts %dec31 down (starts at 127) and re-enters the
; preheader until it hits zero.
100 while.cond.loopexit: ; preds = %for.body
101 %dec = add nsw i32 %dec31, -1
102 %tobool = icmp eq i32 %dec31, 0
103 br i1 %tobool, label %while.end, label %for.cond.preheader
105 for.cond.preheader: ; preds = %entry, %while.cond.loopexit
106 %dec31 = phi i32 [ 127, %entry ], [ %dec, %while.cond.loopexit ]
107 %sum.030 = phi i64 [ 0, %entry ], [ %add.10, %while.cond.loopexit ]
; Inner loop: %block.029 advances by 8192 elements per iteration while
; staying below 4194304; the low bits are OR'ed in so each of the eleven
; addresses differs only by a constant 256-element (2048-byte) step.
110 for.body: ; preds = %for.body, %for.cond.preheader
111 %block.029 = phi i32 [ 0, %for.cond.preheader ], [ %add9.31, %for.body ]
112 %sum.128 = phi i64 [ %sum.030, %for.cond.preheader ], [ %add.10, %for.body ]
113 %conv3 = zext i32 %block.029 to i64
114 %add.ptr8 = getelementptr inbounds i64, i64 addrspace(1)* %add.ptr6, i64 %conv3
115 %load1 = load i64, i64 addrspace(1)* %add.ptr8, align 8
116 %add = add i64 %load1, %sum.128
118 %add9 = or i32 %block.029, 256
119 %conv3.1 = zext i32 %add9 to i64
120 %add.ptr8.1 = getelementptr inbounds i64, i64 addrspace(1)* %add.ptr6, i64 %conv3.1
121 %load2 = load i64, i64 addrspace(1)* %add.ptr8.1, align 8
122 %add.1 = add i64 %load2, %add
124 %add9.1 = or i32 %block.029, 512
125 %conv3.2 = zext i32 %add9.1 to i64
126 %add.ptr8.2 = getelementptr inbounds i64, i64 addrspace(1)* %add.ptr6, i64 %conv3.2
127 %l3 = load i64, i64 addrspace(1)* %add.ptr8.2, align 8
128 %add.2 = add i64 %l3, %add.1
130 %add9.2 = or i32 %block.029, 768
131 %conv3.3 = zext i32 %add9.2 to i64
132 %add.ptr8.3 = getelementptr inbounds i64, i64 addrspace(1)* %add.ptr6, i64 %conv3.3
133 %l4 = load i64, i64 addrspace(1)* %add.ptr8.3, align 8
134 %add.3 = add i64 %l4, %add.2
136 %add9.3 = or i32 %block.029, 1024
137 %conv3.4 = zext i32 %add9.3 to i64
138 %add.ptr8.4 = getelementptr inbounds i64, i64 addrspace(1)* %add.ptr6, i64 %conv3.4
139 %l5 = load i64, i64 addrspace(1)* %add.ptr8.4, align 8
140 %add.4 = add i64 %l5, %add.3
142 %add9.4 = or i32 %block.029, 1280
143 %conv3.5 = zext i32 %add9.4 to i64
144 %add.ptr8.5 = getelementptr inbounds i64, i64 addrspace(1)* %add.ptr6, i64 %conv3.5
145 %l6 = load i64, i64 addrspace(1)* %add.ptr8.5, align 8
146 %add.5 = add i64 %l6, %add.4
148 %add9.5 = or i32 %block.029, 1536
149 %conv3.6 = zext i32 %add9.5 to i64
150 %add.ptr8.6 = getelementptr inbounds i64, i64 addrspace(1)* %add.ptr6, i64 %conv3.6
151 %load7 = load i64, i64 addrspace(1)* %add.ptr8.6, align 8
152 %add.6 = add i64 %load7, %add.5
154 %add9.6 = or i32 %block.029, 1792
155 %conv3.7 = zext i32 %add9.6 to i64
156 %add.ptr8.7 = getelementptr inbounds i64, i64 addrspace(1)* %add.ptr6, i64 %conv3.7
157 %load8 = load i64, i64 addrspace(1)* %add.ptr8.7, align 8
158 %add.7 = add i64 %load8, %add.6
160 %add9.7 = or i32 %block.029, 2048
161 %conv3.8 = zext i32 %add9.7 to i64
162 %add.ptr8.8 = getelementptr inbounds i64, i64 addrspace(1)* %add.ptr6, i64 %conv3.8
163 %load9 = load i64, i64 addrspace(1)* %add.ptr8.8, align 8
164 %add.8 = add i64 %load9, %add.7
166 %add9.8 = or i32 %block.029, 2304
167 %conv3.9 = zext i32 %add9.8 to i64
168 %add.ptr8.9 = getelementptr inbounds i64, i64 addrspace(1)* %add.ptr6, i64 %conv3.9
169 %load10 = load i64, i64 addrspace(1)* %add.ptr8.9, align 8
170 %add.9 = add i64 %load10, %add.8
172 %add9.9 = or i32 %block.029, 2560
173 %conv3.10 = zext i32 %add9.9 to i64
174 %add.ptr8.10 = getelementptr inbounds i64, i64 addrspace(1)* %add.ptr6, i64 %conv3.10
175 %load11 = load i64, i64 addrspace(1)* %add.ptr8.10, align 8
176 %add.10 = add i64 %load11, %add.9
178 %add9.31 = add nuw nsw i32 %block.029, 8192
179 %cmp.31 = icmp ult i32 %add9.31, 4194304
180 br i1 %cmp.31, label %for.body, label %while.cond.loopexit
; Final sum is written back to the per-work-item base address.
182 while.end: ; preds = %while.cond.loopexit
183 store i64 %add.10, i64 addrspace(1)* %a1, align 8
187 ; Same access pattern, but using a 32-bit element type and address offsets.
; i32 variant: sums ten dword loads spaced 256 elements (1024 bytes) apart.
; The GFX9 checks expect the 1024-byte strides folded into global_load_dword
; immediates (0/1024/2048/3072, re-anchoring the base as needed); GFX8 flat
; loads carry no immediate offset.
188 define amdgpu_kernel void @Address32(i8 addrspace(1)* %buffer) {
189 ; GCN-LABEL: Address32:
190 ; GFX8: flat_load_dword {{v[0-9]+}}, v[{{[0-9]+:[0-9]+}}]
191 ; GFX8: flat_load_dword {{v[0-9]+}}, v[{{[0-9]+:[0-9]+}}]
192 ; GFX8: flat_load_dword {{v[0-9]+}}, v[{{[0-9]+:[0-9]+}}]
193 ; GFX8: flat_load_dword {{v[0-9]+}}, v[{{[0-9]+:[0-9]+}}]
194 ; GFX8: flat_load_dword {{v[0-9]+}}, v[{{[0-9]+:[0-9]+}}]
195 ; GFX8: flat_load_dword {{v[0-9]+}}, v[{{[0-9]+:[0-9]+}}]
196 ; GFX8: flat_load_dword {{v[0-9]+}}, v[{{[0-9]+:[0-9]+}}]
197 ; GFX8: flat_load_dword {{v[0-9]+}}, v[{{[0-9]+:[0-9]+}}]
198 ; GFX8: flat_load_dword {{v[0-9]+}}, v[{{[0-9]+:[0-9]+}}]
199 ; GFX8: flat_load_dword {{v[0-9]+}}, v[{{[0-9]+:[0-9]+}}]
201 ; GFX9: global_load_dword {{v[0-9]+}}, v[{{[0-9]+:[0-9]+}}], off offset:1024
202 ; GFX9: global_load_dword {{v[0-9]+}}, v[{{[0-9]+:[0-9]+}}], off{{$}}
203 ; GFX9: global_load_dword {{v[0-9]+}}, v[{{[0-9]+:[0-9]+}}], off offset:1024
204 ; GFX9: global_load_dword {{v[0-9]+}}, v[{{[0-9]+:[0-9]+}}], off offset:2048
205 ; GFX9: global_load_dword {{v[0-9]+}}, v[{{[0-9]+:[0-9]+}}], off offset:3072
206 ; GFX9: global_load_dword {{v[0-9]+}}, v[{{[0-9]+:[0-9]+}}], off{{$}}
207 ; GFX9: global_load_dword {{v[0-9]+}}, v[{{[0-9]+:[0-9]+}}], off offset:1024
208 ; GFX9: global_load_dword {{v[0-9]+}}, v[{{[0-9]+:[0-9]+}}], off offset:2048
209 ; GFX9: global_load_dword {{v[0-9]+}}, v[{{[0-9]+:[0-9]+}}], off offset:3072
210 ; GFX9: global_load_dword {{v[0-9]+}}, v[{{[0-9]+:[0-9]+}}], off{{$}}
; Base address from global id, as in the kernels above.
212 %call = tail call i64 @_Z13get_global_idj(i32 0)
213 %conv = and i64 %call, 255
214 %id = shl i64 %call, 7
215 %idx.ext11 = and i64 %id, 4294934528
216 %add.ptr12 = getelementptr inbounds i8, i8 addrspace(1)* %buffer, i64 %idx.ext11
217 %addr = bitcast i8 addrspace(1)* %add.ptr12 to i32 addrspace(1)*
; Ten loads at element indices 0, 256, ..., 2304 (1024-byte steps).
219 %add.ptr6 = getelementptr inbounds i32, i32 addrspace(1)* %addr, i64 %conv
220 %load1 = load i32, i32 addrspace(1)* %add.ptr6, align 4
222 %add.ptr8.1 = getelementptr inbounds i32, i32 addrspace(1)* %add.ptr6, i64 256
223 %load2 = load i32, i32 addrspace(1)* %add.ptr8.1, align 4
224 %add.1 = add i32 %load2, %load1
226 %add.ptr8.2 = getelementptr inbounds i32, i32 addrspace(1)* %add.ptr6, i64 512
227 %load3 = load i32, i32 addrspace(1)* %add.ptr8.2, align 4
228 %add.2 = add i32 %load3, %add.1
230 %add.ptr8.3 = getelementptr inbounds i32, i32 addrspace(1)* %add.ptr6, i64 768
231 %load4 = load i32, i32 addrspace(1)* %add.ptr8.3, align 4
232 %add.3 = add i32 %load4, %add.2
234 %add.ptr8.4 = getelementptr inbounds i32, i32 addrspace(1)* %add.ptr6, i64 1024
235 %load5 = load i32, i32 addrspace(1)* %add.ptr8.4, align 4
236 %add.4 = add i32 %load5, %add.3
238 %add.ptr8.5 = getelementptr inbounds i32, i32 addrspace(1)* %add.ptr6, i64 1280
239 %load6 = load i32, i32 addrspace(1)* %add.ptr8.5, align 4
240 %add.5 = add i32 %load6, %add.4
242 %add.ptr8.6 = getelementptr inbounds i32, i32 addrspace(1)* %add.ptr6, i64 1536
243 %load7 = load i32, i32 addrspace(1)* %add.ptr8.6, align 4
244 %add.6 = add i32 %load7, %add.5
246 %add.ptr8.7 = getelementptr inbounds i32, i32 addrspace(1)* %add.ptr6, i64 1792
247 %load8 = load i32, i32 addrspace(1)* %add.ptr8.7, align 4
248 %add.7 = add i32 %load8, %add.6
250 %add.ptr8.8 = getelementptr inbounds i32, i32 addrspace(1)* %add.ptr6, i64 2048
251 %load9 = load i32, i32 addrspace(1)* %add.ptr8.8, align 4
252 %add.8 = add i32 %load9, %add.7
254 %add.ptr8.9 = getelementptr inbounds i32, i32 addrspace(1)* %add.ptr6, i64 2304
255 %load10 = load i32, i32 addrspace(1)* %add.ptr8.9, align 4
256 %add.9 = add i32 %load10, %add.8
258 store i32 %add.9, i32 addrspace(1)* %addr, align 4
; Loads at element indices 0, 536870400, 536870656 and 536870912 — i.e.
; byte offsets of 0 and values at/near 2^32 (536870912 * 8 == 2^32). Only
; the nearby pair is expected to share an anchor on GFX9 (offset:2048 /
; offset:-4096); the others keep a plain base with no immediate.
262 define amdgpu_kernel void @Offset64(i8 addrspace(1)* %buffer) {
263 ; GCN-LABEL: Offset64:
264 ; GFX8: flat_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]
265 ; GFX8: flat_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]
266 ; GFX8: flat_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]
267 ; GFX8: flat_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]
269 ; GFX9: global_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], off offset:2048
270 ; GFX9: global_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], off{{$}}
271 ; GFX9: global_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], off offset:-4096
272 ; GFX9: global_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], off{{$}}
274 %call = tail call i64 @_Z13get_global_idj(i32 0)
275 %conv = and i64 %call, 255
276 %a0 = shl i64 %call, 7
277 %idx.ext11 = and i64 %a0, 4294934528
278 %add.ptr12 = getelementptr inbounds i8, i8 addrspace(1)* %buffer, i64 %idx.ext11
279 %saddr = bitcast i8 addrspace(1)* %add.ptr12 to i64 addrspace(1)*
281 %addr1 = getelementptr inbounds i64, i64 addrspace(1)* %saddr, i64 %conv
282 %load1 = load i64, i64 addrspace(1)* %addr1, align 8
; 536870400 * 8 bytes — just below the 4 GiB boundary.
284 %addr2 = getelementptr inbounds i64, i64 addrspace(1)* %addr1, i64 536870400
285 %load2 = load i64, i64 addrspace(1)* %addr2, align 8
287 %add1 = add i64 %load2, %load1
289 %addr3 = getelementptr inbounds i64, i64 addrspace(1)* %addr1, i64 536870656
290 %load3 = load i64, i64 addrspace(1)* %addr3, align 8
292 %add2 = add i64 %load3, %add1
; 536870912 * 8 bytes == 2^32 exactly.
294 %addr4 = getelementptr inbounds i64, i64 addrspace(1)* %addr1, i64 536870912
295 %load4 = load i64, i64 addrspace(1)* %addr4, align 8
296 %add4 = add i64 %load4, %add2
298 store i64 %add4, i64 addrspace(1)* %saddr, align 8
302 ; TODO: Support load4 as anchor instruction.
; Same huge indices as @Offset64 but with i32 elements, so the byte offsets
; sit at/near 2^31 (536870912 * 4 == 2^31). GFX9 is expected to fold only
; the first pair into immediates (offset:2048 / offset:3072); the last two
; keep an offset-less base — see the TODO above about anchoring on load4.
303 define amdgpu_kernel void @p32Offset64(i8 addrspace(1)* %buffer) {
304 ; GCN-LABEL: p32Offset64:
305 ; GFX8: flat_load_dword {{v[0-9]+}}, v[{{[0-9]+:[0-9]+}}]
306 ; GFX8: flat_load_dword {{v[0-9]+}}, v[{{[0-9]+:[0-9]+}}]
307 ; GFX8: flat_load_dword {{v[0-9]+}}, v[{{[0-9]+:[0-9]+}}]
308 ; GFX8: flat_load_dword {{v[0-9]+}}, v[{{[0-9]+:[0-9]+}}]
310 ; GFX9: global_load_dword {{v[0-9]+}}, v[{{[0-9]+:[0-9]+}}], off offset:2048
311 ; GFX9: global_load_dword {{v[0-9]+}}, v[{{[0-9]+:[0-9]+}}], off offset:3072
312 ; GFX9: global_load_dword {{v[0-9]+}}, v[{{[0-9]+:[0-9]+}}], off{{$}}
313 ; GFX9: global_load_dword {{v[0-9]+}}, v[{{[0-9]+:[0-9]+}}], off{{$}}
315 %call = tail call i64 @_Z13get_global_idj(i32 0)
316 %conv = and i64 %call, 255
317 %a0 = shl i64 %call, 7
318 %idx.ext11 = and i64 %a0, 4294934528
319 %add.ptr12 = getelementptr inbounds i8, i8 addrspace(1)* %buffer, i64 %idx.ext11
320 %saddr = bitcast i8 addrspace(1)* %add.ptr12 to i32 addrspace(1)*
322 %addr1 = getelementptr inbounds i32, i32 addrspace(1)* %saddr, i64 %conv
323 %load1 = load i32, i32 addrspace(1)* %addr1, align 8
325 %addr2 = getelementptr inbounds i32, i32 addrspace(1)* %addr1, i64 536870400
326 %load2 = load i32, i32 addrspace(1)* %addr2, align 8
328 %add1 = add i32 %load2, %load1
330 %addr3 = getelementptr inbounds i32, i32 addrspace(1)* %addr1, i64 536870656
331 %load3 = load i32, i32 addrspace(1)* %addr3, align 8
333 %add2 = add i32 %load3, %add1
; 536870912 * 4 bytes == 2^31 exactly.
335 %addr4 = getelementptr inbounds i32, i32 addrspace(1)* %addr1, i64 536870912
336 %load4 = load i32, i32 addrspace(1)* %addr4, align 8
337 %add4 = add i32 %load4, %add2
339 store i32 %add4, i32 addrspace(1)* %saddr, align 8
; Two groups of three strided loads from two unrelated base pointers
; (%buffer1 and %buffer2). The GFX9 checks expect each group to be anchored
; independently, with the 2048-byte strides folded into immediates within
; each group.
343 define amdgpu_kernel void @DiffBase(i8 addrspace(1)* %buffer1,
344 ; GCN-LABEL: DiffBase:
345 ; GFX8: flat_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]
346 ; GFX8: flat_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]
347 ; GFX8: flat_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]
348 ; GFX8: flat_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]
349 ; GFX8: flat_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]
350 ; GFX8: flat_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]
352 ; GFX9: global_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], off offset:2048
353 ; GFX9: global_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], off offset:2048
354 ; GFX9: global_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], off{{$}}
355 ; GFX9: global_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], off offset:2048
356 ; GFX9: global_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], off offset:-4096
357 ; GFX9: global_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], off{{$}}
358 i8 addrspace(1)* %buffer2) {
360 %call = tail call i64 @_Z13get_global_idj(i32 0)
361 %conv = and i64 %call, 255
362 %a0 = shl i64 %call, 7
363 %idx.ext11 = and i64 %a0, 4294934528
; The same block offset is applied to both buffers, giving two bases.
364 %add.ptr12 = getelementptr inbounds i8, i8 addrspace(1)* %buffer1, i64 %idx.ext11
365 %saddr = bitcast i8 addrspace(1)* %add.ptr12 to i64 addrspace(1)*
367 %add.ptr2 = getelementptr inbounds i8, i8 addrspace(1)* %buffer2, i64 %idx.ext11
368 %saddr2 = bitcast i8 addrspace(1)* %add.ptr2 to i64 addrspace(1)*
; Group 1: indices 512/768/1024 off %saddr (buffer1).
370 %addr1 = getelementptr inbounds i64, i64 addrspace(1)* %saddr, i64 512
371 %load1 = load i64, i64 addrspace(1)* %addr1, align 8
372 %add.ptr8.3 = getelementptr inbounds i64, i64 addrspace(1)* %saddr, i64 768
373 %load2 = load i64, i64 addrspace(1)* %add.ptr8.3, align 8
374 %add1 = add i64 %load2, %load1
375 %add.ptr8.4 = getelementptr inbounds i64, i64 addrspace(1)* %saddr, i64 1024
376 %load3 = load i64, i64 addrspace(1)* %add.ptr8.4, align 8
377 %add2 = add i64 %load3, %add1
; Group 2: indices 1280/1536/1792 off %saddr2 (buffer2).
379 %add.ptr8.5 = getelementptr inbounds i64, i64 addrspace(1)* %saddr2, i64 1280
380 %load4 = load i64, i64 addrspace(1)* %add.ptr8.5, align 8
382 %add.ptr8.6 = getelementptr inbounds i64, i64 addrspace(1)* %saddr2, i64 1536
383 %load5 = load i64, i64 addrspace(1)* %add.ptr8.6, align 8
384 %add3 = add i64 %load5, %load4
386 %add.ptr8.7 = getelementptr inbounds i64, i64 addrspace(1)* %saddr2, i64 1792
387 %load6 = load i64, i64 addrspace(1)* %add.ptr8.7, align 8
388 %add4 = add i64 %load6, %add3
390 %add5 = add i64 %add2, %add4
392 store i64 %add5, i64 addrspace(1)* %saddr, align 8
; Same eight 2048-byte-strided loads as @clmem_read_simplified, but issued
; in decreasing offset order (1792 down to 256 after the initial load at 0).
; Verifies that offset folding on GFX9 does not depend on the loads
; appearing in ascending address order.
396 define amdgpu_kernel void @ReverseOrder(i8 addrspace(1)* %buffer) {
397 ; GCN-LABEL: ReverseOrder:
398 ; GFX8: flat_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]
399 ; GFX8: flat_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]
400 ; GFX8: flat_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]
401 ; GFX8: flat_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]
402 ; GFX8: flat_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]
403 ; GFX8: flat_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]
404 ; GFX8: flat_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]
405 ; GFX8: flat_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]
407 ; GFX9: global_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], off{{$}}
408 ; GFX9: global_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], off offset:2048
409 ; GFX9: global_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], off{{$}}
410 ; GFX9: global_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], off offset:2048
411 ; GFX9: global_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], off{{$}}
412 ; GFX9: global_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], off offset:2048
413 ; GFX9: global_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], off{{$}}
414 ; GFX9: global_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], off offset:2048
416 %call = tail call i64 @_Z13get_global_idj(i32 0)
417 %conv = and i64 %call, 255
418 %a0 = shl i64 %call, 7
419 %idx.ext11 = and i64 %a0, 4294934528
420 %add.ptr12 = getelementptr inbounds i8, i8 addrspace(1)* %buffer, i64 %idx.ext11
421 %saddr = bitcast i8 addrspace(1)* %add.ptr12 to i64 addrspace(1)*
423 %addr1 = getelementptr inbounds i64, i64 addrspace(1)* %saddr, i64 %conv
424 %load1 = load i64, i64 addrspace(1)* %addr1, align 8
; Offsets descend from here: 1792, 1536, ..., 256 elements.
426 %add.ptr8.7 = getelementptr inbounds i64, i64 addrspace(1)* %addr1, i64 1792
427 %load8 = load i64, i64 addrspace(1)* %add.ptr8.7, align 8
428 %add7 = add i64 %load8, %load1
430 %add.ptr8.6 = getelementptr inbounds i64, i64 addrspace(1)* %addr1, i64 1536
431 %load7 = load i64, i64 addrspace(1)* %add.ptr8.6, align 8
432 %add6 = add i64 %load7, %add7
434 %add.ptr8.5 = getelementptr inbounds i64, i64 addrspace(1)* %addr1, i64 1280
435 %load6 = load i64, i64 addrspace(1)* %add.ptr8.5, align 8
436 %add5 = add i64 %load6, %add6
438 %add.ptr8.4 = getelementptr inbounds i64, i64 addrspace(1)* %addr1, i64 1024
439 %load5 = load i64, i64 addrspace(1)* %add.ptr8.4, align 8
440 %add4 = add i64 %load5, %add5
442 %add.ptr8.3 = getelementptr inbounds i64, i64 addrspace(1)* %addr1, i64 768
443 %load4 = load i64, i64 addrspace(1)* %add.ptr8.3, align 8
444 %add3 = add i64 %load4, %add4
446 %add.ptr8.2 = getelementptr inbounds i64, i64 addrspace(1)* %addr1, i64 512
447 %load3 = load i64, i64 addrspace(1)* %add.ptr8.2, align 8
448 %add2 = add i64 %load3, %add3
450 %addr2 = getelementptr inbounds i64, i64 addrspace(1)* %addr1, i64 256
451 %load2 = load i64, i64 addrspace(1)* %addr2, align 8
452 %add1 = add i64 %load2, %add2
454 store i64 %add1, i64 addrspace(1)* %saddr, align 8
; Two loads at large NEGATIVE element indices (-536870656 and -536870912,
; i.e. byte offsets of -2^32 + 2048 and -2^32). The GFX9 checks expect the
; pair to share one anchor, with the 2048-byte difference carried as an
; immediate on the second load.
458 define hidden amdgpu_kernel void @negativeoffset(i8 addrspace(1)* nocapture %buffer) {
459 ; GCN-LABEL: negativeoffset:
460 ; GFX8: flat_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]
461 ; GFX8: flat_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]
463 ; GFX9: global_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], off
464 ; GFX9: global_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}], off offset:2048
466 %call = tail call i64 @_Z13get_global_idj(i32 0) #2
467 %conv = and i64 %call, 255
468 %0 = shl i64 %call, 7
469 %idx.ext11 = and i64 %0, 4294934528
470 %add.ptr12 = getelementptr inbounds i8, i8 addrspace(1)* %buffer, i64 %idx.ext11
471 %buffer_head = bitcast i8 addrspace(1)* %add.ptr12 to i64 addrspace(1)*
473 %buffer_wave = getelementptr inbounds i64, i64 addrspace(1)* %buffer_head, i64 %conv
; -536870656 * 8 bytes == -(2^32) + 2048.
475 %addr1 = getelementptr inbounds i64, i64 addrspace(1)* %buffer_wave, i64 -536870656
476 %load1 = load i64, i64 addrspace(1)* %addr1, align 8
; -536870912 * 8 bytes == -(2^32) exactly.
478 %addr2 = getelementptr inbounds i64, i64 addrspace(1)* %buffer_wave, i64 -536870912
479 %load2 = load i64, i64 addrspace(1)* %addr2, align 8
482 %add = add i64 %load2, %load1
484 store i64 %add, i64 addrspace(1)* %buffer_head, align 8