; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-NOHSA -check-prefix=SI %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-NOHSA -check-prefix=CI -check-prefix=CI-NOHSA %s
; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn--amdhsa -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=CI -check-prefix=GCN-HSA %s

declare i32 @llvm.amdgcn.workitem.id.x() #0
declare i32 @llvm.amdgcn.workitem.id.y() #0
; In this test both the pointer and the offset operands to the
; BUFFER_LOAD instructions end up being stored in VGPRs. This
; requires us to add the pointer and offset together, store the
; result in the offset operand (vaddr), and then store 0 in an
; SGPR register pair and use that for the pointer operand
; (low 64-bits of srsrc).
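;
; A rough sketch of the pattern the non-HSA checks look for (register
; numbers here are illustrative only; FileCheck only verifies the shapes
; in the directives below):
;   v_add_i32_e32  v0, vcc, v2, v0       ; vaddr lo = pointer lo + offset lo
;   v_addc_u32_e32 v1, vcc, v3, v1, vcc  ; vaddr hi = pointer hi + carry
;   s_mov_b64      s[0:1], 0             ; zero for the low 64 bits of srsrc
;   buffer_load_ubyte v0, v[0:1], s[0:3], 0 addr64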
; GCN-LABEL: {{^}}mubuf:

; Make sure we aren't using VGPRs for the source operand of s_mov_b64
; GCN-NOT: s_mov_b64 s[{{[0-9]+:[0-9]+}}], v

; Make sure we aren't using VGPRs for the srsrc operand of BUFFER_LOAD_*
; GCN-NOHSA: buffer_load_ubyte v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], 0 addr64
; GCN-NOHSA: buffer_load_ubyte v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], 0 addr64
; GCN-HSA: flat_load_ubyte v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}]
; GCN-HSA: flat_load_ubyte v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}]
define amdgpu_kernel void @mubuf(i32 addrspace(1)* %out, i8 addrspace(1)* %in) #1 {
entry:
  %tmp = call i32 @llvm.amdgcn.workitem.id.x()
  %tmp1 = call i32 @llvm.amdgcn.workitem.id.y()
  %tmp2 = sext i32 %tmp to i64
  %tmp3 = sext i32 %tmp1 to i64
  br label %loop

loop:                                             ; preds = %loop, %entry
  %tmp4 = phi i64 [ 0, %entry ], [ %tmp5, %loop ]
  %tmp5 = add i64 %tmp2, %tmp4
  %tmp6 = getelementptr i8, i8 addrspace(1)* %in, i64 %tmp5
  %tmp7 = load i8, i8 addrspace(1)* %tmp6, align 1
  %tmp8 = or i64 %tmp5, 1
  %tmp9 = getelementptr i8, i8 addrspace(1)* %in, i64 %tmp8
  %tmp10 = load i8, i8 addrspace(1)* %tmp9, align 1
  %tmp11 = add i8 %tmp7, %tmp10
  %tmp12 = sext i8 %tmp11 to i32
  store i32 %tmp12, i32 addrspace(1)* %out
  %tmp13 = icmp slt i64 %tmp5, 10
  br i1 %tmp13, label %loop, label %done

done:                                             ; preds = %loop
  ret void
}

; Test moving an SMRD instruction to the VALU
; FIXME: movs can be moved before nop to reduce count

; GCN-LABEL: {{^}}smrd_valu:
; SI: s_movk_i32 [[OFFSET:s[0-9]+]], 0x2ee0

; GCN: v_readfirstlane_b32 s[[PTR_LO:[0-9]+]], v{{[0-9]+}}
; GCN: v_readfirstlane_b32 s[[PTR_HI:[0-9]+]], v{{[0-9]+}}

; SI: s_load_dword [[OUT:s[0-9]+]], s{{\[}}[[PTR_LO]]:[[PTR_HI]]{{\]}}, [[OFFSET]]

; CI: s_load_dword [[OUT:s[0-9]+]], s{{\[}}[[PTR_LO]]:[[PTR_HI]]{{\]}}, 0xbb8
; GCN: v_mov_b32_e32 [[V_OUT:v[0-9]+]], [[OUT]]
; GCN-NOHSA: buffer_store_dword [[V_OUT]]
; GCN-HSA: flat_store_dword {{.*}}, [[V_OUT]]
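; The two offset encodings come from the getelementptr of 3000 i32s below:
; 3000 * 4 = 12000 = 0x2ee0 bytes for the SGPR offset on SI, while CI's
; 32-bit literal offset encodes a dword count, so 0xbb8 = 3000 directly.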
define amdgpu_kernel void @smrd_valu(i32 addrspace(4)* addrspace(1)* %in, i32 %a, i32 %b, i32 addrspace(1)* %out) #1 {
entry:
  %tmp = icmp ne i32 %a, 0
  br i1 %tmp, label %if, label %else

if:                                               ; preds = %entry
  %tmp1 = load i32 addrspace(4)*, i32 addrspace(4)* addrspace(1)* %in
  br label %endif

else:                                             ; preds = %entry
  %tmp2 = getelementptr i32 addrspace(4)*, i32 addrspace(4)* addrspace(1)* %in
  %tmp3 = load i32 addrspace(4)*, i32 addrspace(4)* addrspace(1)* %tmp2
  br label %endif

endif:                                            ; preds = %else, %if
  %tmp4 = phi i32 addrspace(4)* [ %tmp1, %if ], [ %tmp3, %else ]
  %tmp5 = getelementptr i32, i32 addrspace(4)* %tmp4, i32 3000
  %tmp6 = load i32, i32 addrspace(4)* %tmp5
  store i32 %tmp6, i32 addrspace(1)* %out
  ret void
}

; Test moving an SMRD with an immediate offset to the VALU

; GCN-LABEL: {{^}}smrd_valu2:
; GCN-NOHSA-NOT: v_add
; GCN-NOHSA: buffer_load_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s[{{[0-9]+:[0-9]+}}], 0 addr64 offset:16{{$}}
; GCN-HSA: flat_load_dword v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}]
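; The offset:16 above is the constant part of the gep below: element 4 of
; an [8 x i32] row is 4 * 4 = 16 bytes past the row base.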
define amdgpu_kernel void @smrd_valu2(i32 addrspace(1)* %out, [8 x i32] addrspace(4)* %in) #1 {
entry:
  %tmp = call i32 @llvm.amdgcn.workitem.id.x()
  %tmp1 = add i32 %tmp, 4
  %tmp2 = getelementptr [8 x i32], [8 x i32] addrspace(4)* %in, i32 %tmp, i32 4
  %tmp3 = load i32, i32 addrspace(4)* %tmp2
  store i32 %tmp3, i32 addrspace(1)* %out
  ret void
}

; Use a big offset that will use the SMRD literal offset on CI
; GCN-LABEL: {{^}}smrd_valu_ci_offset:
; GCN-NOHSA-NOT: v_add
; GCN-NOHSA: s_movk_i32 [[OFFSET:s[0-9]+]], 0x4e20{{$}}
; GCN-NOHSA-NOT: v_add
; GCN-NOHSA: buffer_load_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s[{{[0-9]+:[0-9]+}}], [[OFFSET]] addr64{{$}}
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: buffer_store_dword
; GCN-HSA: flat_load_dword v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}]
; GCN-HSA: flat_store_dword v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}}
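; 0x4e20 above is the byte offset of the gep below: 5000 i32s * 4 = 20000 = 0x4e20.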
define amdgpu_kernel void @smrd_valu_ci_offset(i32 addrspace(1)* %out, i32 addrspace(4)* %in, i32 %c) #1 {
entry:
  %tmp = call i32 @llvm.amdgcn.workitem.id.x()
  %tmp2 = getelementptr i32, i32 addrspace(4)* %in, i32 %tmp
  %tmp3 = getelementptr i32, i32 addrspace(4)* %tmp2, i32 5000
  %tmp4 = load i32, i32 addrspace(4)* %tmp3
  %tmp5 = add i32 %tmp4, %c
  store i32 %tmp5, i32 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}smrd_valu_ci_offset_x2:
; GCN-NOHSA-NOT: v_add
; GCN-NOHSA: s_mov_b32 [[OFFSET:s[0-9]+]], 0x9c40{{$}}
; GCN-NOHSA-NOT: v_add
; GCN-NOHSA: buffer_load_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s[{{[0-9]+:[0-9]+}}], [[OFFSET]] addr64{{$}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: buffer_store_dwordx2
; GCN-HSA: flat_load_dwordx2 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]
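; 0x9c40 above is the byte offset of the gep below: 5000 i64s * 8 = 40000 = 0x9c40.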
define amdgpu_kernel void @smrd_valu_ci_offset_x2(i64 addrspace(1)* %out, i64 addrspace(4)* %in, i64 %c) #1 {
entry:
  %tmp = call i32 @llvm.amdgcn.workitem.id.x()
  %tmp2 = getelementptr i64, i64 addrspace(4)* %in, i32 %tmp
  %tmp3 = getelementptr i64, i64 addrspace(4)* %tmp2, i32 5000
  %tmp4 = load i64, i64 addrspace(4)* %tmp3
  %tmp5 = or i64 %tmp4, %c
  store i64 %tmp5, i64 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}smrd_valu_ci_offset_x4:
; GCN-NOHSA-NOT: v_add
; GCN-NOHSA: s_movk_i32 [[OFFSET:s[0-9]+]], 0x4d20{{$}}
; GCN-NOHSA-NOT: v_add
; GCN-NOHSA: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s[{{[0-9]+:[0-9]+}}], [[OFFSET]] addr64{{$}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: buffer_store_dwordx4
; GCN-HSA: flat_load_dwordx4 v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]
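; 0x4d20 above is the byte offset of the gep below: 1234 * 16 bytes per
; <4 x i32> = 19744 = 0x4d20.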
define amdgpu_kernel void @smrd_valu_ci_offset_x4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(4)* %in, <4 x i32> %c) #1 {
entry:
  %tmp = call i32 @llvm.amdgcn.workitem.id.x()
  %tmp2 = getelementptr <4 x i32>, <4 x i32> addrspace(4)* %in, i32 %tmp
  %tmp3 = getelementptr <4 x i32>, <4 x i32> addrspace(4)* %tmp2, i32 1234
  %tmp4 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp3
  %tmp5 = or <4 x i32> %tmp4, %c
  store <4 x i32> %tmp5, <4 x i32> addrspace(1)* %out
  ret void
}

; Original scalar load uses an SGPR offset on SI and a 32-bit literal on
; CI.

; GCN-LABEL: {{^}}smrd_valu_ci_offset_x8:
; GCN-NOHSA: s_mov_b32 [[OFFSET0:s[0-9]+]], 0x9a40{{$}}
; GCN-NOHSA-NOT: v_add
; CI-NOHSA: s_mov_b32 [[OFFSET1:s[0-9]+]], 0x9a50{{$}}
; CI-NOHSA-NOT: v_add
; SI: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s[{{[0-9]+:[0-9]+}}], 0 addr64 offset:16
; CI-NOHSA: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s[{{[0-9]+:[0-9]+}}], [[OFFSET1]] addr64{{$}}
; GCN-NOHSA: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s[{{[0-9]+:[0-9]+}}], [[OFFSET0]] addr64{{$}}

; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: buffer_store_dwordx4
; GCN-NOHSA: buffer_store_dwordx4
; GCN-HSA: flat_load_dwordx4
; GCN-HSA: flat_load_dwordx4
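; 0x9a40 is 1234 * 32 bytes per <8 x i32> = 39488; the second half of the
; vector sits 16 bytes later, which CI materializes as 0x9a50 and SI folds
; into the MUBUF immediate (offset:16).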
define amdgpu_kernel void @smrd_valu_ci_offset_x8(<8 x i32> addrspace(1)* %out, <8 x i32> addrspace(4)* %in, <8 x i32> %c) #1 {
entry:
  %tmp = call i32 @llvm.amdgcn.workitem.id.x()
  %tmp2 = getelementptr <8 x i32>, <8 x i32> addrspace(4)* %in, i32 %tmp
  %tmp3 = getelementptr <8 x i32>, <8 x i32> addrspace(4)* %tmp2, i32 1234
  %tmp4 = load <8 x i32>, <8 x i32> addrspace(4)* %tmp3
  %tmp5 = or <8 x i32> %tmp4, %c
  store <8 x i32> %tmp5, <8 x i32> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}smrd_valu_ci_offset_x16:

; SI: s_mov_b32 {{s[0-9]+}}, 0x13480
; SI: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s[{{[0-9]+:[0-9]+}}], 0 addr64 offset:16
; SI: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s[{{[0-9]+:[0-9]+}}], 0 addr64 offset:32
; SI: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s[{{[0-9]+:[0-9]+}}], 0 addr64 offset:48
; SI: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s[{{[0-9]+:[0-9]+}}], {{s[0-9]+}} addr64
; CI-NOHSA-DAG: s_mov_b32 [[OFFSET0:s[0-9]+]], 0x13480{{$}}
; CI-NOHSA-DAG: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s[{{[0-9]+:[0-9]+}}], [[OFFSET0]] addr64{{$}}
; CI-NOHSA-DAG: s_mov_b32 [[OFFSET1:s[0-9]+]], 0x13490{{$}}
; CI-NOHSA-DAG: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s[{{[0-9]+:[0-9]+}}], [[OFFSET1]] addr64{{$}}
; CI-NOHSA-DAG: s_mov_b32 [[OFFSET2:s[0-9]+]], 0x134a0{{$}}
; CI-NOHSA-DAG: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s[{{[0-9]+:[0-9]+}}], [[OFFSET2]] addr64{{$}}
; CI-NOHSA-DAG: s_mov_b32 [[OFFSET3:s[0-9]+]], 0x134b0{{$}}
; CI-NOHSA-DAG: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s[{{[0-9]+:[0-9]+}}], [[OFFSET3]] addr64{{$}}

; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN-NOHSA: buffer_store_dwordx4
; GCN-NOHSA: buffer_store_dwordx4
; GCN-NOHSA: buffer_store_dwordx4
; GCN-NOHSA: buffer_store_dwordx4

; GCN-HSA: flat_load_dwordx4
; GCN-HSA: flat_load_dwordx4
; GCN-HSA: flat_load_dwordx4
; GCN-HSA: flat_load_dwordx4
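; The base constant 0x13480 is 1234 * 64 bytes per <16 x i32> = 78976. CI
; materializes each 16-byte-apart offset (0x13480/0x13490/0x134a0/0x134b0)
; in an SGPR, while SI folds the small deltas into the MUBUF immediate
; offset (offset:16/32/48).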
define amdgpu_kernel void @smrd_valu_ci_offset_x16(<16 x i32> addrspace(1)* %out, <16 x i32> addrspace(4)* %in, <16 x i32> %c) #1 {
entry:
  %tmp = call i32 @llvm.amdgcn.workitem.id.x()
  %tmp2 = getelementptr <16 x i32>, <16 x i32> addrspace(4)* %in, i32 %tmp
  %tmp3 = getelementptr <16 x i32>, <16 x i32> addrspace(4)* %tmp2, i32 1234
  %tmp4 = load <16 x i32>, <16 x i32> addrspace(4)* %tmp3
  %tmp5 = or <16 x i32> %tmp4, %c
  store <16 x i32> %tmp5, <16 x i32> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}smrd_valu2_salu_user:
; GCN-NOHSA: buffer_load_dword [[MOVED:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:16{{$}}
; GCN-HSA: flat_load_dword [[MOVED:v[0-9]+]], v[{{[0-9]+:[0-9]+}}]
; GCN: v_add_i32_e32 [[ADD:v[0-9]+]], vcc, s{{[0-9]+}}, [[MOVED]]
; GCN-NOHSA: buffer_store_dword [[ADD]]
; GCN-HSA: flat_store_dword {{.*}}, [[ADD]]
define amdgpu_kernel void @smrd_valu2_salu_user(i32 addrspace(1)* %out, [8 x i32] addrspace(4)* %in, i32 %a) #1 {
entry:
  %tmp = call i32 @llvm.amdgcn.workitem.id.x()
  %tmp1 = add i32 %tmp, 4
  %tmp2 = getelementptr [8 x i32], [8 x i32] addrspace(4)* %in, i32 %tmp, i32 4
  %tmp3 = load i32, i32 addrspace(4)* %tmp2
  %tmp4 = add i32 %tmp3, %a
  store i32 %tmp4, i32 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}smrd_valu2_max_smrd_offset:
; GCN-NOHSA: buffer_load_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:1020{{$}}
; GCN-HSA: flat_load_dword v{{[0-9]}}, v[{{[0-9]+:[0-9]+}}]
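; 255 * 4 = 1020 bytes is the largest offset that fits the 8-bit dword
; offset field of an SI SMRD instruction.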
define amdgpu_kernel void @smrd_valu2_max_smrd_offset(i32 addrspace(1)* %out, [1024 x i32] addrspace(4)* %in) #1 {
entry:
  %tmp = call i32 @llvm.amdgcn.workitem.id.x()
  %tmp1 = add i32 %tmp, 4
  %tmp2 = getelementptr [1024 x i32], [1024 x i32] addrspace(4)* %in, i32 %tmp, i32 255
  %tmp3 = load i32, i32 addrspace(4)* %tmp2
  store i32 %tmp3, i32 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}smrd_valu2_mubuf_offset:
; GCN-NOHSA-NOT: v_add
; GCN-NOHSA: buffer_load_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:1024{{$}}
; GCN-HSA: flat_load_dword v{{[0-9]}}, v[{{[0-9]+:[0-9]+}}]
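; 256 * 4 = 1024 bytes is one dword past the SI SMRD immediate limit, but
; still fits the 12-bit MUBUF immediate offset.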
define amdgpu_kernel void @smrd_valu2_mubuf_offset(i32 addrspace(1)* %out, [1024 x i32] addrspace(4)* %in) #1 {
entry:
  %tmp = call i32 @llvm.amdgcn.workitem.id.x()
  %tmp1 = add i32 %tmp, 4
  %tmp2 = getelementptr [1024 x i32], [1024 x i32] addrspace(4)* %in, i32 %tmp, i32 256
  %tmp3 = load i32, i32 addrspace(4)* %tmp2
  store i32 %tmp3, i32 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}s_load_imm_v8i32:
; GCN-NOHSA: buffer_load_dwordx4
; GCN-NOHSA: buffer_load_dwordx4
; GCN-HSA: flat_load_dwordx4
; GCN-HSA: flat_load_dwordx4
define amdgpu_kernel void @s_load_imm_v8i32(<8 x i32> addrspace(1)* %out, i32 addrspace(4)* nocapture readonly %in) #1 {
entry:
  %tmp0 = tail call i32 @llvm.amdgcn.workitem.id.x()
  %tmp1 = getelementptr inbounds i32, i32 addrspace(4)* %in, i32 %tmp0
  %tmp2 = bitcast i32 addrspace(4)* %tmp1 to <8 x i32> addrspace(4)*
  %tmp3 = load <8 x i32>, <8 x i32> addrspace(4)* %tmp2, align 4
  store <8 x i32> %tmp3, <8 x i32> addrspace(1)* %out, align 32
  ret void
}

; GCN-LABEL: {{^}}s_load_imm_v8i32_salu_user:
; GCN-NOHSA: buffer_load_dwordx4
; GCN-NOHSA: buffer_load_dwordx4
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: buffer_store_dword
; GCN-HSA: flat_load_dwordx4
; GCN-HSA: flat_load_dwordx4
define amdgpu_kernel void @s_load_imm_v8i32_salu_user(i32 addrspace(1)* %out, i32 addrspace(4)* nocapture readonly %in) #1 {
entry:
  %tmp0 = tail call i32 @llvm.amdgcn.workitem.id.x()
  %tmp1 = getelementptr inbounds i32, i32 addrspace(4)* %in, i32 %tmp0
  %tmp2 = bitcast i32 addrspace(4)* %tmp1 to <8 x i32> addrspace(4)*
  %tmp3 = load <8 x i32>, <8 x i32> addrspace(4)* %tmp2, align 4

  %elt0 = extractelement <8 x i32> %tmp3, i32 0
  %elt1 = extractelement <8 x i32> %tmp3, i32 1
  %elt2 = extractelement <8 x i32> %tmp3, i32 2
  %elt3 = extractelement <8 x i32> %tmp3, i32 3
  %elt4 = extractelement <8 x i32> %tmp3, i32 4
  %elt5 = extractelement <8 x i32> %tmp3, i32 5
  %elt6 = extractelement <8 x i32> %tmp3, i32 6
  %elt7 = extractelement <8 x i32> %tmp3, i32 7

  %add0 = add i32 %elt0, %elt1
  %add1 = add i32 %add0, %elt2
  %add2 = add i32 %add1, %elt3
  %add3 = add i32 %add2, %elt4
  %add4 = add i32 %add3, %elt5
  %add5 = add i32 %add4, %elt6
  %add6 = add i32 %add5, %elt7

  store i32 %add6, i32 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}s_load_imm_v16i32:
; GCN-NOHSA: buffer_load_dwordx4
; GCN-NOHSA: buffer_load_dwordx4
; GCN-NOHSA: buffer_load_dwordx4
; GCN-NOHSA: buffer_load_dwordx4
; GCN-HSA: flat_load_dwordx4
; GCN-HSA: flat_load_dwordx4
; GCN-HSA: flat_load_dwordx4
; GCN-HSA: flat_load_dwordx4
define amdgpu_kernel void @s_load_imm_v16i32(<16 x i32> addrspace(1)* %out, i32 addrspace(4)* nocapture readonly %in) #1 {
entry:
  %tmp0 = tail call i32 @llvm.amdgcn.workitem.id.x()
  %tmp1 = getelementptr inbounds i32, i32 addrspace(4)* %in, i32 %tmp0
  %tmp2 = bitcast i32 addrspace(4)* %tmp1 to <16 x i32> addrspace(4)*
  %tmp3 = load <16 x i32>, <16 x i32> addrspace(4)* %tmp2, align 4
  store <16 x i32> %tmp3, <16 x i32> addrspace(1)* %out, align 32
  ret void
}

; GCN-LABEL: {{^}}s_load_imm_v16i32_salu_user:
; GCN-NOHSA: buffer_load_dwordx4
; GCN-NOHSA: buffer_load_dwordx4
; GCN-NOHSA: buffer_load_dwordx4
; GCN-NOHSA: buffer_load_dwordx4
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: v_add_i32_e32
; GCN-NOHSA: buffer_store_dword
; GCN-HSA: flat_load_dwordx4
; GCN-HSA: flat_load_dwordx4
; GCN-HSA: flat_load_dwordx4
; GCN-HSA: flat_load_dwordx4
define amdgpu_kernel void @s_load_imm_v16i32_salu_user(i32 addrspace(1)* %out, i32 addrspace(4)* nocapture readonly %in) #1 {
entry:
  %tmp0 = tail call i32 @llvm.amdgcn.workitem.id.x()
  %tmp1 = getelementptr inbounds i32, i32 addrspace(4)* %in, i32 %tmp0
  %tmp2 = bitcast i32 addrspace(4)* %tmp1 to <16 x i32> addrspace(4)*
  %tmp3 = load <16 x i32>, <16 x i32> addrspace(4)* %tmp2, align 4

  %elt0 = extractelement <16 x i32> %tmp3, i32 0
  %elt1 = extractelement <16 x i32> %tmp3, i32 1
  %elt2 = extractelement <16 x i32> %tmp3, i32 2
  %elt3 = extractelement <16 x i32> %tmp3, i32 3
  %elt4 = extractelement <16 x i32> %tmp3, i32 4
  %elt5 = extractelement <16 x i32> %tmp3, i32 5
  %elt6 = extractelement <16 x i32> %tmp3, i32 6
  %elt7 = extractelement <16 x i32> %tmp3, i32 7
  %elt8 = extractelement <16 x i32> %tmp3, i32 8
  %elt9 = extractelement <16 x i32> %tmp3, i32 9
  %elt10 = extractelement <16 x i32> %tmp3, i32 10
  %elt11 = extractelement <16 x i32> %tmp3, i32 11
  %elt12 = extractelement <16 x i32> %tmp3, i32 12
  %elt13 = extractelement <16 x i32> %tmp3, i32 13
  %elt14 = extractelement <16 x i32> %tmp3, i32 14
  %elt15 = extractelement <16 x i32> %tmp3, i32 15

  %add0 = add i32 %elt0, %elt1
  %add1 = add i32 %add0, %elt2
  %add2 = add i32 %add1, %elt3
  %add3 = add i32 %add2, %elt4
  %add4 = add i32 %add3, %elt5
  %add5 = add i32 %add4, %elt6
  %add6 = add i32 %add5, %elt7
  %add7 = add i32 %add6, %elt8
  %add8 = add i32 %add7, %elt9
  %add9 = add i32 %add8, %elt10
  %add10 = add i32 %add9, %elt11
  %add11 = add i32 %add10, %elt12
  %add12 = add i32 %add11, %elt13
  %add13 = add i32 %add12, %elt14
  %add14 = add i32 %add13, %elt15

  store i32 %add14, i32 addrspace(1)* %out
  ret void
}

; Make sure we legalize vopc operands after moving an sopc to the VALU.

; GCN-LABEL: {{^}}sopc_vopc_legalize_bug:
; GCN: s_load_dword [[SGPR:s[0-9]+]]
; GCN: v_cmp_le_u32_e32 vcc, [[SGPR]], v{{[0-9]+}}
; GCN: s_and_b64 vcc, exec, vcc
; GCN: s_cbranch_vccnz [[EXIT:[A-Z0-9_]+]]
; GCN: v_mov_b32_e32 [[ONE:v[0-9]+]], 1
; GCN-NOHSA: buffer_store_dword [[ONE]]
; GCN-HSA: flat_store_dword v[{{[0-9]+:[0-9]+}}], [[ONE]]
; GCN: {{^}}[[EXIT]]:
define amdgpu_kernel void @sopc_vopc_legalize_bug(i32 %cond, i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
bb3:
  %tmp0 = bitcast i32 %cond to float
  %tmp1 = fadd float %tmp0, 2.500000e-01
  %tmp2 = bitcast float %tmp1 to i32
  %tmp3 = icmp ult i32 %tmp2, %cond
  br i1 %tmp3, label %bb6, label %bb7

bb6:                                              ; preds = %bb3
  store i32 1, i32 addrspace(1)* %out
  br label %bb7

bb7:                                              ; preds = %bb6, %bb3
  ret void
}

; GCN-LABEL: {{^}}phi_visit_order:
; GCN: v_add_i32_e64 v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 1, v{{[0-9]+}}
define amdgpu_kernel void @phi_visit_order() {
bb:
  br label %bb1

bb1:                                              ; preds = %bb4, %bb
  %tmp = phi i32 [ 0, %bb ], [ %tmp5, %bb4 ]
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %cnd = icmp eq i32 %tid, 0
  br i1 %cnd, label %bb4, label %bb2

bb2:                                              ; preds = %bb1
  %tmp3 = add nsw i32 %tmp, 1
  br label %bb4

bb4:                                              ; preds = %bb2, %bb1
  %tmp5 = phi i32 [ %tmp3, %bb2 ], [ %tmp, %bb1 ]
  store volatile i32 %tmp5, i32 addrspace(1)* undef
  br label %bb1
}

; GCN-LABEL: {{^}}phi_imm_in_sgprs:
; GCN: s_movk_i32 [[A:s[0-9]+]], 0x400
; GCN: s_movk_i32 [[B:s[0-9]+]], 0x400
; GCN: [[LOOP_LABEL:[0-9a-zA-Z_]+]]:
; GCN: s_xor_b32 [[B]], [[B]], [[A]]
; GCN: s_cbranch_scc{{[01]}} [[LOOP_LABEL]]
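; 0x400 above is the 1024 used by the phi and xor in the loop below; the
; point of the test is that it stays in SGPRs across the loop.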
define amdgpu_kernel void @phi_imm_in_sgprs(i32 addrspace(3)* %out, i32 %cond) {
entry:
  br label %loop

loop:                                             ; preds = %loop, %entry
  %i = phi i32 [ 0, %entry ], [ %i.add, %loop ]
  %offset = phi i32 [ 1024, %entry ], [ %offset.xor, %loop ]
  %offset.xor = xor i32 %offset, 1024
  %offset.i = add i32 %offset.xor, %i
  %ptr = getelementptr i32, i32 addrspace(3)* %out, i32 %offset.i
  store i32 0, i32 addrspace(3)* %ptr
  %i.add = add i32 %i, 1
  %cmp = icmp ult i32 %i.add, %cond
  br i1 %cmp, label %loop, label %exit

exit:                                             ; preds = %loop
  ret void
}

attributes #0 = { nounwind readnone }
attributes #1 = { nounwind }