; RUN: llc < %s -march=amdgcn -verify-machineinstrs | FileCheck -allow-deprecated-dag-overlap -enable-var-scope --check-prefixes=SI,GCN,MESA-GCN,FUNC %s
; RUN: llc < %s -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs | FileCheck -allow-deprecated-dag-overlap -enable-var-scope -check-prefixes=VI,GCN,MESA-VI,MESA-GCN,FUNC %s
; RUN: llc < %s -mtriple=amdgcn--amdhsa -mcpu=gfx900 -mattr=-code-object-v3 -verify-machineinstrs | FileCheck -allow-deprecated-dag-overlap -enable-var-scope -check-prefixes=VI,GCN,HSA-GFX9,FUNC %s
; RUN: llc < %s -march=r600 -mcpu=redwood -verify-machineinstrs | FileCheck -allow-deprecated-dag-overlap -enable-var-scope --check-prefixes=EG,EGCM,FUNC %s
; RUN: llc < %s -march=r600 -mcpu=cayman -verify-machineinstrs | FileCheck -allow-deprecated-dag-overlap -enable-var-scope --check-prefixes=CM,EGCM,FUNC %s
; FUNC-LABEL: {{^}}i8_arg:
; HSA-GFX9: kernarg_segment_byte_size = 12
; HSA-GFX9: kernarg_segment_alignment = 4
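; 12 bytes of kernarg here is presumably the 8-byte global out pointer plus
; the i8, with the segment size rounded up to a multiple of 4 (8 + 4 = 12).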
; SI: s_load_dword [[VAL:s[0-9]+]], s[{{[0-9]+:[0-9]+}}], 0xb
; MESA-VI: s_load_dword [[VAL:s[0-9]+]], s[{{[0-9]+:[0-9]+}}], 0x2c
; MESA-GCN: s_and_b32 s{{[0-9]+}}, [[VAL]], 0xff

; HSA-GFX9: s_load_dword [[VAL:s[0-9]+]], s[{{[0-9]+:[0-9]+}}], 0x8
; HSA-GFX9: s_and_b32 s{{[0-9]+}}, [[VAL]], 0xff

; EGCM: VTX_READ_8{{.*}} #3
define amdgpu_kernel void @i8_arg(i32 addrspace(1)* nocapture %out, i8 %in) nounwind {
  %ext = zext i8 %in to i32
  store i32 %ext, i32 addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}i8_zext_arg:
; HSA-GFX9: kernarg_segment_byte_size = 12
; HSA-GFX9: kernarg_segment_alignment = 4
; SI: s_load_dword s{{[0-9]}}, s[0:1], 0xb
; MESA-VI: s_load_dword s{{[0-9]}}, s[0:1], 0x2c

; HSA-GFX9: s_load_dword [[VAL:s[0-9]+]], s[{{[0-9]+:[0-9]+}}], 0x8
; HSA-GFX9: s_and_b32 s{{[0-9]+}}, [[VAL]], 0xff

; EG: BFE_INT T0.X, T0.X, 0.0, literal.x,
; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.y,
; EG-NEXT: 8(1.121039e-44), 2(2.802597e-45)

; CM: BFE_INT * T0.X, T0.X, 0.0, literal.x,
; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00)
; CM-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
; CM-NEXT: 2(2.802597e-45), 0(0.000000e+00)
define amdgpu_kernel void @i8_zext_arg(i32 addrspace(1)* nocapture %out, i8 zeroext %in) nounwind {
  %ext = zext i8 %in to i32
  store i32 %ext, i32 addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}i8_sext_arg:
; HSA-GFX9: kernarg_segment_byte_size = 12
; HSA-GFX9: kernarg_segment_alignment = 4
; SI: s_load_dword s{{[0-9]}}, s[0:1], 0xb

; MESA-VI: s_load_dword s{{[0-9]}}, s[0:1], 0x2c

; HSA-GFX9: s_load_dword [[VAL:s[0-9]+]], s[{{[0-9]+:[0-9]+}}], 0x8
; HSA-GFX9: s_sext_i32_i8 s{{[0-9]+}}, [[VAL]]
; HSA-GFX9: global_store_dword

; EG: BFE_INT T0.X, T0.X, 0.0, literal.x,
; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.y,
; EG-NEXT: 8(1.121039e-44), 2(2.802597e-45)

; CM: BFE_INT * T0.X, T0.X, 0.0, literal.x,
; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00)
; CM-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
; CM-NEXT: 2(2.802597e-45), 0(0.000000e+00)
define amdgpu_kernel void @i8_sext_arg(i32 addrspace(1)* nocapture %out, i8 signext %in) nounwind {
  %ext = sext i8 %in to i32
  store i32 %ext, i32 addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}i16_arg:
; HSA-GFX9: kernarg_segment_byte_size = 12
; HSA-GFX9: kernarg_segment_alignment = 4

; SI: s_load_dword [[VAL:s[0-9]+]], s[{{[0-9]+:[0-9]+}}], 0xb

; MESA-VI: s_load_dword [[VAL:s[0-9]+]], s[{{[0-9]+:[0-9]+}}], 0x2c
; MESA-GCN: s_and_b32 s{{[0-9]+}}, [[VAL]], 0xffff{{$}}

; HSA-GFX9: s_load_dword [[VAL:s[0-9]+]], s[{{[0-9]+:[0-9]+}}], 0x8
; HSA-GFX9: s_and_b32 s{{[0-9]+}}, [[VAL]], 0xffff{{$}}
; HSA-GFX9: global_store_dword
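; The argument is fetched with s_load_dword, which reads a full dword from the
; kernarg segment, so the i16 zext is done by masking off the upper 16 bits.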
define amdgpu_kernel void @i16_arg(i32 addrspace(1)* nocapture %out, i16 %in) nounwind {
  %ext = zext i16 %in to i32
  store i32 %ext, i32 addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}i16_zext_arg:
; HSA-GFX9: kernarg_segment_byte_size = 12
; HSA-GFX9: kernarg_segment_alignment = 4

; SI: s_load_dword s{{[0-9]}}, s[0:1], 0xb
; MESA-VI: s_load_dword s{{[0-9]}}, s[0:1], 0x2c

; HSA-GFX9: s_load_dword [[VAL:s[0-9]+]], s[{{[0-9]+:[0-9]+}}], 0x8
; HSA-GFX9: s_and_b32 s{{[0-9]+}}, [[VAL]], 0xffff{{$}}
; HSA-GFX9: global_store_dword

; EG: BFE_INT T0.X, T0.X, 0.0, literal.x,
; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.y,
; EG-NEXT: 16(2.242078e-44), 2(2.802597e-45)

; CM: BFE_INT * T0.X, T0.X, 0.0, literal.x,
; CM-NEXT: 16(2.242078e-44), 0(0.000000e+00)
; CM-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
; CM-NEXT: 2(2.802597e-45), 0(0.000000e+00)
define amdgpu_kernel void @i16_zext_arg(i32 addrspace(1)* nocapture %out, i16 zeroext %in) nounwind {
  %ext = zext i16 %in to i32
  store i32 %ext, i32 addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}i16_sext_arg:
; HSA-GFX9: kernarg_segment_byte_size = 12
; HSA-GFX9: kernarg_segment_alignment = 4

; SI: s_load_dword s{{[0-9]}}, s[0:1], 0xb
; MESA-VI: s_load_dword s{{[0-9]}}, s[0:1], 0x2c

; HSA-GFX9: s_load_dword [[VAL:s[0-9]+]], s[{{[0-9]+:[0-9]+}}], 0x8
; HSA-GFX9: s_sext_i32_i16 s{{[0-9]+}}, [[VAL]]
; HSA-GFX9: global_store_dword

; EG: BFE_INT T0.X, T0.X, 0.0, literal.x,
; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.y,
; EG-NEXT: 16(2.242078e-44), 2(2.802597e-45)

; CM: BFE_INT * T0.X, T0.X, 0.0, literal.x,
; CM-NEXT: 16(2.242078e-44), 0(0.000000e+00)
; CM-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
; CM-NEXT: 2(2.802597e-45), 0(0.000000e+00)
define amdgpu_kernel void @i16_sext_arg(i32 addrspace(1)* nocapture %out, i16 signext %in) nounwind {
  %ext = sext i16 %in to i32
  store i32 %ext, i32 addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}i32_arg:
; HSA-GFX9: kernarg_segment_byte_size = 12
; HSA-GFX9: kernarg_segment_alignment = 4

; EGCM: T{{[0-9]\.[XYZW]}}, KC0[2].Z
; SI: s_load_dword s{{[0-9]}}, s[0:1], 0xb
; MESA-VI: s_load_dword s{{[0-9]}}, s[0:1], 0x2c
; HSA-GFX9: s_load_dword s{{[0-9]}}, s[4:5], 0x8
define amdgpu_kernel void @i32_arg(i32 addrspace(1)* nocapture %out, i32 %in) nounwind {
  store i32 %in, i32 addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}f32_arg:
; HSA-GFX9: kernarg_segment_byte_size = 12
; HSA-GFX9: kernarg_segment_alignment = 4
; EGCM: T{{[0-9]\.[XYZW]}}, KC0[2].Z
; SI: s_load_dword s{{[0-9]}}, s[0:1], 0xb
; MESA-VI: s_load_dword s{{[0-9]}}, s[0:1], 0x2c
; HSA-GFX9: s_load_dword s{{[0-9]+}}, s[4:5], 0x8
define amdgpu_kernel void @f32_arg(float addrspace(1)* nocapture %out, float %in) nounwind {
  store float %in, float addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}v2i8_arg:
; HSA-GFX9: kernarg_segment_byte_size = 12
; HSA-GFX9: kernarg_segment_alignment = 4

; GCN: s_load_dword s
; GCN-NOT: {{buffer|flat|global}}_load_
define amdgpu_kernel void @v2i8_arg(<2 x i8> addrspace(1)* %out, <2 x i8> %in) {
  store <2 x i8> %in, <2 x i8> addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}v2i16_arg:
; HSA-GFX9: kernarg_segment_byte_size = 12
; HSA-GFX9: kernarg_segment_alignment = 4

; SI: s_load_dword s{{[0-9]+}}, s[0:1], 0xb
; MESA-VI: s_load_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0x2c
; HSA-GFX9: s_load_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0x8
define amdgpu_kernel void @v2i16_arg(<2 x i16> addrspace(1)* %out, <2 x i16> %in) {
  store <2 x i16> %in, <2 x i16> addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}v2i32_arg:
; HSA-GFX9: kernarg_segment_byte_size = 16
; HSA-GFX9: kernarg_segment_alignment = 4

; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].X
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[2].W
; SI: s_load_dwordx2 s{{\[[0-9]:[0-9]\]}}, s[0:1], 0xb
; MESA-VI: s_load_dwordx2 s{{\[[0-9]:[0-9]\]}}, s[0:1], 0x2c
; HSA-GFX9: s_load_dwordx2 s[{{[0-9]+:[0-9]+}}], s[4:5], 0x8
define amdgpu_kernel void @v2i32_arg(<2 x i32> addrspace(1)* nocapture %out, <2 x i32> %in) nounwind {
  store <2 x i32> %in, <2 x i32> addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}v2f32_arg:
; HSA-GFX9: kernarg_segment_byte_size = 16
; HSA-GFX9: kernarg_segment_alignment = 4

; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].X
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[2].W
; SI: s_load_dwordx2 s{{\[[0-9]:[0-9]\]}}, s[0:1], 0xb
; MESA-VI: s_load_dwordx2 s{{\[[0-9]:[0-9]\]}}, s[0:1], 0x2c
; HSA-GFX9: s_load_dwordx2 s{{\[[0-9]:[0-9]\]}}, s[4:5], 0x8
define amdgpu_kernel void @v2f32_arg(<2 x float> addrspace(1)* nocapture %out, <2 x float> %in) nounwind {
  store <2 x float> %in, <2 x float> addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}v3i8_arg:
; HSA-GFX9: kernarg_segment_byte_size = 12
; HSA-GFX9: kernarg_segment_alignment = 4

; EGCM-DAG: VTX_READ_8 T{{[0-9]}}.X, T{{[0-9]}}.X, 40
; EGCM-DAG: VTX_READ_8 T{{[0-9]}}.X, T{{[0-9]}}.X, 41
; EGCM-DAG: VTX_READ_8 T{{[0-9]}}.X, T{{[0-9]}}.X, 42

; SI: s_load_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0xb

; MESA-VI: s_load_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0x2c
; HSA-GFX9: s_load_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0x8
define amdgpu_kernel void @v3i8_arg(<3 x i8> addrspace(1)* nocapture %out, <3 x i8> %in) nounwind {
  store <3 x i8> %in, <3 x i8> addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}v3i16_arg:
; HSA-GFX9: kernarg_segment_byte_size = 16
; HSA-GFX9: kernarg_segment_alignment = 4

; EGCM-DAG: VTX_READ_16 T{{[0-9]}}.X, T{{[0-9]}}.X, 44
; EGCM-DAG: VTX_READ_16 T{{[0-9]}}.X, T{{[0-9]}}.X, 46
; EGCM-DAG: VTX_READ_16 T{{[0-9]}}.X, T{{[0-9]}}.X, 48

; SI: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0xb

; HSA-GFX9: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x8
; MESA-VI: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x2c
define amdgpu_kernel void @v3i16_arg(<3 x i16> addrspace(1)* nocapture %out, <3 x i16> %in) nounwind {
  store <3 x i16> %in, <3 x i16> addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}v3i32_arg:
; HSA-GFX9: kernarg_segment_byte_size = 32
; HSA-GFX9: kernarg_segment_alignment = 4
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].Y
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].Z
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].W
; SI: s_load_dwordx4 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0xd
; MESA-VI: s_load_dwordx4 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0x34
; HSA-GFX9: s_load_dwordx4 s[{{[0-9]+:[0-9]+}}], s[4:5], 0x10
define amdgpu_kernel void @v3i32_arg(<3 x i32> addrspace(1)* nocapture %out, <3 x i32> %in) nounwind {
  store <3 x i32> %in, <3 x i32> addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}v3f32_arg:
; HSA-GFX9: kernarg_segment_byte_size = 32
; HSA-GFX9: kernarg_segment_alignment = 4
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].Y
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].Z
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].W
; SI: s_load_dwordx4 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0xd
; MESA-VI: s_load_dwordx4 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0x34
; HSA-GFX9: s_load_dwordx4 s[{{[0-9]+:[0-9]+}}], s[4:5], 0x10
define amdgpu_kernel void @v3f32_arg(<3 x float> addrspace(1)* nocapture %out, <3 x float> %in) nounwind {
  store <3 x float> %in, <3 x float> addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}v4i8_arg:
; HSA-GFX9: kernarg_segment_byte_size = 12
; HSA-GFX9: kernarg_segment_alignment = 4

; GCN-DAG: s_load_dwordx2 s
; GCN-DAG: s_load_dword s
define amdgpu_kernel void @v4i8_arg(<4 x i8> addrspace(1)* %out, <4 x i8> %in) {
  store <4 x i8> %in, <4 x i8> addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}v4i16_arg:
; HSA-GFX9: kernarg_segment_byte_size = 16
; HSA-GFX9: kernarg_segment_alignment = 4

; SI-DAG: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s[0:1], 0xb
; SI-DAG: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s[0:1], 0x9

; MESA-VI-DAG: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s[0:1], 0x24
; MESA-VI-DAG: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s[0:1], 0x2c
; HSA-GFX9-DAG: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s[4:5], 0x0
; HSA-GFX9-DAG: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s[4:5], 0x8
define amdgpu_kernel void @v4i16_arg(<4 x i16> addrspace(1)* %out, <4 x i16> %in) {
  store <4 x i16> %in, <4 x i16> addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}v4i32_arg:
; HSA-GFX9: kernarg_segment_byte_size = 32
; HSA-GFX9: kernarg_segment_alignment = 4
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].Y
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].Z
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].W
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[4].X

; SI: s_load_dwordx4 s{{\[[0-9]:[0-9]\]}}, s[0:1], 0xd
; MESA-VI: s_load_dwordx4 s{{\[[0-9]:[0-9]\]}}, s[0:1], 0x34
; HSA-GFX9: s_load_dwordx4 s[{{[0-9]+:[0-9]+}}], s[4:5], 0x10
define amdgpu_kernel void @v4i32_arg(<4 x i32> addrspace(1)* nocapture %out, <4 x i32> %in) nounwind {
  store <4 x i32> %in, <4 x i32> addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}v4f32_arg:
; HSA-GFX9: kernarg_segment_byte_size = 32
; HSA-GFX9: kernarg_segment_alignment = 4
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].Y
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].Z
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[3].W
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[4].X
; SI: s_load_dwordx4 s{{\[[0-9]:[0-9]\]}}, s[0:1], 0xd
; MESA-VI: s_load_dwordx4 s{{\[[0-9]:[0-9]\]}}, s[0:1], 0x34
; HSA-GFX9: s_load_dwordx4 s[{{[0-9]+:[0-9]+}}], s[4:5], 0x10
define amdgpu_kernel void @v4f32_arg(<4 x float> addrspace(1)* nocapture %out, <4 x float> %in) nounwind {
  store <4 x float> %in, <4 x float> addrspace(1)* %out, align 4
  ret void
}

; FIXME: Lots of unpack and re-pack junk on VI
; FUNC-LABEL: {{^}}v8i8_arg:
; HSA-GFX9: kernarg_segment_byte_size = 16
; HSA-GFX9: kernarg_segment_alignment = 4

; SI-NOT: {{buffer|flat|global}}_load
; SI: s_load_dwordx2 s
; SI-NEXT: s_load_dwordx2 s
; SI-NOT: {{buffer|flat|global}}_load

; VI: s_load_dwordx2 s
; VI-NEXT: s_load_dwordx2 s
define amdgpu_kernel void @v8i8_arg(<8 x i8> addrspace(1)* %out, <8 x i8> %in) {
  store <8 x i8> %in, <8 x i8> addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}v8i16_arg:
; HSA-GFX9: kernarg_segment_byte_size = 32
; HSA-GFX9: kernarg_segment_alignment = 4

; SI-NEXT: s_load_dwordx2
; SI-NOT: {{buffer|flat|global}}_load

; MESA-VI: s_load_dwordx4 s{{\[[0-9]+:[0-9]+\]}}, s[0:1], 0x34

; HSA-GFX9: s_load_dwordx4 s{{\[[0-9]+:[0-9]+\]}}, s[4:5], 0x10
define amdgpu_kernel void @v8i16_arg(<8 x i16> addrspace(1)* %out, <8 x i16> %in) {
  store <8 x i16> %in, <8 x i16> addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}v8i32_arg:
; HSA-GFX9: kernarg_segment_byte_size = 64
; HSA-GFX9: kernarg_segment_alignment = 5
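; kernarg_segment_alignment is a log2 value, so 5 here means 32-byte alignment
; for the 32-byte <8 x i32>; the vector presumably starts at offset 32 (the
; 0x20 checked below), giving 32 + 32 = 64 bytes total.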
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[4].Y
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[4].Z
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[4].W
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[5].X
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[5].Y
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[5].Z
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[5].W
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[6].X

; SI: s_load_dwordx8 s{{\[[0-9]+:[0-9]+\]}}, s[0:1], 0x11
; MESA-VI: s_load_dwordx8 s{{\[[0-9]+:[0-9]+\]}}, s[0:1], 0x44
; HSA-GFX9: s_load_dwordx8 s[{{[0-9]+:[0-9]+}}], s[4:5], 0x20
define amdgpu_kernel void @v8i32_arg(<8 x i32> addrspace(1)* nocapture %out, <8 x i32> %in) nounwind {
  store <8 x i32> %in, <8 x i32> addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}v8f32_arg:
; HSA-GFX9: kernarg_segment_byte_size = 64
; HSA-GFX9: kernarg_segment_alignment = 5
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[4].Y
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[4].Z
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[4].W
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[5].X
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[5].Y
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[5].Z
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[5].W
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[6].X
; SI: s_load_dwordx8 s{{\[[0-9]+:[0-9]+\]}}, s[0:1], 0x11
define amdgpu_kernel void @v8f32_arg(<8 x float> addrspace(1)* nocapture %out, <8 x float> %in) nounwind {
  store <8 x float> %in, <8 x float> addrspace(1)* %out, align 4
  ret void
}

; FIXME: Pack/repack on VI

; FUNC-LABEL: {{^}}v16i8_arg:
; HSA-GFX9: kernarg_segment_byte_size = 32
; HSA-GFX9: kernarg_segment_alignment = 4

; SI: s_load_dwordx4 s
; SI-NEXT: s_load_dwordx2 s
; SI-NOT: {{buffer|flat|global}}_load

; VI: s_load_dwordx4 s
define amdgpu_kernel void @v16i8_arg(<16 x i8> addrspace(1)* %out, <16 x i8> %in) {
  store <16 x i8> %in, <16 x i8> addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}v16i16_arg:
; HSA-GFX9: kernarg_segment_byte_size = 64
; HSA-GFX9: kernarg_segment_alignment = 5

; SI: s_load_dwordx8 s
; SI-NEXT: s_load_dwordx2 s
; SI-NOT: {{buffer|flat|global}}_load

; MESA-VI: s_load_dwordx8 s{{\[[0-9]+:[0-9]+\]}}, s[0:1], 0x44

; HSA-GFX9: s_load_dwordx8 s{{\[[0-9]+:[0-9]+\]}}, s[4:5], 0x20
define amdgpu_kernel void @v16i16_arg(<16 x i16> addrspace(1)* %out, <16 x i16> %in) {
  store <16 x i16> %in, <16 x i16> addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}v16i32_arg:
; HSA-GFX9: kernarg_segment_byte_size = 128
; HSA-GFX9: kernarg_segment_alignment = 6
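; Same log2 encoding as above: alignment 6 is 64 bytes for the 64-byte
; <16 x i32>, which presumably starts at offset 64 (0x40), so 64 + 64 = 128.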
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[6].Y
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[6].Z
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[6].W
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[7].X
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[7].Y
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[7].Z
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[7].W
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[8].X
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[8].Y
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[8].Z
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[8].W
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[9].X
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[9].Y
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[9].Z
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[9].W
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[10].X
; SI: s_load_dwordx16 s{{\[[0-9]+:[0-9]+\]}}, s[0:1], 0x19
; MESA-VI: s_load_dwordx16 s{{\[[0-9]+:[0-9]+\]}}, s[0:1], 0x64
; HSA-GFX9: s_load_dwordx16 s[{{[0-9]+:[0-9]+}}], s[4:5], 0x40
define amdgpu_kernel void @v16i32_arg(<16 x i32> addrspace(1)* nocapture %out, <16 x i32> %in) nounwind {
  store <16 x i32> %in, <16 x i32> addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}v16f32_arg:
; HSA-GFX9: kernarg_segment_byte_size = 128
; HSA-GFX9: kernarg_segment_alignment = 6
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[6].Y
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[6].Z
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[6].W
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[7].X
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[7].Y
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[7].Z
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[7].W
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[8].X
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[8].Y
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[8].Z
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[8].W
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[9].X
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[9].Y
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[9].Z
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[9].W
; EGCM-DAG: T{{[0-9]\.[XYZW]}}, KC0[10].X
; SI: s_load_dwordx16 s{{\[[0-9]+:[0-9]+\]}}, s[0:1], 0x19
; MESA-VI: s_load_dwordx16 s{{\[[0-9]+:[0-9]+\]}}, s[0:1], 0x64
; HSA-GFX9: s_load_dwordx16 s[{{[0-9]+:[0-9]+}}], s[4:5], 0x40
define amdgpu_kernel void @v16f32_arg(<16 x float> addrspace(1)* nocapture %out, <16 x float> %in) nounwind {
  store <16 x float> %in, <16 x float> addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}kernel_arg_i64:
; MESA-VI: s_load_dwordx4 s[{{[0-9]+:[0-9]+}}], s[0:1], 0x24
; HSA-GFX9: s_load_dwordx4 s[{{[0-9]+:[0-9]+}}], s[4:5], 0x0

; MESA-GCN: buffer_store_dwordx2
define amdgpu_kernel void @kernel_arg_i64(i64 addrspace(1)* %out, i64 %a) nounwind {
  store i64 %a, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}f64_kernel_arg:
; SI-DAG: s_load_dwordx4 s[{{[0-9]:[0-9]}}], s[0:1], 0x9
; MESA-VI-DAG: s_load_dwordx4 s[{{[0-9]:[0-9]}}], s[0:1], 0x24
; MESA-GCN: buffer_store_dwordx2

; HSA-GFX9: s_load_dwordx4 s[{{[0-9]+:[0-9]+}}], s[4:5], 0x0
define amdgpu_kernel void @f64_kernel_arg(double addrspace(1)* %out, double %in) {
  store double %in, double addrspace(1)* %out
  ret void
}

; XFUNC-LABEL: {{^}}kernel_arg_v1i64:
; XGCN: s_load_dwordx2
; XGCN: s_load_dwordx2
; XGCN: buffer_store_dwordx2
; define amdgpu_kernel void @kernel_arg_v1i64(<1 x i64> addrspace(1)* %out, <1 x i64> %a) nounwind {
;   store <1 x i64> %a, <1 x i64> addrspace(1)* %out, align 8
; FUNC-LABEL: {{^}}i65_arg:
; HSA-GFX9: kernarg_segment_byte_size = 24
; HSA-GFX9: kernarg_segment_alignment = 4
; HSA-GFX9: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s[4:5], 0x0
; HSA-GFX9: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s[4:5], 0x8
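; 24 bytes is presumably the 8-byte out pointer plus the i65 padded out to a
; 16-byte slot; the load at 0x8 picks up its low 64 bits.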
define amdgpu_kernel void @i65_arg(i65 addrspace(1)* nocapture %out, i65 %in) nounwind {
  store i65 %in, i65 addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}i1_arg:
; HSA-GFX9: kernarg_segment_byte_size = 12
; HSA-GFX9: kernarg_segment_alignment = 4

; GCN: s_load_dword s
; GCN: {{buffer|flat|global}}_store_byte
define amdgpu_kernel void @i1_arg(i1 addrspace(1)* %out, i1 %x) nounwind {
  store i1 %x, i1 addrspace(1)* %out, align 1
  ret void
}

; FUNC-LABEL: {{^}}i1_arg_zext_i32:
; HSA-GFX9: kernarg_segment_byte_size = 12
; HSA-GFX9: kernarg_segment_alignment = 4

; GCN: {{buffer|flat|global}}_store_dword
define amdgpu_kernel void @i1_arg_zext_i32(i32 addrspace(1)* %out, i1 %x) nounwind {
  %ext = zext i1 %x to i32
  store i32 %ext, i32 addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}i1_arg_zext_i64:
; HSA-GFX9: kernarg_segment_byte_size = 12
; HSA-GFX9: kernarg_segment_alignment = 4

; GCN: s_load_dword s
; GCN: {{buffer|flat|global}}_store_dwordx2
define amdgpu_kernel void @i1_arg_zext_i64(i64 addrspace(1)* %out, i1 %x) nounwind {
  %ext = zext i1 %x to i64
  store i64 %ext, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}i1_arg_sext_i32:
; HSA-GFX9: kernarg_segment_byte_size = 12
; HSA-GFX9: kernarg_segment_alignment = 4

; GCN: {{buffer|flat|global}}_store_dword
define amdgpu_kernel void @i1_arg_sext_i32(i32 addrspace(1)* %out, i1 %x) nounwind {
  %ext = sext i1 %x to i32
  store i32 %ext, i32 addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}i1_arg_sext_i64:
; HSA-GFX9: kernarg_segment_byte_size = 12
; HSA-GFX9: kernarg_segment_alignment = 4

; GCN: {{buffer|flat|global}}_store_dwordx2
define amdgpu_kernel void @i1_arg_sext_i64(i64 addrspace(1)* %out, i1 %x) nounwind {
  %ext = sext i1 %x to i64
  store i64 %ext, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}empty_struct_arg:
; HSA-GFX9: kernarg_segment_byte_size = 0
define amdgpu_kernel void @empty_struct_arg({} %in) nounwind {
  ret void
}

; The correct load offsets for these tests depend on the natural alignment of
; the struct members. With the SelectionDAG argument lowering, the alignments
; of the struct members are not properly considered, making these wrong.

; FIXME: Total argument size is computed wrong
; FUNC-LABEL: {{^}}struct_argument_alignment:
; HSA-GFX9: kernarg_segment_byte_size = 40
; HSA-GFX9: s_load_dword s{{[0-9]+}}, s[4:5], 0x0
; HSA-GFX9: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s[4:5], 0x8
; HSA-GFX9: s_load_dword s{{[0-9]+}}, s[4:5], 0x18
; HSA-GFX9: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s[4:5], 0x20
define amdgpu_kernel void @struct_argument_alignment({i32, i64} %arg0, i8, {i32, i64} %arg1) {
  %val0 = extractvalue {i32, i64} %arg0, 0
  %val1 = extractvalue {i32, i64} %arg0, 1
  %val2 = extractvalue {i32, i64} %arg1, 0
  %val3 = extractvalue {i32, i64} %arg1, 1
  store volatile i32 %val0, i32 addrspace(1)* null
  store volatile i64 %val1, i64 addrspace(1)* null
  store volatile i32 %val2, i32 addrspace(1)* null
  store volatile i64 %val3, i64 addrspace(1)* null
  ret void
}

; No padding between the i8 and the next struct, but the total is rounded up
; to 4 bytes at the end.
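; Presumed layout for the packed case: %arg0 occupies bytes 0-11 (i32 at 0,
; i64 at 4), the i8 is at byte 12, and %arg1 follows immediately (i32 at 13,
; i64 at 17). 17 + 8 = 25, rounded up to 28 for the segment size; the
; misaligned members of %arg1 are the ones fetched with global_load below.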
; FUNC-LABEL: {{^}}packed_struct_argument_alignment:
; HSA-GFX9: kernarg_segment_byte_size = 28
; HSA-GFX9: global_load_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, off offset:13
; HSA-GFX9: global_load_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, off offset:17
; HSA-GFX9: s_load_dword s{{[0-9]+}}, s[4:5], 0x0
; HSA-GFX9: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s[4:5], 0x4
define amdgpu_kernel void @packed_struct_argument_alignment(<{i32, i64}> %arg0, i8, <{i32, i64}> %arg1) {
  %val0 = extractvalue <{i32, i64}> %arg0, 0
  %val1 = extractvalue <{i32, i64}> %arg0, 1
  %val2 = extractvalue <{i32, i64}> %arg1, 0
  %val3 = extractvalue <{i32, i64}> %arg1, 1
  store volatile i32 %val0, i32 addrspace(1)* null
  store volatile i64 %val1, i64 addrspace(1)* null
  store volatile i32 %val2, i32 addrspace(1)* null
  store volatile i64 %val3, i64 addrspace(1)* null
  ret void
}

; GCN-LABEL: {{^}}struct_argument_alignment_after:
; HSA-GFX9: kernarg_segment_byte_size = 64
; HSA-GFX9: s_load_dword s{{[0-9]+}}, s[4:5], 0x0
; HSA-GFX9: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s[4:5], 0x8
; HSA-GFX9: s_load_dword s{{[0-9]+}}, s[4:5], 0x18
; HSA-GFX9: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s[4:5], 0x20
; HSA-GFX9: s_load_dwordx4 s{{\[[0-9]+:[0-9]+\]}}, s[4:5], 0x30
define amdgpu_kernel void @struct_argument_alignment_after({i32, i64} %arg0, i8, {i32, i64} %arg2, i8, <4 x i32> %arg4) {
  %val0 = extractvalue {i32, i64} %arg0, 0
  %val1 = extractvalue {i32, i64} %arg0, 1
  %val2 = extractvalue {i32, i64} %arg2, 0
  %val3 = extractvalue {i32, i64} %arg2, 1
  store volatile i32 %val0, i32 addrspace(1)* null
  store volatile i64 %val1, i64 addrspace(1)* null
  store volatile i32 %val2, i32 addrspace(1)* null
  store volatile i64 %val3, i64 addrspace(1)* null
  store volatile <4 x i32> %arg4, <4 x i32> addrspace(1)* null
  ret void
}

; GCN-LABEL: {{^}}array_3xi32:
; HSA-GFX9: s_load_dword s{{[0-9]+}}, s[4:5], 0x0
; HSA-GFX9: s_load_dword s{{[0-9]+}}, s[4:5], 0x4
; HSA-GFX9: s_load_dword s{{[0-9]+}}, s[4:5], 0x8
; HSA-GFX9: s_load_dword s{{[0-9]+}}, s[4:5], 0xc
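; Each array element appears to get its own dword slot: the i16 %arg0 sits in
; the 4-byte slot at 0x0 and the [3 x i32] elements follow at 0x4, 0x8 and 0xc.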
define amdgpu_kernel void @array_3xi32(i16 %arg0, [3 x i32] %arg1) {
  store volatile i16 %arg0, i16 addrspace(1)* undef
  store volatile [3 x i32] %arg1, [3 x i32] addrspace(1)* undef
  ret void
}

; FIXME: Why not all scalar loads?
; GCN-LABEL: {{^}}array_3xi16:
; HSA-GFX9: global_load_ushort v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, off offset:2
; HSA-GFX9: global_load_ushort v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, off offset:4
; HSA-GFX9: global_load_ushort v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, off offset:6
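; Here the i16 elements appear to be packed right after the i8: %arg0 at byte
; 0 and the array elements at byte offsets 2, 4 and 6, matching the ushort
; loads checked above.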
define amdgpu_kernel void @array_3xi16(i8 %arg0, [3 x i16] %arg1) {
  store volatile i8 %arg0, i8 addrspace(1)* undef
  store volatile [3 x i16] %arg1, [3 x i16] addrspace(1)* undef
  ret void
}

; GCN-LABEL: {{^}}small_array_round_down_offset:
; HSA-GFX9: global_load_ubyte v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, off offset:1
define amdgpu_kernel void @small_array_round_down_offset(i8, [1 x i8] %arg) {
  %val = extractvalue [1 x i8] %arg, 0
  store volatile i8 %val, i8 addrspace(1)* undef