1 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --check-globals
2 ; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -amdgpu-annotate-kernel-features %s | FileCheck -check-prefix=AKF_GCN %s
3 ; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -amdgpu-attributor %s | FileCheck -check-prefix=ATTRIBUTOR_GCN %s
5 ; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck -check-prefix=GFX9 %s
7 target datalayout = "A5"
; The target of the indirect call below: a trivial internal function whose
; address is stored through a function pointer by @test_simple_indirect_call.
; CHECK lines are autogenerated by update_test_checks.py -- regenerate rather
; than hand-edit them.
define internal void @indirect() {
; AKF_GCN-LABEL: define {{[^@]+}}@indirect() {
; AKF_GCN-NEXT: ret void
; ATTRIBUTOR_GCN-LABEL: define {{[^@]+}}@indirect
; ATTRIBUTOR_GCN-SAME: () #[[ATTR0:[0-9]+]] {
; ATTRIBUTOR_GCN-NEXT: ret void
; GFX9-LABEL: indirect:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
; Kernel performing a simple indirect call: it stores @indirect's address into
; a private (addrspace(5)) stack slot, reloads it through a flat pointer
; obtained via addrspacecast, and calls it. The annotate-kernel-features run
; is expected to tag the kernel (see the AKF_GCN attribute check at the end of
; the file); the attributor run checks which attributes survive when the call
; target is only known indirectly. CHECK lines are autogenerated by
; update_test_checks.py -- regenerate rather than hand-edit them.
define amdgpu_kernel void @test_simple_indirect_call() {
; AKF_GCN-LABEL: define {{[^@]+}}@test_simple_indirect_call
; AKF_GCN-SAME: () #[[ATTR0:[0-9]+]] {
; AKF_GCN-NEXT: [[FPTR:%.*]] = alloca void ()*, align 8, addrspace(5)
; AKF_GCN-NEXT: [[FPTR_CAST:%.*]] = addrspacecast void ()* addrspace(5)* [[FPTR]] to void ()**
; AKF_GCN-NEXT: store void ()* @indirect, void ()** [[FPTR_CAST]], align 8
; AKF_GCN-NEXT: [[FP:%.*]] = load void ()*, void ()** [[FPTR_CAST]], align 8
; AKF_GCN-NEXT: call void [[FP]]()
; AKF_GCN-NEXT: ret void
; ATTRIBUTOR_GCN-LABEL: define {{[^@]+}}@test_simple_indirect_call
; ATTRIBUTOR_GCN-SAME: () #[[ATTR1:[0-9]+]] {
; ATTRIBUTOR_GCN-NEXT: [[FPTR:%.*]] = alloca void ()*, align 8, addrspace(5)
; ATTRIBUTOR_GCN-NEXT: [[FPTR_CAST:%.*]] = addrspacecast void ()* addrspace(5)* [[FPTR]] to void ()**
; ATTRIBUTOR_GCN-NEXT: store void ()* @indirect, void ()** [[FPTR_CAST]], align 8
; ATTRIBUTOR_GCN-NEXT: [[FP:%.*]] = load void ()*, void ()** [[FPTR_CAST]], align 8
; ATTRIBUTOR_GCN-NEXT: call void [[FP]]()
; ATTRIBUTOR_GCN-NEXT: ret void
; GFX9-LABEL: test_simple_indirect_call:
; GFX9-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x4
; GFX9-NEXT: s_add_u32 flat_scratch_lo, s6, s9
; GFX9-NEXT: s_addc_u32 flat_scratch_hi, s7, 0
; GFX9-NEXT: s_add_u32 s0, s0, s9
; GFX9-NEXT: s_addc_u32 s1, s1, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshr_b32 s4, s4, 16
; GFX9-NEXT: s_mul_i32 s4, s4, s5
; GFX9-NEXT: v_mul_lo_u32 v0, s4, v0
; GFX9-NEXT: s_getpc_b64 s[6:7]
; GFX9-NEXT: s_add_u32 s6, s6, indirect@rel32@lo+4
; GFX9-NEXT: s_addc_u32 s7, s7, indirect@rel32@hi+12
; GFX9-NEXT: v_mov_b32_e32 v3, s6
; GFX9-NEXT: v_mov_b32_e32 v4, s7
; GFX9-NEXT: v_mad_u32_u24 v0, v1, s5, v0
; GFX9-NEXT: v_add_lshl_u32 v0, v0, v2, 3
; GFX9-NEXT: s_mov_b32 s32, 0
; GFX9-NEXT: ds_write_b64 v0, v[3:4]
; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
  ; Private stack slot for the function pointer, then a flat-address view of it.
  %fptr = alloca void()*, addrspace(5)
  %fptr.cast = addrspacecast void()* addrspace(5)* %fptr to void()**
  ; Round-trip @indirect's address through memory so the call site is indirect.
  store void()* @indirect, void()** %fptr.cast
  %fp = load void()*, void()** %fptr.cast
74 ; AKF_GCN: attributes #[[ATTR0]] = { "amdgpu-calls" "amdgpu-stack-objects" }
76 ; ATTRIBUTOR_GCN: attributes #[[ATTR0]] = { "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
77 ; ATTRIBUTOR_GCN: attributes #[[ATTR1]] = { "uniform-work-group-size"="false" }