; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -mtriple=amdgcn-- -passes=amdgpu-lower-module-lds < %s --amdgpu-lower-module-lds-strategy=module | FileCheck -check-prefixes=CHECK,MODULE %s
; RUN: opt -S -mtriple=amdgcn-- -passes=amdgpu-lower-module-lds < %s --amdgpu-lower-module-lds-strategy=table | FileCheck -check-prefixes=CHECK,TABLE %s
; RUN: opt -S -mtriple=amdgcn-- -passes=amdgpu-lower-module-lds < %s --amdgpu-lower-module-lds-strategy=kernel | FileCheck -check-prefixes=CHECK,K_OR_HY %s
; RUN: opt -S -mtriple=amdgcn-- -passes=amdgpu-lower-module-lds < %s --amdgpu-lower-module-lds-strategy=hybrid | FileCheck -check-prefixes=CHECK,K_OR_HY %s
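
;; In this test: the module strategy places variables reachable from non-kernel functions in the single
;; @llvm.amdgcn.module.lds struct, the table strategy routes function-scope accesses through
;; @llvm.amdgcn.lds.offset.table indexed by llvm.amdgcn.lds.kernel.id, and the kernel strategy puts each
;; variable in a per-kernel @llvm.amdgcn.kernel.*.lds struct that is accessed directly.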
;; Same checks for kernel and for hybrid, as an unambiguous reference to a variable - one where exactly one kernel
;; can reach it - is the case where hybrid lowering can always prefer the direct access.

;; Single kernel is the sole user of a single variable; all options codegen as direct access to the kernel struct.
@k0.lds = addrspace(3) global i8 undef
define amdgpu_kernel void @k0() {
; CHECK-LABEL: @k0(
; CHECK-NEXT: [[LD:%.*]] = load i8, ptr addrspace(3) @llvm.amdgcn.kernel.k0.lds, align 1
; CHECK-NEXT: [[MUL:%.*]] = mul i8 [[LD]], 2
; CHECK-NEXT: store i8 [[MUL]], ptr addrspace(3) @llvm.amdgcn.kernel.k0.lds, align 1
; CHECK-NEXT: ret void
;
  %ld = load i8, ptr addrspace(3) @k0.lds
  %mul = mul i8 %ld, 2
  store i8 %mul, ptr addrspace(3) @k0.lds
  ret void
}

;; Function is reachable from one kernel. Variable goes in module lds or the kernel struct, but never both.
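;; With the table strategy, the non-kernel access below is rewritten into a run-time lookup, as the TABLE
;; checks show: llvm.amdgcn.lds.kernel.id selects a row of @llvm.amdgcn.lds.offset.table, the i32 offset is
;; loaded from that row, and inttoptr turns it back into a ptr addrspace(3) for the original load/store.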
@f0.lds = addrspace(3) global i16 undef
define void @f0() {
; MODULE-LABEL: @f0(
; MODULE-NEXT: [[LD:%.*]] = load i16, ptr addrspace(3) getelementptr inbounds ([[LLVM_AMDGCN_MODULE_LDS_T:%.*]], ptr addrspace(3) @llvm.amdgcn.module.lds, i32 0, i32 1), align 4, !alias.scope [[META1:![0-9]+]], !noalias [[META4:![0-9]+]]
; MODULE-NEXT: [[MUL:%.*]] = mul i16 [[LD]], 3
; MODULE-NEXT: store i16 [[MUL]], ptr addrspace(3) getelementptr inbounds ([[LLVM_AMDGCN_MODULE_LDS_T]], ptr addrspace(3) @llvm.amdgcn.module.lds, i32 0, i32 1), align 4, !alias.scope [[META1]], !noalias [[META4]]
; MODULE-NEXT: ret void
;
; TABLE-LABEL: @f0(
; TABLE-NEXT: [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id()
; TABLE-NEXT: [[F0_LDS2:%.*]] = getelementptr inbounds [2 x [2 x i32]], ptr addrspace(4) @llvm.amdgcn.lds.offset.table, i32 0, i32 [[TMP1]], i32 1
; TABLE-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(4) [[F0_LDS2]], align 4
; TABLE-NEXT: [[F0_LDS3:%.*]] = inttoptr i32 [[TMP2]] to ptr addrspace(3)
; TABLE-NEXT: [[LD:%.*]] = load i16, ptr addrspace(3) [[F0_LDS3]], align 2
; TABLE-NEXT: [[MUL:%.*]] = mul i16 [[LD]], 3
; TABLE-NEXT: [[F0_LDS:%.*]] = getelementptr inbounds [2 x [2 x i32]], ptr addrspace(4) @llvm.amdgcn.lds.offset.table, i32 0, i32 [[TMP1]], i32 1
; TABLE-NEXT: [[TMP3:%.*]] = load i32, ptr addrspace(4) [[F0_LDS]], align 4
; TABLE-NEXT: [[F0_LDS1:%.*]] = inttoptr i32 [[TMP3]] to ptr addrspace(3)
; TABLE-NEXT: store i16 [[MUL]], ptr addrspace(3) [[F0_LDS1]], align 2
; TABLE-NEXT: ret void
;
; K_OR_HY-LABEL: @f0(
; K_OR_HY-NEXT: [[LD:%.*]] = load i16, ptr addrspace(3) @llvm.amdgcn.kernel.k_f0.lds, align 2
; K_OR_HY-NEXT: [[MUL:%.*]] = mul i16 [[LD]], 3
; K_OR_HY-NEXT: store i16 [[MUL]], ptr addrspace(3) @llvm.amdgcn.kernel.k_f0.lds, align 2
; K_OR_HY-NEXT: ret void
;
  %ld = load i16, ptr addrspace(3) @f0.lds
  %mul = mul i16 %ld, 3
  store i16 %mul, ptr addrspace(3) @f0.lds
  ret void
}

define amdgpu_kernel void @k_f0() {
; MODULE-LABEL: @k_f0(
; MODULE-NEXT: call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.module.lds) ], !alias.scope [[META5:![0-9]+]], !noalias [[META1]]
; MODULE-NEXT: call void @f0()
; MODULE-NEXT: ret void
;
; TABLE-LABEL: @k_f0(
; TABLE-NEXT: call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.kernel.k_f0.lds) ]
; TABLE-NEXT: call void @f0()
; TABLE-NEXT: ret void
;
; K_OR_HY-LABEL: @k_f0(
; K_OR_HY-NEXT: call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.kernel.k_f0.lds) ]
; K_OR_HY-NEXT: call void @f0()
; K_OR_HY-NEXT: ret void
;
  call void @f0()
  ret void
}

;; As above, but with the kernel also using the variable.

@both.lds = addrspace(3) global i32 undef
define void @f_both() {
; MODULE-LABEL: @f_both(
; MODULE-NEXT: [[LD:%.*]] = load i32, ptr addrspace(3) @llvm.amdgcn.module.lds, align 4, !alias.scope [[META5]], !noalias [[META4]]
; MODULE-NEXT: [[MUL:%.*]] = mul i32 [[LD]], 4
; MODULE-NEXT: store i32 [[MUL]], ptr addrspace(3) @llvm.amdgcn.module.lds, align 4, !alias.scope [[META5]], !noalias [[META4]]
; MODULE-NEXT: ret void
;
; TABLE-LABEL: @f_both(
; TABLE-NEXT: [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id()
; TABLE-NEXT: [[BOTH_LDS2:%.*]] = getelementptr inbounds [2 x [2 x i32]], ptr addrspace(4) @llvm.amdgcn.lds.offset.table, i32 0, i32 [[TMP1]], i32 0
; TABLE-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(4) [[BOTH_LDS2]], align 4
; TABLE-NEXT: [[BOTH_LDS3:%.*]] = inttoptr i32 [[TMP2]] to ptr addrspace(3)
; TABLE-NEXT: [[LD:%.*]] = load i32, ptr addrspace(3) [[BOTH_LDS3]], align 4
; TABLE-NEXT: [[MUL:%.*]] = mul i32 [[LD]], 4
; TABLE-NEXT: [[BOTH_LDS:%.*]] = getelementptr inbounds [2 x [2 x i32]], ptr addrspace(4) @llvm.amdgcn.lds.offset.table, i32 0, i32 [[TMP1]], i32 0
; TABLE-NEXT: [[TMP3:%.*]] = load i32, ptr addrspace(4) [[BOTH_LDS]], align 4
; TABLE-NEXT: [[BOTH_LDS1:%.*]] = inttoptr i32 [[TMP3]] to ptr addrspace(3)
; TABLE-NEXT: store i32 [[MUL]], ptr addrspace(3) [[BOTH_LDS1]], align 4
; TABLE-NEXT: ret void
;
; K_OR_HY-LABEL: @f_both(
; K_OR_HY-NEXT: [[LD:%.*]] = load i32, ptr addrspace(3) @llvm.amdgcn.kernel.k0_both.lds, align 4
; K_OR_HY-NEXT: [[MUL:%.*]] = mul i32 [[LD]], 4
; K_OR_HY-NEXT: store i32 [[MUL]], ptr addrspace(3) @llvm.amdgcn.kernel.k0_both.lds, align 4
; K_OR_HY-NEXT: ret void
;
  %ld = load i32, ptr addrspace(3) @both.lds
  %mul = mul i32 %ld, 4
  store i32 %mul, ptr addrspace(3) @both.lds
  ret void
}

define amdgpu_kernel void @k0_both() {
; MODULE-LABEL: @k0_both(
; MODULE-NEXT: call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.module.lds) ]
; MODULE-NEXT: [[LD:%.*]] = load i32, ptr addrspace(3) @llvm.amdgcn.module.lds, align 4, !alias.scope [[META5]], !noalias [[META1]]
; MODULE-NEXT: [[MUL:%.*]] = mul i32 [[LD]], 5
; MODULE-NEXT: store i32 [[MUL]], ptr addrspace(3) @llvm.amdgcn.module.lds, align 4, !alias.scope [[META5]], !noalias [[META1]]
; MODULE-NEXT: call void @f_both()
; MODULE-NEXT: ret void
;
; TABLE-LABEL: @k0_both(
; TABLE-NEXT: call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.kernel.k0_both.lds) ]
; TABLE-NEXT: [[LD:%.*]] = load i32, ptr addrspace(3) @llvm.amdgcn.kernel.k0_both.lds, align 4
; TABLE-NEXT: [[MUL:%.*]] = mul i32 [[LD]], 5
; TABLE-NEXT: store i32 [[MUL]], ptr addrspace(3) @llvm.amdgcn.kernel.k0_both.lds, align 4
; TABLE-NEXT: call void @f_both()
; TABLE-NEXT: ret void
;
; K_OR_HY-LABEL: @k0_both(
; K_OR_HY-NEXT: call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.kernel.k0_both.lds) ]
; K_OR_HY-NEXT: [[LD:%.*]] = load i32, ptr addrspace(3) @llvm.amdgcn.kernel.k0_both.lds, align 4
; K_OR_HY-NEXT: [[MUL:%.*]] = mul i32 [[LD]], 5
; K_OR_HY-NEXT: store i32 [[MUL]], ptr addrspace(3) @llvm.amdgcn.kernel.k0_both.lds, align 4
; K_OR_HY-NEXT: call void @f_both()
; K_OR_HY-NEXT: ret void
;
  %ld = load i32, ptr addrspace(3) @both.lds
  %mul = mul i32 %ld, 5
  store i32 %mul, ptr addrspace(3) @both.lds
  call void @f_both()
  ret void
}