; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --check-globals
; RUN: opt --mtriple=amdgcn-amd-amdhsa --data-layout=A5 -S -passes=openmp-opt < %s | FileCheck %s --check-prefixes=AMDGPU
; RUN: opt --mtriple=nvptx64-- -S -passes=openmp-opt < %s | FileCheck %s --check-prefixes=NVPTX
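;
; Check SPMD-ization of kernels whose parallel regions are only reachable
; through indirect calls: via a select over known function pointers
; (@spmd_callees, @spmd_and_non_spmd_callee) and via !callees metadata
; (@spmd_callees_metadata, @spmd_and_non_spmd_callees_metadata). Kernels whose
; possible callees are all SPMD-amenable are rewritten to SPMD mode; a single
; non-SPMD-amenable candidate keeps the kernel generic with a state machine.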

%struct.ident_t = type { i32, i32, i32, i32, ptr }
%struct.ConfigurationEnvironmentTy = type { i8, i8, i8, i32, i32, i32, i32 }
%struct.KernelEnvironmentTy = type { %struct.ConfigurationEnvironmentTy, ptr, ptr }

@0 = private unnamed_addr constant [23 x i8] c";unknown;unknown;0;0;;\00", align 1
@1 = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 0, ptr @0 }, align 8
@spmd_callees_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0 }, ptr @1, ptr null }
@spmd_callees_metadata_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0 }, ptr @1, ptr null }
@spmd_and_non_spmd_callees_metadata_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0 }, ptr @1, ptr null }
@spmd_and_non_spmd_callee_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0 }, ptr @1, ptr null }
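;
; The three leading i8 fields of ConfigurationEnvironmentTy are, in order,
; UseGenericStateMachine, MayUseNestedParallelism, and the execution mode
; (1 = generic, 2 = SPMD, 3 = generic-SPMD), matching DeviceRTL's layout.
; Every kernel starts out generic with the state machine enabled; the global
; checks below show which of these fields openmp-opt was able to rewrite.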

;.
; AMDGPU: @[[GLOB0:[0-9]+]] = private unnamed_addr constant [23 x i8] c"
; AMDGPU: @[[GLOB1:[0-9]+]] = private unnamed_addr constant [[STRUCT_IDENT_T:%.*]] { i32 0, i32 2, i32 0, i32 0, ptr @[[GLOB0]] }, align 8
; AMDGPU: @[[SPMD_CALLEES_KERNEL_ENVIRONMENT:[a-zA-Z0-9_$"\\.-]+]] = local_unnamed_addr constant [[STRUCT_KERNELENVIRONMENTTY:%.*]] { [[STRUCT_CONFIGURATIONENVIRONMENTTY:%.*]] { i8 0, i8 1, i8 3, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
; AMDGPU: @[[SPMD_CALLEES_METADATA_KERNEL_ENVIRONMENT:[a-zA-Z0-9_$"\\.-]+]] = local_unnamed_addr constant [[STRUCT_KERNELENVIRONMENTTY:%.*]] { [[STRUCT_CONFIGURATIONENVIRONMENTTY:%.*]] { i8 0, i8 0, i8 3, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
; AMDGPU: @[[SPMD_AND_NON_SPMD_CALLEES_METADATA_KERNEL_ENVIRONMENT:[a-zA-Z0-9_$"\\.-]+]] = local_unnamed_addr constant [[STRUCT_KERNELENVIRONMENTTY:%.*]] { [[STRUCT_CONFIGURATIONENVIRONMENTTY:%.*]] { i8 0, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
; AMDGPU: @[[SPMD_AND_NON_SPMD_CALLEE_KERNEL_ENVIRONMENT:[a-zA-Z0-9_$"\\.-]+]] = local_unnamed_addr constant [[STRUCT_KERNELENVIRONMENTTY:%.*]] { [[STRUCT_CONFIGURATIONENVIRONMENTTY:%.*]] { i8 0, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
;.
; NVPTX: @[[GLOB0:[0-9]+]] = private unnamed_addr constant [23 x i8] c"
; NVPTX: @[[GLOB1:[0-9]+]] = private unnamed_addr constant [[STRUCT_IDENT_T:%.*]] { i32 0, i32 2, i32 0, i32 0, ptr @[[GLOB0]] }, align 8
; NVPTX: @[[SPMD_CALLEES_KERNEL_ENVIRONMENT:[a-zA-Z0-9_$"\\.-]+]] = local_unnamed_addr constant [[STRUCT_KERNELENVIRONMENTTY:%.*]] { [[STRUCT_CONFIGURATIONENVIRONMENTTY:%.*]] { i8 0, i8 1, i8 3, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
; NVPTX: @[[SPMD_CALLEES_METADATA_KERNEL_ENVIRONMENT:[a-zA-Z0-9_$"\\.-]+]] = local_unnamed_addr constant [[STRUCT_KERNELENVIRONMENTTY:%.*]] { [[STRUCT_CONFIGURATIONENVIRONMENTTY:%.*]] { i8 0, i8 0, i8 3, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
; NVPTX: @[[SPMD_AND_NON_SPMD_CALLEES_METADATA_KERNEL_ENVIRONMENT:[a-zA-Z0-9_$"\\.-]+]] = local_unnamed_addr constant [[STRUCT_KERNELENVIRONMENTTY:%.*]] { [[STRUCT_CONFIGURATIONENVIRONMENTTY:%.*]] { i8 0, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
; NVPTX: @[[SPMD_AND_NON_SPMD_CALLEE_KERNEL_ENVIRONMENT:[a-zA-Z0-9_$"\\.-]+]] = local_unnamed_addr constant [[STRUCT_KERNELENVIRONMENTTY:%.*]] { [[STRUCT_CONFIGURATIONENVIRONMENTTY:%.*]] { i8 0, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
;.
define weak void @spmd_callees(i1 %c) #0 {
; AMDGPU-LABEL: define {{[^@]+}}@spmd_callees
; AMDGPU-SAME: (i1 [[C:%.*]]) #[[ATTR0:[0-9]+]] {
; AMDGPU-NEXT:    call void @spmd_callees__debug(i1 [[C]])
; AMDGPU-NEXT:    ret void
;
; NVPTX-LABEL: define {{[^@]+}}@spmd_callees
; NVPTX-SAME: (i1 [[C:%.*]]) #[[ATTR0:[0-9]+]] {
; NVPTX-NEXT:    call void @spmd_callees__debug(i1 [[C]])
; NVPTX-NEXT:    ret void
;
  call void @spmd_callees__debug(i1 %c)
  ret void
}

define internal void @spmd_callees__debug(i1 %c) {
; AMDGPU-LABEL: define {{[^@]+}}@spmd_callees__debug
; AMDGPU-SAME: (i1 [[C:%.*]]) #[[ATTR1:[0-9]+]] {
; AMDGPU-NEXT:  entry:
; AMDGPU-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; AMDGPU-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
; AMDGPU-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @spmd_callees_kernel_environment, ptr null)
; AMDGPU-NEXT:    [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
; AMDGPU-NEXT:    br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[COMMON_RET:%.*]]
; AMDGPU:       common.ret:
; AMDGPU-NEXT:    ret void
; AMDGPU:       user_code.entry:
; AMDGPU-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR10:[0-9]+]]
; AMDGPU-NEXT:    store i32 0, ptr [[DOTZERO_ADDR]], align 4
; AMDGPU-NEXT:    store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4, !tbaa [[TBAA17:![0-9]+]]
; AMDGPU-NEXT:    [[FP:%.*]] = select i1 [[C]], ptr @__omp_outlined_spmd_amenable1, ptr @__omp_outlined_spmd_amenable2
; AMDGPU-NEXT:    [[TMP2:%.*]] = icmp eq ptr [[FP]], @__omp_outlined_spmd_amenable2
; AMDGPU-NEXT:    br i1 [[TMP2]], label [[TMP3:%.*]], label [[TMP4:%.*]]
; AMDGPU:       3:
; AMDGPU-NEXT:    call void @__omp_outlined_spmd_amenable2(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR10]]
; AMDGPU-NEXT:    br label [[TMP7:%.*]]
; AMDGPU:       4:
; AMDGPU-NEXT:    br i1 true, label [[TMP5:%.*]], label [[TMP6:%.*]]
; AMDGPU:       5:
; AMDGPU-NEXT:    call void @__omp_outlined_spmd_amenable1(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR10]]
; AMDGPU-NEXT:    br label [[TMP7]]
; AMDGPU:       6:
; AMDGPU-NEXT:    unreachable
; AMDGPU:       7:
; AMDGPU-NEXT:    call void @__kmpc_target_deinit()
; AMDGPU-NEXT:    br label [[COMMON_RET]]
;
; NVPTX-LABEL: define {{[^@]+}}@spmd_callees__debug
; NVPTX-SAME: (i1 [[C:%.*]]) #[[ATTR1:[0-9]+]] {
; NVPTX-NEXT:  entry:
; NVPTX-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; NVPTX-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
; NVPTX-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @spmd_callees_kernel_environment, ptr null)
; NVPTX-NEXT:    [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
; NVPTX-NEXT:    br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[COMMON_RET:%.*]]
; NVPTX:       common.ret:
; NVPTX-NEXT:    ret void
; NVPTX:       user_code.entry:
; NVPTX-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR10:[0-9]+]]
; NVPTX-NEXT:    store i32 0, ptr [[DOTZERO_ADDR]], align 4
; NVPTX-NEXT:    store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4, !tbaa [[TBAA17:![0-9]+]]
; NVPTX-NEXT:    [[FP:%.*]] = select i1 [[C]], ptr @__omp_outlined_spmd_amenable1, ptr @__omp_outlined_spmd_amenable2
; NVPTX-NEXT:    [[TMP2:%.*]] = icmp eq ptr [[FP]], @__omp_outlined_spmd_amenable2
; NVPTX-NEXT:    br i1 [[TMP2]], label [[TMP3:%.*]], label [[TMP4:%.*]]
; NVPTX:       3:
; NVPTX-NEXT:    call void @__omp_outlined_spmd_amenable2(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR10]]
; NVPTX-NEXT:    br label [[TMP7:%.*]]
; NVPTX:       4:
; NVPTX-NEXT:    br i1 true, label [[TMP5:%.*]], label [[TMP6:%.*]]
; NVPTX:       5:
; NVPTX-NEXT:    call void @__omp_outlined_spmd_amenable1(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR10]]
; NVPTX-NEXT:    br label [[TMP7]]
; NVPTX:       6:
; NVPTX-NEXT:    unreachable
; NVPTX:       7:
; NVPTX-NEXT:    call void @__kmpc_target_deinit()
; NVPTX-NEXT:    br label [[COMMON_RET]]
;
entry:
  %.zero.addr = alloca i32, align 4
  %.threadid_temp. = alloca i32, align 4
  %0 = call i32 @__kmpc_target_init(ptr @spmd_callees_kernel_environment, ptr null)
  %exec_user_code = icmp eq i32 %0, -1
  br i1 %exec_user_code, label %user_code.entry, label %common.ret

common.ret:                                       ; preds = %entry, %user_code.entry
  ret void

user_code.entry:                                  ; preds = %entry
  %1 = call i32 @__kmpc_global_thread_num(ptr @1)
  store i32 0, ptr %.zero.addr, align 4
  store i32 %1, ptr %.threadid_temp., align 4, !tbaa !18
  %fp = select i1 %c, ptr @__omp_outlined_spmd_amenable1, ptr @__omp_outlined_spmd_amenable2
  call void %fp(ptr %.threadid_temp., ptr %.zero.addr) #6
  call void @__kmpc_target_deinit()
  br label %common.ret
}

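;
; Both select operands above are SPMD-amenable, so the kernel is flipped to
; SPMD mode and the indirect call through %fp is promoted to a pointer
; comparison that dispatches to direct calls of the two candidates.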
; Function Attrs: alwaysinline convergent norecurse nounwind
define internal void @__omp_outlined_spmd_amenable1(ptr noalias %.global_tid., ptr noalias %.bound_tid.) {
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined_spmd_amenable1
; AMDGPU-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) {
; AMDGPU-NEXT:  entry:
; AMDGPU-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
; AMDGPU-NEXT:    br label [[FOR_COND:%.*]]
; AMDGPU:       for.cond:
; AMDGPU-NEXT:    [[I_0:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY:%.*]] ]
; AMDGPU-NEXT:    [[CMP:%.*]] = icmp slt i32 [[I_0]], 100
; AMDGPU-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP:%.*]]
; AMDGPU:       for.cond.cleanup:
; AMDGPU-NEXT:    call void @spmd_amenable() #[[ATTR6:[0-9]+]]
; AMDGPU-NEXT:    ret void
; AMDGPU:       for.body:
; AMDGPU-NEXT:    [[TMP0:%.*]] = load i32, ptr [[DOTGLOBAL_TID_]], align 4, !tbaa [[TBAA17]]
; AMDGPU-NEXT:    call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__1, ptr @__omp_outlined__1_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
; AMDGPU-NEXT:    [[INC]] = add nsw i32 [[I_0]], 1
; AMDGPU-NEXT:    br label [[FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]]
;
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined_spmd_amenable1
; NVPTX-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) {
; NVPTX-NEXT:  entry:
; NVPTX-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
; NVPTX-NEXT:    br label [[FOR_COND:%.*]]
; NVPTX:       for.cond:
; NVPTX-NEXT:    [[I_0:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY:%.*]] ]
; NVPTX-NEXT:    [[CMP:%.*]] = icmp slt i32 [[I_0]], 100
; NVPTX-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP:%.*]]
; NVPTX:       for.cond.cleanup:
; NVPTX-NEXT:    call void @spmd_amenable() #[[ATTR6:[0-9]+]]
; NVPTX-NEXT:    ret void
; NVPTX:       for.body:
; NVPTX-NEXT:    [[TMP0:%.*]] = load i32, ptr [[DOTGLOBAL_TID_]], align 4, !tbaa [[TBAA17]]
; NVPTX-NEXT:    call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__1, ptr @__omp_outlined__1_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
; NVPTX-NEXT:    [[INC]] = add nsw i32 [[I_0]], 1
; NVPTX-NEXT:    br label [[FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]]
;
entry:
  %captured_vars_addrs = alloca [0 x ptr], align 8
  br label %for.cond

for.cond:                                         ; preds = %for.body, %entry
  %i.0 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
  %cmp = icmp slt i32 %i.0, 100
  br i1 %cmp, label %for.body, label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %for.cond
  call void @spmd_amenable() #10
  ret void

for.body:                                         ; preds = %for.cond
  %0 = load i32, ptr %.global_tid., align 4, !tbaa !18
  call void @__kmpc_parallel_51(ptr @1, i32 %0, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__1, ptr @__omp_outlined__1_wrapper, ptr %captured_vars_addrs, i64 0)
  %inc = add nsw i32 %i.0, 1
  br label %for.cond, !llvm.loop !22
}

; Function Attrs: alwaysinline convergent norecurse nounwind
define internal void @__omp_outlined__1(ptr noalias %.global_tid., ptr noalias %.bound_tid.) {
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__1
; AMDGPU-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) {
; AMDGPU-NEXT:  entry:
; AMDGPU-NEXT:    call void @unknown() #[[ATTR7:[0-9]+]]
; AMDGPU-NEXT:    ret void
;
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__1
; NVPTX-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) {
; NVPTX-NEXT:  entry:
; NVPTX-NEXT:    call void @unknown() #[[ATTR7:[0-9]+]]
; NVPTX-NEXT:    ret void
;
entry:
  call void @unknown() #11
  ret void
}

; Function Attrs: convergent norecurse nounwind
define internal void @__omp_outlined__1_wrapper(i16 zeroext %0, i32 %1) #3 {
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__1_wrapper
; AMDGPU-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR2:[0-9]+]] {
; AMDGPU-NEXT:  entry:
; AMDGPU-NEXT:    [[DOTADDR1:%.*]] = alloca i32, align 4
; AMDGPU-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; AMDGPU-NEXT:    [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
; AMDGPU-NEXT:    call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
; AMDGPU-NEXT:    call void @__omp_outlined__1(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR10]]
; AMDGPU-NEXT:    ret void
;
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__1_wrapper
; NVPTX-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR2:[0-9]+]] {
; NVPTX-NEXT:  entry:
; NVPTX-NEXT:    [[DOTADDR1:%.*]] = alloca i32, align 4
; NVPTX-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; NVPTX-NEXT:    [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
; NVPTX-NEXT:    call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
; NVPTX-NEXT:    call void @__omp_outlined__1(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR10]]
; NVPTX-NEXT:    ret void
;
entry:
  %.addr1 = alloca i32, align 4
  %.zero.addr = alloca i32, align 4
  %global_args = alloca ptr, align 8
  store i32 %1, ptr %.addr1, align 4, !tbaa !18
  store i32 0, ptr %.zero.addr, align 4
  call void @__kmpc_get_shared_variables(ptr %global_args)
  call void @__omp_outlined__1(ptr %.addr1, ptr %.zero.addr) #6
  ret void
}

; Function Attrs: alwaysinline convergent norecurse nounwind
define internal void @__omp_outlined_spmd_amenable2(ptr noalias %.global_tid., ptr noalias %.bound_tid.) {
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined_spmd_amenable2
; AMDGPU-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) {
; AMDGPU-NEXT:  entry:
; AMDGPU-NEXT:    [[X_H2S:%.*]] = alloca i8, i64 4, align 4, addrspace(5)
; AMDGPU-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
; AMDGPU-NEXT:    [[MALLOC_CAST:%.*]] = addrspacecast ptr addrspace(5) [[X_H2S]] to ptr
; AMDGPU-NEXT:    call void @use(ptr nocapture [[MALLOC_CAST]]) #[[ATTR6]]
; AMDGPU-NEXT:    br label [[FOR_COND:%.*]]
; AMDGPU:       for.cond:
; AMDGPU-NEXT:    [[I_0:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY:%.*]] ]
; AMDGPU-NEXT:    [[CMP:%.*]] = icmp slt i32 [[I_0]], 100
; AMDGPU-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP:%.*]]
; AMDGPU:       for.cond.cleanup:
; AMDGPU-NEXT:    call void @spmd_amenable() #[[ATTR6]]
; AMDGPU-NEXT:    ret void
; AMDGPU:       for.body:
; AMDGPU-NEXT:    [[TMP0:%.*]] = load i32, ptr [[DOTGLOBAL_TID_]], align 4, !tbaa [[TBAA17]]
; AMDGPU-NEXT:    call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__3, ptr @__omp_outlined__3_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
; AMDGPU-NEXT:    [[INC]] = add nsw i32 [[I_0]], 1
; AMDGPU-NEXT:    br label [[FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]]
;
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined_spmd_amenable2
; NVPTX-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) {
; NVPTX-NEXT:  entry:
; NVPTX-NEXT:    [[X_H2S:%.*]] = alloca i8, i64 4, align 4
; NVPTX-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
; NVPTX-NEXT:    call void @use(ptr nocapture [[X_H2S]]) #[[ATTR6]]
; NVPTX-NEXT:    br label [[FOR_COND:%.*]]
; NVPTX:       for.cond:
; NVPTX-NEXT:    [[I_0:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY:%.*]] ]
; NVPTX-NEXT:    [[CMP:%.*]] = icmp slt i32 [[I_0]], 100
; NVPTX-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP:%.*]]
; NVPTX:       for.cond.cleanup:
; NVPTX-NEXT:    call void @spmd_amenable() #[[ATTR6]]
; NVPTX-NEXT:    ret void
; NVPTX:       for.body:
; NVPTX-NEXT:    [[TMP0:%.*]] = load i32, ptr [[DOTGLOBAL_TID_]], align 4, !tbaa [[TBAA17]]
; NVPTX-NEXT:    call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__3, ptr @__omp_outlined__3_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
; NVPTX-NEXT:    [[INC]] = add nsw i32 [[I_0]], 1
; NVPTX-NEXT:    br label [[FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]]
;
entry:
  %captured_vars_addrs = alloca [0 x ptr], align 8
  %x = call align 4 ptr @__kmpc_alloc_shared(i64 4)
  call void @use(ptr nocapture %x) #10
  br label %for.cond

for.cond:                                         ; preds = %for.body, %entry
  %i.0 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
  %cmp = icmp slt i32 %i.0, 100
  br i1 %cmp, label %for.body, label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %for.cond
  call void @spmd_amenable() #10
  call void @__kmpc_free_shared(ptr %x, i64 4)
  ret void

for.body:                                         ; preds = %for.cond
  %0 = load i32, ptr %.global_tid., align 4, !tbaa !18
  call void @__kmpc_parallel_51(ptr @1, i32 %0, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__3, ptr @__omp_outlined__3_wrapper, ptr %captured_vars_addrs, i64 0)
  %inc = add nsw i32 %i.0, 1
  br label %for.cond, !llvm.loop !25
}
; Function Attrs: alwaysinline convergent norecurse nounwind
define internal void @__omp_outlined__3(ptr noalias %.global_tid., ptr noalias %.bound_tid.) {
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__3
; AMDGPU-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) {
; AMDGPU-NEXT:  entry:
; AMDGPU-NEXT:    call void @unknown() #[[ATTR7]]
; AMDGPU-NEXT:    ret void
;
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__3
; NVPTX-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) {
; NVPTX-NEXT:  entry:
; NVPTX-NEXT:    call void @unknown() #[[ATTR7]]
; NVPTX-NEXT:    ret void
;
entry:
  call void @unknown() #11
  ret void
}

; Function Attrs: convergent norecurse nounwind
define internal void @__omp_outlined__3_wrapper(i16 zeroext %0, i32 %1) #3 {
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__3_wrapper
; AMDGPU-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR2]] {
; AMDGPU-NEXT:  entry:
; AMDGPU-NEXT:    [[DOTADDR1:%.*]] = alloca i32, align 4
; AMDGPU-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; AMDGPU-NEXT:    [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
; AMDGPU-NEXT:    call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
; AMDGPU-NEXT:    call void @__omp_outlined__3(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR10]]
; AMDGPU-NEXT:    ret void
;
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__3_wrapper
; NVPTX-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR2]] {
; NVPTX-NEXT:  entry:
; NVPTX-NEXT:    [[DOTADDR1:%.*]] = alloca i32, align 4
; NVPTX-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; NVPTX-NEXT:    [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
; NVPTX-NEXT:    call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
; NVPTX-NEXT:    call void @__omp_outlined__3(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR10]]
; NVPTX-NEXT:    ret void
;
entry:
  %.addr1 = alloca i32, align 4
  %.zero.addr = alloca i32, align 4
  %global_args = alloca ptr, align 8
  store i32 %1, ptr %.addr1, align 4, !tbaa !18
  store i32 0, ptr %.zero.addr, align 4
  call void @__kmpc_get_shared_variables(ptr %global_args)
  call void @__omp_outlined__3(ptr %.addr1, ptr %.zero.addr) #6
  ret void
}

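;
; In @spmd_and_non_spmd_callee below, one select operand is not SPMD-amenable,
; so the kernel has to stay in generic mode: the checks expect the full worker
; state machine (barrier plus the __kmpc_kernel_parallel work-function loop) to
; be materialized, while the indirect call is still promoted to guarded direct calls.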
; Function Attrs: alwaysinline convergent norecurse nounwind
define weak void @spmd_and_non_spmd_callee(i1 %c) #0 {
; AMDGPU-LABEL: define {{[^@]+}}@spmd_and_non_spmd_callee
; AMDGPU-SAME: (i1 [[C:%.*]]) #[[ATTR0]] {
; AMDGPU-NEXT:  entry:
; AMDGPU-NEXT:    [[WORKER_WORK_FN_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
; AMDGPU-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; AMDGPU-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
; AMDGPU-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @spmd_and_non_spmd_callee_kernel_environment, ptr null)
; AMDGPU-NEXT:    [[THREAD_IS_WORKER:%.*]] = icmp ne i32 [[TMP0]], -1
; AMDGPU-NEXT:    br i1 [[THREAD_IS_WORKER]], label [[IS_WORKER_CHECK:%.*]], label [[THREAD_USER_CODE_CHECK:%.*]]
; AMDGPU:       is_worker_check:
; AMDGPU-NEXT:    [[BLOCK_HW_SIZE:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
; AMDGPU-NEXT:    [[WARP_SIZE:%.*]] = call i32 @__kmpc_get_warp_size()
; AMDGPU-NEXT:    [[BLOCK_SIZE:%.*]] = sub i32 [[BLOCK_HW_SIZE]], [[WARP_SIZE]]
; AMDGPU-NEXT:    [[THREAD_IS_MAIN_OR_WORKER:%.*]] = icmp slt i32 [[TMP0]], [[BLOCK_SIZE]]
; AMDGPU-NEXT:    br i1 [[THREAD_IS_MAIN_OR_WORKER]], label [[WORKER_STATE_MACHINE_BEGIN:%.*]], label [[WORKER_STATE_MACHINE_FINISHED:%.*]]
; AMDGPU:       worker_state_machine.begin:
; AMDGPU-NEXT:    call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]])
; AMDGPU-NEXT:    [[WORKER_WORK_FN_ADDR_GENERIC:%.*]] = addrspacecast ptr addrspace(5) [[WORKER_WORK_FN_ADDR]] to ptr
; AMDGPU-NEXT:    [[WORKER_IS_ACTIVE:%.*]] = call i1 @__kmpc_kernel_parallel(ptr [[WORKER_WORK_FN_ADDR_GENERIC]])
; AMDGPU-NEXT:    [[WORKER_WORK_FN:%.*]] = load ptr, ptr [[WORKER_WORK_FN_ADDR_GENERIC]], align 8
; AMDGPU-NEXT:    [[WORKER_WORK_FN_ADDR_CAST:%.*]] = bitcast ptr [[WORKER_WORK_FN]] to ptr
; AMDGPU-NEXT:    [[WORKER_IS_DONE:%.*]] = icmp eq ptr [[WORKER_WORK_FN]], null
; AMDGPU-NEXT:    br i1 [[WORKER_IS_DONE]], label [[WORKER_STATE_MACHINE_FINISHED]], label [[WORKER_STATE_MACHINE_IS_ACTIVE_CHECK:%.*]]
; AMDGPU:       worker_state_machine.finished:
; AMDGPU-NEXT:    ret void
; AMDGPU:       worker_state_machine.is_active.check:
; AMDGPU-NEXT:    br i1 [[WORKER_IS_ACTIVE]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_FALLBACK_EXECUTE:%.*]], label [[WORKER_STATE_MACHINE_DONE_BARRIER:%.*]]
; AMDGPU:       worker_state_machine.parallel_region.fallback.execute:
; AMDGPU-NEXT:    call void [[WORKER_WORK_FN_ADDR_CAST]](i16 0, i32 [[TMP0]])
; AMDGPU-NEXT:    br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END:%.*]]
; AMDGPU:       worker_state_machine.parallel_region.end:
; AMDGPU-NEXT:    call void @__kmpc_kernel_end_parallel()
; AMDGPU-NEXT:    br label [[WORKER_STATE_MACHINE_DONE_BARRIER]]
; AMDGPU:       worker_state_machine.done.barrier:
; AMDGPU-NEXT:    call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]])
; AMDGPU-NEXT:    br label [[WORKER_STATE_MACHINE_BEGIN]]
; AMDGPU:       thread.user_code.check:
; AMDGPU-NEXT:    [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
; AMDGPU-NEXT:    br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[COMMON_RET:%.*]]
; AMDGPU:       common.ret:
; AMDGPU-NEXT:    ret void
; AMDGPU:       user_code.entry:
; AMDGPU-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR10]]
; AMDGPU-NEXT:    store i32 0, ptr [[DOTZERO_ADDR]], align 4
; AMDGPU-NEXT:    store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4, !tbaa [[TBAA17]]
; AMDGPU-NEXT:    [[FP:%.*]] = select i1 [[C]], ptr @__omp_outlined_spmd_amenable3, ptr @__omp_outlined_not_spmd_amenable
; AMDGPU-NEXT:    [[TMP2:%.*]] = icmp eq ptr [[FP]], @__omp_outlined_not_spmd_amenable
; AMDGPU-NEXT:    br i1 [[TMP2]], label [[TMP3:%.*]], label [[TMP4:%.*]]
; AMDGPU:       3:
; AMDGPU-NEXT:    call void @__omp_outlined_not_spmd_amenable(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR10]]
; AMDGPU-NEXT:    br label [[TMP7:%.*]]
; AMDGPU:       4:
; AMDGPU-NEXT:    br i1 true, label [[TMP5:%.*]], label [[TMP6:%.*]]
; AMDGPU:       5:
; AMDGPU-NEXT:    call void @__omp_outlined_spmd_amenable3(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR10]]
; AMDGPU-NEXT:    br label [[TMP7]]
; AMDGPU:       6:
; AMDGPU-NEXT:    unreachable
; AMDGPU:       7:
; AMDGPU-NEXT:    call void @__kmpc_target_deinit()
; AMDGPU-NEXT:    br label [[COMMON_RET]]
;
; NVPTX-LABEL: define {{[^@]+}}@spmd_and_non_spmd_callee
; NVPTX-SAME: (i1 [[C:%.*]]) #[[ATTR0]] {
; NVPTX-NEXT:  entry:
; NVPTX-NEXT:    [[WORKER_WORK_FN_ADDR:%.*]] = alloca ptr, align 8
; NVPTX-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; NVPTX-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
; NVPTX-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @spmd_and_non_spmd_callee_kernel_environment, ptr null)
; NVPTX-NEXT:    [[THREAD_IS_WORKER:%.*]] = icmp ne i32 [[TMP0]], -1
; NVPTX-NEXT:    br i1 [[THREAD_IS_WORKER]], label [[IS_WORKER_CHECK:%.*]], label [[THREAD_USER_CODE_CHECK:%.*]]
; NVPTX:       is_worker_check:
; NVPTX-NEXT:    [[BLOCK_HW_SIZE:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
; NVPTX-NEXT:    [[WARP_SIZE:%.*]] = call i32 @__kmpc_get_warp_size()
; NVPTX-NEXT:    [[BLOCK_SIZE:%.*]] = sub i32 [[BLOCK_HW_SIZE]], [[WARP_SIZE]]
; NVPTX-NEXT:    [[THREAD_IS_MAIN_OR_WORKER:%.*]] = icmp slt i32 [[TMP0]], [[BLOCK_SIZE]]
; NVPTX-NEXT:    br i1 [[THREAD_IS_MAIN_OR_WORKER]], label [[WORKER_STATE_MACHINE_BEGIN:%.*]], label [[WORKER_STATE_MACHINE_FINISHED:%.*]]
; NVPTX:       worker_state_machine.begin:
; NVPTX-NEXT:    call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]])
; NVPTX-NEXT:    [[WORKER_IS_ACTIVE:%.*]] = call i1 @__kmpc_kernel_parallel(ptr [[WORKER_WORK_FN_ADDR]])
; NVPTX-NEXT:    [[WORKER_WORK_FN:%.*]] = load ptr, ptr [[WORKER_WORK_FN_ADDR]], align 8
; NVPTX-NEXT:    [[WORKER_WORK_FN_ADDR_CAST:%.*]] = bitcast ptr [[WORKER_WORK_FN]] to ptr
; NVPTX-NEXT:    [[WORKER_IS_DONE:%.*]] = icmp eq ptr [[WORKER_WORK_FN]], null
; NVPTX-NEXT:    br i1 [[WORKER_IS_DONE]], label [[WORKER_STATE_MACHINE_FINISHED]], label [[WORKER_STATE_MACHINE_IS_ACTIVE_CHECK:%.*]]
; NVPTX:       worker_state_machine.finished:
; NVPTX-NEXT:    ret void
; NVPTX:       worker_state_machine.is_active.check:
; NVPTX-NEXT:    br i1 [[WORKER_IS_ACTIVE]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_FALLBACK_EXECUTE:%.*]], label [[WORKER_STATE_MACHINE_DONE_BARRIER:%.*]]
; NVPTX:       worker_state_machine.parallel_region.fallback.execute:
; NVPTX-NEXT:    call void [[WORKER_WORK_FN_ADDR_CAST]](i16 0, i32 [[TMP0]])
; NVPTX-NEXT:    br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END:%.*]]
; NVPTX:       worker_state_machine.parallel_region.end:
; NVPTX-NEXT:    call void @__kmpc_kernel_end_parallel()
; NVPTX-NEXT:    br label [[WORKER_STATE_MACHINE_DONE_BARRIER]]
; NVPTX:       worker_state_machine.done.barrier:
; NVPTX-NEXT:    call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]])
; NVPTX-NEXT:    br label [[WORKER_STATE_MACHINE_BEGIN]]
; NVPTX:       thread.user_code.check:
; NVPTX-NEXT:    [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
; NVPTX-NEXT:    br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[COMMON_RET:%.*]]
; NVPTX:       common.ret:
; NVPTX-NEXT:    ret void
; NVPTX:       user_code.entry:
; NVPTX-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR10]]
; NVPTX-NEXT:    store i32 0, ptr [[DOTZERO_ADDR]], align 4
; NVPTX-NEXT:    store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4, !tbaa [[TBAA17]]
; NVPTX-NEXT:    [[FP:%.*]] = select i1 [[C]], ptr @__omp_outlined_spmd_amenable3, ptr @__omp_outlined_not_spmd_amenable
; NVPTX-NEXT:    [[TMP2:%.*]] = icmp eq ptr [[FP]], @__omp_outlined_not_spmd_amenable
; NVPTX-NEXT:    br i1 [[TMP2]], label [[TMP3:%.*]], label [[TMP4:%.*]]
; NVPTX:       3:
; NVPTX-NEXT:    call void @__omp_outlined_not_spmd_amenable(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR10]]
; NVPTX-NEXT:    br label [[TMP7:%.*]]
; NVPTX:       4:
; NVPTX-NEXT:    br i1 true, label [[TMP5:%.*]], label [[TMP6:%.*]]
; NVPTX:       5:
; NVPTX-NEXT:    call void @__omp_outlined_spmd_amenable3(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR10]]
; NVPTX-NEXT:    br label [[TMP7]]
; NVPTX:       6:
; NVPTX-NEXT:    unreachable
; NVPTX:       7:
; NVPTX-NEXT:    call void @__kmpc_target_deinit()
; NVPTX-NEXT:    br label [[COMMON_RET]]
;
entry:
  %.zero.addr = alloca i32, align 4
  %.threadid_temp. = alloca i32, align 4
  %0 = call i32 @__kmpc_target_init(ptr @spmd_and_non_spmd_callee_kernel_environment, ptr null)
  %exec_user_code = icmp eq i32 %0, -1
  br i1 %exec_user_code, label %user_code.entry, label %common.ret

common.ret:                                       ; preds = %entry, %user_code.entry
  ret void

user_code.entry:                                  ; preds = %entry
  %1 = call i32 @__kmpc_global_thread_num(ptr @1)
  store i32 0, ptr %.zero.addr, align 4
  store i32 %1, ptr %.threadid_temp., align 4, !tbaa !18
  %fp = select i1 %c, ptr @__omp_outlined_spmd_amenable3, ptr @__omp_outlined_not_spmd_amenable
  call void %fp(ptr %.threadid_temp., ptr %.zero.addr) #6
  call void @__kmpc_target_deinit()
  br label %common.ret
}

; Function Attrs: alwaysinline convergent norecurse nounwind
define internal void @__omp_outlined_spmd_amenable3(ptr noalias %.global_tid., ptr noalias %.bound_tid.) {
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined_spmd_amenable3
; AMDGPU-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) {
; AMDGPU-NEXT:  entry:
; AMDGPU-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x ptr], align 8
; AMDGPU-NEXT:    [[X:%.*]] = call align 4 ptr @__kmpc_alloc_shared(i64 4) #[[ATTR10]]
; AMDGPU-NEXT:    br label [[FOR_COND:%.*]]
; AMDGPU:       for.cond:
; AMDGPU-NEXT:    [[I_0:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY:%.*]] ]
; AMDGPU-NEXT:    [[CMP:%.*]] = icmp slt i32 [[I_0]], 100
; AMDGPU-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP:%.*]]
; AMDGPU:       for.cond.cleanup:
; AMDGPU-NEXT:    call void @spmd_amenable() #[[ATTR6]]
; AMDGPU-NEXT:    call void @__kmpc_free_shared(ptr [[X]], i64 4) #[[ATTR10]]
; AMDGPU-NEXT:    ret void
; AMDGPU:       for.body:
; AMDGPU-NEXT:    store ptr [[X]], ptr [[CAPTURED_VARS_ADDRS]], align 8, !tbaa [[TBAA25:![0-9]+]]
; AMDGPU-NEXT:    [[TMP0:%.*]] = load i32, ptr [[DOTGLOBAL_TID_]], align 4, !tbaa [[TBAA17]]
; AMDGPU-NEXT:    call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__5, ptr @__omp_outlined__5_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 1)
; AMDGPU-NEXT:    [[INC]] = add nsw i32 [[I_0]], 1
; AMDGPU-NEXT:    br label [[FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]]
;
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined_spmd_amenable3
; NVPTX-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) {
; NVPTX-NEXT:  entry:
; NVPTX-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x ptr], align 8
; NVPTX-NEXT:    [[X:%.*]] = call align 4 ptr @__kmpc_alloc_shared(i64 4) #[[ATTR10]]
; NVPTX-NEXT:    br label [[FOR_COND:%.*]]
; NVPTX:       for.cond:
; NVPTX-NEXT:    [[I_0:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY:%.*]] ]
; NVPTX-NEXT:    [[CMP:%.*]] = icmp slt i32 [[I_0]], 100
; NVPTX-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP:%.*]]
; NVPTX:       for.cond.cleanup:
; NVPTX-NEXT:    call void @spmd_amenable() #[[ATTR6]]
; NVPTX-NEXT:    call void @__kmpc_free_shared(ptr [[X]], i64 4) #[[ATTR10]]
; NVPTX-NEXT:    ret void
; NVPTX:       for.body:
; NVPTX-NEXT:    store ptr [[X]], ptr [[CAPTURED_VARS_ADDRS]], align 8, !tbaa [[TBAA25:![0-9]+]]
; NVPTX-NEXT:    [[TMP0:%.*]] = load i32, ptr [[DOTGLOBAL_TID_]], align 4, !tbaa [[TBAA17]]
; NVPTX-NEXT:    call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__5, ptr @__omp_outlined__5_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 1)
; NVPTX-NEXT:    [[INC]] = add nsw i32 [[I_0]], 1
; NVPTX-NEXT:    br label [[FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]]
;
entry:
  %captured_vars_addrs = alloca [1 x ptr], align 8
  %x = call align 4 ptr @__kmpc_alloc_shared(i64 4)
  br label %for.cond

for.cond:                                         ; preds = %for.body, %entry
  %i.0 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
  %cmp = icmp slt i32 %i.0, 100
  br i1 %cmp, label %for.body, label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %for.cond
  call void @spmd_amenable() #10
  call void @__kmpc_free_shared(ptr %x, i64 4)
  ret void

for.body:                                         ; preds = %for.cond
  store ptr %x, ptr %captured_vars_addrs, align 8, !tbaa !26
  %0 = load i32, ptr %.global_tid., align 4, !tbaa !18
  call void @__kmpc_parallel_51(ptr @1, i32 %0, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__5, ptr @__omp_outlined__5_wrapper, ptr %captured_vars_addrs, i64 1)
  %inc = add nsw i32 %i.0, 1
  br label %for.cond, !llvm.loop !28
}

; Function Attrs: alwaysinline convergent norecurse nounwind
define internal void @__omp_outlined__5(ptr noalias %.global_tid., ptr noalias %.bound_tid., ptr nonnull align 4 dereferenceable(4) %x) {
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__5
; AMDGPU-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], ptr nonnull align 4 dereferenceable(4) [[X:%.*]]) {
; AMDGPU-NEXT:  entry:
; AMDGPU-NEXT:    [[TMP0:%.*]] = load i32, ptr [[X]], align 4, !tbaa [[TBAA17]]
; AMDGPU-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP0]], 1
; AMDGPU-NEXT:    store i32 [[INC]], ptr [[X]], align 4, !tbaa [[TBAA17]]
; AMDGPU-NEXT:    call void @unknown() #[[ATTR7]]
; AMDGPU-NEXT:    ret void
;
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__5
; NVPTX-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], ptr nonnull align 4 dereferenceable(4) [[X:%.*]]) {
; NVPTX-NEXT:  entry:
; NVPTX-NEXT:    [[TMP0:%.*]] = load i32, ptr [[X]], align 4, !tbaa [[TBAA17]]
; NVPTX-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP0]], 1
; NVPTX-NEXT:    store i32 [[INC]], ptr [[X]], align 4, !tbaa [[TBAA17]]
; NVPTX-NEXT:    call void @unknown() #[[ATTR7]]
; NVPTX-NEXT:    ret void
;
entry:
  %0 = load i32, ptr %x, align 4, !tbaa !18
  %inc = add nsw i32 %0, 1
  store i32 %inc, ptr %x, align 4, !tbaa !18
  call void @unknown() #11
  ret void
}

; Function Attrs: convergent norecurse nounwind
define internal void @__omp_outlined__5_wrapper(i16 zeroext %0, i32 %1) #3 {
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__5_wrapper
; AMDGPU-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR2]] {
; AMDGPU-NEXT:  entry:
; AMDGPU-NEXT:    [[DOTADDR1:%.*]] = alloca i32, align 4
; AMDGPU-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; AMDGPU-NEXT:    [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
; AMDGPU-NEXT:    call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
; AMDGPU-NEXT:    [[TMP2:%.*]] = load ptr, ptr [[GLOBAL_ARGS]], align 8
; AMDGPU-NEXT:    [[TMP3:%.*]] = load ptr, ptr [[TMP2]], align 8, !tbaa [[TBAA25]]
; AMDGPU-NEXT:    call void @__omp_outlined__5(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]], ptr [[TMP3]]) #[[ATTR10]]
; AMDGPU-NEXT:    ret void
;
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__5_wrapper
; NVPTX-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR2]] {
; NVPTX-NEXT:  entry:
; NVPTX-NEXT:    [[DOTADDR1:%.*]] = alloca i32, align 4
; NVPTX-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; NVPTX-NEXT:    [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
; NVPTX-NEXT:    call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
; NVPTX-NEXT:    [[TMP2:%.*]] = load ptr, ptr [[GLOBAL_ARGS]], align 8
; NVPTX-NEXT:    [[TMP3:%.*]] = load ptr, ptr [[TMP2]], align 8, !tbaa [[TBAA25]]
; NVPTX-NEXT:    call void @__omp_outlined__5(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]], ptr [[TMP3]]) #[[ATTR10]]
; NVPTX-NEXT:    ret void
;
entry:
  %.addr1 = alloca i32, align 4
  %.zero.addr = alloca i32, align 4
  %global_args = alloca ptr, align 8
  store i32 %1, ptr %.addr1, align 4, !tbaa !18
  store i32 0, ptr %.zero.addr, align 4
  call void @__kmpc_get_shared_variables(ptr %global_args)
  %2 = load ptr, ptr %global_args, align 8
  %3 = load ptr, ptr %2, align 8, !tbaa !26
  call void @__omp_outlined__5(ptr %.addr1, ptr %.zero.addr, ptr %3) #6
  ret void
}

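;
; @spmd_callees_metadata below never references its callees directly; the
; !callees metadata on the indirect call is what lets the pass enumerate the
; possible targets. The checks expect the call to be devirtualized to
; @__omp_outlined_spmd_amenable_external and the kernel to be SPMD-ized.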
; Function Attrs: alwaysinline convergent norecurse nounwind
define weak void @spmd_callees_metadata(ptr %fp) #0 {
; AMDGPU-LABEL: define {{[^@]+}}@spmd_callees_metadata
; AMDGPU-SAME: (ptr [[FP:%.*]]) #[[ATTR0]] {
; AMDGPU-NEXT:  entry:
; AMDGPU-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; AMDGPU-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
; AMDGPU-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @spmd_callees_metadata_kernel_environment, ptr null)
; AMDGPU-NEXT:    [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
; AMDGPU-NEXT:    br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[COMMON_RET:%.*]]
; AMDGPU:       common.ret:
; AMDGPU-NEXT:    ret void
; AMDGPU:       user_code.entry:
; AMDGPU-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR10]]
; AMDGPU-NEXT:    store i32 0, ptr [[DOTZERO_ADDR]], align 4
; AMDGPU-NEXT:    store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4, !tbaa [[TBAA17]]
; AMDGPU-NEXT:    call void @__omp_outlined_spmd_amenable_external(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]])
; AMDGPU-NEXT:    call void @__kmpc_target_deinit()
; AMDGPU-NEXT:    br label [[COMMON_RET]]
;
; NVPTX-LABEL: define {{[^@]+}}@spmd_callees_metadata
; NVPTX-SAME: (ptr [[FP:%.*]]) #[[ATTR0]] {
; NVPTX-NEXT:  entry:
; NVPTX-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; NVPTX-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
; NVPTX-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @spmd_callees_metadata_kernel_environment, ptr null)
; NVPTX-NEXT:    [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
; NVPTX-NEXT:    br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[COMMON_RET:%.*]]
; NVPTX:       common.ret:
; NVPTX-NEXT:    ret void
; NVPTX:       user_code.entry:
; NVPTX-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR10]]
; NVPTX-NEXT:    store i32 0, ptr [[DOTZERO_ADDR]], align 4
; NVPTX-NEXT:    store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4, !tbaa [[TBAA17]]
; NVPTX-NEXT:    call void @__omp_outlined_spmd_amenable_external(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]])
; NVPTX-NEXT:    call void @__kmpc_target_deinit()
; NVPTX-NEXT:    br label [[COMMON_RET]]
;
entry:
  %.zero.addr = alloca i32, align 4
  %.threadid_temp. = alloca i32, align 4
  %0 = call i32 @__kmpc_target_init(ptr @spmd_callees_metadata_kernel_environment, ptr null)
  %exec_user_code = icmp eq i32 %0, -1
  br i1 %exec_user_code, label %user_code.entry, label %common.ret

common.ret:                                       ; preds = %entry, %user_code.entry
  ret void

user_code.entry:                                  ; preds = %entry
  %1 = call i32 @__kmpc_global_thread_num(ptr @1)
  store i32 0, ptr %.zero.addr, align 4
  store i32 %1, ptr %.threadid_temp., align 4, !tbaa !18
  call void %fp(ptr %.threadid_temp., ptr %.zero.addr), !callees !31
  call void @__kmpc_target_deinit()
  br label %common.ret
}

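;
; @spmd_and_non_spmd_callees_metadata carries a !callees list with a candidate
; that cannot be ruled out and is not SPMD-amenable, so the kernel keeps the
; generic-mode state machine and the call is split into guarded direct calls.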
; Function Attrs: alwaysinline convergent norecurse nounwind
define weak void @spmd_and_non_spmd_callees_metadata(ptr %fp) #0 {
; AMDGPU-LABEL: define {{[^@]+}}@spmd_and_non_spmd_callees_metadata
; AMDGPU-SAME: (ptr [[FP:%.*]]) #[[ATTR0]] {
; AMDGPU-NEXT:  entry:
; AMDGPU-NEXT:    [[WORKER_WORK_FN_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
; AMDGPU-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; AMDGPU-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
; AMDGPU-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @spmd_and_non_spmd_callees_metadata_kernel_environment, ptr null)
; AMDGPU-NEXT:    [[THREAD_IS_WORKER:%.*]] = icmp ne i32 [[TMP0]], -1
; AMDGPU-NEXT:    br i1 [[THREAD_IS_WORKER]], label [[IS_WORKER_CHECK:%.*]], label [[THREAD_USER_CODE_CHECK:%.*]]
; AMDGPU:       is_worker_check:
; AMDGPU-NEXT:    [[BLOCK_HW_SIZE:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
; AMDGPU-NEXT:    [[WARP_SIZE:%.*]] = call i32 @__kmpc_get_warp_size()
; AMDGPU-NEXT:    [[BLOCK_SIZE:%.*]] = sub i32 [[BLOCK_HW_SIZE]], [[WARP_SIZE]]
; AMDGPU-NEXT:    [[THREAD_IS_MAIN_OR_WORKER:%.*]] = icmp slt i32 [[TMP0]], [[BLOCK_SIZE]]
; AMDGPU-NEXT:    br i1 [[THREAD_IS_MAIN_OR_WORKER]], label [[WORKER_STATE_MACHINE_BEGIN:%.*]], label [[WORKER_STATE_MACHINE_FINISHED:%.*]]
; AMDGPU:       worker_state_machine.begin:
; AMDGPU-NEXT:    call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]])
; AMDGPU-NEXT:    [[WORKER_WORK_FN_ADDR_GENERIC:%.*]] = addrspacecast ptr addrspace(5) [[WORKER_WORK_FN_ADDR]] to ptr
; AMDGPU-NEXT:    [[WORKER_IS_ACTIVE:%.*]] = call i1 @__kmpc_kernel_parallel(ptr [[WORKER_WORK_FN_ADDR_GENERIC]])
; AMDGPU-NEXT:    [[WORKER_WORK_FN:%.*]] = load ptr, ptr [[WORKER_WORK_FN_ADDR_GENERIC]], align 8
; AMDGPU-NEXT:    [[WORKER_WORK_FN_ADDR_CAST:%.*]] = bitcast ptr [[WORKER_WORK_FN]] to ptr
; AMDGPU-NEXT:    [[WORKER_IS_DONE:%.*]] = icmp eq ptr [[WORKER_WORK_FN]], null
; AMDGPU-NEXT:    br i1 [[WORKER_IS_DONE]], label [[WORKER_STATE_MACHINE_FINISHED]], label [[WORKER_STATE_MACHINE_IS_ACTIVE_CHECK:%.*]]
; AMDGPU:       worker_state_machine.finished:
; AMDGPU-NEXT:    ret void
; AMDGPU:       worker_state_machine.is_active.check:
; AMDGPU-NEXT:    br i1 [[WORKER_IS_ACTIVE]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_FALLBACK_EXECUTE:%.*]], label [[WORKER_STATE_MACHINE_DONE_BARRIER:%.*]]
; AMDGPU:       worker_state_machine.parallel_region.fallback.execute:
; AMDGPU-NEXT:    call void [[WORKER_WORK_FN_ADDR_CAST]](i16 0, i32 [[TMP0]])
; AMDGPU-NEXT:    br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END:%.*]]
; AMDGPU:       worker_state_machine.parallel_region.end:
; AMDGPU-NEXT:    call void @__kmpc_kernel_end_parallel()
; AMDGPU-NEXT:    br label [[WORKER_STATE_MACHINE_DONE_BARRIER]]
; AMDGPU:       worker_state_machine.done.barrier:
; AMDGPU-NEXT:    call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]])
; AMDGPU-NEXT:    br label [[WORKER_STATE_MACHINE_BEGIN]]
; AMDGPU:       thread.user_code.check:
; AMDGPU-NEXT:    [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
; AMDGPU-NEXT:    br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[COMMON_RET:%.*]]
; AMDGPU:       common.ret:
; AMDGPU-NEXT:    ret void
; AMDGPU:       user_code.entry:
; AMDGPU-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR10]]
; AMDGPU-NEXT:    store i32 0, ptr [[DOTZERO_ADDR]], align 4
; AMDGPU-NEXT:    store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4, !tbaa [[TBAA17]]
; AMDGPU-NEXT:    [[TMP2:%.*]] = icmp eq ptr [[FP]], @__omp_outlined_spmd_amenable_external
; AMDGPU-NEXT:    br i1 [[TMP2]], label [[TMP3:%.*]], label [[TMP4:%.*]]
; AMDGPU:       3:
; AMDGPU-NEXT:    call void @__omp_outlined_spmd_amenable_external(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]])
; AMDGPU-NEXT:    br label [[TMP7:%.*]]
; AMDGPU:       4:
; AMDGPU-NEXT:    br i1 true, label [[TMP5:%.*]], label [[TMP6:%.*]]
; AMDGPU:       5:
; AMDGPU-NEXT:    call void @__omp_outlined_not_spmd_amenable_external(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]])
; AMDGPU-NEXT:    br label [[TMP7]]
; AMDGPU:       6:
; AMDGPU-NEXT:    unreachable
; AMDGPU:       7:
; AMDGPU-NEXT:    call void @__kmpc_target_deinit()
; AMDGPU-NEXT:    br label [[COMMON_RET]]
;
; NVPTX-LABEL: define {{[^@]+}}@spmd_and_non_spmd_callees_metadata
; NVPTX-SAME: (ptr [[FP:%.*]]) #[[ATTR0]] {
; NVPTX-NEXT:  entry:
; NVPTX-NEXT:    [[WORKER_WORK_FN_ADDR:%.*]] = alloca ptr, align 8
; NVPTX-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; NVPTX-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
; NVPTX-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @spmd_and_non_spmd_callees_metadata_kernel_environment, ptr null)
; NVPTX-NEXT:    [[THREAD_IS_WORKER:%.*]] = icmp ne i32 [[TMP0]], -1
; NVPTX-NEXT:    br i1 [[THREAD_IS_WORKER]], label [[IS_WORKER_CHECK:%.*]], label [[THREAD_USER_CODE_CHECK:%.*]]
; NVPTX:       is_worker_check:
; NVPTX-NEXT:    [[BLOCK_HW_SIZE:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
; NVPTX-NEXT:    [[WARP_SIZE:%.*]] = call i32 @__kmpc_get_warp_size()
; NVPTX-NEXT:    [[BLOCK_SIZE:%.*]] = sub i32 [[BLOCK_HW_SIZE]], [[WARP_SIZE]]
; NVPTX-NEXT:    [[THREAD_IS_MAIN_OR_WORKER:%.*]] = icmp slt i32 [[TMP0]], [[BLOCK_SIZE]]
; NVPTX-NEXT:    br i1 [[THREAD_IS_MAIN_OR_WORKER]], label [[WORKER_STATE_MACHINE_BEGIN:%.*]], label [[WORKER_STATE_MACHINE_FINISHED:%.*]]
; NVPTX:       worker_state_machine.begin:
; NVPTX-NEXT:    call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]])
; NVPTX-NEXT:    [[WORKER_IS_ACTIVE:%.*]] = call i1 @__kmpc_kernel_parallel(ptr [[WORKER_WORK_FN_ADDR]])
; NVPTX-NEXT:    [[WORKER_WORK_FN:%.*]] = load ptr, ptr [[WORKER_WORK_FN_ADDR]], align 8
; NVPTX-NEXT:    [[WORKER_WORK_FN_ADDR_CAST:%.*]] = bitcast ptr [[WORKER_WORK_FN]] to ptr
; NVPTX-NEXT:    [[WORKER_IS_DONE:%.*]] = icmp eq ptr [[WORKER_WORK_FN]], null
; NVPTX-NEXT:    br i1 [[WORKER_IS_DONE]], label [[WORKER_STATE_MACHINE_FINISHED]], label [[WORKER_STATE_MACHINE_IS_ACTIVE_CHECK:%.*]]
; NVPTX:       worker_state_machine.finished:
; NVPTX-NEXT:    ret void
; NVPTX:       worker_state_machine.is_active.check:
; NVPTX-NEXT:    br i1 [[WORKER_IS_ACTIVE]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_FALLBACK_EXECUTE:%.*]], label [[WORKER_STATE_MACHINE_DONE_BARRIER:%.*]]
; NVPTX:       worker_state_machine.parallel_region.fallback.execute:
; NVPTX-NEXT:    call void [[WORKER_WORK_FN_ADDR_CAST]](i16 0, i32 [[TMP0]])
; NVPTX-NEXT:    br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END:%.*]]
; NVPTX:       worker_state_machine.parallel_region.end:
; NVPTX-NEXT:    call void @__kmpc_kernel_end_parallel()
; NVPTX-NEXT:    br label [[WORKER_STATE_MACHINE_DONE_BARRIER]]
; NVPTX:       worker_state_machine.done.barrier:
; NVPTX-NEXT:    call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]])
; NVPTX-NEXT:    br label [[WORKER_STATE_MACHINE_BEGIN]]
; NVPTX:       thread.user_code.check:
; NVPTX-NEXT:    [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
; NVPTX-NEXT:    br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[COMMON_RET:%.*]]
; NVPTX:       common.ret:
; NVPTX-NEXT:    ret void
; NVPTX:       user_code.entry:
; NVPTX-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR10]]
; NVPTX-NEXT:    store i32 0, ptr [[DOTZERO_ADDR]], align 4
; NVPTX-NEXT:    store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4, !tbaa [[TBAA17]]
; NVPTX-NEXT:    [[TMP2:%.*]] = icmp eq ptr [[FP]], @__omp_outlined_spmd_amenable_external
; NVPTX-NEXT:    br i1 [[TMP2]], label [[TMP3:%.*]], label [[TMP4:%.*]]
; NVPTX:       3:
; NVPTX-NEXT:    call void @__omp_outlined_spmd_amenable_external(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]])
; NVPTX-NEXT:    br label [[TMP7:%.*]]
; NVPTX:       4:
; NVPTX-NEXT:    br i1 true, label [[TMP5:%.*]], label [[TMP6:%.*]]
; NVPTX:       5:
; NVPTX-NEXT:    call void @__omp_outlined_not_spmd_amenable_external(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]])
; NVPTX-NEXT:    br label [[TMP7]]
; NVPTX:       6:
; NVPTX-NEXT:    unreachable
; NVPTX:       7:
; NVPTX-NEXT:    call void @__kmpc_target_deinit()
; NVPTX-NEXT:    br label [[COMMON_RET]]
;
entry:
  %.zero.addr = alloca i32, align 4
  %.threadid_temp. = alloca i32, align 4
  %0 = call i32 @__kmpc_target_init(ptr @spmd_and_non_spmd_callees_metadata_kernel_environment, ptr null)
  %exec_user_code = icmp eq i32 %0, -1
  br i1 %exec_user_code, label %user_code.entry, label %common.ret

common.ret:                                       ; preds = %entry, %user_code.entry
  ret void

user_code.entry:                                  ; preds = %entry
  %1 = call i32 @__kmpc_global_thread_num(ptr @1)
  store i32 0, ptr %.zero.addr, align 4
  store i32 %1, ptr %.threadid_temp., align 4, !tbaa !18
  call void %fp(ptr %.threadid_temp., ptr %.zero.addr), !callees !32
  call void @__kmpc_target_deinit()
  br label %common.ret
}

; Function Attrs: alwaysinline convergent norecurse nounwind
define void @__omp_outlined_spmd_amenable_external(ptr noalias %.global_tid., ptr noalias %.bound_tid.) {
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined_spmd_amenable_external
; AMDGPU-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) {
; AMDGPU-NEXT:  entry:
; AMDGPU-NEXT:    br label [[FOR_COND:%.*]]
; AMDGPU:       for.cond:
; AMDGPU-NEXT:    [[I_0:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY:%.*]] ]
; AMDGPU-NEXT:    [[CMP:%.*]] = icmp slt i32 [[I_0]], 100
; AMDGPU-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP:%.*]]
; AMDGPU:       for.cond.cleanup:
; AMDGPU-NEXT:    call void @spmd_amenable() #[[ATTR6]]
; AMDGPU-NEXT:    ret void
; AMDGPU:       for.body:
; AMDGPU-NEXT:    [[TMP0:%.*]] = load i32, ptr [[DOTGLOBAL_TID_]], align 4, !tbaa [[TBAA17]]
; AMDGPU-NEXT:    call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__7, ptr @__omp_outlined__7_wrapper, ptr undef, i64 0)
; AMDGPU-NEXT:    [[INC]] = add nsw i32 [[I_0]], 1
; AMDGPU-NEXT:    br label [[FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]]
;
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined_spmd_amenable_external
; NVPTX-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) {
; NVPTX-NEXT:  entry:
; NVPTX-NEXT:    br label [[FOR_COND:%.*]]
; NVPTX:       for.cond:
; NVPTX-NEXT:    [[I_0:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY:%.*]] ]
; NVPTX-NEXT:    [[CMP:%.*]] = icmp slt i32 [[I_0]], 100
; NVPTX-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP:%.*]]
; NVPTX:       for.cond.cleanup:
; NVPTX-NEXT:    call void @spmd_amenable() #[[ATTR6]]
; NVPTX-NEXT:    ret void
; NVPTX:       for.body:
; NVPTX-NEXT:    [[TMP0:%.*]] = load i32, ptr [[DOTGLOBAL_TID_]], align 4, !tbaa [[TBAA17]]
; NVPTX-NEXT:    call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__7, ptr @__omp_outlined__7_wrapper, ptr undef, i64 0)
; NVPTX-NEXT:    [[INC]] = add nsw i32 [[I_0]], 1
; NVPTX-NEXT:    br label [[FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]]
;
entry:
  br label %for.cond

for.cond:                                         ; preds = %for.body, %entry
  %i.0 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
  %cmp = icmp slt i32 %i.0, 100
  br i1 %cmp, label %for.body, label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %for.cond
  call void @spmd_amenable() #10
  ret void

for.body:                                         ; preds = %for.cond
  %0 = load i32, ptr %.global_tid., align 4, !tbaa !18
  call void @__kmpc_parallel_51(ptr @1, i32 %0, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__7, ptr @__omp_outlined__7_wrapper, ptr undef, i64 0)
  %inc = add nsw i32 %i.0, 1
  br label %for.cond, !llvm.loop !29
}

; Function Attrs: alwaysinline convergent norecurse nounwind
define internal void @__omp_outlined__7(ptr noalias %.global_tid., ptr noalias %.bound_tid., ptr nonnull align 4 dereferenceable(4) %x) {
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__7
; AMDGPU-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], ptr nonnull align 4 dereferenceable(4) [[X:%.*]]) {
; AMDGPU-NEXT:  entry:
; AMDGPU-NEXT:    ret void
;
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__7
; NVPTX-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], ptr nonnull align 4 dereferenceable(4) [[X:%.*]]) {
; NVPTX-NEXT:  entry:
; NVPTX-NEXT:    ret void
;
entry:
  ret void
}

; Function Attrs: convergent norecurse nounwind
define internal void @__omp_outlined__7_wrapper(i16 zeroext %0, i32 %1) #3 {
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__7_wrapper
; AMDGPU-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR3:[0-9]+]] {
; AMDGPU-NEXT:  entry:
; AMDGPU-NEXT:    ret void
;
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__7_wrapper
; NVPTX-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR3:[0-9]+]] {
; NVPTX-NEXT:  entry:
; NVPTX-NEXT:    ret void
;
entry:
  ret void
}

; Function Attrs: alwaysinline convergent norecurse nounwind
define void @__omp_outlined_not_spmd_amenable_external(ptr noalias %.global_tid., ptr noalias %.bound_tid.) {
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined_not_spmd_amenable_external
; AMDGPU-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) {
; AMDGPU-NEXT:    call void @__omp_outlined_not_spmd_amenable(ptr [[DOTGLOBAL_TID_]], ptr [[DOTBOUND_TID_]])
; AMDGPU-NEXT:    ret void
;
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined_not_spmd_amenable_external
; NVPTX-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) {
; NVPTX-NEXT:    call void @__omp_outlined_not_spmd_amenable(ptr [[DOTGLOBAL_TID_]], ptr [[DOTBOUND_TID_]])
; NVPTX-NEXT:    ret void
;
  call void @__omp_outlined_not_spmd_amenable(ptr %.global_tid., ptr %.bound_tid.)
  ret void
}

define internal void @__omp_outlined_not_spmd_amenable(ptr noalias %.global_tid., ptr noalias %.bound_tid.) {
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined_not_spmd_amenable
; AMDGPU-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) {
; AMDGPU-NEXT:  entry:
; AMDGPU-NEXT:    call void @unknown() #[[ATTR7]]
; AMDGPU-NEXT:    ret void
;
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined_not_spmd_amenable
; NVPTX-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) {
; NVPTX-NEXT:  entry:
; NVPTX-NEXT:    call void @unknown() #[[ATTR7]]
; NVPTX-NEXT:    ret void
;
entry:
  call void @unknown() #11
  ret void
}

; Function Attrs: nosync nounwind
declare void @__kmpc_free_shared(ptr nocapture, i64) #8

; Function Attrs: nofree nosync nounwind
declare ptr @__kmpc_alloc_shared(i64) #7

; Function Attrs: convergent
declare void @use(ptr nocapture) #5

; Function Attrs: convergent
declare void @unknown() #2
declare void @unknowni32p(ptr) #2

; Function Attrs: argmemonly mustprogress nofree nosync nounwind willreturn
declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1

; Make this a weak definition so the custom state machine rewriting is still
; applied, but the body cannot be used in the reasoning.
define weak i32 @__kmpc_target_init(ptr, ptr) {
; AMDGPU-LABEL: define {{[^@]+}}@__kmpc_target_init
; AMDGPU-SAME: (ptr [[TMP0:%.*]], ptr [[TMP1:%.*]]) {
; AMDGPU-NEXT:    ret i32 0
;
; NVPTX-LABEL: define {{[^@]+}}@__kmpc_target_init
; NVPTX-SAME: (ptr [[TMP0:%.*]], ptr [[TMP1:%.*]]) {
; NVPTX-NEXT:    ret i32 0
;
  ret i32 0
}

declare void @__kmpc_get_shared_variables(ptr)

; Function Attrs: alwaysinline
declare void @__kmpc_parallel_51(ptr, i32, i32, i32, i32, ptr, ptr, ptr, i64) #4

; Function Attrs: argmemonly mustprogress nofree nosync nounwind willreturn
declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1

; Function Attrs: convergent
declare void @spmd_amenable() #5

; Function Attrs: nounwind
declare i32 @__kmpc_global_thread_num(ptr) #6

declare void @__kmpc_target_deinit()

; Function Attrs: alwaysinline convergent norecurse nounwind
define internal void @__omp_outlined__9(ptr noalias %.global_tid., ptr noalias %.bound_tid.) {
  call void @unknown() #11
  ret void
}

; Function Attrs: convergent norecurse nounwind
define internal void @__omp_outlined__9_wrapper(i16 zeroext %0, i32 %1) #3 {
  %.addr1 = alloca i32, align 4
  %.zero.addr = alloca i32, align 4
  %global_args = alloca ptr, align 8
  store i32 %1, ptr %.addr1, align 4, !tbaa !18
  store i32 0, ptr %.zero.addr, align 4
  call void @__kmpc_get_shared_variables(ptr %global_args)
  call void @__omp_outlined__9(ptr %.addr1, ptr %.zero.addr) #6
  ret void
}

declare fastcc i32 @__kmpc_get_hardware_thread_id_in_block()

attributes #0 = { alwaysinline convergent norecurse nounwind "kernel" }
attributes #1 = { argmemonly mustprogress nofree nosync nounwind willreturn }
attributes #2 = { convergent }
attributes #3 = { convergent norecurse nounwind }
attributes #4 = { alwaysinline }
attributes #5 = { convergent "llvm.assume"="ompx_spmd_amenable" }
attributes #6 = { nounwind }
attributes #7 = { nofree nosync nounwind }
attributes #8 = { nosync nounwind }
attributes #9 = { alwaysinline convergent nounwind }
attributes #10 = { convergent "llvm.assume"="ompx_spmd_amenable" }
attributes #11 = { convergent }

!omp_offload.info = !{!0, !1, !2, !3, !4, !5}
!nvvm.annotations = !{!6, !7, !8, !9, !10, !11}
!llvm.module.flags = !{!12, !13, !14, !15, !16}
!llvm.ident = !{!17}

!0 = !{i32 0, i32 64770, i32 541341486, !"", i32 74, i32 5}
!1 = !{i32 0, i32 64770, i32 541341486, !"sequential_loop_to_stack_var", i32 20, i32 1}
!2 = !{i32 0, i32 64770, i32 541341486, !"sequential_loop", i32 5, i32 0}
!3 = !{i32 0, i32 64770, i32 541341486, !"sequential_loop_to_shared_var", i32 35, i32 2}
!4 = !{i32 0, i32 64770, i32 541341486, !"do_not_spmdize_target", i32 65, i32 4}
!5 = !{i32 0, i32 64770, i32 541341486, !"sequential_loop_to_shared_var_guarded", i32 50, i32 3}
!6 = !{ptr @spmd_callees, !"kernel", i32 1}
!7 = !{ptr @spmd_and_non_spmd_callees_metadata, !"kernel", i32 1}
!8 = !{ptr @spmd_and_non_spmd_callee, !"kernel", i32 1}
!9 = !{ptr @spmd_callees_metadata, !"kernel", i32 1}
!12 = !{i32 1, !"wchar_size", i32 4}
!13 = !{i32 7, !"openmp", i32 50}
!14 = !{i32 7, !"openmp-device", i32 50}
!15 = !{i32 8, !"PIC Level", i32 2}
!16 = !{i32 7, !"frame-pointer", i32 2}
!17 = !{!"clang version 14.0.0"}
!18 = !{!19, !19, i64 0}
!19 = !{!"int", !20, i64 0}
!20 = !{!"omnipotent char", !21, i64 0}
!21 = !{!"Simple C/C++ TBAA"}
!22 = distinct !{!22, !23, !24}
!23 = !{!"llvm.loop.mustprogress"}
!24 = !{!"llvm.loop.unroll.disable"}
!25 = distinct !{!25, !23, !24}
!26 = !{!27, !27, i64 0}
!27 = !{!"any pointer", !20, i64 0}
!28 = distinct !{!28, !23, !24}
!29 = distinct !{!29, !23, !24}
!30 = !{!31, !27, i64 0}
!31 = !{ptr @__omp_outlined_spmd_amenable_external, ptr @__omp_outlined_not_spmd_amenable}
!32 = !{ptr @__omp_outlined_spmd_amenable_external, ptr @__omp_outlined_not_spmd_amenable_external}
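;
; Both !callees lists name a non-SPMD-amenable candidate, but in !31 that
; candidate (@__omp_outlined_not_spmd_amenable) is internal and never has its
; address taken outside metadata, so it can be ruled out and
; @spmd_callees_metadata is still SPMD-ized. !32 names the external wrapper
; instead, which cannot be excluded.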
;.
; AMDGPU: attributes #[[ATTR0]] = { alwaysinline convergent norecurse nounwind "kernel" }
; AMDGPU: attributes #[[ATTR1]] = { norecurse }
; AMDGPU: attributes #[[ATTR2]] = { convergent norecurse nounwind }
; AMDGPU: attributes #[[ATTR3]] = { norecurse nounwind }
; AMDGPU: attributes #[[ATTR4:[0-9]+]] = { nosync nounwind }
; AMDGPU: attributes #[[ATTR5:[0-9]+]] = { nofree nosync nounwind allocsize(0) }
; AMDGPU: attributes #[[ATTR6]] = { convergent "llvm.assume"="ompx_spmd_amenable" }
; AMDGPU: attributes #[[ATTR7]] = { convergent }
; AMDGPU: attributes #[[ATTR8:[0-9]+]] = { nocallback nofree nosync nounwind willreturn memory(argmem: readwrite) }
; AMDGPU: attributes #[[ATTR9:[0-9]+]] = { alwaysinline }
; AMDGPU: attributes #[[ATTR10]] = { nounwind }
; AMDGPU: attributes #[[ATTR11:[0-9]+]] = { convergent nounwind }
;.
; NVPTX: attributes #[[ATTR0]] = { alwaysinline convergent norecurse nounwind "kernel" }
; NVPTX: attributes #[[ATTR1]] = { norecurse }
; NVPTX: attributes #[[ATTR2]] = { convergent norecurse nounwind }
; NVPTX: attributes #[[ATTR3]] = { norecurse nounwind }
; NVPTX: attributes #[[ATTR4:[0-9]+]] = { nosync nounwind }
; NVPTX: attributes #[[ATTR5:[0-9]+]] = { nofree nosync nounwind allocsize(0) }
; NVPTX: attributes #[[ATTR6]] = { convergent "llvm.assume"="ompx_spmd_amenable" }
; NVPTX: attributes #[[ATTR7]] = { convergent }
; NVPTX: attributes #[[ATTR8:[0-9]+]] = { nocallback nofree nosync nounwind willreturn memory(argmem: readwrite) }
; NVPTX: attributes #[[ATTR9:[0-9]+]] = { alwaysinline }
; NVPTX: attributes #[[ATTR10]] = { nounwind }
; NVPTX: attributes #[[ATTR11:[0-9]+]] = { convergent nounwind }
;.
; AMDGPU: [[META0:![0-9]+]] = !{i32 0, i32 64770, i32 541341486, !"", i32 74, i32 5}
; AMDGPU: [[META1:![0-9]+]] = !{i32 0, i32 64770, i32 541341486, !"sequential_loop_to_stack_var", i32 20, i32 1}
; AMDGPU: [[META2:![0-9]+]] = !{i32 0, i32 64770, i32 541341486, !"sequential_loop", i32 5, i32 0}
; AMDGPU: [[META3:![0-9]+]] = !{i32 0, i32 64770, i32 541341486, !"sequential_loop_to_shared_var", i32 35, i32 2}
; AMDGPU: [[META4:![0-9]+]] = !{i32 0, i32 64770, i32 541341486, !"do_not_spmdize_target", i32 65, i32 4}
; AMDGPU: [[META5:![0-9]+]] = !{i32 0, i32 64770, i32 541341486, !"sequential_loop_to_shared_var_guarded", i32 50, i32 3}
; AMDGPU: [[META6:![0-9]+]] = !{ptr @spmd_callees, !"kernel", i32 1}
; AMDGPU: [[META7:![0-9]+]] = !{ptr @spmd_and_non_spmd_callees_metadata, !"kernel", i32 1}
; AMDGPU: [[META8:![0-9]+]] = !{ptr @spmd_and_non_spmd_callee, !"kernel", i32 1}
; AMDGPU: [[META9:![0-9]+]] = !{ptr @spmd_callees_metadata, !"kernel", i32 1}
; AMDGPU: [[META10:![0-9]+]] = !{i32 1}
; AMDGPU: [[META11:![0-9]+]] = !{i32 1, !"wchar_size", i32 4}
; AMDGPU: [[META12:![0-9]+]] = !{i32 7, !"openmp", i32 50}
; AMDGPU: [[META13:![0-9]+]] = !{i32 7, !"openmp-device", i32 50}
; AMDGPU: [[META14:![0-9]+]] = !{i32 8, !"PIC Level", i32 2}
; AMDGPU: [[META15:![0-9]+]] = !{i32 7, !"frame-pointer", i32 2}
; AMDGPU: [[META16:![0-9]+]] = !{!"clang version 14.0.0"}
; AMDGPU: [[TBAA17]] = !{!18, !18, i64 0}
; AMDGPU: [[META18:![0-9]+]] = !{!"int", !19, i64 0}
; AMDGPU: [[META19:![0-9]+]] = !{!"omnipotent char", !20, i64 0}
; AMDGPU: [[META20:![0-9]+]] = !{!"Simple C/C++ TBAA"}
; AMDGPU: [[LOOP21]] = distinct !{!21, !22, !23}
; AMDGPU: [[META22:![0-9]+]] = !{!"llvm.loop.mustprogress"}
; AMDGPU: [[META23:![0-9]+]] = !{!"llvm.loop.unroll.disable"}
; AMDGPU: [[LOOP24]] = distinct !{!24, !22, !23}
; AMDGPU: [[TBAA25]] = !{!26, !26, i64 0}
; AMDGPU: [[META26:![0-9]+]] = !{!"any pointer", !19, i64 0}
; AMDGPU: [[LOOP27]] = distinct !{!27, !22, !23}
; AMDGPU: [[LOOP28]] = distinct !{!28, !22, !23}
;.
; NVPTX: [[META0:![0-9]+]] = !{i32 0, i32 64770, i32 541341486, !"", i32 74, i32 5}
; NVPTX: [[META1:![0-9]+]] = !{i32 0, i32 64770, i32 541341486, !"sequential_loop_to_stack_var", i32 20, i32 1}
; NVPTX: [[META2:![0-9]+]] = !{i32 0, i32 64770, i32 541341486, !"sequential_loop", i32 5, i32 0}
; NVPTX: [[META3:![0-9]+]] = !{i32 0, i32 64770, i32 541341486, !"sequential_loop_to_shared_var", i32 35, i32 2}
; NVPTX: [[META4:![0-9]+]] = !{i32 0, i32 64770, i32 541341486, !"do_not_spmdize_target", i32 65, i32 4}
; NVPTX: [[META5:![0-9]+]] = !{i32 0, i32 64770, i32 541341486, !"sequential_loop_to_shared_var_guarded", i32 50, i32 3}
; NVPTX: [[META6:![0-9]+]] = !{ptr @spmd_callees, !"kernel", i32 1}
; NVPTX: [[META7:![0-9]+]] = !{ptr @spmd_and_non_spmd_callees_metadata, !"kernel", i32 1}
; NVPTX: [[META8:![0-9]+]] = !{ptr @spmd_and_non_spmd_callee, !"kernel", i32 1}
; NVPTX: [[META9:![0-9]+]] = !{ptr @spmd_callees_metadata, !"kernel", i32 1}
; NVPTX: [[META10:![0-9]+]] = !{i32 1}
; NVPTX: [[META11:![0-9]+]] = !{i32 1, !"wchar_size", i32 4}
; NVPTX: [[META12:![0-9]+]] = !{i32 7, !"openmp", i32 50}
; NVPTX: [[META13:![0-9]+]] = !{i32 7, !"openmp-device", i32 50}
; NVPTX: [[META14:![0-9]+]] = !{i32 8, !"PIC Level", i32 2}
; NVPTX: [[META15:![0-9]+]] = !{i32 7, !"frame-pointer", i32 2}
; NVPTX: [[META16:![0-9]+]] = !{!"clang version 14.0.0"}
; NVPTX: [[TBAA17]] = !{!18, !18, i64 0}
; NVPTX: [[META18:![0-9]+]] = !{!"int", !19, i64 0}
; NVPTX: [[META19:![0-9]+]] = !{!"omnipotent char", !20, i64 0}
; NVPTX: [[META20:![0-9]+]] = !{!"Simple C/C++ TBAA"}
; NVPTX: [[LOOP21]] = distinct !{!21, !22, !23}
; NVPTX: [[META22:![0-9]+]] = !{!"llvm.loop.mustprogress"}
; NVPTX: [[META23:![0-9]+]] = !{!"llvm.loop.unroll.disable"}
; NVPTX: [[LOOP24]] = distinct !{!24, !22, !23}
; NVPTX: [[TBAA25]] = !{!26, !26, i64 0}
; NVPTX: [[META26:![0-9]+]] = !{!"any pointer", !19, i64 0}
; NVPTX: [[LOOP27]] = distinct !{!27, !22, !23}
; NVPTX: [[LOOP28]] = distinct !{!28, !22, !23}
;.