1 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --check-attributes --check-globals --include-generated-funcs
2 ; RUN: opt --mtriple=amdgcn-amd-amdhsa --data-layout=A5 -S -passes=openmp-opt < %s | FileCheck %s --check-prefix=AMDGPU1
3 ; RUN: opt --mtriple=nvptx64-- -S -passes=openmp-opt < %s | FileCheck %s --check-prefix=NVPTX1
4 ; RUN: opt --mtriple=amdgcn-amd-amdhsa --data-layout=A5 -openmp-opt-disable-state-machine-rewrite -S -passes=openmp-opt < %s | FileCheck %s --check-prefix=AMDGPU2
5 ; RUN: opt --mtriple=amdgcn-amd-amdhsa --data-layout=A5 -S -passes=openmp-opt-postlink < %s | FileCheck %s --check-prefix=AMDGPU3
6 ; RUN: opt --mtriple=nvptx64-- -openmp-opt-disable-state-machine-rewrite -S -passes=openmp-opt < %s | FileCheck %s --check-prefix=NVPTX2
7 ; RUN: opt --mtriple=nvptx64-- -S -passes=openmp-opt-postlink < %s | FileCheck %s --check-prefix=NVPTX3
12 ;; void unknown_pure(void) __attribute__((pure));
13 ;; [[omp::assume("omp_no_openmp")]] void unknown_no_openmp(void);
16 ;; void no_parallel_region_in_here(void) {
21 ;; void no_state_machine_needed() {
22 ;; #pragma omp target teams
24 ;; no_parallel_region_in_here();
25 ;; unknown_no_openmp();
29 ;; void simple_state_machine() {
30 ;; #pragma omp target teams
32 ;; unknown_no_openmp();
33 ;; #pragma omp parallel
35 ;; no_parallel_region_in_here();
36 ;; #pragma omp parallel
41 ;; void simple_state_machine_interprocedural_after(void);
42 ;; void simple_state_machine_interprocedural_before(void) {
43 ;; #pragma omp parallel
46 ;; void simple_state_machine_interprocedural() {
47 ;; #pragma omp target teams
49 ;; unknown_no_openmp();
50 ;; simple_state_machine_interprocedural_before();
51 ;; no_parallel_region_in_here();
52 ;; #pragma omp parallel
54 ;; simple_state_machine_interprocedural_after();
57 ;; void simple_state_machine_interprocedural_after(void) {
58 ;; #pragma omp parallel
62 ;; void simple_state_machine_with_fallback() {
63 ;; #pragma omp target teams
65 ;; #pragma omp parallel
68 ;; #pragma omp parallel
73 ;; void simple_state_machine_no_openmp_attr() {
74 ;; #pragma omp target teams
76 ;; #pragma omp parallel
78 ;; unknown_no_openmp();
79 ;; #pragma omp parallel
84 ;; void simple_state_machine_pure() {
85 ;; #pragma omp target teams
87 ;; unknown_no_openmp();
88 ;; #pragma omp parallel
91 ;; #pragma omp parallel
96 ;; int omp_get_thread_num();
97 ;; void simple_state_machine_interprocedural_nested_recursive_after(int);
98 ;; void simple_state_machine_interprocedural_nested_recursive_after_after(void);
99 ;; void simple_state_machine_interprocedural_nested_recursive() {
100 ;; #pragma omp target teams
102 ;; simple_state_machine_interprocedural_nested_recursive_after(
103 ;; omp_get_thread_num());
107 ;; void simple_state_machine_interprocedural_nested_recursive_after(int a) {
110 ;; simple_state_machine_interprocedural_nested_recursive_after(a - 1);
111 ;; simple_state_machine_interprocedural_nested_recursive_after_after();
113 ;; void simple_state_machine_interprocedural_nested_recursive_after_after(void) {
114 ;; #pragma omp parallel
118 ;; __attribute__((weak)) void weak_callee_empty(void) {}
119 ;; void no_state_machine_weak_callee() {
120 ;; #pragma omp target teams
121 ;; { weak_callee_empty(); }
124 %struct.ident_t = type { i32, i32, i32, i32, ptr }
125 %struct.KernelEnvironmentTy = type { %struct.ConfigurationEnvironmentTy, ptr, ptr }
126 %struct.ConfigurationEnvironmentTy = type { i8, i8, i8, i32, i32, i32, i32, i32, i32 }
128 @0 = private unnamed_addr constant [23 x i8] c";unknown;unknown;0;0;;\00", align 1
129 @1 = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 0, ptr @0 }, align 8
130 @2 = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 2, i32 0, ptr @0 }, align 8
131 @G = external global i32, align 4
132 @3 = private unnamed_addr constant %struct.ident_t { i32 0, i32 322, i32 2, i32 0, ptr @0 }, align 8
133 @__omp_offloading_14_a36502b_no_state_machine_needed_l14_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @1, ptr null }
134 @__omp_offloading_14_a36502b_simple_state_machine_l22_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @1, ptr null }
135 @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @1, ptr null }
136 @__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @1, ptr null }
137 @__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @1, ptr null }
138 @__omp_offloading_14_a36502b_simple_state_machine_pure_l77_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @1, ptr null }
139 @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @1, ptr null }
140 @__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @1, ptr null }
; Target-region kernel that needs NO state machine: the region only calls a
; known no-parallel callee and an "omp_no_openmp"-assumed callee, so OpenMPOpt
; can prove no worker threads ever execute a parallel region.
142 define weak void @__omp_offloading_14_a36502b_no_state_machine_needed_l14(ptr %dyn) #0 {
144 %.zero.addr = alloca i32, align 4
145 %.threadid_temp. = alloca i32, align 4
146 store i32 0, ptr %.zero.addr, align 4
; __kmpc_target_init returns -1 on the thread that should run the user code
; (generic-mode main thread); all other threads branch to worker.exit.
147 %0 = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_no_state_machine_needed_l14_kernel_environment, ptr %dyn)
148 %exec_user_code = icmp eq i32 %0, -1
149 br i1 %exec_user_code, label %user_code.entry, label %worker.exit
151 user_code.entry: ; preds = %entry
152 %1 = call i32 @__kmpc_global_thread_num(ptr @1)
153 store i32 %1, ptr %.threadid_temp., align 4
154 call void @__omp_outlined__(ptr %.threadid_temp., ptr %.zero.addr) #3
155 call void @__kmpc_target_deinit()
158 worker.exit: ; preds = %entry
162 ; Make it a declaration so we will *not* apply custom state machine rewriting and wait for LTO.
; The kernel-environment variant of __kmpc_target_init takes two pointers
; (kernel environment, dynamic environment); every call site in this file
; passes both (e.g. `call i32 @__kmpc_target_init(ptr @..._kernel_environment,
; ptr %dyn)`), so the declaration must be (ptr, ptr) to match.
163 declare i32 @__kmpc_target_init(ptr, ptr);
; Outlined body of the no_state_machine_needed target region: spills the two
; tid pointers (unused afterwards) and calls only parallel-free callees.
165 define internal void @__omp_outlined__(ptr noalias %.global_tid., ptr noalias %.bound_tid.) #0 {
167 %.global_tid..addr = alloca ptr, align 8
168 %.bound_tid..addr = alloca ptr, align 8
169 store ptr %.global_tid., ptr %.global_tid..addr, align 8
170 store ptr %.bound_tid., ptr %.bound_tid..addr, align 8
; #7 = convergent; callee is known to contain no parallel region.
171 call void @no_parallel_region_in_here() #7
; #8 carries the "llvm.assume"="omp_no_openmp" assumption.
172 call void @unknown_no_openmp() #8
; Helper containing an `omp single` + barrier but no parallel region; used by
; the tests to prove interprocedurally that no state machine is required.
176 define hidden void @no_parallel_region_in_here() #1 {
178 %0 = call i32 @__kmpc_global_thread_num(ptr @2)
; __kmpc_single returns non-zero on the single thread that enters the region.
179 %1 = call i32 @__kmpc_single(ptr @2, i32 %0)
180 %2 = icmp ne i32 %1, 0
181 br i1 %2, label %omp_if.then, label %omp_if.end
183 omp_if.then: ; preds = %entry
184 store i32 0, ptr @G, align 4
185 call void @__kmpc_end_single(ptr @2, i32 %0)
188 omp_if.end: ; preds = %omp_if.then, %entry
; Implicit barrier at the end of the single construct (ident @3 = barrier loc).
189 call void @__kmpc_barrier(ptr @3, i32 %0)
193 declare void @unknown_no_openmp() #2
195 declare i32 @__kmpc_global_thread_num(ptr) #3
197 declare void @__kmpc_target_deinit()
199 define weak void @__omp_offloading_14_a36502b_simple_state_machine_l22(ptr %dyn) #0 {
201 %.zero.addr = alloca i32, align 4
202 %.threadid_temp. = alloca i32, align 4
203 store i32 0, ptr %.zero.addr, align 4
204 %0 = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_l22_kernel_environment, ptr %dyn)
205 %exec_user_code = icmp eq i32 %0, -1
206 br i1 %exec_user_code, label %user_code.entry, label %worker.exit
208 user_code.entry: ; preds = %entry
209 %1 = call i32 @__kmpc_global_thread_num(ptr @1)
210 store i32 %1, ptr %.threadid_temp., align 4
211 call void @__omp_outlined__1(ptr %.threadid_temp., ptr %.zero.addr) #3
212 call void @__kmpc_target_deinit()
215 worker.exit: ; preds = %entry
; Outlined body of the simple_state_machine target region: two parallel
; regions (outlined__2 and outlined__3) with known callees in between, so the
; rewritten state machine can enumerate the parallel entry points exactly.
219 define internal void @__omp_outlined__1(ptr noalias %.global_tid., ptr noalias %.bound_tid.) #0 {
221 %.global_tid..addr = alloca ptr, align 8
222 %.bound_tid..addr = alloca ptr, align 8
; Zero-length capture arrays: neither parallel region captures any variables.
223 %captured_vars_addrs = alloca [0 x ptr], align 8
224 %captured_vars_addrs1 = alloca [0 x ptr], align 8
225 store ptr %.global_tid., ptr %.global_tid..addr, align 8
226 store ptr %.bound_tid., ptr %.bound_tid..addr, align 8
227 call void @unknown_no_openmp() #8
228 %0 = load ptr, ptr %.global_tid..addr, align 8
229 %1 = load i32, ptr %0, align 4
; __kmpc_parallel_51(loc, tid, if_expr=1, num_threads=-1, proc_bind=-1, fn, wrapper, args, nargs)
230 call void @__kmpc_parallel_51(ptr @1, i32 %1, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__2, ptr @__omp_outlined__2_wrapper, ptr %captured_vars_addrs, i64 0)
231 call void @no_parallel_region_in_here() #7
232 call void @__kmpc_parallel_51(ptr @1, i32 %1, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__3, ptr @__omp_outlined__3_wrapper, ptr %captured_vars_addrs1, i64 0)
236 define internal void @__omp_outlined__2(ptr noalias %.global_tid., ptr noalias %.bound_tid.) #0 {
238 %.global_tid..addr = alloca ptr, align 8
239 %.bound_tid..addr = alloca ptr, align 8
240 store ptr %.global_tid., ptr %.global_tid..addr, align 8
241 store ptr %.bound_tid., ptr %.bound_tid..addr, align 8
246 declare void @p0() #4
; Worker-thread trampoline for parallel region __omp_outlined__2: receives the
; (parallelism level, tid) pair the runtime hands to workers, fetches the
; shared-variable block, and invokes the outlined function. All of the
; __omp_outlined__*_wrapper functions in this file follow this exact shape.
248 define internal void @__omp_outlined__2_wrapper(i16 zeroext %0, i32 %1) #0 {
250 %.addr = alloca i16, align 2
251 %.addr1 = alloca i32, align 4
252 %.zero.addr = alloca i32, align 4
253 %global_args = alloca ptr, align 8
254 store i32 0, ptr %.zero.addr, align 4
255 store i16 %0, ptr %.addr, align 2
256 store i32 %1, ptr %.addr1, align 4
; No captured variables here (i64 0 at the call site), result is unused.
257 call void @__kmpc_get_shared_variables(ptr %global_args)
258 call void @__omp_outlined__2(ptr %.addr1, ptr %.zero.addr) #3
262 declare void @__kmpc_get_shared_variables(ptr)
264 declare void @__kmpc_parallel_51(ptr, i32, i32, i32, i32, ptr, ptr, ptr, i64)
266 define internal void @__omp_outlined__3(ptr noalias %.global_tid., ptr noalias %.bound_tid.) #0 {
268 %.global_tid..addr = alloca ptr, align 8
269 %.bound_tid..addr = alloca ptr, align 8
270 store ptr %.global_tid., ptr %.global_tid..addr, align 8
271 store ptr %.bound_tid., ptr %.bound_tid..addr, align 8
276 declare void @p1() #4
278 define internal void @__omp_outlined__3_wrapper(i16 zeroext %0, i32 %1) #0 {
280 %.addr = alloca i16, align 2
281 %.addr1 = alloca i32, align 4
282 %.zero.addr = alloca i32, align 4
283 %global_args = alloca ptr, align 8
284 store i32 0, ptr %.zero.addr, align 4
285 store i16 %0, ptr %.addr, align 2
286 store i32 %1, ptr %.addr1, align 4
287 call void @__kmpc_get_shared_variables(ptr %global_args)
288 call void @__omp_outlined__3(ptr %.addr1, ptr %.zero.addr) #3
292 define weak void @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39(ptr %dyn) #0 {
294 %.zero.addr = alloca i32, align 4
295 %.threadid_temp. = alloca i32, align 4
296 store i32 0, ptr %.zero.addr, align 4
297 %0 = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39_kernel_environment, ptr %dyn)
298 %exec_user_code = icmp eq i32 %0, -1
299 br i1 %exec_user_code, label %user_code.entry, label %worker.exit
301 user_code.entry: ; preds = %entry
302 %1 = call i32 @__kmpc_global_thread_num(ptr @1)
303 store i32 %1, ptr %.threadid_temp., align 4
304 call void @__omp_outlined__4(ptr %.threadid_temp., ptr %.zero.addr) #3
305 call void @__kmpc_target_deinit()
308 worker.exit: ; preds = %entry
312 define internal void @__omp_outlined__4(ptr noalias %.global_tid., ptr noalias %.bound_tid.) #0 {
314 %.global_tid..addr = alloca ptr, align 8
315 %.bound_tid..addr = alloca ptr, align 8
316 %captured_vars_addrs = alloca [0 x ptr], align 8
317 store ptr %.global_tid., ptr %.global_tid..addr, align 8
318 store ptr %.bound_tid., ptr %.bound_tid..addr, align 8
319 call void @unknown_no_openmp() #8
320 call void @simple_state_machine_interprocedural_before() #7
321 call void @no_parallel_region_in_here() #7
322 %0 = load ptr, ptr %.global_tid..addr, align 8
323 %1 = load i32, ptr %0, align 4
324 call void @__kmpc_parallel_51(ptr @1, i32 %1, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__5, ptr @__omp_outlined__5_wrapper, ptr %captured_vars_addrs, i64 0)
325 call void @simple_state_machine_interprocedural_after() #7
; Callee of the interprocedural test kernel that itself launches a parallel
; region (outlined__17); forces OpenMPOpt to discover parallel entry points
; through the call graph, not just in the kernel's own outlined body.
329 define hidden void @simple_state_machine_interprocedural_before() #1 {
331 %captured_vars_addrs = alloca [0 x ptr], align 8
332 %0 = call i32 @__kmpc_global_thread_num(ptr @2)
333 call void @__kmpc_parallel_51(ptr @2, i32 %0, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__17, ptr @__omp_outlined__17_wrapper, ptr %captured_vars_addrs, i64 0)
337 define internal void @__omp_outlined__5(ptr noalias %.global_tid., ptr noalias %.bound_tid.) #0 {
339 %.global_tid..addr = alloca ptr, align 8
340 %.bound_tid..addr = alloca ptr, align 8
341 store ptr %.global_tid., ptr %.global_tid..addr, align 8
342 store ptr %.bound_tid., ptr %.bound_tid..addr, align 8
347 define internal void @__omp_outlined__5_wrapper(i16 zeroext %0, i32 %1) #0 {
349 %.addr = alloca i16, align 2
350 %.addr1 = alloca i32, align 4
351 %.zero.addr = alloca i32, align 4
352 %global_args = alloca ptr, align 8
353 store i32 0, ptr %.zero.addr, align 4
354 store i16 %0, ptr %.addr, align 2
355 store i32 %1, ptr %.addr1, align 4
356 call void @__kmpc_get_shared_variables(ptr %global_args)
357 call void @__omp_outlined__5(ptr %.addr1, ptr %.zero.addr) #3
361 define hidden void @simple_state_machine_interprocedural_after() #1 {
363 %captured_vars_addrs = alloca [0 x ptr], align 8
364 %0 = call i32 @__kmpc_global_thread_num(ptr @2)
365 call void @__kmpc_parallel_51(ptr @2, i32 %0, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__18, ptr @__omp_outlined__18_wrapper, ptr %captured_vars_addrs, i64 0)
369 define weak void @__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55(ptr %dyn) #0 {
371 %.zero.addr = alloca i32, align 4
372 %.threadid_temp. = alloca i32, align 4
373 store i32 0, ptr %.zero.addr, align 4
374 %0 = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55_kernel_environment, ptr %dyn)
375 %exec_user_code = icmp eq i32 %0, -1
376 br i1 %exec_user_code, label %user_code.entry, label %worker.exit
378 user_code.entry: ; preds = %entry
379 %1 = call i32 @__kmpc_global_thread_num(ptr @1)
380 store i32 %1, ptr %.threadid_temp., align 4
381 call void @__omp_outlined__6(ptr %.threadid_temp., ptr %.zero.addr) #3
382 call void @__kmpc_target_deinit()
385 worker.exit: ; preds = %entry
389 define internal void @__omp_outlined__6(ptr noalias %.global_tid., ptr noalias %.bound_tid.) #0 {
391 %.global_tid..addr = alloca ptr, align 8
392 %.bound_tid..addr = alloca ptr, align 8
393 %captured_vars_addrs = alloca [0 x ptr], align 8
394 %captured_vars_addrs1 = alloca [0 x ptr], align 8
395 store ptr %.global_tid., ptr %.global_tid..addr, align 8
396 store ptr %.bound_tid., ptr %.bound_tid..addr, align 8
397 %0 = load ptr, ptr %.global_tid..addr, align 8
398 %1 = load i32, ptr %0, align 4
399 call void @__kmpc_parallel_51(ptr @1, i32 %1, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__7, ptr @__omp_outlined__7_wrapper, ptr %captured_vars_addrs, i64 0)
400 %call = call i32 @unknown() #7
401 call void @__kmpc_parallel_51(ptr @1, i32 %1, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__8, ptr @__omp_outlined__8_wrapper, ptr %captured_vars_addrs1, i64 0)
405 define internal void @__omp_outlined__7(ptr noalias %.global_tid., ptr noalias %.bound_tid.) #0 {
407 %.global_tid..addr = alloca ptr, align 8
408 %.bound_tid..addr = alloca ptr, align 8
409 store ptr %.global_tid., ptr %.global_tid..addr, align 8
410 store ptr %.bound_tid., ptr %.bound_tid..addr, align 8
415 define internal void @__omp_outlined__7_wrapper(i16 zeroext %0, i32 %1) #0 {
417 %.addr = alloca i16, align 2
418 %.addr1 = alloca i32, align 4
419 %.zero.addr = alloca i32, align 4
420 %global_args = alloca ptr, align 8
421 store i32 0, ptr %.zero.addr, align 4
422 store i16 %0, ptr %.addr, align 2
423 store i32 %1, ptr %.addr1, align 4
424 call void @__kmpc_get_shared_variables(ptr %global_args)
425 call void @__omp_outlined__7(ptr %.addr1, ptr %.zero.addr) #3
429 declare i32 @unknown() #4
431 define internal void @__omp_outlined__8(ptr noalias %.global_tid., ptr noalias %.bound_tid.) #0 {
433 %.global_tid..addr = alloca ptr, align 8
434 %.bound_tid..addr = alloca ptr, align 8
435 store ptr %.global_tid., ptr %.global_tid..addr, align 8
436 store ptr %.bound_tid., ptr %.bound_tid..addr, align 8
441 define internal void @__omp_outlined__8_wrapper(i16 zeroext %0, i32 %1) #0 {
443 %.addr = alloca i16, align 2
444 %.addr1 = alloca i32, align 4
445 %.zero.addr = alloca i32, align 4
446 %global_args = alloca ptr, align 8
447 store i32 0, ptr %.zero.addr, align 4
448 store i16 %0, ptr %.addr, align 2
449 store i32 %1, ptr %.addr1, align 4
450 call void @__kmpc_get_shared_variables(ptr %global_args)
451 call void @__omp_outlined__8(ptr %.addr1, ptr %.zero.addr) #3
455 define weak void @__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66(ptr %dyn) #0 {
457 %.zero.addr = alloca i32, align 4
458 %.threadid_temp. = alloca i32, align 4
459 store i32 0, ptr %.zero.addr, align 4
460 %0 = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66_kernel_environment, ptr %dyn)
461 %exec_user_code = icmp eq i32 %0, -1
462 br i1 %exec_user_code, label %user_code.entry, label %worker.exit
464 user_code.entry: ; preds = %entry
465 %1 = call i32 @__kmpc_global_thread_num(ptr @1)
466 store i32 %1, ptr %.threadid_temp., align 4
467 call void @__omp_outlined__9(ptr %.threadid_temp., ptr %.zero.addr) #3
468 call void @__kmpc_target_deinit()
471 worker.exit: ; preds = %entry
475 define internal void @__omp_outlined__9(ptr noalias %.global_tid., ptr noalias %.bound_tid.) #0 {
477 %.global_tid..addr = alloca ptr, align 8
478 %.bound_tid..addr = alloca ptr, align 8
479 %captured_vars_addrs = alloca [0 x ptr], align 8
480 %captured_vars_addrs1 = alloca [0 x ptr], align 8
481 store ptr %.global_tid., ptr %.global_tid..addr, align 8
482 store ptr %.bound_tid., ptr %.bound_tid..addr, align 8
483 %0 = load ptr, ptr %.global_tid..addr, align 8
484 %1 = load i32, ptr %0, align 4
485 call void @__kmpc_parallel_51(ptr @1, i32 %1, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__10, ptr @__omp_outlined__10_wrapper, ptr %captured_vars_addrs, i64 0)
486 call void @unknown_no_openmp() #8
487 call void @__kmpc_parallel_51(ptr @1, i32 %1, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__11, ptr @__omp_outlined__11_wrapper, ptr %captured_vars_addrs1, i64 0)
491 define internal void @__omp_outlined__10(ptr noalias %.global_tid., ptr noalias %.bound_tid.) #0 {
493 %.global_tid..addr = alloca ptr, align 8
494 %.bound_tid..addr = alloca ptr, align 8
495 store ptr %.global_tid., ptr %.global_tid..addr, align 8
496 store ptr %.bound_tid., ptr %.bound_tid..addr, align 8
501 define internal void @__omp_outlined__10_wrapper(i16 zeroext %0, i32 %1) #0 {
503 %.addr = alloca i16, align 2
504 %.addr1 = alloca i32, align 4
505 %.zero.addr = alloca i32, align 4
506 %global_args = alloca ptr, align 8
507 store i32 0, ptr %.zero.addr, align 4
508 store i16 %0, ptr %.addr, align 2
509 store i32 %1, ptr %.addr1, align 4
510 call void @__kmpc_get_shared_variables(ptr %global_args)
511 call void @__omp_outlined__10(ptr %.addr1, ptr %.zero.addr) #3
515 define internal void @__omp_outlined__11(ptr noalias %.global_tid., ptr noalias %.bound_tid.) #0 {
517 %.global_tid..addr = alloca ptr, align 8
518 %.bound_tid..addr = alloca ptr, align 8
519 store ptr %.global_tid., ptr %.global_tid..addr, align 8
520 store ptr %.bound_tid., ptr %.bound_tid..addr, align 8
525 define internal void @__omp_outlined__11_wrapper(i16 zeroext %0, i32 %1) #0 {
527 %.addr = alloca i16, align 2
528 %.addr1 = alloca i32, align 4
529 %.zero.addr = alloca i32, align 4
530 %global_args = alloca ptr, align 8
531 store i32 0, ptr %.zero.addr, align 4
532 store i16 %0, ptr %.addr, align 2
533 store i32 %1, ptr %.addr1, align 4
534 call void @__kmpc_get_shared_variables(ptr %global_args)
535 call void @__omp_outlined__11(ptr %.addr1, ptr %.zero.addr) #3
539 define weak void @__omp_offloading_14_a36502b_simple_state_machine_pure_l77(ptr %dyn) #0 {
541 %.zero.addr = alloca i32, align 4
542 %.threadid_temp. = alloca i32, align 4
543 store i32 0, ptr %.zero.addr, align 4
544 %0 = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_pure_l77_kernel_environment, ptr %dyn)
545 %exec_user_code = icmp eq i32 %0, -1
546 br i1 %exec_user_code, label %user_code.entry, label %worker.exit
548 user_code.entry: ; preds = %entry
549 %1 = call i32 @__kmpc_global_thread_num(ptr @1)
550 store i32 %1, ptr %.threadid_temp., align 4
551 call void @__omp_outlined__12(ptr %.threadid_temp., ptr %.zero.addr) #3
552 call void @__kmpc_target_deinit()
555 worker.exit: ; preds = %entry
559 define internal void @__omp_outlined__12(ptr noalias %.global_tid., ptr noalias %.bound_tid.) #0 {
561 %.global_tid..addr = alloca ptr, align 8
562 %.bound_tid..addr = alloca ptr, align 8
563 %captured_vars_addrs = alloca [0 x ptr], align 8
564 %captured_vars_addrs1 = alloca [0 x ptr], align 8
565 store ptr %.global_tid., ptr %.global_tid..addr, align 8
566 store ptr %.bound_tid., ptr %.bound_tid..addr, align 8
567 call void @unknown_no_openmp() #8
568 %0 = load ptr, ptr %.global_tid..addr, align 8
569 %1 = load i32, ptr %0, align 4
570 call void @__kmpc_parallel_51(ptr @1, i32 %1, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__13, ptr @__omp_outlined__13_wrapper, ptr %captured_vars_addrs, i64 0)
571 call void @unknown_pure() #9
572 call void @__kmpc_parallel_51(ptr @1, i32 %1, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__14, ptr @__omp_outlined__14_wrapper, ptr %captured_vars_addrs1, i64 0)
576 define internal void @__omp_outlined__13(ptr noalias %.global_tid., ptr noalias %.bound_tid.) #0 {
578 %.global_tid..addr = alloca ptr, align 8
579 %.bound_tid..addr = alloca ptr, align 8
580 store ptr %.global_tid., ptr %.global_tid..addr, align 8
581 store ptr %.bound_tid., ptr %.bound_tid..addr, align 8
586 define internal void @__omp_outlined__13_wrapper(i16 zeroext %0, i32 %1) #0 {
588 %.addr = alloca i16, align 2
589 %.addr1 = alloca i32, align 4
590 %.zero.addr = alloca i32, align 4
591 %global_args = alloca ptr, align 8
592 store i32 0, ptr %.zero.addr, align 4
593 store i16 %0, ptr %.addr, align 2
594 store i32 %1, ptr %.addr1, align 4
595 call void @__kmpc_get_shared_variables(ptr %global_args)
596 call void @__omp_outlined__13(ptr %.addr1, ptr %.zero.addr) #3
600 declare void @unknown_pure() #5
602 define internal void @__omp_outlined__14(ptr noalias %.global_tid., ptr noalias %.bound_tid.) #0 {
604 %.global_tid..addr = alloca ptr, align 8
605 %.bound_tid..addr = alloca ptr, align 8
606 store ptr %.global_tid., ptr %.global_tid..addr, align 8
607 store ptr %.bound_tid., ptr %.bound_tid..addr, align 8
612 define internal void @__omp_outlined__14_wrapper(i16 zeroext %0, i32 %1) #0 {
614 %.addr = alloca i16, align 2
615 %.addr1 = alloca i32, align 4
616 %.zero.addr = alloca i32, align 4
617 %global_args = alloca ptr, align 8
618 store i32 0, ptr %.zero.addr, align 4
619 store i16 %0, ptr %.addr, align 2
620 store i32 %1, ptr %.addr1, align 4
621 call void @__kmpc_get_shared_variables(ptr %global_args)
622 call void @__omp_outlined__14(ptr %.addr1, ptr %.zero.addr) #3
626 define weak void @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92(ptr %dyn) #0 {
628 %.zero.addr = alloca i32, align 4
629 %.threadid_temp. = alloca i32, align 4
630 store i32 0, ptr %.zero.addr, align 4
631 %0 = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92_kernel_environment, ptr %dyn)
632 %exec_user_code = icmp eq i32 %0, -1
633 br i1 %exec_user_code, label %user_code.entry, label %worker.exit
635 user_code.entry: ; preds = %entry
636 %1 = call i32 @__kmpc_global_thread_num(ptr @1)
637 store i32 %1, ptr %.threadid_temp., align 4
638 call void @__omp_outlined__15(ptr %.threadid_temp., ptr %.zero.addr) #3
639 call void @__kmpc_target_deinit()
642 worker.exit: ; preds = %entry
646 define internal void @__omp_outlined__15(ptr noalias %.global_tid., ptr noalias %.bound_tid.) #0 {
648 %.global_tid..addr = alloca ptr, align 8
649 %.bound_tid..addr = alloca ptr, align 8
650 store ptr %.global_tid., ptr %.global_tid..addr, align 8
651 store ptr %.bound_tid., ptr %.bound_tid..addr, align 8
652 %call = call i32 @omp_get_thread_num() #7
653 call void @simple_state_machine_interprocedural_nested_recursive_after(i32 %call) #7
; Self-recursive callee used by the nested_recursive kernel: counts %a down to
; zero, then (per recursion step) calls the _after_after helper that launches
; a parallel region. Exercises state-machine discovery through recursion.
657 define hidden void @simple_state_machine_interprocedural_nested_recursive_after(i32 %a) #1 {
659 %a.addr = alloca i32, align 4
660 store i32 %a, ptr %a.addr, align 4
661 %0 = load i32, ptr %a.addr, align 4
; Base case: a == 0 terminates the recursion.
662 %cmp = icmp eq i32 %0, 0
663 br i1 %cmp, label %if.then, label %if.end
665 if.then: ; preds = %entry
668 if.end: ; preds = %entry
669 %1 = load i32, ptr %a.addr, align 4
670 %sub = sub nsw i32 %1, 1
; Recursive step on a-1, then the parallel-region-launching helper.
671 call void @simple_state_machine_interprocedural_nested_recursive_after(i32 %sub) #7
672 call void @simple_state_machine_interprocedural_nested_recursive_after_after() #7
675 return: ; preds = %if.end, %if.then
679 declare i32 @omp_get_thread_num(...) #4
681 define weak void @__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112(ptr %dyn) #0 {
683 %.zero.addr = alloca i32, align 4
684 %.threadid_temp. = alloca i32, align 4
685 store i32 0, ptr %.zero.addr, align 4
686 %0 = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112_kernel_environment, ptr %dyn)
687 %exec_user_code = icmp eq i32 %0, -1
688 br i1 %exec_user_code, label %user_code.entry, label %worker.exit
690 user_code.entry: ; preds = %entry
691 %1 = call i32 @__kmpc_global_thread_num(ptr @1)
692 store i32 %1, ptr %.threadid_temp., align 4
693 call void @__omp_outlined__16(ptr %.threadid_temp., ptr %.zero.addr) #3
694 call void @__kmpc_target_deinit()
697 worker.exit: ; preds = %entry
701 define internal void @__omp_outlined__16(ptr noalias %.global_tid., ptr noalias %.bound_tid.) #0 {
703 %.global_tid..addr = alloca ptr, align 8
704 %.bound_tid..addr = alloca ptr, align 8
705 store ptr %.global_tid., ptr %.global_tid..addr, align 8
706 store ptr %.bound_tid., ptr %.bound_tid..addr, align 8
707 call void @weak_callee_empty() #7
711 define weak hidden void @weak_callee_empty() #1 {
716 declare i32 @__kmpc_single(ptr, i32) #6
718 declare void @__kmpc_end_single(ptr, i32) #6
720 declare void @__kmpc_barrier(ptr, i32) #6
722 define internal void @__omp_outlined__17(ptr noalias %.global_tid., ptr noalias %.bound_tid.) #0 {
724 %.global_tid..addr = alloca ptr, align 8
725 %.bound_tid..addr = alloca ptr, align 8
726 store ptr %.global_tid., ptr %.global_tid..addr, align 8
727 store ptr %.bound_tid., ptr %.bound_tid..addr, align 8
732 define internal void @__omp_outlined__17_wrapper(i16 zeroext %0, i32 %1) #0 {
734 %.addr = alloca i16, align 2
735 %.addr1 = alloca i32, align 4
736 %.zero.addr = alloca i32, align 4
737 %global_args = alloca ptr, align 8
738 store i32 0, ptr %.zero.addr, align 4
739 store i16 %0, ptr %.addr, align 2
740 store i32 %1, ptr %.addr1, align 4
741 call void @__kmpc_get_shared_variables(ptr %global_args)
742 call void @__omp_outlined__17(ptr %.addr1, ptr %.zero.addr) #3
746 define internal void @__omp_outlined__18(ptr noalias %.global_tid., ptr noalias %.bound_tid.) #0 {
748 %.global_tid..addr = alloca ptr, align 8
749 %.bound_tid..addr = alloca ptr, align 8
750 store ptr %.global_tid., ptr %.global_tid..addr, align 8
751 store ptr %.bound_tid., ptr %.bound_tid..addr, align 8
756 define internal void @__omp_outlined__18_wrapper(i16 zeroext %0, i32 %1) #0 {
758 %.addr = alloca i16, align 2
759 %.addr1 = alloca i32, align 4
760 %.zero.addr = alloca i32, align 4
761 %global_args = alloca ptr, align 8
762 store i32 0, ptr %.zero.addr, align 4
763 store i16 %0, ptr %.addr, align 2
764 store i32 %1, ptr %.addr1, align 4
765 call void @__kmpc_get_shared_variables(ptr %global_args)
766 call void @__omp_outlined__18(ptr %.addr1, ptr %.zero.addr) #3
; Leaf helper reached through the recursive chain: launches parallel region
; outlined__19, making it a parallel entry point found only interprocedurally.
770 define hidden void @simple_state_machine_interprocedural_nested_recursive_after_after() #1 {
772 %captured_vars_addrs = alloca [0 x ptr], align 8
773 %0 = call i32 @__kmpc_global_thread_num(ptr @2)
774 call void @__kmpc_parallel_51(ptr @2, i32 %0, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__19, ptr @__omp_outlined__19_wrapper, ptr %captured_vars_addrs, i64 0)
778 define internal void @__omp_outlined__19(ptr noalias %.global_tid., ptr noalias %.bound_tid.) #0 {
780 %.global_tid..addr = alloca ptr, align 8
781 %.bound_tid..addr = alloca ptr, align 8
782 store ptr %.global_tid., ptr %.global_tid..addr, align 8
783 store ptr %.bound_tid., ptr %.bound_tid..addr, align 8
788 define internal void @__omp_outlined__19_wrapper(i16 zeroext %0, i32 %1) #0 {
790 %.addr = alloca i16, align 2
791 %.addr1 = alloca i32, align 4
792 %.zero.addr = alloca i32, align 4
793 %global_args = alloca ptr, align 8
794 store i32 0, ptr %.zero.addr, align 4
795 store i16 %0, ptr %.addr, align 2
796 store i32 %1, ptr %.addr1, align 4
797 call void @__kmpc_get_shared_variables(ptr %global_args)
798 call void @__omp_outlined__19(ptr %.addr1, ptr %.zero.addr) #3
802 attributes #0 = { convergent noinline norecurse nounwind "kernel" "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
803 attributes #1 = { convergent noinline nounwind "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
804 attributes #2 = { convergent "frame-pointer"="none" "llvm.assume"="omp_no_openmp" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
805 attributes #3 = { nounwind }
806 attributes #4 = { convergent "frame-pointer"="none" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
807 attributes #5 = { convergent nounwind readonly willreturn "frame-pointer"="none" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
808 attributes #6 = { convergent nounwind }
809 attributes #7 = { convergent }
810 attributes #8 = { convergent "llvm.assume"="omp_no_openmp" }
811 attributes #9 = { convergent nounwind readonly willreturn }
813 !omp_offload.info = !{!0, !1, !2, !3, !4, !5, !6, !7}
814 !nvvm.annotations = !{!8, !9, !10, !11, !12, !13, !14, !15}
815 !llvm.module.flags = !{!16, !17, !18}
817 !0 = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_interprocedural", i32 39, i32 2}
818 !1 = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_no_openmp_attr", i32 66, i32 4}
819 !2 = !{i32 0, i32 20, i32 171331627, !"no_state_machine_needed", i32 14, i32 0}
820 !3 = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_with_fallback", i32 55, i32 3}
821 !4 = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_pure", i32 77, i32 5}
822 !5 = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_interprocedural_nested_recursive", i32 92, i32 6}
823 !6 = !{i32 0, i32 20, i32 171331627, !"no_state_machine_weak_callee", i32 112, i32 7}
824 !7 = !{i32 0, i32 20, i32 171331627, !"simple_state_machine", i32 22, i32 1}
825 !8 = !{ptr @__omp_offloading_14_a36502b_no_state_machine_needed_l14, !"kernel", i32 1}
826 !9 = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_l22, !"kernel", i32 1}
827 !10 = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39, !"kernel", i32 1}
828 !11 = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55, !"kernel", i32 1}
829 !12 = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66, !"kernel", i32 1}
830 !13 = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_pure_l77, !"kernel", i32 1}
831 !14 = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92, !"kernel", i32 1}
832 !15 = !{ptr @__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112, !"kernel", i32 1}
833 !16 = !{i32 1, !"wchar_size", i32 4}
834 !17 = !{i32 7, !"openmp", i32 50}
835 !18 = !{i32 7, !"openmp-device", i32 50}
837 ; AMDGPU1: @[[GLOB0:[0-9]+]] = private unnamed_addr constant [23 x i8] c"
838 ; AMDGPU1: @[[GLOB1:[0-9]+]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 0, ptr @[[GLOB0]] }, align 8
839 ; AMDGPU1: @[[GLOB2:[0-9]+]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 2, i32 0, ptr @[[GLOB0]] }, align 8
840 ; AMDGPU1: @G = external global i32, align 4
841 ; AMDGPU1: @[[GLOB3:[0-9]+]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 322, i32 2, i32 0, ptr @[[GLOB0]] }, align 8
842 ; AMDGPU1: @__omp_offloading_14_a36502b_no_state_machine_needed_l14_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
843 ; AMDGPU1: @__omp_offloading_14_a36502b_simple_state_machine_l22_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
844 ; AMDGPU1: @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
845 ; AMDGPU1: @__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
846 ; AMDGPU1: @__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
847 ; AMDGPU1: @__omp_offloading_14_a36502b_simple_state_machine_pure_l77_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
848 ; AMDGPU1: @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
849 ; AMDGPU1: @__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
851 ; NVPTX1: @[[GLOB0:[0-9]+]] = private unnamed_addr constant [23 x i8] c"
852 ; NVPTX1: @[[GLOB1:[0-9]+]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 0, ptr @[[GLOB0]] }, align 8
853 ; NVPTX1: @[[GLOB2:[0-9]+]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 2, i32 0, ptr @[[GLOB0]] }, align 8
854 ; NVPTX1: @G = external global i32, align 4
855 ; NVPTX1: @[[GLOB3:[0-9]+]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 322, i32 2, i32 0, ptr @[[GLOB0]] }, align 8
856 ; NVPTX1: @__omp_offloading_14_a36502b_no_state_machine_needed_l14_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
857 ; NVPTX1: @__omp_offloading_14_a36502b_simple_state_machine_l22_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
858 ; NVPTX1: @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
859 ; NVPTX1: @__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
860 ; NVPTX1: @__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
861 ; NVPTX1: @__omp_offloading_14_a36502b_simple_state_machine_pure_l77_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
862 ; NVPTX1: @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
863 ; NVPTX1: @__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
865 ; AMDGPU2: @[[GLOB0:[0-9]+]] = private unnamed_addr constant [23 x i8] c"
866 ; AMDGPU2: @[[GLOB1:[0-9]+]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 0, ptr @[[GLOB0]] }, align 8
867 ; AMDGPU2: @[[GLOB2:[0-9]+]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 2, i32 0, ptr @[[GLOB0]] }, align 8
868 ; AMDGPU2: @G = external global i32, align 4
869 ; AMDGPU2: @[[GLOB3:[0-9]+]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 322, i32 2, i32 0, ptr @[[GLOB0]] }, align 8
870 ; AMDGPU2: @__omp_offloading_14_a36502b_no_state_machine_needed_l14_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
871 ; AMDGPU2: @__omp_offloading_14_a36502b_simple_state_machine_l22_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
872 ; AMDGPU2: @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
873 ; AMDGPU2: @__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
874 ; AMDGPU2: @__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
875 ; AMDGPU2: @__omp_offloading_14_a36502b_simple_state_machine_pure_l77_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
876 ; AMDGPU2: @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
877 ; AMDGPU2: @__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
879 ; AMDGPU3: @[[GLOB0:[0-9]+]] = private unnamed_addr constant [23 x i8] c"
880 ; AMDGPU3: @[[GLOB1:[0-9]+]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 0, ptr @[[GLOB0]] }, align 8
881 ; AMDGPU3: @[[GLOB2:[0-9]+]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 2, i32 0, ptr @[[GLOB0]] }, align 8
882 ; AMDGPU3: @G = external global i32, align 4
883 ; AMDGPU3: @[[GLOB3:[0-9]+]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 322, i32 2, i32 0, ptr @[[GLOB0]] }, align 8
884 ; AMDGPU3: @__omp_offloading_14_a36502b_no_state_machine_needed_l14_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
885 ; AMDGPU3: @__omp_offloading_14_a36502b_simple_state_machine_l22_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
886 ; AMDGPU3: @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
887 ; AMDGPU3: @__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
888 ; AMDGPU3: @__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
889 ; AMDGPU3: @__omp_offloading_14_a36502b_simple_state_machine_pure_l77_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
890 ; AMDGPU3: @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
891 ; AMDGPU3: @__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
893 ; NVPTX2: @[[GLOB0:[0-9]+]] = private unnamed_addr constant [23 x i8] c"
894 ; NVPTX2: @[[GLOB1:[0-9]+]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 0, ptr @[[GLOB0]] }, align 8
895 ; NVPTX2: @[[GLOB2:[0-9]+]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 2, i32 0, ptr @[[GLOB0]] }, align 8
896 ; NVPTX2: @G = external global i32, align 4
897 ; NVPTX2: @[[GLOB3:[0-9]+]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 322, i32 2, i32 0, ptr @[[GLOB0]] }, align 8
898 ; NVPTX2: @__omp_offloading_14_a36502b_no_state_machine_needed_l14_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
899 ; NVPTX2: @__omp_offloading_14_a36502b_simple_state_machine_l22_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
900 ; NVPTX2: @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
901 ; NVPTX2: @__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
902 ; NVPTX2: @__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
903 ; NVPTX2: @__omp_offloading_14_a36502b_simple_state_machine_pure_l77_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
904 ; NVPTX2: @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
905 ; NVPTX2: @__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
907 ; NVPTX3: @[[GLOB0:[0-9]+]] = private unnamed_addr constant [23 x i8] c"
908 ; NVPTX3: @[[GLOB1:[0-9]+]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 0, ptr @[[GLOB0]] }, align 8
909 ; NVPTX3: @[[GLOB2:[0-9]+]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 2, i32 0, ptr @[[GLOB0]] }, align 8
910 ; NVPTX3: @G = external global i32, align 4
911 ; NVPTX3: @[[GLOB3:[0-9]+]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 322, i32 2, i32 0, ptr @[[GLOB0]] }, align 8
912 ; NVPTX3: @__omp_offloading_14_a36502b_no_state_machine_needed_l14_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
913 ; NVPTX3: @__omp_offloading_14_a36502b_simple_state_machine_l22_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
914 ; NVPTX3: @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
915 ; NVPTX3: @__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
916 ; NVPTX3: @__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
917 ; NVPTX3: @__omp_offloading_14_a36502b_simple_state_machine_pure_l77_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
918 ; NVPTX3: @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
919 ; NVPTX3: @__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
921 ; AMDGPU1: Function Attrs: convergent noinline norecurse nounwind
922 ; AMDGPU1-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_no_state_machine_needed_l14
923 ; AMDGPU1-SAME: (ptr [[DYN:%.*]]) #[[ATTR0:[0-9]+]] {
924 ; AMDGPU1-NEXT: entry:
925 ; AMDGPU1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
926 ; AMDGPU1-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
927 ; AMDGPU1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_no_state_machine_needed_l14_kernel_environment, ptr [[DYN]])
928 ; AMDGPU1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
929 ; AMDGPU1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
930 ; AMDGPU1: user_code.entry:
931 ; AMDGPU1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3:[0-9]+]]
932 ; AMDGPU1-NEXT: call void @__omp_outlined__(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
933 ; AMDGPU1-NEXT: call void @__kmpc_target_deinit()
934 ; AMDGPU1-NEXT: ret void
935 ; AMDGPU1: worker.exit:
936 ; AMDGPU1-NEXT: ret void
939 ; AMDGPU1: Function Attrs: convergent noinline norecurse nounwind
940 ; AMDGPU1-LABEL: define {{[^@]+}}@__omp_outlined__
941 ; AMDGPU1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
942 ; AMDGPU1-NEXT: entry:
943 ; AMDGPU1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
944 ; AMDGPU1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
945 ; AMDGPU1-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR9:[0-9]+]]
946 ; AMDGPU1-NEXT: call void @unknown_no_openmp() #[[ATTR10:[0-9]+]]
947 ; AMDGPU1-NEXT: ret void
950 ; AMDGPU1: Function Attrs: convergent noinline nounwind
951 ; AMDGPU1-LABEL: define {{[^@]+}}@no_parallel_region_in_here.internalized
952 ; AMDGPU1-SAME: () #[[ATTR1:[0-9]+]] {
953 ; AMDGPU1-NEXT: entry:
954 ; AMDGPU1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]]) #[[ATTR3]]
955 ; AMDGPU1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_single(ptr @[[GLOB2]], i32 [[TMP0]]) #[[ATTR3]]
956 ; AMDGPU1-NEXT: [[TMP2:%.*]] = icmp ne i32 [[TMP1]], 0
957 ; AMDGPU1-NEXT: br i1 [[TMP2]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
958 ; AMDGPU1: omp_if.then:
959 ; AMDGPU1-NEXT: store i32 0, ptr @G, align 4
960 ; AMDGPU1-NEXT: call void @__kmpc_end_single(ptr @[[GLOB2]], i32 [[TMP0]]) #[[ATTR3]]
961 ; AMDGPU1-NEXT: br label [[OMP_IF_END]]
962 ; AMDGPU1: omp_if.end:
963 ; AMDGPU1-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3]], i32 [[TMP0]]) #[[ATTR3]]
964 ; AMDGPU1-NEXT: ret void
967 ; AMDGPU1: Function Attrs: convergent noinline nounwind
968 ; AMDGPU1-LABEL: define {{[^@]+}}@no_parallel_region_in_here
969 ; AMDGPU1-SAME: () #[[ATTR1]] {
970 ; AMDGPU1-NEXT: entry:
971 ; AMDGPU1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]])
972 ; AMDGPU1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_single(ptr @[[GLOB2]], i32 [[TMP0]])
973 ; AMDGPU1-NEXT: [[TMP2:%.*]] = icmp ne i32 [[TMP1]], 0
974 ; AMDGPU1-NEXT: br i1 [[TMP2]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
975 ; AMDGPU1: omp_if.then:
976 ; AMDGPU1-NEXT: store i32 0, ptr @G, align 4
977 ; AMDGPU1-NEXT: call void @__kmpc_end_single(ptr @[[GLOB2]], i32 [[TMP0]])
978 ; AMDGPU1-NEXT: br label [[OMP_IF_END]]
979 ; AMDGPU1: omp_if.end:
980 ; AMDGPU1-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3]], i32 [[TMP0]])
981 ; AMDGPU1-NEXT: ret void
984 ; AMDGPU1: Function Attrs: convergent noinline norecurse nounwind
985 ; AMDGPU1-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_l22
986 ; AMDGPU1-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
987 ; AMDGPU1-NEXT: entry:
988 ; AMDGPU1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
989 ; AMDGPU1-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
990 ; AMDGPU1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_l22_kernel_environment, ptr [[DYN]])
991 ; AMDGPU1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
992 ; AMDGPU1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
993 ; AMDGPU1: user_code.entry:
994 ; AMDGPU1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
995 ; AMDGPU1-NEXT: call void @__omp_outlined__1(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
996 ; AMDGPU1-NEXT: call void @__kmpc_target_deinit()
997 ; AMDGPU1-NEXT: ret void
998 ; AMDGPU1: worker.exit:
999 ; AMDGPU1-NEXT: ret void
1002 ; AMDGPU1: Function Attrs: convergent noinline norecurse nounwind
1003 ; AMDGPU1-LABEL: define {{[^@]+}}@__omp_outlined__1
1004 ; AMDGPU1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1005 ; AMDGPU1-NEXT: entry:
1006 ; AMDGPU1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1007 ; AMDGPU1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
1008 ; AMDGPU1-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
1009 ; AMDGPU1-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
1010 ; AMDGPU1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__2, ptr @__omp_outlined__2_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
1011 ; AMDGPU1-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR9]]
1012 ; AMDGPU1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__3, ptr @__omp_outlined__3_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
1013 ; AMDGPU1-NEXT: ret void
1016 ; AMDGPU1: Function Attrs: convergent noinline norecurse nounwind
1017 ; AMDGPU1-LABEL: define {{[^@]+}}@__omp_outlined__2
1018 ; AMDGPU1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1019 ; AMDGPU1-NEXT: entry:
1020 ; AMDGPU1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1021 ; AMDGPU1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1022 ; AMDGPU1-NEXT: call void @p0() #[[ATTR11:[0-9]+]]
1023 ; AMDGPU1-NEXT: ret void
1026 ; AMDGPU1: Function Attrs: convergent noinline norecurse nounwind
1027 ; AMDGPU1-LABEL: define {{[^@]+}}@__omp_outlined__2_wrapper
1028 ; AMDGPU1-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
1029 ; AMDGPU1-NEXT: entry:
1030 ; AMDGPU1-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
1031 ; AMDGPU1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
1032 ; AMDGPU1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1033 ; AMDGPU1-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
1034 ; AMDGPU1-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
1035 ; AMDGPU1-NEXT: call void @__omp_outlined__2(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
1036 ; AMDGPU1-NEXT: ret void
1039 ; AMDGPU1: Function Attrs: convergent noinline norecurse nounwind
1040 ; AMDGPU1-LABEL: define {{[^@]+}}@__omp_outlined__3
1041 ; AMDGPU1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1042 ; AMDGPU1-NEXT: entry:
1043 ; AMDGPU1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1044 ; AMDGPU1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1045 ; AMDGPU1-NEXT: call void @p1() #[[ATTR11]]
1046 ; AMDGPU1-NEXT: ret void
1049 ; AMDGPU1: Function Attrs: convergent noinline norecurse nounwind
1050 ; AMDGPU1-LABEL: define {{[^@]+}}@__omp_outlined__3_wrapper
1051 ; AMDGPU1-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
1052 ; AMDGPU1-NEXT: entry:
1053 ; AMDGPU1-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
1054 ; AMDGPU1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
1055 ; AMDGPU1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1056 ; AMDGPU1-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
1057 ; AMDGPU1-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
1058 ; AMDGPU1-NEXT: call void @__omp_outlined__3(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
1059 ; AMDGPU1-NEXT: ret void
1062 ; AMDGPU1: Function Attrs: convergent noinline norecurse nounwind
1063 ; AMDGPU1-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39
1064 ; AMDGPU1-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
1065 ; AMDGPU1-NEXT: entry:
1066 ; AMDGPU1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1067 ; AMDGPU1-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
1068 ; AMDGPU1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39_kernel_environment, ptr [[DYN]])
1069 ; AMDGPU1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
1070 ; AMDGPU1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
1071 ; AMDGPU1: user_code.entry:
1072 ; AMDGPU1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
1073 ; AMDGPU1-NEXT: call void @__omp_outlined__4(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
1074 ; AMDGPU1-NEXT: call void @__kmpc_target_deinit()
1075 ; AMDGPU1-NEXT: ret void
1076 ; AMDGPU1: worker.exit:
1077 ; AMDGPU1-NEXT: ret void
1080 ; AMDGPU1: Function Attrs: convergent noinline norecurse nounwind
1081 ; AMDGPU1-LABEL: define {{[^@]+}}@__omp_outlined__4
1082 ; AMDGPU1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1083 ; AMDGPU1-NEXT: entry:
1084 ; AMDGPU1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1085 ; AMDGPU1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
1086 ; AMDGPU1-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
1087 ; AMDGPU1-NEXT: call void @simple_state_machine_interprocedural_before.internalized() #[[ATTR9]]
1088 ; AMDGPU1-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR9]]
1089 ; AMDGPU1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__5, ptr @__omp_outlined__5_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
1090 ; AMDGPU1-NEXT: call void @simple_state_machine_interprocedural_after.internalized() #[[ATTR9]]
1091 ; AMDGPU1-NEXT: ret void
1094 ; AMDGPU1: Function Attrs: noinline nounwind
1095 ; AMDGPU1-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_before.internalized
1096 ; AMDGPU1-SAME: () #[[ATTR6:[0-9]+]] {
1097 ; AMDGPU1-NEXT: entry:
1098 ; AMDGPU1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
1099 ; AMDGPU1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]]) #[[ATTR3]]
1100 ; AMDGPU1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__17, ptr @__omp_outlined__17_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
1101 ; AMDGPU1-NEXT: ret void
1104 ; AMDGPU1: Function Attrs: convergent noinline nounwind
1105 ; AMDGPU1-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_before
1106 ; AMDGPU1-SAME: () #[[ATTR1]] {
1107 ; AMDGPU1-NEXT: entry:
1108 ; AMDGPU1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
1109 ; AMDGPU1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]])
1110 ; AMDGPU1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__17, ptr @__omp_outlined__17_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
1111 ; AMDGPU1-NEXT: ret void
1114 ; AMDGPU1: Function Attrs: convergent noinline norecurse nounwind
1115 ; AMDGPU1-LABEL: define {{[^@]+}}@__omp_outlined__5
1116 ; AMDGPU1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1117 ; AMDGPU1-NEXT: entry:
1118 ; AMDGPU1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1119 ; AMDGPU1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1120 ; AMDGPU1-NEXT: call void @p1() #[[ATTR11]]
1121 ; AMDGPU1-NEXT: ret void
1124 ; AMDGPU1: Function Attrs: convergent noinline norecurse nounwind
1125 ; AMDGPU1-LABEL: define {{[^@]+}}@__omp_outlined__5_wrapper
1126 ; AMDGPU1-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
1127 ; AMDGPU1-NEXT: entry:
1128 ; AMDGPU1-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
1129 ; AMDGPU1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
1130 ; AMDGPU1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1131 ; AMDGPU1-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
1132 ; AMDGPU1-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
1133 ; AMDGPU1-NEXT: call void @__omp_outlined__5(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
1134 ; AMDGPU1-NEXT: ret void
1137 ; AMDGPU1: Function Attrs: noinline nounwind
1138 ; AMDGPU1-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_after.internalized
1139 ; AMDGPU1-SAME: () #[[ATTR6]] {
1140 ; AMDGPU1-NEXT: entry:
1141 ; AMDGPU1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
1142 ; AMDGPU1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]]) #[[ATTR3]]
1143 ; AMDGPU1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__18, ptr @__omp_outlined__18_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
1144 ; AMDGPU1-NEXT: ret void
1147 ; AMDGPU1: Function Attrs: convergent noinline nounwind
1148 ; AMDGPU1-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_after
1149 ; AMDGPU1-SAME: () #[[ATTR1]] {
1150 ; AMDGPU1-NEXT: entry:
1151 ; AMDGPU1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
1152 ; AMDGPU1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]])
1153 ; AMDGPU1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__18, ptr @__omp_outlined__18_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
1154 ; AMDGPU1-NEXT: ret void
1157 ; AMDGPU1: Function Attrs: convergent noinline norecurse nounwind
1158 ; AMDGPU1-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55
1159 ; AMDGPU1-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
1160 ; AMDGPU1-NEXT: entry:
1161 ; AMDGPU1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1162 ; AMDGPU1-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
1163 ; AMDGPU1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55_kernel_environment, ptr [[DYN]])
1164 ; AMDGPU1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
1165 ; AMDGPU1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
1166 ; AMDGPU1: user_code.entry:
1167 ; AMDGPU1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
1168 ; AMDGPU1-NEXT: call void @__omp_outlined__6(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
1169 ; AMDGPU1-NEXT: call void @__kmpc_target_deinit()
1170 ; AMDGPU1-NEXT: ret void
1171 ; AMDGPU1: worker.exit:
1172 ; AMDGPU1-NEXT: ret void
1175 ; AMDGPU1: Function Attrs: convergent noinline norecurse nounwind
1176 ; AMDGPU1-LABEL: define {{[^@]+}}@__omp_outlined__6
1177 ; AMDGPU1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1178 ; AMDGPU1-NEXT: entry:
1179 ; AMDGPU1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1180 ; AMDGPU1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
1181 ; AMDGPU1-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
1182 ; AMDGPU1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__7, ptr @__omp_outlined__7_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
1183 ; AMDGPU1-NEXT: [[CALL:%.*]] = call i32 @unknown() #[[ATTR11]]
1184 ; AMDGPU1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__8, ptr @__omp_outlined__8_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
1185 ; AMDGPU1-NEXT: ret void
1188 ; AMDGPU1: Function Attrs: convergent noinline norecurse nounwind
1189 ; AMDGPU1-LABEL: define {{[^@]+}}@__omp_outlined__7
1190 ; AMDGPU1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1191 ; AMDGPU1-NEXT: entry:
1192 ; AMDGPU1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1193 ; AMDGPU1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1194 ; AMDGPU1-NEXT: call void @p0() #[[ATTR11]]
1195 ; AMDGPU1-NEXT: ret void
1198 ; AMDGPU1: Function Attrs: convergent noinline norecurse nounwind
1199 ; AMDGPU1-LABEL: define {{[^@]+}}@__omp_outlined__7_wrapper
1200 ; AMDGPU1-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
1201 ; AMDGPU1-NEXT: entry:
1202 ; AMDGPU1-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
1203 ; AMDGPU1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
1204 ; AMDGPU1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1205 ; AMDGPU1-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
1206 ; AMDGPU1-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
1207 ; AMDGPU1-NEXT: call void @__omp_outlined__7(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
1208 ; AMDGPU1-NEXT: ret void
1211 ; AMDGPU1: Function Attrs: convergent noinline norecurse nounwind
1212 ; AMDGPU1-LABEL: define {{[^@]+}}@__omp_outlined__8
1213 ; AMDGPU1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1214 ; AMDGPU1-NEXT: entry:
1215 ; AMDGPU1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1216 ; AMDGPU1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1217 ; AMDGPU1-NEXT: call void @p1() #[[ATTR11]]
1218 ; AMDGPU1-NEXT: ret void
1221 ; AMDGPU1: Function Attrs: convergent noinline norecurse nounwind
1222 ; AMDGPU1-LABEL: define {{[^@]+}}@__omp_outlined__8_wrapper
1223 ; AMDGPU1-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
1224 ; AMDGPU1-NEXT: entry:
1225 ; AMDGPU1-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
1226 ; AMDGPU1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
1227 ; AMDGPU1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1228 ; AMDGPU1-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
1229 ; AMDGPU1-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
1230 ; AMDGPU1-NEXT: call void @__omp_outlined__8(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
1231 ; AMDGPU1-NEXT: ret void
1234 ; AMDGPU1: Function Attrs: convergent noinline norecurse nounwind
1235 ; AMDGPU1-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66
1236 ; AMDGPU1-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
1237 ; AMDGPU1-NEXT: entry:
1238 ; AMDGPU1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1239 ; AMDGPU1-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
1240 ; AMDGPU1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66_kernel_environment, ptr [[DYN]])
1241 ; AMDGPU1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
1242 ; AMDGPU1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
1243 ; AMDGPU1: user_code.entry:
1244 ; AMDGPU1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
1245 ; AMDGPU1-NEXT: call void @__omp_outlined__9(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
1246 ; AMDGPU1-NEXT: call void @__kmpc_target_deinit()
1247 ; AMDGPU1-NEXT: ret void
1248 ; AMDGPU1: worker.exit:
1249 ; AMDGPU1-NEXT: ret void
1252 ; AMDGPU1: Function Attrs: convergent noinline norecurse nounwind
1253 ; AMDGPU1-LABEL: define {{[^@]+}}@__omp_outlined__9
1254 ; AMDGPU1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1255 ; AMDGPU1-NEXT: entry:
1256 ; AMDGPU1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1257 ; AMDGPU1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
1258 ; AMDGPU1-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
1259 ; AMDGPU1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__10, ptr @__omp_outlined__10_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
1260 ; AMDGPU1-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
1261 ; AMDGPU1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__11, ptr @__omp_outlined__11_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
1262 ; AMDGPU1-NEXT: ret void
1265 ; AMDGPU1: Function Attrs: convergent noinline norecurse nounwind
1266 ; AMDGPU1-LABEL: define {{[^@]+}}@__omp_outlined__10
1267 ; AMDGPU1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1268 ; AMDGPU1-NEXT: entry:
1269 ; AMDGPU1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1270 ; AMDGPU1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1271 ; AMDGPU1-NEXT: call void @p0() #[[ATTR11]]
1272 ; AMDGPU1-NEXT: ret void
1275 ; AMDGPU1: Function Attrs: convergent noinline norecurse nounwind
1276 ; AMDGPU1-LABEL: define {{[^@]+}}@__omp_outlined__10_wrapper
1277 ; AMDGPU1-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
1278 ; AMDGPU1-NEXT: entry:
1279 ; AMDGPU1-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
1280 ; AMDGPU1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
1281 ; AMDGPU1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1282 ; AMDGPU1-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
1283 ; AMDGPU1-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
1284 ; AMDGPU1-NEXT: call void @__omp_outlined__10(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
1285 ; AMDGPU1-NEXT: ret void
1288 ; AMDGPU1: Function Attrs: convergent noinline norecurse nounwind
1289 ; AMDGPU1-LABEL: define {{[^@]+}}@__omp_outlined__11
1290 ; AMDGPU1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1291 ; AMDGPU1-NEXT: entry:
1292 ; AMDGPU1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1293 ; AMDGPU1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1294 ; AMDGPU1-NEXT: call void @p1() #[[ATTR11]]
1295 ; AMDGPU1-NEXT: ret void
1298 ; AMDGPU1: Function Attrs: convergent noinline norecurse nounwind
1299 ; AMDGPU1-LABEL: define {{[^@]+}}@__omp_outlined__11_wrapper
1300 ; AMDGPU1-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
1301 ; AMDGPU1-NEXT: entry:
1302 ; AMDGPU1-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
1303 ; AMDGPU1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
1304 ; AMDGPU1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1305 ; AMDGPU1-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
1306 ; AMDGPU1-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
1307 ; AMDGPU1-NEXT: call void @__omp_outlined__11(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
1308 ; AMDGPU1-NEXT: ret void
1311 ; AMDGPU1: Function Attrs: convergent noinline norecurse nounwind
1312 ; AMDGPU1-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_pure_l77
1313 ; AMDGPU1-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
1314 ; AMDGPU1-NEXT: entry:
1315 ; AMDGPU1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1316 ; AMDGPU1-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
1317 ; AMDGPU1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_pure_l77_kernel_environment, ptr [[DYN]])
1318 ; AMDGPU1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
1319 ; AMDGPU1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
1320 ; AMDGPU1: user_code.entry:
1321 ; AMDGPU1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
1322 ; AMDGPU1-NEXT: call void @__omp_outlined__12(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
1323 ; AMDGPU1-NEXT: call void @__kmpc_target_deinit()
1324 ; AMDGPU1-NEXT: ret void
1325 ; AMDGPU1: worker.exit:
1326 ; AMDGPU1-NEXT: ret void
1329 ; AMDGPU1: Function Attrs: convergent noinline norecurse nounwind
1330 ; AMDGPU1-LABEL: define {{[^@]+}}@__omp_outlined__12
1331 ; AMDGPU1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1332 ; AMDGPU1-NEXT: entry:
1333 ; AMDGPU1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1334 ; AMDGPU1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
1335 ; AMDGPU1-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
1336 ; AMDGPU1-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
1337 ; AMDGPU1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__13, ptr @__omp_outlined__13_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
1338 ; AMDGPU1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__14, ptr @__omp_outlined__14_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
1339 ; AMDGPU1-NEXT: ret void
1342 ; AMDGPU1: Function Attrs: convergent noinline norecurse nounwind
1343 ; AMDGPU1-LABEL: define {{[^@]+}}@__omp_outlined__13
1344 ; AMDGPU1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1345 ; AMDGPU1-NEXT: entry:
1346 ; AMDGPU1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1347 ; AMDGPU1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1348 ; AMDGPU1-NEXT: call void @p0() #[[ATTR11]]
1349 ; AMDGPU1-NEXT: ret void
1352 ; AMDGPU1: Function Attrs: convergent noinline norecurse nounwind
1353 ; AMDGPU1-LABEL: define {{[^@]+}}@__omp_outlined__13_wrapper
1354 ; AMDGPU1-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
1355 ; AMDGPU1-NEXT: entry:
1356 ; AMDGPU1-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
1357 ; AMDGPU1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
1358 ; AMDGPU1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1359 ; AMDGPU1-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
1360 ; AMDGPU1-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
1361 ; AMDGPU1-NEXT: call void @__omp_outlined__13(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
1362 ; AMDGPU1-NEXT: ret void
1365 ; AMDGPU1: Function Attrs: convergent noinline norecurse nounwind
1366 ; AMDGPU1-LABEL: define {{[^@]+}}@__omp_outlined__14
1367 ; AMDGPU1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1368 ; AMDGPU1-NEXT: entry:
1369 ; AMDGPU1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1370 ; AMDGPU1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1371 ; AMDGPU1-NEXT: call void @p1() #[[ATTR11]]
1372 ; AMDGPU1-NEXT: ret void
1375 ; AMDGPU1: Function Attrs: convergent noinline norecurse nounwind
1376 ; AMDGPU1-LABEL: define {{[^@]+}}@__omp_outlined__14_wrapper
1377 ; AMDGPU1-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
1378 ; AMDGPU1-NEXT: entry:
1379 ; AMDGPU1-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
1380 ; AMDGPU1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
1381 ; AMDGPU1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1382 ; AMDGPU1-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
1383 ; AMDGPU1-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
1384 ; AMDGPU1-NEXT: call void @__omp_outlined__14(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
1385 ; AMDGPU1-NEXT: ret void
1388 ; AMDGPU1: Function Attrs: convergent noinline norecurse nounwind
1389 ; AMDGPU1-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92
1390 ; AMDGPU1-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
1391 ; AMDGPU1-NEXT: entry:
1392 ; AMDGPU1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1393 ; AMDGPU1-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
1394 ; AMDGPU1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92_kernel_environment, ptr [[DYN]])
1395 ; AMDGPU1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
1396 ; AMDGPU1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
1397 ; AMDGPU1: user_code.entry:
1398 ; AMDGPU1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
1399 ; AMDGPU1-NEXT: call void @__omp_outlined__15(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
1400 ; AMDGPU1-NEXT: call void @__kmpc_target_deinit()
1401 ; AMDGPU1-NEXT: ret void
1402 ; AMDGPU1: worker.exit:
1403 ; AMDGPU1-NEXT: ret void
1406 ; AMDGPU1: Function Attrs: convergent noinline norecurse nounwind
1407 ; AMDGPU1-LABEL: define {{[^@]+}}@__omp_outlined__15
1408 ; AMDGPU1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1409 ; AMDGPU1-NEXT: entry:
1410 ; AMDGPU1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1411 ; AMDGPU1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1412 ; AMDGPU1-NEXT: [[CALL:%.*]] = call i32 @omp_get_thread_num() #[[ATTR9]]
1413 ; AMDGPU1-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after.internalized(i32 [[CALL]]) #[[ATTR9]]
1414 ; AMDGPU1-NEXT: ret void
1417 ; AMDGPU1: Function Attrs: noinline nounwind
1418 ; AMDGPU1-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after.internalized
1419 ; AMDGPU1-SAME: (i32 [[A:%.*]]) #[[ATTR6]] {
1420 ; AMDGPU1-NEXT: entry:
1421 ; AMDGPU1-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
1422 ; AMDGPU1-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
1423 ; AMDGPU1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
1424 ; AMDGPU1-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP0]], 0
1425 ; AMDGPU1-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
1427 ; AMDGPU1-NEXT: br label [[RETURN:%.*]]
1429 ; AMDGPU1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4
1430 ; AMDGPU1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP1]], 1
1431 ; AMDGPU1-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after.internalized(i32 [[SUB]]) #[[ATTR9]]
1432 ; AMDGPU1-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after_after.internalized() #[[ATTR9]]
1433 ; AMDGPU1-NEXT: br label [[RETURN]]
1435 ; AMDGPU1-NEXT: ret void
1438 ; AMDGPU1: Function Attrs: convergent noinline nounwind
1439 ; AMDGPU1-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after
1440 ; AMDGPU1-SAME: (i32 [[A:%.*]]) #[[ATTR1]] {
1441 ; AMDGPU1-NEXT: entry:
1442 ; AMDGPU1-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
1443 ; AMDGPU1-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
1444 ; AMDGPU1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
1445 ; AMDGPU1-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP0]], 0
1446 ; AMDGPU1-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
1448 ; AMDGPU1-NEXT: br label [[RETURN:%.*]]
1450 ; AMDGPU1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4
1451 ; AMDGPU1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP1]], 1
1452 ; AMDGPU1-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after(i32 [[SUB]]) #[[ATTR11]]
1453 ; AMDGPU1-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after_after() #[[ATTR11]]
1454 ; AMDGPU1-NEXT: br label [[RETURN]]
1456 ; AMDGPU1-NEXT: ret void
1459 ; AMDGPU1: Function Attrs: convergent noinline norecurse nounwind
1460 ; AMDGPU1-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112
1461 ; AMDGPU1-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
1462 ; AMDGPU1-NEXT: entry:
1463 ; AMDGPU1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1464 ; AMDGPU1-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
1465 ; AMDGPU1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112_kernel_environment, ptr [[DYN]])
1466 ; AMDGPU1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
1467 ; AMDGPU1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
1468 ; AMDGPU1: user_code.entry:
1469 ; AMDGPU1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
1470 ; AMDGPU1-NEXT: call void @__omp_outlined__16(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
1471 ; AMDGPU1-NEXT: call void @__kmpc_target_deinit()
1472 ; AMDGPU1-NEXT: ret void
1473 ; AMDGPU1: worker.exit:
1474 ; AMDGPU1-NEXT: ret void
1477 ; AMDGPU1: Function Attrs: convergent noinline norecurse nounwind
1478 ; AMDGPU1-LABEL: define {{[^@]+}}@__omp_outlined__16
1479 ; AMDGPU1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1480 ; AMDGPU1-NEXT: entry:
1481 ; AMDGPU1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1482 ; AMDGPU1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1483 ; AMDGPU1-NEXT: call void @weak_callee_empty() #[[ATTR9]]
1484 ; AMDGPU1-NEXT: ret void
1487 ; AMDGPU1: Function Attrs: convergent noinline nounwind
1488 ; AMDGPU1-LABEL: define {{[^@]+}}@weak_callee_empty
1489 ; AMDGPU1-SAME: () #[[ATTR1]] {
1490 ; AMDGPU1-NEXT: entry:
1491 ; AMDGPU1-NEXT: ret void
1494 ; AMDGPU1: Function Attrs: convergent noinline norecurse nounwind
1495 ; AMDGPU1-LABEL: define {{[^@]+}}@__omp_outlined__17
1496 ; AMDGPU1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1497 ; AMDGPU1-NEXT: entry:
1498 ; AMDGPU1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1499 ; AMDGPU1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1500 ; AMDGPU1-NEXT: call void @p0() #[[ATTR11]]
1501 ; AMDGPU1-NEXT: ret void
1504 ; AMDGPU1: Function Attrs: convergent noinline norecurse nounwind
1505 ; AMDGPU1-LABEL: define {{[^@]+}}@__omp_outlined__17_wrapper
1506 ; AMDGPU1-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
1507 ; AMDGPU1-NEXT: entry:
1508 ; AMDGPU1-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
1509 ; AMDGPU1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
1510 ; AMDGPU1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1511 ; AMDGPU1-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
1512 ; AMDGPU1-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
1513 ; AMDGPU1-NEXT: call void @__omp_outlined__17(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
1514 ; AMDGPU1-NEXT: ret void
1517 ; AMDGPU1: Function Attrs: convergent noinline norecurse nounwind
1518 ; AMDGPU1-LABEL: define {{[^@]+}}@__omp_outlined__18
1519 ; AMDGPU1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1520 ; AMDGPU1-NEXT: entry:
1521 ; AMDGPU1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1522 ; AMDGPU1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1523 ; AMDGPU1-NEXT: call void @p0() #[[ATTR11]]
1524 ; AMDGPU1-NEXT: ret void
1527 ; AMDGPU1: Function Attrs: convergent noinline norecurse nounwind
1528 ; AMDGPU1-LABEL: define {{[^@]+}}@__omp_outlined__18_wrapper
1529 ; AMDGPU1-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
1530 ; AMDGPU1-NEXT: entry:
1531 ; AMDGPU1-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
1532 ; AMDGPU1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
1533 ; AMDGPU1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1534 ; AMDGPU1-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
1535 ; AMDGPU1-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
1536 ; AMDGPU1-NEXT: call void @__omp_outlined__18(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
1537 ; AMDGPU1-NEXT: ret void
1540 ; AMDGPU1: Function Attrs: noinline nounwind
1541 ; AMDGPU1-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after_after.internalized
1542 ; AMDGPU1-SAME: () #[[ATTR6]] {
1543 ; AMDGPU1-NEXT: entry:
1544 ; AMDGPU1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
1545 ; AMDGPU1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]]) #[[ATTR3]]
1546 ; AMDGPU1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__19, ptr @__omp_outlined__19_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
1547 ; AMDGPU1-NEXT: ret void
1550 ; AMDGPU1: Function Attrs: convergent noinline nounwind
1551 ; AMDGPU1-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after_after
1552 ; AMDGPU1-SAME: () #[[ATTR1]] {
1553 ; AMDGPU1-NEXT: entry:
1554 ; AMDGPU1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
1555 ; AMDGPU1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]])
1556 ; AMDGPU1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__19, ptr @__omp_outlined__19_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
1557 ; AMDGPU1-NEXT: ret void
1560 ; AMDGPU1: Function Attrs: convergent noinline norecurse nounwind
1561 ; AMDGPU1-LABEL: define {{[^@]+}}@__omp_outlined__19
1562 ; AMDGPU1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1563 ; AMDGPU1-NEXT: entry:
1564 ; AMDGPU1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1565 ; AMDGPU1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1566 ; AMDGPU1-NEXT: call void @p0() #[[ATTR11]]
1567 ; AMDGPU1-NEXT: ret void
1570 ; AMDGPU1: Function Attrs: convergent noinline norecurse nounwind
1571 ; AMDGPU1-LABEL: define {{[^@]+}}@__omp_outlined__19_wrapper
1572 ; AMDGPU1-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
1573 ; AMDGPU1-NEXT: entry:
1574 ; AMDGPU1-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
1575 ; AMDGPU1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
1576 ; AMDGPU1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1577 ; AMDGPU1-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
1578 ; AMDGPU1-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
1579 ; AMDGPU1-NEXT: call void @__omp_outlined__19(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
1580 ; AMDGPU1-NEXT: ret void
1583 ; NVPTX1: Function Attrs: convergent noinline norecurse nounwind
1584 ; NVPTX1-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_no_state_machine_needed_l14
1585 ; NVPTX1-SAME: (ptr [[DYN:%.*]]) #[[ATTR0:[0-9]+]] {
1586 ; NVPTX1-NEXT: entry:
1587 ; NVPTX1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1588 ; NVPTX1-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
1589 ; NVPTX1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_no_state_machine_needed_l14_kernel_environment, ptr [[DYN]])
1590 ; NVPTX1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
1591 ; NVPTX1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
1592 ; NVPTX1: user_code.entry:
1593 ; NVPTX1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3:[0-9]+]]
1594 ; NVPTX1-NEXT: call void @__omp_outlined__(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
1595 ; NVPTX1-NEXT: call void @__kmpc_target_deinit()
1596 ; NVPTX1-NEXT: ret void
1597 ; NVPTX1: worker.exit:
1598 ; NVPTX1-NEXT: ret void
1601 ; NVPTX1: Function Attrs: convergent noinline norecurse nounwind
1602 ; NVPTX1-LABEL: define {{[^@]+}}@__omp_outlined__
1603 ; NVPTX1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1604 ; NVPTX1-NEXT: entry:
1605 ; NVPTX1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1606 ; NVPTX1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1607 ; NVPTX1-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR9:[0-9]+]]
1608 ; NVPTX1-NEXT: call void @unknown_no_openmp() #[[ATTR10:[0-9]+]]
1609 ; NVPTX1-NEXT: ret void
1612 ; NVPTX1: Function Attrs: convergent noinline nounwind
1613 ; NVPTX1-LABEL: define {{[^@]+}}@no_parallel_region_in_here.internalized
1614 ; NVPTX1-SAME: () #[[ATTR1:[0-9]+]] {
1615 ; NVPTX1-NEXT: entry:
1616 ; NVPTX1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]]) #[[ATTR3]]
1617 ; NVPTX1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_single(ptr @[[GLOB2]], i32 [[TMP0]]) #[[ATTR3]]
1618 ; NVPTX1-NEXT: [[TMP2:%.*]] = icmp ne i32 [[TMP1]], 0
1619 ; NVPTX1-NEXT: br i1 [[TMP2]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
1620 ; NVPTX1: omp_if.then:
1621 ; NVPTX1-NEXT: store i32 0, ptr @G, align 4
1622 ; NVPTX1-NEXT: call void @__kmpc_end_single(ptr @[[GLOB2]], i32 [[TMP0]]) #[[ATTR3]]
1623 ; NVPTX1-NEXT: br label [[OMP_IF_END]]
1624 ; NVPTX1: omp_if.end:
1625 ; NVPTX1-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3]], i32 [[TMP0]]) #[[ATTR3]]
1626 ; NVPTX1-NEXT: ret void
1629 ; NVPTX1: Function Attrs: convergent noinline nounwind
1630 ; NVPTX1-LABEL: define {{[^@]+}}@no_parallel_region_in_here
1631 ; NVPTX1-SAME: () #[[ATTR1]] {
1632 ; NVPTX1-NEXT: entry:
1633 ; NVPTX1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]])
1634 ; NVPTX1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_single(ptr @[[GLOB2]], i32 [[TMP0]])
1635 ; NVPTX1-NEXT: [[TMP2:%.*]] = icmp ne i32 [[TMP1]], 0
1636 ; NVPTX1-NEXT: br i1 [[TMP2]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
1637 ; NVPTX1: omp_if.then:
1638 ; NVPTX1-NEXT: store i32 0, ptr @G, align 4
1639 ; NVPTX1-NEXT: call void @__kmpc_end_single(ptr @[[GLOB2]], i32 [[TMP0]])
1640 ; NVPTX1-NEXT: br label [[OMP_IF_END]]
1641 ; NVPTX1: omp_if.end:
1642 ; NVPTX1-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3]], i32 [[TMP0]])
1643 ; NVPTX1-NEXT: ret void
1646 ; NVPTX1: Function Attrs: convergent noinline norecurse nounwind
1647 ; NVPTX1-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_l22
1648 ; NVPTX1-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
1649 ; NVPTX1-NEXT: entry:
1650 ; NVPTX1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1651 ; NVPTX1-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
1652 ; NVPTX1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_l22_kernel_environment, ptr [[DYN]])
1653 ; NVPTX1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
1654 ; NVPTX1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
1655 ; NVPTX1: user_code.entry:
1656 ; NVPTX1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
1657 ; NVPTX1-NEXT: call void @__omp_outlined__1(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
1658 ; NVPTX1-NEXT: call void @__kmpc_target_deinit()
1659 ; NVPTX1-NEXT: ret void
1660 ; NVPTX1: worker.exit:
1661 ; NVPTX1-NEXT: ret void
1664 ; NVPTX1: Function Attrs: convergent noinline norecurse nounwind
1665 ; NVPTX1-LABEL: define {{[^@]+}}@__omp_outlined__1
1666 ; NVPTX1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1667 ; NVPTX1-NEXT: entry:
1668 ; NVPTX1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1669 ; NVPTX1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
1670 ; NVPTX1-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
1671 ; NVPTX1-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
1672 ; NVPTX1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__2, ptr @__omp_outlined__2_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
1673 ; NVPTX1-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR9]]
1674 ; NVPTX1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__3, ptr @__omp_outlined__3_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
1675 ; NVPTX1-NEXT: ret void
1678 ; NVPTX1: Function Attrs: convergent noinline norecurse nounwind
1679 ; NVPTX1-LABEL: define {{[^@]+}}@__omp_outlined__2
1680 ; NVPTX1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1681 ; NVPTX1-NEXT: entry:
1682 ; NVPTX1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1683 ; NVPTX1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1684 ; NVPTX1-NEXT: call void @p0() #[[ATTR11:[0-9]+]]
1685 ; NVPTX1-NEXT: ret void
1688 ; NVPTX1: Function Attrs: convergent noinline norecurse nounwind
1689 ; NVPTX1-LABEL: define {{[^@]+}}@__omp_outlined__2_wrapper
1690 ; NVPTX1-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
1691 ; NVPTX1-NEXT: entry:
1692 ; NVPTX1-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
1693 ; NVPTX1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
1694 ; NVPTX1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1695 ; NVPTX1-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
1696 ; NVPTX1-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
1697 ; NVPTX1-NEXT: call void @__omp_outlined__2(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
1698 ; NVPTX1-NEXT: ret void
1701 ; NVPTX1: Function Attrs: convergent noinline norecurse nounwind
1702 ; NVPTX1-LABEL: define {{[^@]+}}@__omp_outlined__3
1703 ; NVPTX1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1704 ; NVPTX1-NEXT: entry:
1705 ; NVPTX1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1706 ; NVPTX1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1707 ; NVPTX1-NEXT: call void @p1() #[[ATTR11]]
1708 ; NVPTX1-NEXT: ret void
1711 ; NVPTX1: Function Attrs: convergent noinline norecurse nounwind
1712 ; NVPTX1-LABEL: define {{[^@]+}}@__omp_outlined__3_wrapper
1713 ; NVPTX1-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
1714 ; NVPTX1-NEXT: entry:
1715 ; NVPTX1-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
1716 ; NVPTX1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
1717 ; NVPTX1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1718 ; NVPTX1-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
1719 ; NVPTX1-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
1720 ; NVPTX1-NEXT: call void @__omp_outlined__3(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
1721 ; NVPTX1-NEXT: ret void
1724 ; NVPTX1: Function Attrs: convergent noinline norecurse nounwind
1725 ; NVPTX1-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39
1726 ; NVPTX1-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
1727 ; NVPTX1-NEXT: entry:
1728 ; NVPTX1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1729 ; NVPTX1-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
1730 ; NVPTX1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39_kernel_environment, ptr [[DYN]])
1731 ; NVPTX1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
1732 ; NVPTX1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
1733 ; NVPTX1: user_code.entry:
1734 ; NVPTX1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
1735 ; NVPTX1-NEXT: call void @__omp_outlined__4(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
1736 ; NVPTX1-NEXT: call void @__kmpc_target_deinit()
1737 ; NVPTX1-NEXT: ret void
1738 ; NVPTX1: worker.exit:
1739 ; NVPTX1-NEXT: ret void
1742 ; NVPTX1: Function Attrs: convergent noinline norecurse nounwind
1743 ; NVPTX1-LABEL: define {{[^@]+}}@__omp_outlined__4
1744 ; NVPTX1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1745 ; NVPTX1-NEXT: entry:
1746 ; NVPTX1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1747 ; NVPTX1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
1748 ; NVPTX1-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
1749 ; NVPTX1-NEXT: call void @simple_state_machine_interprocedural_before.internalized() #[[ATTR9]]
1750 ; NVPTX1-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR9]]
1751 ; NVPTX1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__5, ptr @__omp_outlined__5_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
1752 ; NVPTX1-NEXT: call void @simple_state_machine_interprocedural_after.internalized() #[[ATTR9]]
1753 ; NVPTX1-NEXT: ret void
1756 ; NVPTX1: Function Attrs: noinline nounwind
1757 ; NVPTX1-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_before.internalized
1758 ; NVPTX1-SAME: () #[[ATTR6:[0-9]+]] {
1759 ; NVPTX1-NEXT: entry:
1760 ; NVPTX1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
1761 ; NVPTX1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]]) #[[ATTR3]]
1762 ; NVPTX1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__17, ptr @__omp_outlined__17_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
1763 ; NVPTX1-NEXT: ret void
1766 ; NVPTX1: Function Attrs: convergent noinline nounwind
1767 ; NVPTX1-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_before
1768 ; NVPTX1-SAME: () #[[ATTR1]] {
1769 ; NVPTX1-NEXT: entry:
1770 ; NVPTX1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
1771 ; NVPTX1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]])
1772 ; NVPTX1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__17, ptr @__omp_outlined__17_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
1773 ; NVPTX1-NEXT: ret void
1776 ; NVPTX1: Function Attrs: convergent noinline norecurse nounwind
1777 ; NVPTX1-LABEL: define {{[^@]+}}@__omp_outlined__5
1778 ; NVPTX1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1779 ; NVPTX1-NEXT: entry:
1780 ; NVPTX1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1781 ; NVPTX1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1782 ; NVPTX1-NEXT: call void @p1() #[[ATTR11]]
1783 ; NVPTX1-NEXT: ret void
1786 ; NVPTX1: Function Attrs: convergent noinline norecurse nounwind
1787 ; NVPTX1-LABEL: define {{[^@]+}}@__omp_outlined__5_wrapper
1788 ; NVPTX1-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
1789 ; NVPTX1-NEXT: entry:
1790 ; NVPTX1-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
1791 ; NVPTX1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
1792 ; NVPTX1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1793 ; NVPTX1-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
1794 ; NVPTX1-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
1795 ; NVPTX1-NEXT: call void @__omp_outlined__5(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
1796 ; NVPTX1-NEXT: ret void
1799 ; NVPTX1: Function Attrs: noinline nounwind
1800 ; NVPTX1-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_after.internalized
1801 ; NVPTX1-SAME: () #[[ATTR6]] {
1802 ; NVPTX1-NEXT: entry:
1803 ; NVPTX1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
1804 ; NVPTX1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]]) #[[ATTR3]]
1805 ; NVPTX1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__18, ptr @__omp_outlined__18_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
1806 ; NVPTX1-NEXT: ret void
1809 ; NVPTX1: Function Attrs: convergent noinline nounwind
1810 ; NVPTX1-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_after
1811 ; NVPTX1-SAME: () #[[ATTR1]] {
1812 ; NVPTX1-NEXT: entry:
1813 ; NVPTX1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
1814 ; NVPTX1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]])
1815 ; NVPTX1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__18, ptr @__omp_outlined__18_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
1816 ; NVPTX1-NEXT: ret void
1819 ; NVPTX1: Function Attrs: convergent noinline norecurse nounwind
1820 ; NVPTX1-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55
1821 ; NVPTX1-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
1822 ; NVPTX1-NEXT: entry:
1823 ; NVPTX1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1824 ; NVPTX1-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
1825 ; NVPTX1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55_kernel_environment, ptr [[DYN]])
1826 ; NVPTX1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
1827 ; NVPTX1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
1828 ; NVPTX1: user_code.entry:
1829 ; NVPTX1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
1830 ; NVPTX1-NEXT: call void @__omp_outlined__6(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
1831 ; NVPTX1-NEXT: call void @__kmpc_target_deinit()
1832 ; NVPTX1-NEXT: ret void
1833 ; NVPTX1: worker.exit:
1834 ; NVPTX1-NEXT: ret void
1837 ; NVPTX1: Function Attrs: convergent noinline norecurse nounwind
1838 ; NVPTX1-LABEL: define {{[^@]+}}@__omp_outlined__6
1839 ; NVPTX1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1840 ; NVPTX1-NEXT: entry:
1841 ; NVPTX1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1842 ; NVPTX1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
1843 ; NVPTX1-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
1844 ; NVPTX1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__7, ptr @__omp_outlined__7_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
1845 ; NVPTX1-NEXT: [[CALL:%.*]] = call i32 @unknown() #[[ATTR11]]
1846 ; NVPTX1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__8, ptr @__omp_outlined__8_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
1847 ; NVPTX1-NEXT: ret void
1850 ; NVPTX1: Function Attrs: convergent noinline norecurse nounwind
1851 ; NVPTX1-LABEL: define {{[^@]+}}@__omp_outlined__7
1852 ; NVPTX1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1853 ; NVPTX1-NEXT: entry:
1854 ; NVPTX1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1855 ; NVPTX1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1856 ; NVPTX1-NEXT: call void @p0() #[[ATTR11]]
1857 ; NVPTX1-NEXT: ret void
1860 ; NVPTX1: Function Attrs: convergent noinline norecurse nounwind
1861 ; NVPTX1-LABEL: define {{[^@]+}}@__omp_outlined__7_wrapper
1862 ; NVPTX1-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
1863 ; NVPTX1-NEXT: entry:
1864 ; NVPTX1-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
1865 ; NVPTX1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
1866 ; NVPTX1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1867 ; NVPTX1-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
1868 ; NVPTX1-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
1869 ; NVPTX1-NEXT: call void @__omp_outlined__7(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
1870 ; NVPTX1-NEXT: ret void
1873 ; NVPTX1: Function Attrs: convergent noinline norecurse nounwind
1874 ; NVPTX1-LABEL: define {{[^@]+}}@__omp_outlined__8
1875 ; NVPTX1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1876 ; NVPTX1-NEXT: entry:
1877 ; NVPTX1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1878 ; NVPTX1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1879 ; NVPTX1-NEXT: call void @p1() #[[ATTR11]]
1880 ; NVPTX1-NEXT: ret void
1883 ; NVPTX1: Function Attrs: convergent noinline norecurse nounwind
1884 ; NVPTX1-LABEL: define {{[^@]+}}@__omp_outlined__8_wrapper
1885 ; NVPTX1-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
1886 ; NVPTX1-NEXT: entry:
1887 ; NVPTX1-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
1888 ; NVPTX1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
1889 ; NVPTX1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1890 ; NVPTX1-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
1891 ; NVPTX1-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
1892 ; NVPTX1-NEXT: call void @__omp_outlined__8(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
1893 ; NVPTX1-NEXT: ret void
1896 ; NVPTX1: Function Attrs: convergent noinline norecurse nounwind
1897 ; NVPTX1-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66
1898 ; NVPTX1-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
1899 ; NVPTX1-NEXT: entry:
1900 ; NVPTX1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1901 ; NVPTX1-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
1902 ; NVPTX1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66_kernel_environment, ptr [[DYN]])
1903 ; NVPTX1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
1904 ; NVPTX1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
1905 ; NVPTX1: user_code.entry:
1906 ; NVPTX1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
1907 ; NVPTX1-NEXT: call void @__omp_outlined__9(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
1908 ; NVPTX1-NEXT: call void @__kmpc_target_deinit()
1909 ; NVPTX1-NEXT: ret void
1910 ; NVPTX1: worker.exit:
1911 ; NVPTX1-NEXT: ret void
1914 ; NVPTX1: Function Attrs: convergent noinline norecurse nounwind
1915 ; NVPTX1-LABEL: define {{[^@]+}}@__omp_outlined__9
1916 ; NVPTX1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1917 ; NVPTX1-NEXT: entry:
1918 ; NVPTX1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1919 ; NVPTX1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
1920 ; NVPTX1-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
1921 ; NVPTX1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__10, ptr @__omp_outlined__10_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
1922 ; NVPTX1-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
1923 ; NVPTX1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__11, ptr @__omp_outlined__11_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
1924 ; NVPTX1-NEXT: ret void
1927 ; NVPTX1: Function Attrs: convergent noinline norecurse nounwind
1928 ; NVPTX1-LABEL: define {{[^@]+}}@__omp_outlined__10
1929 ; NVPTX1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1930 ; NVPTX1-NEXT: entry:
1931 ; NVPTX1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1932 ; NVPTX1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1933 ; NVPTX1-NEXT: call void @p0() #[[ATTR11]]
1934 ; NVPTX1-NEXT: ret void
1937 ; NVPTX1: Function Attrs: convergent noinline norecurse nounwind
1938 ; NVPTX1-LABEL: define {{[^@]+}}@__omp_outlined__10_wrapper
1939 ; NVPTX1-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
1940 ; NVPTX1-NEXT: entry:
1941 ; NVPTX1-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
1942 ; NVPTX1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
1943 ; NVPTX1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1944 ; NVPTX1-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
1945 ; NVPTX1-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
1946 ; NVPTX1-NEXT: call void @__omp_outlined__10(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
1947 ; NVPTX1-NEXT: ret void
1950 ; NVPTX1: Function Attrs: convergent noinline norecurse nounwind
1951 ; NVPTX1-LABEL: define {{[^@]+}}@__omp_outlined__11
1952 ; NVPTX1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1953 ; NVPTX1-NEXT: entry:
1954 ; NVPTX1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1955 ; NVPTX1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1956 ; NVPTX1-NEXT: call void @p1() #[[ATTR11]]
1957 ; NVPTX1-NEXT: ret void
1960 ; NVPTX1: Function Attrs: convergent noinline norecurse nounwind
1961 ; NVPTX1-LABEL: define {{[^@]+}}@__omp_outlined__11_wrapper
1962 ; NVPTX1-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
1963 ; NVPTX1-NEXT: entry:
1964 ; NVPTX1-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
1965 ; NVPTX1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
1966 ; NVPTX1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1967 ; NVPTX1-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
1968 ; NVPTX1-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
1969 ; NVPTX1-NEXT: call void @__omp_outlined__11(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
1970 ; NVPTX1-NEXT: ret void
1973 ; NVPTX1: Function Attrs: convergent noinline norecurse nounwind
1974 ; NVPTX1-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_pure_l77
1975 ; NVPTX1-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
1976 ; NVPTX1-NEXT: entry:
1977 ; NVPTX1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1978 ; NVPTX1-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
1979 ; NVPTX1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_pure_l77_kernel_environment, ptr [[DYN]])
1980 ; NVPTX1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
1981 ; NVPTX1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
1982 ; NVPTX1: user_code.entry:
1983 ; NVPTX1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
1984 ; NVPTX1-NEXT: call void @__omp_outlined__12(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
1985 ; NVPTX1-NEXT: call void @__kmpc_target_deinit()
1986 ; NVPTX1-NEXT: ret void
1987 ; NVPTX1: worker.exit:
1988 ; NVPTX1-NEXT: ret void
1991 ; NVPTX1: Function Attrs: convergent noinline norecurse nounwind
1992 ; NVPTX1-LABEL: define {{[^@]+}}@__omp_outlined__12
1993 ; NVPTX1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1994 ; NVPTX1-NEXT: entry:
1995 ; NVPTX1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1996 ; NVPTX1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
1997 ; NVPTX1-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
1998 ; NVPTX1-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
1999 ; NVPTX1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__13, ptr @__omp_outlined__13_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
2000 ; NVPTX1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__14, ptr @__omp_outlined__14_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
2001 ; NVPTX1-NEXT: ret void
2004 ; NVPTX1: Function Attrs: convergent noinline norecurse nounwind
2005 ; NVPTX1-LABEL: define {{[^@]+}}@__omp_outlined__13
2006 ; NVPTX1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2007 ; NVPTX1-NEXT: entry:
2008 ; NVPTX1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2009 ; NVPTX1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2010 ; NVPTX1-NEXT: call void @p0() #[[ATTR11]]
2011 ; NVPTX1-NEXT: ret void
2014 ; NVPTX1: Function Attrs: convergent noinline norecurse nounwind
2015 ; NVPTX1-LABEL: define {{[^@]+}}@__omp_outlined__13_wrapper
2016 ; NVPTX1-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
2017 ; NVPTX1-NEXT: entry:
2018 ; NVPTX1-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
2019 ; NVPTX1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
2020 ; NVPTX1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2021 ; NVPTX1-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
2022 ; NVPTX1-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
2023 ; NVPTX1-NEXT: call void @__omp_outlined__13(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
2024 ; NVPTX1-NEXT: ret void
2027 ; NVPTX1: Function Attrs: convergent noinline norecurse nounwind
2028 ; NVPTX1-LABEL: define {{[^@]+}}@__omp_outlined__14
2029 ; NVPTX1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2030 ; NVPTX1-NEXT: entry:
2031 ; NVPTX1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2032 ; NVPTX1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2033 ; NVPTX1-NEXT: call void @p1() #[[ATTR11]]
2034 ; NVPTX1-NEXT: ret void
2037 ; NVPTX1: Function Attrs: convergent noinline norecurse nounwind
2038 ; NVPTX1-LABEL: define {{[^@]+}}@__omp_outlined__14_wrapper
2039 ; NVPTX1-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
2040 ; NVPTX1-NEXT: entry:
2041 ; NVPTX1-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
2042 ; NVPTX1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
2043 ; NVPTX1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2044 ; NVPTX1-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
2045 ; NVPTX1-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
2046 ; NVPTX1-NEXT: call void @__omp_outlined__14(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
2047 ; NVPTX1-NEXT: ret void
2050 ; NVPTX1: Function Attrs: convergent noinline norecurse nounwind
2051 ; NVPTX1-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92
2052 ; NVPTX1-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
2053 ; NVPTX1-NEXT: entry:
2054 ; NVPTX1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2055 ; NVPTX1-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
2056 ; NVPTX1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92_kernel_environment, ptr [[DYN]])
2057 ; NVPTX1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
2058 ; NVPTX1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
2059 ; NVPTX1: user_code.entry:
2060 ; NVPTX1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
2061 ; NVPTX1-NEXT: call void @__omp_outlined__15(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
2062 ; NVPTX1-NEXT: call void @__kmpc_target_deinit()
2063 ; NVPTX1-NEXT: ret void
2064 ; NVPTX1: worker.exit:
2065 ; NVPTX1-NEXT: ret void
2068 ; NVPTX1: Function Attrs: convergent noinline norecurse nounwind
2069 ; NVPTX1-LABEL: define {{[^@]+}}@__omp_outlined__15
2070 ; NVPTX1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2071 ; NVPTX1-NEXT: entry:
2072 ; NVPTX1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2073 ; NVPTX1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2074 ; NVPTX1-NEXT: [[CALL:%.*]] = call i32 @omp_get_thread_num() #[[ATTR9]]
2075 ; NVPTX1-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after.internalized(i32 [[CALL]]) #[[ATTR9]]
2076 ; NVPTX1-NEXT: ret void
2079 ; NVPTX1: Function Attrs: noinline nounwind
2080 ; NVPTX1-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after.internalized
2081 ; NVPTX1-SAME: (i32 [[A:%.*]]) #[[ATTR6]] {
2082 ; NVPTX1-NEXT: entry:
2083 ; NVPTX1-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
2084 ; NVPTX1-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
2085 ; NVPTX1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
2086 ; NVPTX1-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP0]], 0
2087 ; NVPTX1-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
2089 ; NVPTX1-NEXT: br label [[RETURN:%.*]]
2091 ; NVPTX1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4
2092 ; NVPTX1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP1]], 1
2093 ; NVPTX1-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after.internalized(i32 [[SUB]]) #[[ATTR9]]
2094 ; NVPTX1-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after_after.internalized() #[[ATTR9]]
2095 ; NVPTX1-NEXT: br label [[RETURN]]
2097 ; NVPTX1-NEXT: ret void
2100 ; NVPTX1: Function Attrs: convergent noinline nounwind
2101 ; NVPTX1-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after
2102 ; NVPTX1-SAME: (i32 [[A:%.*]]) #[[ATTR1]] {
2103 ; NVPTX1-NEXT: entry:
2104 ; NVPTX1-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
2105 ; NVPTX1-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
2106 ; NVPTX1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
2107 ; NVPTX1-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP0]], 0
2108 ; NVPTX1-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
2110 ; NVPTX1-NEXT: br label [[RETURN:%.*]]
2112 ; NVPTX1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4
2113 ; NVPTX1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP1]], 1
2114 ; NVPTX1-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after(i32 [[SUB]]) #[[ATTR11]]
2115 ; NVPTX1-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after_after() #[[ATTR11]]
2116 ; NVPTX1-NEXT: br label [[RETURN]]
2118 ; NVPTX1-NEXT: ret void
2121 ; NVPTX1: Function Attrs: convergent noinline norecurse nounwind
2122 ; NVPTX1-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112
2123 ; NVPTX1-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
2124 ; NVPTX1-NEXT: entry:
2125 ; NVPTX1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2126 ; NVPTX1-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
2127 ; NVPTX1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112_kernel_environment, ptr [[DYN]])
2128 ; NVPTX1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
2129 ; NVPTX1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
2130 ; NVPTX1: user_code.entry:
2131 ; NVPTX1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
2132 ; NVPTX1-NEXT: call void @__omp_outlined__16(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
2133 ; NVPTX1-NEXT: call void @__kmpc_target_deinit()
2134 ; NVPTX1-NEXT: ret void
2135 ; NVPTX1: worker.exit:
2136 ; NVPTX1-NEXT: ret void
2139 ; NVPTX1: Function Attrs: convergent noinline norecurse nounwind
2140 ; NVPTX1-LABEL: define {{[^@]+}}@__omp_outlined__16
2141 ; NVPTX1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2142 ; NVPTX1-NEXT: entry:
2143 ; NVPTX1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2144 ; NVPTX1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2145 ; NVPTX1-NEXT: call void @weak_callee_empty() #[[ATTR9]]
2146 ; NVPTX1-NEXT: ret void
2149 ; NVPTX1: Function Attrs: convergent noinline nounwind
2150 ; NVPTX1-LABEL: define {{[^@]+}}@weak_callee_empty
2151 ; NVPTX1-SAME: () #[[ATTR1]] {
2152 ; NVPTX1-NEXT: entry:
2153 ; NVPTX1-NEXT: ret void
2156 ; NVPTX1: Function Attrs: convergent noinline norecurse nounwind
2157 ; NVPTX1-LABEL: define {{[^@]+}}@__omp_outlined__17
2158 ; NVPTX1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2159 ; NVPTX1-NEXT: entry:
2160 ; NVPTX1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2161 ; NVPTX1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2162 ; NVPTX1-NEXT: call void @p0() #[[ATTR11]]
2163 ; NVPTX1-NEXT: ret void
2166 ; NVPTX1: Function Attrs: convergent noinline norecurse nounwind
2167 ; NVPTX1-LABEL: define {{[^@]+}}@__omp_outlined__17_wrapper
2168 ; NVPTX1-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
2169 ; NVPTX1-NEXT: entry:
2170 ; NVPTX1-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
2171 ; NVPTX1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
2172 ; NVPTX1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2173 ; NVPTX1-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
2174 ; NVPTX1-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
2175 ; NVPTX1-NEXT: call void @__omp_outlined__17(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
2176 ; NVPTX1-NEXT: ret void
2179 ; NVPTX1: Function Attrs: convergent noinline norecurse nounwind
2180 ; NVPTX1-LABEL: define {{[^@]+}}@__omp_outlined__18
2181 ; NVPTX1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2182 ; NVPTX1-NEXT: entry:
2183 ; NVPTX1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2184 ; NVPTX1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2185 ; NVPTX1-NEXT: call void @p0() #[[ATTR11]]
2186 ; NVPTX1-NEXT: ret void
2189 ; NVPTX1: Function Attrs: convergent noinline norecurse nounwind
2190 ; NVPTX1-LABEL: define {{[^@]+}}@__omp_outlined__18_wrapper
2191 ; NVPTX1-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
2192 ; NVPTX1-NEXT: entry:
2193 ; NVPTX1-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
2194 ; NVPTX1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
2195 ; NVPTX1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2196 ; NVPTX1-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
2197 ; NVPTX1-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
2198 ; NVPTX1-NEXT: call void @__omp_outlined__18(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
2199 ; NVPTX1-NEXT: ret void
2202 ; NVPTX1: Function Attrs: noinline nounwind
2203 ; NVPTX1-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after_after.internalized
2204 ; NVPTX1-SAME: () #[[ATTR6]] {
2205 ; NVPTX1-NEXT: entry:
2206 ; NVPTX1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
2207 ; NVPTX1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]]) #[[ATTR3]]
2208 ; NVPTX1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__19, ptr @__omp_outlined__19_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
2209 ; NVPTX1-NEXT: ret void
2212 ; NVPTX1: Function Attrs: convergent noinline nounwind
2213 ; NVPTX1-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after_after
2214 ; NVPTX1-SAME: () #[[ATTR1]] {
2215 ; NVPTX1-NEXT: entry:
2216 ; NVPTX1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
2217 ; NVPTX1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]])
2218 ; NVPTX1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__19, ptr @__omp_outlined__19_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
2219 ; NVPTX1-NEXT: ret void
2222 ; NVPTX1: Function Attrs: convergent noinline norecurse nounwind
2223 ; NVPTX1-LABEL: define {{[^@]+}}@__omp_outlined__19
2224 ; NVPTX1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2225 ; NVPTX1-NEXT: entry:
2226 ; NVPTX1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2227 ; NVPTX1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2228 ; NVPTX1-NEXT: call void @p0() #[[ATTR11]]
2229 ; NVPTX1-NEXT: ret void
2232 ; NVPTX1: Function Attrs: convergent noinline norecurse nounwind
2233 ; NVPTX1-LABEL: define {{[^@]+}}@__omp_outlined__19_wrapper
2234 ; NVPTX1-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
2235 ; NVPTX1-NEXT: entry:
2236 ; NVPTX1-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
2237 ; NVPTX1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
2238 ; NVPTX1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2239 ; NVPTX1-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
2240 ; NVPTX1-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
2241 ; NVPTX1-NEXT: call void @__omp_outlined__19(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
2242 ; NVPTX1-NEXT: ret void
2245 ; AMDGPU2: Function Attrs: convergent noinline norecurse nounwind
2246 ; AMDGPU2-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_no_state_machine_needed_l14
2247 ; AMDGPU2-SAME: (ptr [[DYN:%.*]]) #[[ATTR0:[0-9]+]] {
2248 ; AMDGPU2-NEXT: entry:
2249 ; AMDGPU2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2250 ; AMDGPU2-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
2251 ; AMDGPU2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_no_state_machine_needed_l14_kernel_environment, ptr [[DYN]])
2252 ; AMDGPU2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
2253 ; AMDGPU2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
2254 ; AMDGPU2: user_code.entry:
2255 ; AMDGPU2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3:[0-9]+]]
2256 ; AMDGPU2-NEXT: call void @__omp_outlined__(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
2257 ; AMDGPU2-NEXT: call void @__kmpc_target_deinit()
2258 ; AMDGPU2-NEXT: ret void
2259 ; AMDGPU2: worker.exit:
2260 ; AMDGPU2-NEXT: ret void
2263 ; AMDGPU2: Function Attrs: convergent noinline norecurse nounwind
2264 ; AMDGPU2-LABEL: define {{[^@]+}}@__omp_outlined__
2265 ; AMDGPU2-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2266 ; AMDGPU2-NEXT: entry:
2267 ; AMDGPU2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2268 ; AMDGPU2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2269 ; AMDGPU2-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR9:[0-9]+]]
2270 ; AMDGPU2-NEXT: call void @unknown_no_openmp() #[[ATTR10:[0-9]+]]
2271 ; AMDGPU2-NEXT: ret void
2274 ; AMDGPU2: Function Attrs: convergent noinline nounwind
2275 ; AMDGPU2-LABEL: define {{[^@]+}}@no_parallel_region_in_here.internalized
2276 ; AMDGPU2-SAME: () #[[ATTR1:[0-9]+]] {
2277 ; AMDGPU2-NEXT: entry:
2278 ; AMDGPU2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]]) #[[ATTR3]]
2279 ; AMDGPU2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_single(ptr @[[GLOB2]], i32 [[TMP0]]) #[[ATTR3]]
2280 ; AMDGPU2-NEXT: [[TMP2:%.*]] = icmp ne i32 [[TMP1]], 0
2281 ; AMDGPU2-NEXT: br i1 [[TMP2]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
2282 ; AMDGPU2: omp_if.then:
2283 ; AMDGPU2-NEXT: store i32 0, ptr @G, align 4
2284 ; AMDGPU2-NEXT: call void @__kmpc_end_single(ptr @[[GLOB2]], i32 [[TMP0]]) #[[ATTR3]]
2285 ; AMDGPU2-NEXT: br label [[OMP_IF_END]]
2286 ; AMDGPU2: omp_if.end:
2287 ; AMDGPU2-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3]], i32 [[TMP0]]) #[[ATTR3]]
2288 ; AMDGPU2-NEXT: ret void
2291 ; AMDGPU2: Function Attrs: convergent noinline nounwind
2292 ; AMDGPU2-LABEL: define {{[^@]+}}@no_parallel_region_in_here
2293 ; AMDGPU2-SAME: () #[[ATTR1]] {
2294 ; AMDGPU2-NEXT: entry:
2295 ; AMDGPU2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]])
2296 ; AMDGPU2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_single(ptr @[[GLOB2]], i32 [[TMP0]])
2297 ; AMDGPU2-NEXT: [[TMP2:%.*]] = icmp ne i32 [[TMP1]], 0
2298 ; AMDGPU2-NEXT: br i1 [[TMP2]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
2299 ; AMDGPU2: omp_if.then:
2300 ; AMDGPU2-NEXT: store i32 0, ptr @G, align 4
2301 ; AMDGPU2-NEXT: call void @__kmpc_end_single(ptr @[[GLOB2]], i32 [[TMP0]])
2302 ; AMDGPU2-NEXT: br label [[OMP_IF_END]]
2303 ; AMDGPU2: omp_if.end:
2304 ; AMDGPU2-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3]], i32 [[TMP0]])
2305 ; AMDGPU2-NEXT: ret void
2308 ; AMDGPU2: Function Attrs: convergent noinline norecurse nounwind
2309 ; AMDGPU2-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_l22
2310 ; AMDGPU2-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
2311 ; AMDGPU2-NEXT: entry:
2312 ; AMDGPU2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2313 ; AMDGPU2-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
2314 ; AMDGPU2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_l22_kernel_environment, ptr [[DYN]])
2315 ; AMDGPU2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
2316 ; AMDGPU2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
2317 ; AMDGPU2: user_code.entry:
2318 ; AMDGPU2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
2319 ; AMDGPU2-NEXT: call void @__omp_outlined__1(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
2320 ; AMDGPU2-NEXT: call void @__kmpc_target_deinit()
2321 ; AMDGPU2-NEXT: ret void
2322 ; AMDGPU2: worker.exit:
2323 ; AMDGPU2-NEXT: ret void
2326 ; AMDGPU2: Function Attrs: convergent noinline norecurse nounwind
2327 ; AMDGPU2-LABEL: define {{[^@]+}}@__omp_outlined__1
2328 ; AMDGPU2-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2329 ; AMDGPU2-NEXT: entry:
2330 ; AMDGPU2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2331 ; AMDGPU2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
2332 ; AMDGPU2-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
2333 ; AMDGPU2-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
2334 ; AMDGPU2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__2, ptr @__omp_outlined__2_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
2335 ; AMDGPU2-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR9]]
2336 ; AMDGPU2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__3, ptr @__omp_outlined__3_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
2337 ; AMDGPU2-NEXT: ret void
2340 ; AMDGPU2: Function Attrs: convergent noinline norecurse nounwind
2341 ; AMDGPU2-LABEL: define {{[^@]+}}@__omp_outlined__2
2342 ; AMDGPU2-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2343 ; AMDGPU2-NEXT: entry:
2344 ; AMDGPU2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2345 ; AMDGPU2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2346 ; AMDGPU2-NEXT: call void @p0() #[[ATTR11:[0-9]+]]
2347 ; AMDGPU2-NEXT: ret void
2350 ; AMDGPU2: Function Attrs: convergent noinline norecurse nounwind
2351 ; AMDGPU2-LABEL: define {{[^@]+}}@__omp_outlined__2_wrapper
2352 ; AMDGPU2-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
2353 ; AMDGPU2-NEXT: entry:
2354 ; AMDGPU2-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
2355 ; AMDGPU2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
2356 ; AMDGPU2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2357 ; AMDGPU2-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
2358 ; AMDGPU2-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
2359 ; AMDGPU2-NEXT: call void @__omp_outlined__2(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
2360 ; AMDGPU2-NEXT: ret void
2363 ; AMDGPU2: Function Attrs: convergent noinline norecurse nounwind
2364 ; AMDGPU2-LABEL: define {{[^@]+}}@__omp_outlined__3
2365 ; AMDGPU2-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2366 ; AMDGPU2-NEXT: entry:
2367 ; AMDGPU2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2368 ; AMDGPU2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2369 ; AMDGPU2-NEXT: call void @p1() #[[ATTR11]]
2370 ; AMDGPU2-NEXT: ret void
2373 ; AMDGPU2: Function Attrs: convergent noinline norecurse nounwind
2374 ; AMDGPU2-LABEL: define {{[^@]+}}@__omp_outlined__3_wrapper
2375 ; AMDGPU2-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
2376 ; AMDGPU2-NEXT: entry:
2377 ; AMDGPU2-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
2378 ; AMDGPU2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
2379 ; AMDGPU2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2380 ; AMDGPU2-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
2381 ; AMDGPU2-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
2382 ; AMDGPU2-NEXT: call void @__omp_outlined__3(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
2383 ; AMDGPU2-NEXT: ret void
2386 ; AMDGPU2: Function Attrs: convergent noinline norecurse nounwind
2387 ; AMDGPU2-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39
2388 ; AMDGPU2-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
2389 ; AMDGPU2-NEXT: entry:
2390 ; AMDGPU2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2391 ; AMDGPU2-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
2392 ; AMDGPU2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39_kernel_environment, ptr [[DYN]])
2393 ; AMDGPU2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
2394 ; AMDGPU2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
2395 ; AMDGPU2: user_code.entry:
2396 ; AMDGPU2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
2397 ; AMDGPU2-NEXT: call void @__omp_outlined__4(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
2398 ; AMDGPU2-NEXT: call void @__kmpc_target_deinit()
2399 ; AMDGPU2-NEXT: ret void
2400 ; AMDGPU2: worker.exit:
2401 ; AMDGPU2-NEXT: ret void
2404 ; AMDGPU2: Function Attrs: convergent noinline norecurse nounwind
2405 ; AMDGPU2-LABEL: define {{[^@]+}}@__omp_outlined__4
2406 ; AMDGPU2-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2407 ; AMDGPU2-NEXT: entry:
2408 ; AMDGPU2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2409 ; AMDGPU2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
2410 ; AMDGPU2-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
2411 ; AMDGPU2-NEXT: call void @simple_state_machine_interprocedural_before.internalized() #[[ATTR9]]
2412 ; AMDGPU2-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR9]]
2413 ; AMDGPU2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__5, ptr @__omp_outlined__5_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
2414 ; AMDGPU2-NEXT: call void @simple_state_machine_interprocedural_after.internalized() #[[ATTR9]]
2415 ; AMDGPU2-NEXT: ret void
2418 ; AMDGPU2: Function Attrs: noinline nounwind
2419 ; AMDGPU2-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_before.internalized
2420 ; AMDGPU2-SAME: () #[[ATTR6:[0-9]+]] {
2421 ; AMDGPU2-NEXT: entry:
2422 ; AMDGPU2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
2423 ; AMDGPU2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]]) #[[ATTR3]]
2424 ; AMDGPU2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__17, ptr @__omp_outlined__17_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
2425 ; AMDGPU2-NEXT: ret void
2428 ; AMDGPU2: Function Attrs: convergent noinline nounwind
2429 ; AMDGPU2-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_before
2430 ; AMDGPU2-SAME: () #[[ATTR1]] {
2431 ; AMDGPU2-NEXT: entry:
2432 ; AMDGPU2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
2433 ; AMDGPU2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]])
2434 ; AMDGPU2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__17, ptr @__omp_outlined__17_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
2435 ; AMDGPU2-NEXT: ret void
2438 ; AMDGPU2: Function Attrs: convergent noinline norecurse nounwind
2439 ; AMDGPU2-LABEL: define {{[^@]+}}@__omp_outlined__5
2440 ; AMDGPU2-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2441 ; AMDGPU2-NEXT: entry:
2442 ; AMDGPU2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2443 ; AMDGPU2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2444 ; AMDGPU2-NEXT: call void @p1() #[[ATTR11]]
2445 ; AMDGPU2-NEXT: ret void
2448 ; AMDGPU2: Function Attrs: convergent noinline norecurse nounwind
2449 ; AMDGPU2-LABEL: define {{[^@]+}}@__omp_outlined__5_wrapper
2450 ; AMDGPU2-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
2451 ; AMDGPU2-NEXT: entry:
2452 ; AMDGPU2-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
2453 ; AMDGPU2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
2454 ; AMDGPU2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2455 ; AMDGPU2-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
2456 ; AMDGPU2-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
2457 ; AMDGPU2-NEXT: call void @__omp_outlined__5(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
2458 ; AMDGPU2-NEXT: ret void
2461 ; AMDGPU2: Function Attrs: noinline nounwind
2462 ; AMDGPU2-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_after.internalized
2463 ; AMDGPU2-SAME: () #[[ATTR6]] {
2464 ; AMDGPU2-NEXT: entry:
2465 ; AMDGPU2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
2466 ; AMDGPU2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]]) #[[ATTR3]]
2467 ; AMDGPU2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__18, ptr @__omp_outlined__18_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
2468 ; AMDGPU2-NEXT: ret void
2471 ; AMDGPU2: Function Attrs: convergent noinline nounwind
2472 ; AMDGPU2-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_after
2473 ; AMDGPU2-SAME: () #[[ATTR1]] {
2474 ; AMDGPU2-NEXT: entry:
2475 ; AMDGPU2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
2476 ; AMDGPU2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]])
2477 ; AMDGPU2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__18, ptr @__omp_outlined__18_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
2478 ; AMDGPU2-NEXT: ret void
2481 ; AMDGPU2: Function Attrs: convergent noinline norecurse nounwind
2482 ; AMDGPU2-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55
2483 ; AMDGPU2-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
2484 ; AMDGPU2-NEXT: entry:
2485 ; AMDGPU2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2486 ; AMDGPU2-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
2487 ; AMDGPU2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55_kernel_environment, ptr [[DYN]])
2488 ; AMDGPU2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
2489 ; AMDGPU2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
2490 ; AMDGPU2: user_code.entry:
2491 ; AMDGPU2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
2492 ; AMDGPU2-NEXT: call void @__omp_outlined__6(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
2493 ; AMDGPU2-NEXT: call void @__kmpc_target_deinit()
2494 ; AMDGPU2-NEXT: ret void
2495 ; AMDGPU2: worker.exit:
2496 ; AMDGPU2-NEXT: ret void
2499 ; AMDGPU2: Function Attrs: convergent noinline norecurse nounwind
2500 ; AMDGPU2-LABEL: define {{[^@]+}}@__omp_outlined__6
2501 ; AMDGPU2-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2502 ; AMDGPU2-NEXT: entry:
2503 ; AMDGPU2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2504 ; AMDGPU2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
2505 ; AMDGPU2-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
2506 ; AMDGPU2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__7, ptr @__omp_outlined__7_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
2507 ; AMDGPU2-NEXT: [[CALL:%.*]] = call i32 @unknown() #[[ATTR11]]
2508 ; AMDGPU2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__8, ptr @__omp_outlined__8_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
2509 ; AMDGPU2-NEXT: ret void
2512 ; AMDGPU2: Function Attrs: convergent noinline norecurse nounwind
2513 ; AMDGPU2-LABEL: define {{[^@]+}}@__omp_outlined__7
2514 ; AMDGPU2-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2515 ; AMDGPU2-NEXT: entry:
2516 ; AMDGPU2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2517 ; AMDGPU2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2518 ; AMDGPU2-NEXT: call void @p0() #[[ATTR11]]
2519 ; AMDGPU2-NEXT: ret void
2522 ; AMDGPU2: Function Attrs: convergent noinline norecurse nounwind
2523 ; AMDGPU2-LABEL: define {{[^@]+}}@__omp_outlined__7_wrapper
2524 ; AMDGPU2-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
2525 ; AMDGPU2-NEXT: entry:
2526 ; AMDGPU2-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
2527 ; AMDGPU2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
2528 ; AMDGPU2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2529 ; AMDGPU2-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
2530 ; AMDGPU2-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
2531 ; AMDGPU2-NEXT: call void @__omp_outlined__7(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
2532 ; AMDGPU2-NEXT: ret void
2535 ; AMDGPU2: Function Attrs: convergent noinline norecurse nounwind
2536 ; AMDGPU2-LABEL: define {{[^@]+}}@__omp_outlined__8
2537 ; AMDGPU2-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2538 ; AMDGPU2-NEXT: entry:
2539 ; AMDGPU2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2540 ; AMDGPU2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2541 ; AMDGPU2-NEXT: call void @p1() #[[ATTR11]]
2542 ; AMDGPU2-NEXT: ret void
2545 ; AMDGPU2: Function Attrs: convergent noinline norecurse nounwind
2546 ; AMDGPU2-LABEL: define {{[^@]+}}@__omp_outlined__8_wrapper
2547 ; AMDGPU2-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
2548 ; AMDGPU2-NEXT: entry:
2549 ; AMDGPU2-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
2550 ; AMDGPU2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
2551 ; AMDGPU2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2552 ; AMDGPU2-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
2553 ; AMDGPU2-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
2554 ; AMDGPU2-NEXT: call void @__omp_outlined__8(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
2555 ; AMDGPU2-NEXT: ret void
2558 ; AMDGPU2: Function Attrs: convergent noinline norecurse nounwind
2559 ; AMDGPU2-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66
2560 ; AMDGPU2-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
2561 ; AMDGPU2-NEXT: entry:
2562 ; AMDGPU2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2563 ; AMDGPU2-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
2564 ; AMDGPU2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66_kernel_environment, ptr [[DYN]])
2565 ; AMDGPU2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
2566 ; AMDGPU2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
2567 ; AMDGPU2: user_code.entry:
2568 ; AMDGPU2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
2569 ; AMDGPU2-NEXT: call void @__omp_outlined__9(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
2570 ; AMDGPU2-NEXT: call void @__kmpc_target_deinit()
2571 ; AMDGPU2-NEXT: ret void
2572 ; AMDGPU2: worker.exit:
2573 ; AMDGPU2-NEXT: ret void
2576 ; AMDGPU2: Function Attrs: convergent noinline norecurse nounwind
2577 ; AMDGPU2-LABEL: define {{[^@]+}}@__omp_outlined__9
2578 ; AMDGPU2-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2579 ; AMDGPU2-NEXT: entry:
2580 ; AMDGPU2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2581 ; AMDGPU2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
2582 ; AMDGPU2-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
2583 ; AMDGPU2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__10, ptr @__omp_outlined__10_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
2584 ; AMDGPU2-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
2585 ; AMDGPU2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__11, ptr @__omp_outlined__11_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
2586 ; AMDGPU2-NEXT: ret void
2589 ; AMDGPU2: Function Attrs: convergent noinline norecurse nounwind
2590 ; AMDGPU2-LABEL: define {{[^@]+}}@__omp_outlined__10
2591 ; AMDGPU2-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2592 ; AMDGPU2-NEXT: entry:
2593 ; AMDGPU2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2594 ; AMDGPU2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2595 ; AMDGPU2-NEXT: call void @p0() #[[ATTR11]]
2596 ; AMDGPU2-NEXT: ret void
2599 ; AMDGPU2: Function Attrs: convergent noinline norecurse nounwind
2600 ; AMDGPU2-LABEL: define {{[^@]+}}@__omp_outlined__10_wrapper
2601 ; AMDGPU2-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
2602 ; AMDGPU2-NEXT: entry:
2603 ; AMDGPU2-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
2604 ; AMDGPU2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
2605 ; AMDGPU2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2606 ; AMDGPU2-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
2607 ; AMDGPU2-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
2608 ; AMDGPU2-NEXT: call void @__omp_outlined__10(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
2609 ; AMDGPU2-NEXT: ret void
2612 ; AMDGPU2: Function Attrs: convergent noinline norecurse nounwind
2613 ; AMDGPU2-LABEL: define {{[^@]+}}@__omp_outlined__11
2614 ; AMDGPU2-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2615 ; AMDGPU2-NEXT: entry:
2616 ; AMDGPU2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2617 ; AMDGPU2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2618 ; AMDGPU2-NEXT: call void @p1() #[[ATTR11]]
2619 ; AMDGPU2-NEXT: ret void
2622 ; AMDGPU2: Function Attrs: convergent noinline norecurse nounwind
2623 ; AMDGPU2-LABEL: define {{[^@]+}}@__omp_outlined__11_wrapper
2624 ; AMDGPU2-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
2625 ; AMDGPU2-NEXT: entry:
2626 ; AMDGPU2-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
2627 ; AMDGPU2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
2628 ; AMDGPU2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2629 ; AMDGPU2-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
2630 ; AMDGPU2-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
2631 ; AMDGPU2-NEXT: call void @__omp_outlined__11(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
2632 ; AMDGPU2-NEXT: ret void
2635 ; AMDGPU2: Function Attrs: convergent noinline norecurse nounwind
2636 ; AMDGPU2-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_pure_l77
2637 ; AMDGPU2-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
2638 ; AMDGPU2-NEXT: entry:
2639 ; AMDGPU2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2640 ; AMDGPU2-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
2641 ; AMDGPU2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_pure_l77_kernel_environment, ptr [[DYN]])
2642 ; AMDGPU2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
2643 ; AMDGPU2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
2644 ; AMDGPU2: user_code.entry:
2645 ; AMDGPU2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
2646 ; AMDGPU2-NEXT: call void @__omp_outlined__12(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
2647 ; AMDGPU2-NEXT: call void @__kmpc_target_deinit()
2648 ; AMDGPU2-NEXT: ret void
2649 ; AMDGPU2: worker.exit:
2650 ; AMDGPU2-NEXT: ret void
2653 ; AMDGPU2: Function Attrs: convergent noinline norecurse nounwind
2654 ; AMDGPU2-LABEL: define {{[^@]+}}@__omp_outlined__12
2655 ; AMDGPU2-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2656 ; AMDGPU2-NEXT: entry:
2657 ; AMDGPU2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2658 ; AMDGPU2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
2659 ; AMDGPU2-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
2660 ; AMDGPU2-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
2661 ; AMDGPU2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__13, ptr @__omp_outlined__13_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
2662 ; AMDGPU2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__14, ptr @__omp_outlined__14_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
2663 ; AMDGPU2-NEXT: ret void
2666 ; AMDGPU2: Function Attrs: convergent noinline norecurse nounwind
2667 ; AMDGPU2-LABEL: define {{[^@]+}}@__omp_outlined__13
2668 ; AMDGPU2-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2669 ; AMDGPU2-NEXT: entry:
2670 ; AMDGPU2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2671 ; AMDGPU2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2672 ; AMDGPU2-NEXT: call void @p0() #[[ATTR11]]
2673 ; AMDGPU2-NEXT: ret void
2676 ; AMDGPU2: Function Attrs: convergent noinline norecurse nounwind
2677 ; AMDGPU2-LABEL: define {{[^@]+}}@__omp_outlined__13_wrapper
2678 ; AMDGPU2-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
2679 ; AMDGPU2-NEXT: entry:
2680 ; AMDGPU2-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
2681 ; AMDGPU2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
2682 ; AMDGPU2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2683 ; AMDGPU2-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
2684 ; AMDGPU2-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
2685 ; AMDGPU2-NEXT: call void @__omp_outlined__13(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
2686 ; AMDGPU2-NEXT: ret void
2689 ; AMDGPU2: Function Attrs: convergent noinline norecurse nounwind
2690 ; AMDGPU2-LABEL: define {{[^@]+}}@__omp_outlined__14
2691 ; AMDGPU2-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2692 ; AMDGPU2-NEXT: entry:
2693 ; AMDGPU2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2694 ; AMDGPU2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2695 ; AMDGPU2-NEXT: call void @p1() #[[ATTR11]]
2696 ; AMDGPU2-NEXT: ret void
2699 ; AMDGPU2: Function Attrs: convergent noinline norecurse nounwind
2700 ; AMDGPU2-LABEL: define {{[^@]+}}@__omp_outlined__14_wrapper
2701 ; AMDGPU2-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
2702 ; AMDGPU2-NEXT: entry:
2703 ; AMDGPU2-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
2704 ; AMDGPU2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
2705 ; AMDGPU2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2706 ; AMDGPU2-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
2707 ; AMDGPU2-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
2708 ; AMDGPU2-NEXT: call void @__omp_outlined__14(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
2709 ; AMDGPU2-NEXT: ret void
2712 ; AMDGPU2: Function Attrs: convergent noinline norecurse nounwind
2713 ; AMDGPU2-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92
2714 ; AMDGPU2-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
2715 ; AMDGPU2-NEXT: entry:
2716 ; AMDGPU2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2717 ; AMDGPU2-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
2718 ; AMDGPU2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92_kernel_environment, ptr [[DYN]])
2719 ; AMDGPU2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
2720 ; AMDGPU2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
2721 ; AMDGPU2: user_code.entry:
2722 ; AMDGPU2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
2723 ; AMDGPU2-NEXT: call void @__omp_outlined__15(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
2724 ; AMDGPU2-NEXT: call void @__kmpc_target_deinit()
2725 ; AMDGPU2-NEXT: ret void
2726 ; AMDGPU2: worker.exit:
2727 ; AMDGPU2-NEXT: ret void
2730 ; AMDGPU2: Function Attrs: convergent noinline norecurse nounwind
2731 ; AMDGPU2-LABEL: define {{[^@]+}}@__omp_outlined__15
2732 ; AMDGPU2-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2733 ; AMDGPU2-NEXT: entry:
2734 ; AMDGPU2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2735 ; AMDGPU2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2736 ; AMDGPU2-NEXT: [[CALL:%.*]] = call i32 @omp_get_thread_num() #[[ATTR9]]
2737 ; AMDGPU2-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after.internalized(i32 [[CALL]]) #[[ATTR9]]
2738 ; AMDGPU2-NEXT: ret void
2741 ; AMDGPU2: Function Attrs: noinline nounwind
2742 ; AMDGPU2-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after.internalized
2743 ; AMDGPU2-SAME: (i32 [[A:%.*]]) #[[ATTR6]] {
2744 ; AMDGPU2-NEXT: entry:
2745 ; AMDGPU2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
2746 ; AMDGPU2-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
2747 ; AMDGPU2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
2748 ; AMDGPU2-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP0]], 0
2749 ; AMDGPU2-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
2751 ; AMDGPU2-NEXT: br label [[RETURN:%.*]]
2753 ; AMDGPU2-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4
2754 ; AMDGPU2-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP1]], 1
2755 ; AMDGPU2-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after.internalized(i32 [[SUB]]) #[[ATTR9]]
2756 ; AMDGPU2-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after_after.internalized() #[[ATTR9]]
2757 ; AMDGPU2-NEXT: br label [[RETURN]]
2759 ; AMDGPU2-NEXT: ret void
2762 ; AMDGPU2: Function Attrs: convergent noinline nounwind
2763 ; AMDGPU2-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after
2764 ; AMDGPU2-SAME: (i32 [[A:%.*]]) #[[ATTR1]] {
2765 ; AMDGPU2-NEXT: entry:
2766 ; AMDGPU2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
2767 ; AMDGPU2-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
2768 ; AMDGPU2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
2769 ; AMDGPU2-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP0]], 0
2770 ; AMDGPU2-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
2772 ; AMDGPU2-NEXT: br label [[RETURN:%.*]]
2774 ; AMDGPU2-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4
2775 ; AMDGPU2-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP1]], 1
2776 ; AMDGPU2-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after(i32 [[SUB]]) #[[ATTR11]]
2777 ; AMDGPU2-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after_after() #[[ATTR11]]
2778 ; AMDGPU2-NEXT: br label [[RETURN]]
2780 ; AMDGPU2-NEXT: ret void
2783 ; AMDGPU2: Function Attrs: convergent noinline norecurse nounwind
2784 ; AMDGPU2-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112
2785 ; AMDGPU2-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
2786 ; AMDGPU2-NEXT: entry:
2787 ; AMDGPU2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2788 ; AMDGPU2-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
2789 ; AMDGPU2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112_kernel_environment, ptr [[DYN]])
2790 ; AMDGPU2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
2791 ; AMDGPU2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
2792 ; AMDGPU2: user_code.entry:
2793 ; AMDGPU2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
2794 ; AMDGPU2-NEXT: call void @__omp_outlined__16(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
2795 ; AMDGPU2-NEXT: call void @__kmpc_target_deinit()
2796 ; AMDGPU2-NEXT: ret void
2797 ; AMDGPU2: worker.exit:
2798 ; AMDGPU2-NEXT: ret void
2801 ; AMDGPU2: Function Attrs: convergent noinline norecurse nounwind
2802 ; AMDGPU2-LABEL: define {{[^@]+}}@__omp_outlined__16
2803 ; AMDGPU2-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2804 ; AMDGPU2-NEXT: entry:
2805 ; AMDGPU2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2806 ; AMDGPU2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2807 ; AMDGPU2-NEXT: call void @weak_callee_empty() #[[ATTR9]]
2808 ; AMDGPU2-NEXT: ret void
2811 ; AMDGPU2: Function Attrs: convergent noinline nounwind
2812 ; AMDGPU2-LABEL: define {{[^@]+}}@weak_callee_empty
2813 ; AMDGPU2-SAME: () #[[ATTR1]] {
2814 ; AMDGPU2-NEXT: entry:
2815 ; AMDGPU2-NEXT: ret void
2818 ; AMDGPU2: Function Attrs: convergent noinline norecurse nounwind
2819 ; AMDGPU2-LABEL: define {{[^@]+}}@__omp_outlined__17
2820 ; AMDGPU2-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2821 ; AMDGPU2-NEXT: entry:
2822 ; AMDGPU2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2823 ; AMDGPU2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2824 ; AMDGPU2-NEXT: call void @p0() #[[ATTR11]]
2825 ; AMDGPU2-NEXT: ret void
2828 ; AMDGPU2: Function Attrs: convergent noinline norecurse nounwind
2829 ; AMDGPU2-LABEL: define {{[^@]+}}@__omp_outlined__17_wrapper
2830 ; AMDGPU2-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
2831 ; AMDGPU2-NEXT: entry:
2832 ; AMDGPU2-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
2833 ; AMDGPU2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
2834 ; AMDGPU2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2835 ; AMDGPU2-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
2836 ; AMDGPU2-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
2837 ; AMDGPU2-NEXT: call void @__omp_outlined__17(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
2838 ; AMDGPU2-NEXT: ret void
2841 ; AMDGPU2: Function Attrs: convergent noinline norecurse nounwind
2842 ; AMDGPU2-LABEL: define {{[^@]+}}@__omp_outlined__18
2843 ; AMDGPU2-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2844 ; AMDGPU2-NEXT: entry:
2845 ; AMDGPU2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2846 ; AMDGPU2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2847 ; AMDGPU2-NEXT: call void @p0() #[[ATTR11]]
2848 ; AMDGPU2-NEXT: ret void
2851 ; AMDGPU2: Function Attrs: convergent noinline norecurse nounwind
2852 ; AMDGPU2-LABEL: define {{[^@]+}}@__omp_outlined__18_wrapper
2853 ; AMDGPU2-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
2854 ; AMDGPU2-NEXT: entry:
2855 ; AMDGPU2-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
2856 ; AMDGPU2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
2857 ; AMDGPU2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2858 ; AMDGPU2-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
2859 ; AMDGPU2-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
2860 ; AMDGPU2-NEXT: call void @__omp_outlined__18(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
2861 ; AMDGPU2-NEXT: ret void
2864 ; AMDGPU2: Function Attrs: noinline nounwind
2865 ; AMDGPU2-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after_after.internalized
2866 ; AMDGPU2-SAME: () #[[ATTR6]] {
2867 ; AMDGPU2-NEXT: entry:
2868 ; AMDGPU2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
2869 ; AMDGPU2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]]) #[[ATTR3]]
2870 ; AMDGPU2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__19, ptr @__omp_outlined__19_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
2871 ; AMDGPU2-NEXT: ret void
2874 ; AMDGPU2: Function Attrs: convergent noinline nounwind
2875 ; AMDGPU2-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after_after
2876 ; AMDGPU2-SAME: () #[[ATTR1]] {
2877 ; AMDGPU2-NEXT: entry:
2878 ; AMDGPU2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
2879 ; AMDGPU2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]])
2880 ; AMDGPU2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__19, ptr @__omp_outlined__19_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
2881 ; AMDGPU2-NEXT: ret void
2884 ; AMDGPU2: Function Attrs: convergent noinline norecurse nounwind
2885 ; AMDGPU2-LABEL: define {{[^@]+}}@__omp_outlined__19
2886 ; AMDGPU2-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2887 ; AMDGPU2-NEXT: entry:
2888 ; AMDGPU2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2889 ; AMDGPU2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2890 ; AMDGPU2-NEXT: call void @p0() #[[ATTR11]]
2891 ; AMDGPU2-NEXT: ret void
2894 ; AMDGPU2: Function Attrs: convergent noinline norecurse nounwind
2895 ; AMDGPU2-LABEL: define {{[^@]+}}@__omp_outlined__19_wrapper
2896 ; AMDGPU2-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
2897 ; AMDGPU2-NEXT: entry:
2898 ; AMDGPU2-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
2899 ; AMDGPU2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
2900 ; AMDGPU2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2901 ; AMDGPU2-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
2902 ; AMDGPU2-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
2903 ; AMDGPU2-NEXT: call void @__omp_outlined__19(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
2904 ; AMDGPU2-NEXT: ret void
2907 ; AMDGPU3: Function Attrs: convergent noinline norecurse nounwind
2908 ; AMDGPU3-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_no_state_machine_needed_l14
2909 ; AMDGPU3-SAME: (ptr [[DYN:%.*]]) #[[ATTR0:[0-9]+]] {
2910 ; AMDGPU3-NEXT: entry:
2911 ; AMDGPU3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2912 ; AMDGPU3-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
2913 ; AMDGPU3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_no_state_machine_needed_l14_kernel_environment, ptr [[DYN]])
2914 ; AMDGPU3-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
2915 ; AMDGPU3-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
2916 ; AMDGPU3: user_code.entry:
2917 ; AMDGPU3-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3:[0-9]+]]
2918 ; AMDGPU3-NEXT: call void @__omp_outlined__(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
2919 ; AMDGPU3-NEXT: call void @__kmpc_target_deinit()
2920 ; AMDGPU3-NEXT: ret void
2921 ; AMDGPU3: worker.exit:
2922 ; AMDGPU3-NEXT: ret void
2925 ; AMDGPU3: Function Attrs: convergent noinline norecurse nounwind
2926 ; AMDGPU3-LABEL: define {{[^@]+}}@__omp_outlined__
2927 ; AMDGPU3-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2928 ; AMDGPU3-NEXT: entry:
2929 ; AMDGPU3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2930 ; AMDGPU3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2931 ; AMDGPU3-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR9:[0-9]+]]
2932 ; AMDGPU3-NEXT: call void @unknown_no_openmp() #[[ATTR10:[0-9]+]]
2933 ; AMDGPU3-NEXT: ret void
2936 ; AMDGPU3: Function Attrs: convergent noinline nounwind
2937 ; AMDGPU3-LABEL: define {{[^@]+}}@no_parallel_region_in_here.internalized
2938 ; AMDGPU3-SAME: () #[[ATTR1:[0-9]+]] {
2939 ; AMDGPU3-NEXT: entry:
2940 ; AMDGPU3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]]) #[[ATTR3]]
2941 ; AMDGPU3-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_single(ptr @[[GLOB2]], i32 [[TMP0]]) #[[ATTR3]]
2942 ; AMDGPU3-NEXT: [[TMP2:%.*]] = icmp ne i32 [[TMP1]], 0
2943 ; AMDGPU3-NEXT: br i1 [[TMP2]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
2944 ; AMDGPU3: omp_if.then:
2945 ; AMDGPU3-NEXT: store i32 0, ptr @G, align 4
2946 ; AMDGPU3-NEXT: call void @__kmpc_end_single(ptr @[[GLOB2]], i32 [[TMP0]]) #[[ATTR3]]
2947 ; AMDGPU3-NEXT: br label [[OMP_IF_END]]
2948 ; AMDGPU3: omp_if.end:
2949 ; AMDGPU3-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3]], i32 [[TMP0]]) #[[ATTR3]]
2950 ; AMDGPU3-NEXT: ret void
2953 ; AMDGPU3: Function Attrs: convergent noinline nounwind
2954 ; AMDGPU3-LABEL: define {{[^@]+}}@no_parallel_region_in_here
2955 ; AMDGPU3-SAME: () #[[ATTR1]] {
2956 ; AMDGPU3-NEXT: entry:
2957 ; AMDGPU3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]])
2958 ; AMDGPU3-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_single(ptr @[[GLOB2]], i32 [[TMP0]])
2959 ; AMDGPU3-NEXT: [[TMP2:%.*]] = icmp ne i32 [[TMP1]], 0
2960 ; AMDGPU3-NEXT: br i1 [[TMP2]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
2961 ; AMDGPU3: omp_if.then:
2962 ; AMDGPU3-NEXT: store i32 0, ptr @G, align 4
2963 ; AMDGPU3-NEXT: call void @__kmpc_end_single(ptr @[[GLOB2]], i32 [[TMP0]])
2964 ; AMDGPU3-NEXT: br label [[OMP_IF_END]]
2965 ; AMDGPU3: omp_if.end:
2966 ; AMDGPU3-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3]], i32 [[TMP0]])
2967 ; AMDGPU3-NEXT: ret void
2970 ; AMDGPU3: Function Attrs: convergent noinline norecurse nounwind
2971 ; AMDGPU3-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_l22
2972 ; AMDGPU3-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
2973 ; AMDGPU3-NEXT: entry:
2974 ; AMDGPU3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2975 ; AMDGPU3-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
2976 ; AMDGPU3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_l22_kernel_environment, ptr [[DYN]])
2977 ; AMDGPU3-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
2978 ; AMDGPU3-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
2979 ; AMDGPU3: user_code.entry:
2980 ; AMDGPU3-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
2981 ; AMDGPU3-NEXT: call void @__omp_outlined__1(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
2982 ; AMDGPU3-NEXT: call void @__kmpc_target_deinit()
2983 ; AMDGPU3-NEXT: ret void
2984 ; AMDGPU3: worker.exit:
2985 ; AMDGPU3-NEXT: ret void
2988 ; AMDGPU3: Function Attrs: convergent noinline norecurse nounwind
2989 ; AMDGPU3-LABEL: define {{[^@]+}}@__omp_outlined__1
2990 ; AMDGPU3-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2991 ; AMDGPU3-NEXT: entry:
2992 ; AMDGPU3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2993 ; AMDGPU3-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
2994 ; AMDGPU3-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
2995 ; AMDGPU3-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
2996 ; AMDGPU3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__2, ptr @__omp_outlined__2_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
2997 ; AMDGPU3-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR9]]
2998 ; AMDGPU3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__3, ptr @__omp_outlined__3_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
2999 ; AMDGPU3-NEXT: ret void
3002 ; AMDGPU3: Function Attrs: convergent noinline norecurse nounwind
3003 ; AMDGPU3-LABEL: define {{[^@]+}}@__omp_outlined__2
3004 ; AMDGPU3-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3005 ; AMDGPU3-NEXT: entry:
3006 ; AMDGPU3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
3007 ; AMDGPU3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3008 ; AMDGPU3-NEXT: call void @p0() #[[ATTR11:[0-9]+]]
3009 ; AMDGPU3-NEXT: ret void
3012 ; AMDGPU3: Function Attrs: convergent noinline norecurse nounwind
3013 ; AMDGPU3-LABEL: define {{[^@]+}}@__omp_outlined__2_wrapper
3014 ; AMDGPU3-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
3015 ; AMDGPU3-NEXT: entry:
3016 ; AMDGPU3-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
3017 ; AMDGPU3-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
3018 ; AMDGPU3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3019 ; AMDGPU3-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
3020 ; AMDGPU3-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
3021 ; AMDGPU3-NEXT: call void @__omp_outlined__2(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3022 ; AMDGPU3-NEXT: ret void
3025 ; AMDGPU3: Function Attrs: convergent noinline norecurse nounwind
3026 ; AMDGPU3-LABEL: define {{[^@]+}}@__omp_outlined__3
3027 ; AMDGPU3-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3028 ; AMDGPU3-NEXT: entry:
3029 ; AMDGPU3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
3030 ; AMDGPU3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3031 ; AMDGPU3-NEXT: call void @p1() #[[ATTR11]]
3032 ; AMDGPU3-NEXT: ret void
3035 ; AMDGPU3: Function Attrs: convergent noinline norecurse nounwind
3036 ; AMDGPU3-LABEL: define {{[^@]+}}@__omp_outlined__3_wrapper
3037 ; AMDGPU3-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
3038 ; AMDGPU3-NEXT: entry:
3039 ; AMDGPU3-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
3040 ; AMDGPU3-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
3041 ; AMDGPU3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3042 ; AMDGPU3-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
3043 ; AMDGPU3-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
3044 ; AMDGPU3-NEXT: call void @__omp_outlined__3(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3045 ; AMDGPU3-NEXT: ret void
3048 ; AMDGPU3: Function Attrs: convergent noinline norecurse nounwind
3049 ; AMDGPU3-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39
3050 ; AMDGPU3-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
3051 ; AMDGPU3-NEXT: entry:
3052 ; AMDGPU3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3053 ; AMDGPU3-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
3054 ; AMDGPU3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39_kernel_environment, ptr [[DYN]])
3055 ; AMDGPU3-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
3056 ; AMDGPU3-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
3057 ; AMDGPU3: user_code.entry:
3058 ; AMDGPU3-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
3059 ; AMDGPU3-NEXT: call void @__omp_outlined__4(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3060 ; AMDGPU3-NEXT: call void @__kmpc_target_deinit()
3061 ; AMDGPU3-NEXT: ret void
3062 ; AMDGPU3: worker.exit:
3063 ; AMDGPU3-NEXT: ret void
3066 ; AMDGPU3: Function Attrs: convergent noinline norecurse nounwind
3067 ; AMDGPU3-LABEL: define {{[^@]+}}@__omp_outlined__4
3068 ; AMDGPU3-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3069 ; AMDGPU3-NEXT: entry:
3070 ; AMDGPU3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3071 ; AMDGPU3-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
3072 ; AMDGPU3-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
3073 ; AMDGPU3-NEXT: call void @simple_state_machine_interprocedural_before.internalized() #[[ATTR9]]
3074 ; AMDGPU3-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR9]]
3075 ; AMDGPU3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__5, ptr @__omp_outlined__5_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
3076 ; AMDGPU3-NEXT: call void @simple_state_machine_interprocedural_after.internalized() #[[ATTR9]]
3077 ; AMDGPU3-NEXT: ret void
3080 ; AMDGPU3: Function Attrs: noinline nounwind
3081 ; AMDGPU3-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_before.internalized
3082 ; AMDGPU3-SAME: () #[[ATTR6:[0-9]+]] {
3083 ; AMDGPU3-NEXT: entry:
3084 ; AMDGPU3-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
3085 ; AMDGPU3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]]) #[[ATTR3]]
3086 ; AMDGPU3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__17, ptr @__omp_outlined__17_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
3087 ; AMDGPU3-NEXT: ret void
3090 ; AMDGPU3: Function Attrs: convergent noinline nounwind
3091 ; AMDGPU3-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_before
3092 ; AMDGPU3-SAME: () #[[ATTR1]] {
3093 ; AMDGPU3-NEXT: entry:
3094 ; AMDGPU3-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
3095 ; AMDGPU3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]])
3096 ; AMDGPU3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__17, ptr @__omp_outlined__17_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
3097 ; AMDGPU3-NEXT: ret void
3100 ; AMDGPU3: Function Attrs: convergent noinline norecurse nounwind
3101 ; AMDGPU3-LABEL: define {{[^@]+}}@__omp_outlined__5
3102 ; AMDGPU3-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3103 ; AMDGPU3-NEXT: entry:
3104 ; AMDGPU3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
3105 ; AMDGPU3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3106 ; AMDGPU3-NEXT: call void @p1() #[[ATTR11]]
3107 ; AMDGPU3-NEXT: ret void
3110 ; AMDGPU3: Function Attrs: convergent noinline norecurse nounwind
3111 ; AMDGPU3-LABEL: define {{[^@]+}}@__omp_outlined__5_wrapper
3112 ; AMDGPU3-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
3113 ; AMDGPU3-NEXT: entry:
3114 ; AMDGPU3-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
3115 ; AMDGPU3-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
3116 ; AMDGPU3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3117 ; AMDGPU3-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
3118 ; AMDGPU3-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
3119 ; AMDGPU3-NEXT: call void @__omp_outlined__5(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3120 ; AMDGPU3-NEXT: ret void
3123 ; AMDGPU3: Function Attrs: noinline nounwind
3124 ; AMDGPU3-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_after.internalized
3125 ; AMDGPU3-SAME: () #[[ATTR6]] {
3126 ; AMDGPU3-NEXT: entry:
3127 ; AMDGPU3-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
3128 ; AMDGPU3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]]) #[[ATTR3]]
3129 ; AMDGPU3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__18, ptr @__omp_outlined__18_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
3130 ; AMDGPU3-NEXT: ret void
3133 ; AMDGPU3: Function Attrs: convergent noinline nounwind
3134 ; AMDGPU3-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_after
3135 ; AMDGPU3-SAME: () #[[ATTR1]] {
3136 ; AMDGPU3-NEXT: entry:
3137 ; AMDGPU3-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
3138 ; AMDGPU3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]])
3139 ; AMDGPU3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__18, ptr @__omp_outlined__18_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
3140 ; AMDGPU3-NEXT: ret void
3143 ; AMDGPU3: Function Attrs: convergent noinline norecurse nounwind
3144 ; AMDGPU3-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55
3145 ; AMDGPU3-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
3146 ; AMDGPU3-NEXT: entry:
3147 ; AMDGPU3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3148 ; AMDGPU3-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
3149 ; AMDGPU3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55_kernel_environment, ptr [[DYN]])
3150 ; AMDGPU3-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
3151 ; AMDGPU3-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
3152 ; AMDGPU3: user_code.entry:
3153 ; AMDGPU3-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
3154 ; AMDGPU3-NEXT: call void @__omp_outlined__6(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3155 ; AMDGPU3-NEXT: call void @__kmpc_target_deinit()
3156 ; AMDGPU3-NEXT: ret void
3157 ; AMDGPU3: worker.exit:
3158 ; AMDGPU3-NEXT: ret void
3161 ; AMDGPU3: Function Attrs: convergent noinline norecurse nounwind
3162 ; AMDGPU3-LABEL: define {{[^@]+}}@__omp_outlined__6
3163 ; AMDGPU3-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3164 ; AMDGPU3-NEXT: entry:
3165 ; AMDGPU3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3166 ; AMDGPU3-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
3167 ; AMDGPU3-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
3168 ; AMDGPU3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__7, ptr @__omp_outlined__7_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
3169 ; AMDGPU3-NEXT: [[CALL:%.*]] = call i32 @unknown() #[[ATTR11]]
3170 ; AMDGPU3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__8, ptr @__omp_outlined__8_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
3171 ; AMDGPU3-NEXT: ret void
3174 ; AMDGPU3: Function Attrs: convergent noinline norecurse nounwind
3175 ; AMDGPU3-LABEL: define {{[^@]+}}@__omp_outlined__7
3176 ; AMDGPU3-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3177 ; AMDGPU3-NEXT: entry:
3178 ; AMDGPU3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
3179 ; AMDGPU3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3180 ; AMDGPU3-NEXT: call void @p0() #[[ATTR11]]
3181 ; AMDGPU3-NEXT: ret void
3184 ; AMDGPU3: Function Attrs: convergent noinline norecurse nounwind
3185 ; AMDGPU3-LABEL: define {{[^@]+}}@__omp_outlined__7_wrapper
3186 ; AMDGPU3-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
3187 ; AMDGPU3-NEXT: entry:
3188 ; AMDGPU3-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
3189 ; AMDGPU3-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
3190 ; AMDGPU3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3191 ; AMDGPU3-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
3192 ; AMDGPU3-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
3193 ; AMDGPU3-NEXT: call void @__omp_outlined__7(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3194 ; AMDGPU3-NEXT: ret void
3197 ; AMDGPU3: Function Attrs: convergent noinline norecurse nounwind
3198 ; AMDGPU3-LABEL: define {{[^@]+}}@__omp_outlined__8
3199 ; AMDGPU3-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3200 ; AMDGPU3-NEXT: entry:
3201 ; AMDGPU3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
3202 ; AMDGPU3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3203 ; AMDGPU3-NEXT: call void @p1() #[[ATTR11]]
3204 ; AMDGPU3-NEXT: ret void
3207 ; AMDGPU3: Function Attrs: convergent noinline norecurse nounwind
3208 ; AMDGPU3-LABEL: define {{[^@]+}}@__omp_outlined__8_wrapper
3209 ; AMDGPU3-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
3210 ; AMDGPU3-NEXT: entry:
3211 ; AMDGPU3-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
3212 ; AMDGPU3-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
3213 ; AMDGPU3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3214 ; AMDGPU3-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
3215 ; AMDGPU3-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
3216 ; AMDGPU3-NEXT: call void @__omp_outlined__8(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3217 ; AMDGPU3-NEXT: ret void
3220 ; AMDGPU3: Function Attrs: convergent noinline norecurse nounwind
3221 ; AMDGPU3-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66
3222 ; AMDGPU3-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
3223 ; AMDGPU3-NEXT: entry:
3224 ; AMDGPU3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3225 ; AMDGPU3-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
3226 ; AMDGPU3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66_kernel_environment, ptr [[DYN]])
3227 ; AMDGPU3-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
3228 ; AMDGPU3-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
3229 ; AMDGPU3: user_code.entry:
3230 ; AMDGPU3-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
3231 ; AMDGPU3-NEXT: call void @__omp_outlined__9(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3232 ; AMDGPU3-NEXT: call void @__kmpc_target_deinit()
3233 ; AMDGPU3-NEXT: ret void
3234 ; AMDGPU3: worker.exit:
3235 ; AMDGPU3-NEXT: ret void
3238 ; AMDGPU3: Function Attrs: convergent noinline norecurse nounwind
3239 ; AMDGPU3-LABEL: define {{[^@]+}}@__omp_outlined__9
3240 ; AMDGPU3-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3241 ; AMDGPU3-NEXT: entry:
3242 ; AMDGPU3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3243 ; AMDGPU3-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
3244 ; AMDGPU3-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
3245 ; AMDGPU3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__10, ptr @__omp_outlined__10_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
3246 ; AMDGPU3-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
3247 ; AMDGPU3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__11, ptr @__omp_outlined__11_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
3248 ; AMDGPU3-NEXT: ret void
3251 ; AMDGPU3: Function Attrs: convergent noinline norecurse nounwind
3252 ; AMDGPU3-LABEL: define {{[^@]+}}@__omp_outlined__10
3253 ; AMDGPU3-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3254 ; AMDGPU3-NEXT: entry:
3255 ; AMDGPU3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
3256 ; AMDGPU3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3257 ; AMDGPU3-NEXT: call void @p0() #[[ATTR11]]
3258 ; AMDGPU3-NEXT: ret void
3261 ; AMDGPU3: Function Attrs: convergent noinline norecurse nounwind
3262 ; AMDGPU3-LABEL: define {{[^@]+}}@__omp_outlined__10_wrapper
3263 ; AMDGPU3-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
3264 ; AMDGPU3-NEXT: entry:
3265 ; AMDGPU3-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
3266 ; AMDGPU3-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
3267 ; AMDGPU3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3268 ; AMDGPU3-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
3269 ; AMDGPU3-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
3270 ; AMDGPU3-NEXT: call void @__omp_outlined__10(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3271 ; AMDGPU3-NEXT: ret void
3274 ; AMDGPU3: Function Attrs: convergent noinline norecurse nounwind
3275 ; AMDGPU3-LABEL: define {{[^@]+}}@__omp_outlined__11
3276 ; AMDGPU3-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3277 ; AMDGPU3-NEXT: entry:
3278 ; AMDGPU3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
3279 ; AMDGPU3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3280 ; AMDGPU3-NEXT: call void @p1() #[[ATTR11]]
3281 ; AMDGPU3-NEXT: ret void
3284 ; AMDGPU3: Function Attrs: convergent noinline norecurse nounwind
3285 ; AMDGPU3-LABEL: define {{[^@]+}}@__omp_outlined__11_wrapper
3286 ; AMDGPU3-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
3287 ; AMDGPU3-NEXT: entry:
3288 ; AMDGPU3-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
3289 ; AMDGPU3-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
3290 ; AMDGPU3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3291 ; AMDGPU3-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
3292 ; AMDGPU3-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
3293 ; AMDGPU3-NEXT: call void @__omp_outlined__11(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3294 ; AMDGPU3-NEXT: ret void
3297 ; AMDGPU3: Function Attrs: convergent noinline norecurse nounwind
3298 ; AMDGPU3-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_pure_l77
3299 ; AMDGPU3-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
3300 ; AMDGPU3-NEXT: entry:
3301 ; AMDGPU3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3302 ; AMDGPU3-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
3303 ; AMDGPU3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_pure_l77_kernel_environment, ptr [[DYN]])
3304 ; AMDGPU3-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
3305 ; AMDGPU3-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
3306 ; AMDGPU3: user_code.entry:
3307 ; AMDGPU3-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
3308 ; AMDGPU3-NEXT: call void @__omp_outlined__12(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3309 ; AMDGPU3-NEXT: call void @__kmpc_target_deinit()
3310 ; AMDGPU3-NEXT: ret void
3311 ; AMDGPU3: worker.exit:
3312 ; AMDGPU3-NEXT: ret void
3315 ; AMDGPU3: Function Attrs: convergent noinline norecurse nounwind
3316 ; AMDGPU3-LABEL: define {{[^@]+}}@__omp_outlined__12
3317 ; AMDGPU3-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3318 ; AMDGPU3-NEXT: entry:
3319 ; AMDGPU3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3320 ; AMDGPU3-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
3321 ; AMDGPU3-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
3322 ; AMDGPU3-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
3323 ; AMDGPU3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__13, ptr @__omp_outlined__13_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
3324 ; AMDGPU3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__14, ptr @__omp_outlined__14_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
3325 ; AMDGPU3-NEXT: ret void
3328 ; AMDGPU3: Function Attrs: convergent noinline norecurse nounwind
3329 ; AMDGPU3-LABEL: define {{[^@]+}}@__omp_outlined__13
3330 ; AMDGPU3-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3331 ; AMDGPU3-NEXT: entry:
3332 ; AMDGPU3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
3333 ; AMDGPU3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3334 ; AMDGPU3-NEXT: call void @p0() #[[ATTR11]]
3335 ; AMDGPU3-NEXT: ret void
3338 ; AMDGPU3: Function Attrs: convergent noinline norecurse nounwind
3339 ; AMDGPU3-LABEL: define {{[^@]+}}@__omp_outlined__13_wrapper
3340 ; AMDGPU3-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
3341 ; AMDGPU3-NEXT: entry:
3342 ; AMDGPU3-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
3343 ; AMDGPU3-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
3344 ; AMDGPU3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3345 ; AMDGPU3-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
3346 ; AMDGPU3-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
3347 ; AMDGPU3-NEXT: call void @__omp_outlined__13(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3348 ; AMDGPU3-NEXT: ret void
3351 ; AMDGPU3: Function Attrs: convergent noinline norecurse nounwind
3352 ; AMDGPU3-LABEL: define {{[^@]+}}@__omp_outlined__14
3353 ; AMDGPU3-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3354 ; AMDGPU3-NEXT: entry:
3355 ; AMDGPU3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
3356 ; AMDGPU3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3357 ; AMDGPU3-NEXT: call void @p1() #[[ATTR11]]
3358 ; AMDGPU3-NEXT: ret void
3361 ; AMDGPU3: Function Attrs: convergent noinline norecurse nounwind
3362 ; AMDGPU3-LABEL: define {{[^@]+}}@__omp_outlined__14_wrapper
3363 ; AMDGPU3-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
3364 ; AMDGPU3-NEXT: entry:
3365 ; AMDGPU3-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
3366 ; AMDGPU3-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
3367 ; AMDGPU3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3368 ; AMDGPU3-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
3369 ; AMDGPU3-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
3370 ; AMDGPU3-NEXT: call void @__omp_outlined__14(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3371 ; AMDGPU3-NEXT: ret void
3374 ; AMDGPU3: Function Attrs: convergent noinline norecurse nounwind
3375 ; AMDGPU3-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92
3376 ; AMDGPU3-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
3377 ; AMDGPU3-NEXT: entry:
3378 ; AMDGPU3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3379 ; AMDGPU3-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
3380 ; AMDGPU3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92_kernel_environment, ptr [[DYN]])
3381 ; AMDGPU3-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
3382 ; AMDGPU3-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
3383 ; AMDGPU3: user_code.entry:
3384 ; AMDGPU3-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
3385 ; AMDGPU3-NEXT: call void @__omp_outlined__15(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3386 ; AMDGPU3-NEXT: call void @__kmpc_target_deinit()
3387 ; AMDGPU3-NEXT: ret void
3388 ; AMDGPU3: worker.exit:
3389 ; AMDGPU3-NEXT: ret void
3392 ; AMDGPU3: Function Attrs: convergent noinline norecurse nounwind
3393 ; AMDGPU3-LABEL: define {{[^@]+}}@__omp_outlined__15
3394 ; AMDGPU3-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3395 ; AMDGPU3-NEXT: entry:
3396 ; AMDGPU3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
3397 ; AMDGPU3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3398 ; AMDGPU3-NEXT: [[CALL:%.*]] = call i32 @omp_get_thread_num() #[[ATTR9]]
3399 ; AMDGPU3-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after.internalized(i32 [[CALL]]) #[[ATTR9]]
3400 ; AMDGPU3-NEXT: ret void
3403 ; AMDGPU3: Function Attrs: noinline nounwind
3404 ; AMDGPU3-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after.internalized
3405 ; AMDGPU3-SAME: (i32 [[A:%.*]]) #[[ATTR6]] {
3406 ; AMDGPU3-NEXT: entry:
3407 ; AMDGPU3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
3408 ; AMDGPU3-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
3409 ; AMDGPU3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
3410 ; AMDGPU3-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP0]], 0
3411 ; AMDGPU3-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
3413 ; AMDGPU3-NEXT: br label [[RETURN:%.*]]
3415 ; AMDGPU3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4
3416 ; AMDGPU3-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP1]], 1
3417 ; AMDGPU3-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after.internalized(i32 [[SUB]]) #[[ATTR9]]
3418 ; AMDGPU3-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after_after.internalized() #[[ATTR9]]
3419 ; AMDGPU3-NEXT: br label [[RETURN]]
3421 ; AMDGPU3-NEXT: ret void
3424 ; AMDGPU3: Function Attrs: convergent noinline nounwind
3425 ; AMDGPU3-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after
3426 ; AMDGPU3-SAME: (i32 [[A:%.*]]) #[[ATTR1]] {
3427 ; AMDGPU3-NEXT: entry:
3428 ; AMDGPU3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
3429 ; AMDGPU3-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
3430 ; AMDGPU3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
3431 ; AMDGPU3-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP0]], 0
3432 ; AMDGPU3-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
3434 ; AMDGPU3-NEXT: br label [[RETURN:%.*]]
3436 ; AMDGPU3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4
3437 ; AMDGPU3-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP1]], 1
3438 ; AMDGPU3-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after(i32 [[SUB]]) #[[ATTR11]]
3439 ; AMDGPU3-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after_after() #[[ATTR11]]
3440 ; AMDGPU3-NEXT: br label [[RETURN]]
3442 ; AMDGPU3-NEXT: ret void
3445 ; AMDGPU3: Function Attrs: convergent noinline norecurse nounwind
3446 ; AMDGPU3-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112
3447 ; AMDGPU3-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
3448 ; AMDGPU3-NEXT: entry:
3449 ; AMDGPU3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3450 ; AMDGPU3-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
3451 ; AMDGPU3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112_kernel_environment, ptr [[DYN]])
3452 ; AMDGPU3-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
3453 ; AMDGPU3-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
3454 ; AMDGPU3: user_code.entry:
3455 ; AMDGPU3-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
3456 ; AMDGPU3-NEXT: call void @__omp_outlined__16(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3457 ; AMDGPU3-NEXT: call void @__kmpc_target_deinit()
3458 ; AMDGPU3-NEXT: ret void
3459 ; AMDGPU3: worker.exit:
3460 ; AMDGPU3-NEXT: ret void
3463 ; AMDGPU3: Function Attrs: convergent noinline norecurse nounwind
3464 ; AMDGPU3-LABEL: define {{[^@]+}}@__omp_outlined__16
3465 ; AMDGPU3-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3466 ; AMDGPU3-NEXT: entry:
3467 ; AMDGPU3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
3468 ; AMDGPU3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3469 ; AMDGPU3-NEXT: call void @weak_callee_empty() #[[ATTR9]]
3470 ; AMDGPU3-NEXT: ret void
3473 ; AMDGPU3: Function Attrs: convergent noinline nounwind
3474 ; AMDGPU3-LABEL: define {{[^@]+}}@weak_callee_empty
3475 ; AMDGPU3-SAME: () #[[ATTR1]] {
3476 ; AMDGPU3-NEXT: entry:
3477 ; AMDGPU3-NEXT: ret void
3480 ; AMDGPU3: Function Attrs: convergent noinline norecurse nounwind
3481 ; AMDGPU3-LABEL: define {{[^@]+}}@__omp_outlined__17
3482 ; AMDGPU3-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3483 ; AMDGPU3-NEXT: entry:
3484 ; AMDGPU3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
3485 ; AMDGPU3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3486 ; AMDGPU3-NEXT: call void @p0() #[[ATTR11]]
3487 ; AMDGPU3-NEXT: ret void
3490 ; AMDGPU3: Function Attrs: convergent noinline norecurse nounwind
3491 ; AMDGPU3-LABEL: define {{[^@]+}}@__omp_outlined__17_wrapper
3492 ; AMDGPU3-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
3493 ; AMDGPU3-NEXT: entry:
3494 ; AMDGPU3-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
3495 ; AMDGPU3-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
3496 ; AMDGPU3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3497 ; AMDGPU3-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
3498 ; AMDGPU3-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
3499 ; AMDGPU3-NEXT: call void @__omp_outlined__17(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3500 ; AMDGPU3-NEXT: ret void
3503 ; AMDGPU3: Function Attrs: convergent noinline norecurse nounwind
3504 ; AMDGPU3-LABEL: define {{[^@]+}}@__omp_outlined__18
3505 ; AMDGPU3-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3506 ; AMDGPU3-NEXT: entry:
3507 ; AMDGPU3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
3508 ; AMDGPU3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3509 ; AMDGPU3-NEXT: call void @p0() #[[ATTR11]]
3510 ; AMDGPU3-NEXT: ret void
3513 ; AMDGPU3: Function Attrs: convergent noinline norecurse nounwind
3514 ; AMDGPU3-LABEL: define {{[^@]+}}@__omp_outlined__18_wrapper
3515 ; AMDGPU3-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
3516 ; AMDGPU3-NEXT: entry:
3517 ; AMDGPU3-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
3518 ; AMDGPU3-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
3519 ; AMDGPU3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3520 ; AMDGPU3-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
3521 ; AMDGPU3-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
3522 ; AMDGPU3-NEXT: call void @__omp_outlined__18(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3523 ; AMDGPU3-NEXT: ret void
3526 ; AMDGPU3: Function Attrs: noinline nounwind
3527 ; AMDGPU3-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after_after.internalized
3528 ; AMDGPU3-SAME: () #[[ATTR6]] {
3529 ; AMDGPU3-NEXT: entry:
3530 ; AMDGPU3-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
3531 ; AMDGPU3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]]) #[[ATTR3]]
3532 ; AMDGPU3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__19, ptr @__omp_outlined__19_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
3533 ; AMDGPU3-NEXT: ret void
3536 ; AMDGPU3: Function Attrs: convergent noinline nounwind
3537 ; AMDGPU3-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after_after
3538 ; AMDGPU3-SAME: () #[[ATTR1]] {
3539 ; AMDGPU3-NEXT: entry:
3540 ; AMDGPU3-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
3541 ; AMDGPU3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]])
3542 ; AMDGPU3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__19, ptr @__omp_outlined__19_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
3543 ; AMDGPU3-NEXT: ret void
3546 ; AMDGPU3: Function Attrs: convergent noinline norecurse nounwind
3547 ; AMDGPU3-LABEL: define {{[^@]+}}@__omp_outlined__19
3548 ; AMDGPU3-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3549 ; AMDGPU3-NEXT: entry:
3550 ; AMDGPU3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
3551 ; AMDGPU3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3552 ; AMDGPU3-NEXT: call void @p0() #[[ATTR11]]
3553 ; AMDGPU3-NEXT: ret void
3556 ; AMDGPU3: Function Attrs: convergent noinline norecurse nounwind
3557 ; AMDGPU3-LABEL: define {{[^@]+}}@__omp_outlined__19_wrapper
3558 ; AMDGPU3-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
3559 ; AMDGPU3-NEXT: entry:
3560 ; AMDGPU3-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
3561 ; AMDGPU3-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
3562 ; AMDGPU3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3563 ; AMDGPU3-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
3564 ; AMDGPU3-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
3565 ; AMDGPU3-NEXT: call void @__omp_outlined__19(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3566 ; AMDGPU3-NEXT: ret void
3569 ; NVPTX2: Function Attrs: convergent noinline norecurse nounwind
3570 ; NVPTX2-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_no_state_machine_needed_l14
3571 ; NVPTX2-SAME: (ptr [[DYN:%.*]]) #[[ATTR0:[0-9]+]] {
3572 ; NVPTX2-NEXT: entry:
3573 ; NVPTX2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3574 ; NVPTX2-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
3575 ; NVPTX2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_no_state_machine_needed_l14_kernel_environment, ptr [[DYN]])
3576 ; NVPTX2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
3577 ; NVPTX2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
3578 ; NVPTX2: user_code.entry:
3579 ; NVPTX2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3:[0-9]+]]
3580 ; NVPTX2-NEXT: call void @__omp_outlined__(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3581 ; NVPTX2-NEXT: call void @__kmpc_target_deinit()
3582 ; NVPTX2-NEXT: ret void
3583 ; NVPTX2: worker.exit:
3584 ; NVPTX2-NEXT: ret void
3587 ; NVPTX2: Function Attrs: convergent noinline norecurse nounwind
3588 ; NVPTX2-LABEL: define {{[^@]+}}@__omp_outlined__
3589 ; NVPTX2-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3590 ; NVPTX2-NEXT: entry:
3591 ; NVPTX2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
3592 ; NVPTX2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3593 ; NVPTX2-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR9:[0-9]+]]
3594 ; NVPTX2-NEXT: call void @unknown_no_openmp() #[[ATTR10:[0-9]+]]
3595 ; NVPTX2-NEXT: ret void
3598 ; NVPTX2: Function Attrs: convergent noinline nounwind
3599 ; NVPTX2-LABEL: define {{[^@]+}}@no_parallel_region_in_here.internalized
3600 ; NVPTX2-SAME: () #[[ATTR1:[0-9]+]] {
3601 ; NVPTX2-NEXT: entry:
3602 ; NVPTX2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]]) #[[ATTR3]]
3603 ; NVPTX2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_single(ptr @[[GLOB2]], i32 [[TMP0]]) #[[ATTR3]]
3604 ; NVPTX2-NEXT: [[TMP2:%.*]] = icmp ne i32 [[TMP1]], 0
3605 ; NVPTX2-NEXT: br i1 [[TMP2]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
3606 ; NVPTX2: omp_if.then:
3607 ; NVPTX2-NEXT: store i32 0, ptr @G, align 4
3608 ; NVPTX2-NEXT: call void @__kmpc_end_single(ptr @[[GLOB2]], i32 [[TMP0]]) #[[ATTR3]]
3609 ; NVPTX2-NEXT: br label [[OMP_IF_END]]
3610 ; NVPTX2: omp_if.end:
3611 ; NVPTX2-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3]], i32 [[TMP0]]) #[[ATTR3]]
3612 ; NVPTX2-NEXT: ret void
3615 ; NVPTX2: Function Attrs: convergent noinline nounwind
3616 ; NVPTX2-LABEL: define {{[^@]+}}@no_parallel_region_in_here
3617 ; NVPTX2-SAME: () #[[ATTR1]] {
3618 ; NVPTX2-NEXT: entry:
3619 ; NVPTX2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]])
3620 ; NVPTX2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_single(ptr @[[GLOB2]], i32 [[TMP0]])
3621 ; NVPTX2-NEXT: [[TMP2:%.*]] = icmp ne i32 [[TMP1]], 0
3622 ; NVPTX2-NEXT: br i1 [[TMP2]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
3623 ; NVPTX2: omp_if.then:
3624 ; NVPTX2-NEXT: store i32 0, ptr @G, align 4
3625 ; NVPTX2-NEXT: call void @__kmpc_end_single(ptr @[[GLOB2]], i32 [[TMP0]])
3626 ; NVPTX2-NEXT: br label [[OMP_IF_END]]
3627 ; NVPTX2: omp_if.end:
3628 ; NVPTX2-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3]], i32 [[TMP0]])
3629 ; NVPTX2-NEXT: ret void
3632 ; NVPTX2: Function Attrs: convergent noinline norecurse nounwind
3633 ; NVPTX2-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_l22
3634 ; NVPTX2-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
3635 ; NVPTX2-NEXT: entry:
3636 ; NVPTX2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3637 ; NVPTX2-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
3638 ; NVPTX2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_l22_kernel_environment, ptr [[DYN]])
3639 ; NVPTX2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
3640 ; NVPTX2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
3641 ; NVPTX2: user_code.entry:
3642 ; NVPTX2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
3643 ; NVPTX2-NEXT: call void @__omp_outlined__1(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3644 ; NVPTX2-NEXT: call void @__kmpc_target_deinit()
3645 ; NVPTX2-NEXT: ret void
3646 ; NVPTX2: worker.exit:
3647 ; NVPTX2-NEXT: ret void
3650 ; NVPTX2: Function Attrs: convergent noinline norecurse nounwind
3651 ; NVPTX2-LABEL: define {{[^@]+}}@__omp_outlined__1
3652 ; NVPTX2-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3653 ; NVPTX2-NEXT: entry:
3654 ; NVPTX2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3655 ; NVPTX2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
3656 ; NVPTX2-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
3657 ; NVPTX2-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
3658 ; NVPTX2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__2, ptr @__omp_outlined__2_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
3659 ; NVPTX2-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR9]]
3660 ; NVPTX2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__3, ptr @__omp_outlined__3_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
3661 ; NVPTX2-NEXT: ret void
3664 ; NVPTX2: Function Attrs: convergent noinline norecurse nounwind
3665 ; NVPTX2-LABEL: define {{[^@]+}}@__omp_outlined__2
3666 ; NVPTX2-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3667 ; NVPTX2-NEXT: entry:
3668 ; NVPTX2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
3669 ; NVPTX2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3670 ; NVPTX2-NEXT: call void @p0() #[[ATTR11:[0-9]+]]
3671 ; NVPTX2-NEXT: ret void
3674 ; NVPTX2: Function Attrs: convergent noinline norecurse nounwind
3675 ; NVPTX2-LABEL: define {{[^@]+}}@__omp_outlined__2_wrapper
3676 ; NVPTX2-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
3677 ; NVPTX2-NEXT: entry:
3678 ; NVPTX2-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
3679 ; NVPTX2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
3680 ; NVPTX2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3681 ; NVPTX2-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
3682 ; NVPTX2-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
3683 ; NVPTX2-NEXT: call void @__omp_outlined__2(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3684 ; NVPTX2-NEXT: ret void
3687 ; NVPTX2: Function Attrs: convergent noinline norecurse nounwind
3688 ; NVPTX2-LABEL: define {{[^@]+}}@__omp_outlined__3
3689 ; NVPTX2-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3690 ; NVPTX2-NEXT: entry:
3691 ; NVPTX2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
3692 ; NVPTX2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3693 ; NVPTX2-NEXT: call void @p1() #[[ATTR11]]
3694 ; NVPTX2-NEXT: ret void
3697 ; NVPTX2: Function Attrs: convergent noinline norecurse nounwind
3698 ; NVPTX2-LABEL: define {{[^@]+}}@__omp_outlined__3_wrapper
3699 ; NVPTX2-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
3700 ; NVPTX2-NEXT: entry:
3701 ; NVPTX2-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
3702 ; NVPTX2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
3703 ; NVPTX2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3704 ; NVPTX2-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
3705 ; NVPTX2-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
3706 ; NVPTX2-NEXT: call void @__omp_outlined__3(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3707 ; NVPTX2-NEXT: ret void
3710 ; NVPTX2: Function Attrs: convergent noinline norecurse nounwind
3711 ; NVPTX2-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39
3712 ; NVPTX2-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
3713 ; NVPTX2-NEXT: entry:
3714 ; NVPTX2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3715 ; NVPTX2-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
3716 ; NVPTX2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39_kernel_environment, ptr [[DYN]])
3717 ; NVPTX2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
3718 ; NVPTX2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
3719 ; NVPTX2: user_code.entry:
3720 ; NVPTX2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
3721 ; NVPTX2-NEXT: call void @__omp_outlined__4(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3722 ; NVPTX2-NEXT: call void @__kmpc_target_deinit()
3723 ; NVPTX2-NEXT: ret void
3724 ; NVPTX2: worker.exit:
3725 ; NVPTX2-NEXT: ret void
3728 ; NVPTX2: Function Attrs: convergent noinline norecurse nounwind
3729 ; NVPTX2-LABEL: define {{[^@]+}}@__omp_outlined__4
3730 ; NVPTX2-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3731 ; NVPTX2-NEXT: entry:
3732 ; NVPTX2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3733 ; NVPTX2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
3734 ; NVPTX2-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
3735 ; NVPTX2-NEXT: call void @simple_state_machine_interprocedural_before.internalized() #[[ATTR9]]
3736 ; NVPTX2-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR9]]
3737 ; NVPTX2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__5, ptr @__omp_outlined__5_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
3738 ; NVPTX2-NEXT: call void @simple_state_machine_interprocedural_after.internalized() #[[ATTR9]]
3739 ; NVPTX2-NEXT: ret void
3742 ; NVPTX2: Function Attrs: noinline nounwind
3743 ; NVPTX2-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_before.internalized
3744 ; NVPTX2-SAME: () #[[ATTR6:[0-9]+]] {
3745 ; NVPTX2-NEXT: entry:
3746 ; NVPTX2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
3747 ; NVPTX2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]]) #[[ATTR3]]
3748 ; NVPTX2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__17, ptr @__omp_outlined__17_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
3749 ; NVPTX2-NEXT: ret void
3752 ; NVPTX2: Function Attrs: convergent noinline nounwind
3753 ; NVPTX2-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_before
3754 ; NVPTX2-SAME: () #[[ATTR1]] {
3755 ; NVPTX2-NEXT: entry:
3756 ; NVPTX2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
3757 ; NVPTX2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]])
3758 ; NVPTX2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__17, ptr @__omp_outlined__17_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
3759 ; NVPTX2-NEXT: ret void
3762 ; NVPTX2: Function Attrs: convergent noinline norecurse nounwind
3763 ; NVPTX2-LABEL: define {{[^@]+}}@__omp_outlined__5
3764 ; NVPTX2-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3765 ; NVPTX2-NEXT: entry:
3766 ; NVPTX2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
3767 ; NVPTX2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3768 ; NVPTX2-NEXT: call void @p1() #[[ATTR11]]
3769 ; NVPTX2-NEXT: ret void
3772 ; NVPTX2: Function Attrs: convergent noinline norecurse nounwind
3773 ; NVPTX2-LABEL: define {{[^@]+}}@__omp_outlined__5_wrapper
3774 ; NVPTX2-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
3775 ; NVPTX2-NEXT: entry:
3776 ; NVPTX2-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
3777 ; NVPTX2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
3778 ; NVPTX2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3779 ; NVPTX2-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
3780 ; NVPTX2-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
3781 ; NVPTX2-NEXT: call void @__omp_outlined__5(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3782 ; NVPTX2-NEXT: ret void
3785 ; NVPTX2: Function Attrs: noinline nounwind
3786 ; NVPTX2-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_after.internalized
3787 ; NVPTX2-SAME: () #[[ATTR6]] {
3788 ; NVPTX2-NEXT: entry:
3789 ; NVPTX2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
3790 ; NVPTX2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]]) #[[ATTR3]]
3791 ; NVPTX2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__18, ptr @__omp_outlined__18_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
3792 ; NVPTX2-NEXT: ret void
3795 ; NVPTX2: Function Attrs: convergent noinline nounwind
3796 ; NVPTX2-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_after
3797 ; NVPTX2-SAME: () #[[ATTR1]] {
3798 ; NVPTX2-NEXT: entry:
3799 ; NVPTX2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
3800 ; NVPTX2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]])
3801 ; NVPTX2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__18, ptr @__omp_outlined__18_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
3802 ; NVPTX2-NEXT: ret void
3805 ; NVPTX2: Function Attrs: convergent noinline norecurse nounwind
3806 ; NVPTX2-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55
3807 ; NVPTX2-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
3808 ; NVPTX2-NEXT: entry:
3809 ; NVPTX2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3810 ; NVPTX2-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
3811 ; NVPTX2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55_kernel_environment, ptr [[DYN]])
3812 ; NVPTX2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
3813 ; NVPTX2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
3814 ; NVPTX2: user_code.entry:
3815 ; NVPTX2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
3816 ; NVPTX2-NEXT: call void @__omp_outlined__6(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3817 ; NVPTX2-NEXT: call void @__kmpc_target_deinit()
3818 ; NVPTX2-NEXT: ret void
3819 ; NVPTX2: worker.exit:
3820 ; NVPTX2-NEXT: ret void
3823 ; NVPTX2: Function Attrs: convergent noinline norecurse nounwind
3824 ; NVPTX2-LABEL: define {{[^@]+}}@__omp_outlined__6
3825 ; NVPTX2-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3826 ; NVPTX2-NEXT: entry:
3827 ; NVPTX2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3828 ; NVPTX2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
3829 ; NVPTX2-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
3830 ; NVPTX2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__7, ptr @__omp_outlined__7_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
3831 ; NVPTX2-NEXT: [[CALL:%.*]] = call i32 @unknown() #[[ATTR11]]
3832 ; NVPTX2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__8, ptr @__omp_outlined__8_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
3833 ; NVPTX2-NEXT: ret void
3836 ; NVPTX2: Function Attrs: convergent noinline norecurse nounwind
3837 ; NVPTX2-LABEL: define {{[^@]+}}@__omp_outlined__7
3838 ; NVPTX2-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3839 ; NVPTX2-NEXT: entry:
3840 ; NVPTX2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
3841 ; NVPTX2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3842 ; NVPTX2-NEXT: call void @p0() #[[ATTR11]]
3843 ; NVPTX2-NEXT: ret void
3846 ; NVPTX2: Function Attrs: convergent noinline norecurse nounwind
3847 ; NVPTX2-LABEL: define {{[^@]+}}@__omp_outlined__7_wrapper
3848 ; NVPTX2-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
3849 ; NVPTX2-NEXT: entry:
3850 ; NVPTX2-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
3851 ; NVPTX2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
3852 ; NVPTX2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3853 ; NVPTX2-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
3854 ; NVPTX2-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
3855 ; NVPTX2-NEXT: call void @__omp_outlined__7(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3856 ; NVPTX2-NEXT: ret void
3859 ; NVPTX2: Function Attrs: convergent noinline norecurse nounwind
3860 ; NVPTX2-LABEL: define {{[^@]+}}@__omp_outlined__8
3861 ; NVPTX2-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3862 ; NVPTX2-NEXT: entry:
3863 ; NVPTX2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
3864 ; NVPTX2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3865 ; NVPTX2-NEXT: call void @p1() #[[ATTR11]]
3866 ; NVPTX2-NEXT: ret void
3869 ; NVPTX2: Function Attrs: convergent noinline norecurse nounwind
3870 ; NVPTX2-LABEL: define {{[^@]+}}@__omp_outlined__8_wrapper
3871 ; NVPTX2-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
3872 ; NVPTX2-NEXT: entry:
3873 ; NVPTX2-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
3874 ; NVPTX2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
3875 ; NVPTX2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3876 ; NVPTX2-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
3877 ; NVPTX2-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
3878 ; NVPTX2-NEXT: call void @__omp_outlined__8(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3879 ; NVPTX2-NEXT: ret void
3882 ; NVPTX2: Function Attrs: convergent noinline norecurse nounwind
3883 ; NVPTX2-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66
3884 ; NVPTX2-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
3885 ; NVPTX2-NEXT: entry:
3886 ; NVPTX2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3887 ; NVPTX2-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
3888 ; NVPTX2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66_kernel_environment, ptr [[DYN]])
3889 ; NVPTX2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
3890 ; NVPTX2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
3891 ; NVPTX2: user_code.entry:
3892 ; NVPTX2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
3893 ; NVPTX2-NEXT: call void @__omp_outlined__9(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3894 ; NVPTX2-NEXT: call void @__kmpc_target_deinit()
3895 ; NVPTX2-NEXT: ret void
3896 ; NVPTX2: worker.exit:
3897 ; NVPTX2-NEXT: ret void
3900 ; NVPTX2: Function Attrs: convergent noinline norecurse nounwind
3901 ; NVPTX2-LABEL: define {{[^@]+}}@__omp_outlined__9
3902 ; NVPTX2-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3903 ; NVPTX2-NEXT: entry:
3904 ; NVPTX2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3905 ; NVPTX2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
3906 ; NVPTX2-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
3907 ; NVPTX2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__10, ptr @__omp_outlined__10_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
3908 ; NVPTX2-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
3909 ; NVPTX2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__11, ptr @__omp_outlined__11_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
3910 ; NVPTX2-NEXT: ret void
3913 ; NVPTX2: Function Attrs: convergent noinline norecurse nounwind
3914 ; NVPTX2-LABEL: define {{[^@]+}}@__omp_outlined__10
3915 ; NVPTX2-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3916 ; NVPTX2-NEXT: entry:
3917 ; NVPTX2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
3918 ; NVPTX2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3919 ; NVPTX2-NEXT: call void @p0() #[[ATTR11]]
3920 ; NVPTX2-NEXT: ret void
3923 ; NVPTX2: Function Attrs: convergent noinline norecurse nounwind
3924 ; NVPTX2-LABEL: define {{[^@]+}}@__omp_outlined__10_wrapper
3925 ; NVPTX2-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
3926 ; NVPTX2-NEXT: entry:
3927 ; NVPTX2-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
3928 ; NVPTX2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
3929 ; NVPTX2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3930 ; NVPTX2-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
3931 ; NVPTX2-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
3932 ; NVPTX2-NEXT: call void @__omp_outlined__10(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3933 ; NVPTX2-NEXT: ret void
3936 ; NVPTX2: Function Attrs: convergent noinline norecurse nounwind
3937 ; NVPTX2-LABEL: define {{[^@]+}}@__omp_outlined__11
3938 ; NVPTX2-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3939 ; NVPTX2-NEXT: entry:
3940 ; NVPTX2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
3941 ; NVPTX2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3942 ; NVPTX2-NEXT: call void @p1() #[[ATTR11]]
3943 ; NVPTX2-NEXT: ret void
3946 ; NVPTX2: Function Attrs: convergent noinline norecurse nounwind
3947 ; NVPTX2-LABEL: define {{[^@]+}}@__omp_outlined__11_wrapper
3948 ; NVPTX2-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
3949 ; NVPTX2-NEXT: entry:
3950 ; NVPTX2-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
3951 ; NVPTX2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
3952 ; NVPTX2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3953 ; NVPTX2-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
3954 ; NVPTX2-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
3955 ; NVPTX2-NEXT: call void @__omp_outlined__11(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3956 ; NVPTX2-NEXT: ret void
3959 ; NVPTX2: Function Attrs: convergent noinline norecurse nounwind
3960 ; NVPTX2-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_pure_l77
3961 ; NVPTX2-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
3962 ; NVPTX2-NEXT: entry:
3963 ; NVPTX2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3964 ; NVPTX2-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
3965 ; NVPTX2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_pure_l77_kernel_environment, ptr [[DYN]])
3966 ; NVPTX2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
3967 ; NVPTX2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
3968 ; NVPTX2: user_code.entry:
3969 ; NVPTX2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
3970 ; NVPTX2-NEXT: call void @__omp_outlined__12(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3971 ; NVPTX2-NEXT: call void @__kmpc_target_deinit()
3972 ; NVPTX2-NEXT: ret void
3973 ; NVPTX2: worker.exit:
3974 ; NVPTX2-NEXT: ret void
3977 ; NVPTX2: Function Attrs: convergent noinline norecurse nounwind
3978 ; NVPTX2-LABEL: define {{[^@]+}}@__omp_outlined__12
3979 ; NVPTX2-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3980 ; NVPTX2-NEXT: entry:
3981 ; NVPTX2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3982 ; NVPTX2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
3983 ; NVPTX2-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
3984 ; NVPTX2-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
3985 ; NVPTX2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__13, ptr @__omp_outlined__13_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
3986 ; NVPTX2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__14, ptr @__omp_outlined__14_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
3987 ; NVPTX2-NEXT: ret void
3990 ; NVPTX2: Function Attrs: convergent noinline norecurse nounwind
3991 ; NVPTX2-LABEL: define {{[^@]+}}@__omp_outlined__13
3992 ; NVPTX2-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3993 ; NVPTX2-NEXT: entry:
3994 ; NVPTX2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
3995 ; NVPTX2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3996 ; NVPTX2-NEXT: call void @p0() #[[ATTR11]]
3997 ; NVPTX2-NEXT: ret void
4000 ; NVPTX2: Function Attrs: convergent noinline norecurse nounwind
4001 ; NVPTX2-LABEL: define {{[^@]+}}@__omp_outlined__13_wrapper
4002 ; NVPTX2-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
4003 ; NVPTX2-NEXT: entry:
4004 ; NVPTX2-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
4005 ; NVPTX2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
4006 ; NVPTX2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
4007 ; NVPTX2-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
4008 ; NVPTX2-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
4009 ; NVPTX2-NEXT: call void @__omp_outlined__13(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
4010 ; NVPTX2-NEXT: ret void
4013 ; NVPTX2: Function Attrs: convergent noinline norecurse nounwind
4014 ; NVPTX2-LABEL: define {{[^@]+}}@__omp_outlined__14
4015 ; NVPTX2-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
4016 ; NVPTX2-NEXT: entry:
4017 ; NVPTX2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
4018 ; NVPTX2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
4019 ; NVPTX2-NEXT: call void @p1() #[[ATTR11]]
4020 ; NVPTX2-NEXT: ret void
4023 ; NVPTX2: Function Attrs: convergent noinline norecurse nounwind
4024 ; NVPTX2-LABEL: define {{[^@]+}}@__omp_outlined__14_wrapper
4025 ; NVPTX2-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
4026 ; NVPTX2-NEXT: entry:
4027 ; NVPTX2-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
4028 ; NVPTX2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
4029 ; NVPTX2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
4030 ; NVPTX2-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
4031 ; NVPTX2-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
4032 ; NVPTX2-NEXT: call void @__omp_outlined__14(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
4033 ; NVPTX2-NEXT: ret void
4036 ; NVPTX2: Function Attrs: convergent noinline norecurse nounwind
4037 ; NVPTX2-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92
4038 ; NVPTX2-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
4039 ; NVPTX2-NEXT: entry:
4040 ; NVPTX2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
4041 ; NVPTX2-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
4042 ; NVPTX2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92_kernel_environment, ptr [[DYN]])
4043 ; NVPTX2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
4044 ; NVPTX2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
4045 ; NVPTX2: user_code.entry:
4046 ; NVPTX2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
4047 ; NVPTX2-NEXT: call void @__omp_outlined__15(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
4048 ; NVPTX2-NEXT: call void @__kmpc_target_deinit()
4049 ; NVPTX2-NEXT: ret void
4050 ; NVPTX2: worker.exit:
4051 ; NVPTX2-NEXT: ret void
4054 ; NVPTX2: Function Attrs: convergent noinline norecurse nounwind
4055 ; NVPTX2-LABEL: define {{[^@]+}}@__omp_outlined__15
4056 ; NVPTX2-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
4057 ; NVPTX2-NEXT: entry:
4058 ; NVPTX2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
4059 ; NVPTX2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
4060 ; NVPTX2-NEXT: [[CALL:%.*]] = call i32 @omp_get_thread_num() #[[ATTR9]]
4061 ; NVPTX2-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after.internalized(i32 [[CALL]]) #[[ATTR9]]
4062 ; NVPTX2-NEXT: ret void
4065 ; NVPTX2: Function Attrs: noinline nounwind
4066 ; NVPTX2-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after.internalized
4067 ; NVPTX2-SAME: (i32 [[A:%.*]]) #[[ATTR6]] {
4068 ; NVPTX2-NEXT: entry:
4069 ; NVPTX2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
4070 ; NVPTX2-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
4071 ; NVPTX2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
4072 ; NVPTX2-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP0]], 0
4073 ; NVPTX2-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
4075 ; NVPTX2-NEXT: br label [[RETURN:%.*]]
4077 ; NVPTX2-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4
4078 ; NVPTX2-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP1]], 1
4079 ; NVPTX2-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after.internalized(i32 [[SUB]]) #[[ATTR9]]
4080 ; NVPTX2-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after_after.internalized() #[[ATTR9]]
4081 ; NVPTX2-NEXT: br label [[RETURN]]
4083 ; NVPTX2-NEXT: ret void
4086 ; NVPTX2: Function Attrs: convergent noinline nounwind
4087 ; NVPTX2-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after
4088 ; NVPTX2-SAME: (i32 [[A:%.*]]) #[[ATTR1]] {
4089 ; NVPTX2-NEXT: entry:
4090 ; NVPTX2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
4091 ; NVPTX2-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
4092 ; NVPTX2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
4093 ; NVPTX2-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP0]], 0
4094 ; NVPTX2-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
4096 ; NVPTX2-NEXT: br label [[RETURN:%.*]]
4098 ; NVPTX2-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4
4099 ; NVPTX2-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP1]], 1
4100 ; NVPTX2-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after(i32 [[SUB]]) #[[ATTR11]]
4101 ; NVPTX2-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after_after() #[[ATTR11]]
4102 ; NVPTX2-NEXT: br label [[RETURN]]
4104 ; NVPTX2-NEXT: ret void
4107 ; NVPTX2: Function Attrs: convergent noinline norecurse nounwind
4108 ; NVPTX2-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112
4109 ; NVPTX2-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
4110 ; NVPTX2-NEXT: entry:
4111 ; NVPTX2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
4112 ; NVPTX2-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
4113 ; NVPTX2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112_kernel_environment, ptr [[DYN]])
4114 ; NVPTX2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
4115 ; NVPTX2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
4116 ; NVPTX2: user_code.entry:
4117 ; NVPTX2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
4118 ; NVPTX2-NEXT: call void @__omp_outlined__16(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
4119 ; NVPTX2-NEXT: call void @__kmpc_target_deinit()
4120 ; NVPTX2-NEXT: ret void
4121 ; NVPTX2: worker.exit:
4122 ; NVPTX2-NEXT: ret void
4125 ; NVPTX2: Function Attrs: convergent noinline norecurse nounwind
4126 ; NVPTX2-LABEL: define {{[^@]+}}@__omp_outlined__16
4127 ; NVPTX2-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
4128 ; NVPTX2-NEXT: entry:
4129 ; NVPTX2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
4130 ; NVPTX2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
4131 ; NVPTX2-NEXT: call void @weak_callee_empty() #[[ATTR9]]
4132 ; NVPTX2-NEXT: ret void
4135 ; NVPTX2: Function Attrs: convergent noinline nounwind
4136 ; NVPTX2-LABEL: define {{[^@]+}}@weak_callee_empty
4137 ; NVPTX2-SAME: () #[[ATTR1]] {
4138 ; NVPTX2-NEXT: entry:
4139 ; NVPTX2-NEXT: ret void
4142 ; NVPTX2: Function Attrs: convergent noinline norecurse nounwind
4143 ; NVPTX2-LABEL: define {{[^@]+}}@__omp_outlined__17
4144 ; NVPTX2-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
4145 ; NVPTX2-NEXT: entry:
4146 ; NVPTX2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
4147 ; NVPTX2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
4148 ; NVPTX2-NEXT: call void @p0() #[[ATTR11]]
4149 ; NVPTX2-NEXT: ret void
4152 ; NVPTX2: Function Attrs: convergent noinline norecurse nounwind
4153 ; NVPTX2-LABEL: define {{[^@]+}}@__omp_outlined__17_wrapper
4154 ; NVPTX2-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
4155 ; NVPTX2-NEXT: entry:
4156 ; NVPTX2-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
4157 ; NVPTX2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
4158 ; NVPTX2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
4159 ; NVPTX2-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
4160 ; NVPTX2-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
4161 ; NVPTX2-NEXT: call void @__omp_outlined__17(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
4162 ; NVPTX2-NEXT: ret void
4165 ; NVPTX2: Function Attrs: convergent noinline norecurse nounwind
4166 ; NVPTX2-LABEL: define {{[^@]+}}@__omp_outlined__18
4167 ; NVPTX2-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
4168 ; NVPTX2-NEXT: entry:
4169 ; NVPTX2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
4170 ; NVPTX2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
4171 ; NVPTX2-NEXT: call void @p0() #[[ATTR11]]
4172 ; NVPTX2-NEXT: ret void
4175 ; NVPTX2: Function Attrs: convergent noinline norecurse nounwind
4176 ; NVPTX2-LABEL: define {{[^@]+}}@__omp_outlined__18_wrapper
4177 ; NVPTX2-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
4178 ; NVPTX2-NEXT: entry:
4179 ; NVPTX2-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
4180 ; NVPTX2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
4181 ; NVPTX2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
4182 ; NVPTX2-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
4183 ; NVPTX2-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
4184 ; NVPTX2-NEXT: call void @__omp_outlined__18(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
4185 ; NVPTX2-NEXT: ret void
4188 ; NVPTX2: Function Attrs: noinline nounwind
4189 ; NVPTX2-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after_after.internalized
4190 ; NVPTX2-SAME: () #[[ATTR6]] {
4191 ; NVPTX2-NEXT: entry:
4192 ; NVPTX2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
4193 ; NVPTX2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]]) #[[ATTR3]]
4194 ; NVPTX2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__19, ptr @__omp_outlined__19_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
4195 ; NVPTX2-NEXT: ret void
4198 ; NVPTX2: Function Attrs: convergent noinline nounwind
4199 ; NVPTX2-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after_after
4200 ; NVPTX2-SAME: () #[[ATTR1]] {
4201 ; NVPTX2-NEXT: entry:
4202 ; NVPTX2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
4203 ; NVPTX2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]])
4204 ; NVPTX2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__19, ptr @__omp_outlined__19_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
4205 ; NVPTX2-NEXT: ret void
4208 ; NVPTX2: Function Attrs: convergent noinline norecurse nounwind
4209 ; NVPTX2-LABEL: define {{[^@]+}}@__omp_outlined__19
4210 ; NVPTX2-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
4211 ; NVPTX2-NEXT: entry:
4212 ; NVPTX2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
4213 ; NVPTX2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
4214 ; NVPTX2-NEXT: call void @p0() #[[ATTR11]]
4215 ; NVPTX2-NEXT: ret void
4218 ; NVPTX2: Function Attrs: convergent noinline norecurse nounwind
4219 ; NVPTX2-LABEL: define {{[^@]+}}@__omp_outlined__19_wrapper
4220 ; NVPTX2-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
4221 ; NVPTX2-NEXT: entry:
4222 ; NVPTX2-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
4223 ; NVPTX2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
4224 ; NVPTX2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
4225 ; NVPTX2-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
4226 ; NVPTX2-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
4227 ; NVPTX2-NEXT: call void @__omp_outlined__19(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
4228 ; NVPTX2-NEXT: ret void
4231 ; NVPTX3: Function Attrs: convergent noinline norecurse nounwind
4232 ; NVPTX3-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_no_state_machine_needed_l14
4233 ; NVPTX3-SAME: (ptr [[DYN:%.*]]) #[[ATTR0:[0-9]+]] {
4234 ; NVPTX3-NEXT: entry:
4235 ; NVPTX3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
4236 ; NVPTX3-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
4237 ; NVPTX3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_no_state_machine_needed_l14_kernel_environment, ptr [[DYN]])
4238 ; NVPTX3-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
4239 ; NVPTX3-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
4240 ; NVPTX3: user_code.entry:
4241 ; NVPTX3-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3:[0-9]+]]
4242 ; NVPTX3-NEXT: call void @__omp_outlined__(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
4243 ; NVPTX3-NEXT: call void @__kmpc_target_deinit()
4244 ; NVPTX3-NEXT: ret void
4245 ; NVPTX3: worker.exit:
4246 ; NVPTX3-NEXT: ret void
4249 ; NVPTX3: Function Attrs: convergent noinline norecurse nounwind
4250 ; NVPTX3-LABEL: define {{[^@]+}}@__omp_outlined__
4251 ; NVPTX3-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
4252 ; NVPTX3-NEXT: entry:
4253 ; NVPTX3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
4254 ; NVPTX3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
4255 ; NVPTX3-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR9:[0-9]+]]
4256 ; NVPTX3-NEXT: call void @unknown_no_openmp() #[[ATTR10:[0-9]+]]
4257 ; NVPTX3-NEXT: ret void
4260 ; NVPTX3: Function Attrs: convergent noinline nounwind
4261 ; NVPTX3-LABEL: define {{[^@]+}}@no_parallel_region_in_here.internalized
4262 ; NVPTX3-SAME: () #[[ATTR1:[0-9]+]] {
4263 ; NVPTX3-NEXT: entry:
4264 ; NVPTX3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]]) #[[ATTR3]]
4265 ; NVPTX3-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_single(ptr @[[GLOB2]], i32 [[TMP0]]) #[[ATTR3]]
4266 ; NVPTX3-NEXT: [[TMP2:%.*]] = icmp ne i32 [[TMP1]], 0
4267 ; NVPTX3-NEXT: br i1 [[TMP2]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
4268 ; NVPTX3: omp_if.then:
4269 ; NVPTX3-NEXT: store i32 0, ptr @G, align 4
4270 ; NVPTX3-NEXT: call void @__kmpc_end_single(ptr @[[GLOB2]], i32 [[TMP0]]) #[[ATTR3]]
4271 ; NVPTX3-NEXT: br label [[OMP_IF_END]]
4272 ; NVPTX3: omp_if.end:
4273 ; NVPTX3-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3]], i32 [[TMP0]]) #[[ATTR3]]
4274 ; NVPTX3-NEXT: ret void
4277 ; NVPTX3: Function Attrs: convergent noinline nounwind
4278 ; NVPTX3-LABEL: define {{[^@]+}}@no_parallel_region_in_here
4279 ; NVPTX3-SAME: () #[[ATTR1]] {
4280 ; NVPTX3-NEXT: entry:
4281 ; NVPTX3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]])
4282 ; NVPTX3-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_single(ptr @[[GLOB2]], i32 [[TMP0]])
4283 ; NVPTX3-NEXT: [[TMP2:%.*]] = icmp ne i32 [[TMP1]], 0
4284 ; NVPTX3-NEXT: br i1 [[TMP2]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
4285 ; NVPTX3: omp_if.then:
4286 ; NVPTX3-NEXT: store i32 0, ptr @G, align 4
4287 ; NVPTX3-NEXT: call void @__kmpc_end_single(ptr @[[GLOB2]], i32 [[TMP0]])
4288 ; NVPTX3-NEXT: br label [[OMP_IF_END]]
4289 ; NVPTX3: omp_if.end:
4290 ; NVPTX3-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3]], i32 [[TMP0]])
4291 ; NVPTX3-NEXT: ret void
4294 ; NVPTX3: Function Attrs: convergent noinline norecurse nounwind
4295 ; NVPTX3-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_l22
4296 ; NVPTX3-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
4297 ; NVPTX3-NEXT: entry:
4298 ; NVPTX3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
4299 ; NVPTX3-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
4300 ; NVPTX3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_l22_kernel_environment, ptr [[DYN]])
4301 ; NVPTX3-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
4302 ; NVPTX3-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
4303 ; NVPTX3: user_code.entry:
4304 ; NVPTX3-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
4305 ; NVPTX3-NEXT: call void @__omp_outlined__1(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
4306 ; NVPTX3-NEXT: call void @__kmpc_target_deinit()
4307 ; NVPTX3-NEXT: ret void
4308 ; NVPTX3: worker.exit:
4309 ; NVPTX3-NEXT: ret void
4312 ; NVPTX3: Function Attrs: convergent noinline norecurse nounwind
4313 ; NVPTX3-LABEL: define {{[^@]+}}@__omp_outlined__1
4314 ; NVPTX3-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
4315 ; NVPTX3-NEXT: entry:
4316 ; NVPTX3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
4317 ; NVPTX3-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
4318 ; NVPTX3-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
4319 ; NVPTX3-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
4320 ; NVPTX3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__2, ptr @__omp_outlined__2_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
4321 ; NVPTX3-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR9]]
4322 ; NVPTX3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__3, ptr @__omp_outlined__3_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
4323 ; NVPTX3-NEXT: ret void
4326 ; NVPTX3: Function Attrs: convergent noinline norecurse nounwind
4327 ; NVPTX3-LABEL: define {{[^@]+}}@__omp_outlined__2
4328 ; NVPTX3-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
4329 ; NVPTX3-NEXT: entry:
4330 ; NVPTX3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
4331 ; NVPTX3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
4332 ; NVPTX3-NEXT: call void @p0() #[[ATTR11:[0-9]+]]
4333 ; NVPTX3-NEXT: ret void
4336 ; NVPTX3: Function Attrs: convergent noinline norecurse nounwind
4337 ; NVPTX3-LABEL: define {{[^@]+}}@__omp_outlined__2_wrapper
4338 ; NVPTX3-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
4339 ; NVPTX3-NEXT: entry:
4340 ; NVPTX3-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
4341 ; NVPTX3-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
4342 ; NVPTX3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
4343 ; NVPTX3-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
4344 ; NVPTX3-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
4345 ; NVPTX3-NEXT: call void @__omp_outlined__2(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
4346 ; NVPTX3-NEXT: ret void
4349 ; NVPTX3: Function Attrs: convergent noinline norecurse nounwind
4350 ; NVPTX3-LABEL: define {{[^@]+}}@__omp_outlined__3
4351 ; NVPTX3-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
4352 ; NVPTX3-NEXT: entry:
4353 ; NVPTX3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
4354 ; NVPTX3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
4355 ; NVPTX3-NEXT: call void @p1() #[[ATTR11]]
4356 ; NVPTX3-NEXT: ret void
4359 ; NVPTX3: Function Attrs: convergent noinline norecurse nounwind
4360 ; NVPTX3-LABEL: define {{[^@]+}}@__omp_outlined__3_wrapper
4361 ; NVPTX3-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
4362 ; NVPTX3-NEXT: entry:
4363 ; NVPTX3-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
4364 ; NVPTX3-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
4365 ; NVPTX3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
4366 ; NVPTX3-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
4367 ; NVPTX3-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
4368 ; NVPTX3-NEXT: call void @__omp_outlined__3(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
4369 ; NVPTX3-NEXT: ret void
4372 ; NVPTX3: Function Attrs: convergent noinline norecurse nounwind
4373 ; NVPTX3-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39
4374 ; NVPTX3-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
4375 ; NVPTX3-NEXT: entry:
4376 ; NVPTX3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
4377 ; NVPTX3-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
4378 ; NVPTX3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39_kernel_environment, ptr [[DYN]])
4379 ; NVPTX3-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
4380 ; NVPTX3-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
4381 ; NVPTX3: user_code.entry:
4382 ; NVPTX3-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
4383 ; NVPTX3-NEXT: call void @__omp_outlined__4(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
4384 ; NVPTX3-NEXT: call void @__kmpc_target_deinit()
4385 ; NVPTX3-NEXT: ret void
4386 ; NVPTX3: worker.exit:
4387 ; NVPTX3-NEXT: ret void
4390 ; NVPTX3: Function Attrs: convergent noinline norecurse nounwind
4391 ; NVPTX3-LABEL: define {{[^@]+}}@__omp_outlined__4
4392 ; NVPTX3-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
4393 ; NVPTX3-NEXT: entry:
4394 ; NVPTX3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
4395 ; NVPTX3-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
4396 ; NVPTX3-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
4397 ; NVPTX3-NEXT: call void @simple_state_machine_interprocedural_before.internalized() #[[ATTR9]]
4398 ; NVPTX3-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR9]]
4399 ; NVPTX3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__5, ptr @__omp_outlined__5_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
4400 ; NVPTX3-NEXT: call void @simple_state_machine_interprocedural_after.internalized() #[[ATTR9]]
4401 ; NVPTX3-NEXT: ret void
4404 ; NVPTX3: Function Attrs: noinline nounwind
4405 ; NVPTX3-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_before.internalized
4406 ; NVPTX3-SAME: () #[[ATTR6:[0-9]+]] {
4407 ; NVPTX3-NEXT: entry:
4408 ; NVPTX3-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
4409 ; NVPTX3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]]) #[[ATTR3]]
4410 ; NVPTX3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__17, ptr @__omp_outlined__17_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
4411 ; NVPTX3-NEXT: ret void
4414 ; NVPTX3: Function Attrs: convergent noinline nounwind
4415 ; NVPTX3-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_before
4416 ; NVPTX3-SAME: () #[[ATTR1]] {
4417 ; NVPTX3-NEXT: entry:
4418 ; NVPTX3-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
4419 ; NVPTX3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]])
4420 ; NVPTX3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__17, ptr @__omp_outlined__17_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
4421 ; NVPTX3-NEXT: ret void
4424 ; NVPTX3: Function Attrs: convergent noinline norecurse nounwind
4425 ; NVPTX3-LABEL: define {{[^@]+}}@__omp_outlined__5
4426 ; NVPTX3-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
4427 ; NVPTX3-NEXT: entry:
4428 ; NVPTX3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
4429 ; NVPTX3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
4430 ; NVPTX3-NEXT: call void @p1() #[[ATTR11]]
4431 ; NVPTX3-NEXT: ret void
4434 ; NVPTX3: Function Attrs: convergent noinline norecurse nounwind
4435 ; NVPTX3-LABEL: define {{[^@]+}}@__omp_outlined__5_wrapper
4436 ; NVPTX3-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
4437 ; NVPTX3-NEXT: entry:
4438 ; NVPTX3-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
4439 ; NVPTX3-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
4440 ; NVPTX3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
4441 ; NVPTX3-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
4442 ; NVPTX3-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
4443 ; NVPTX3-NEXT: call void @__omp_outlined__5(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
4444 ; NVPTX3-NEXT: ret void
4447 ; NVPTX3: Function Attrs: noinline nounwind
4448 ; NVPTX3-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_after.internalized
4449 ; NVPTX3-SAME: () #[[ATTR6]] {
4450 ; NVPTX3-NEXT: entry:
4451 ; NVPTX3-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
4452 ; NVPTX3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]]) #[[ATTR3]]
4453 ; NVPTX3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__18, ptr @__omp_outlined__18_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
4454 ; NVPTX3-NEXT: ret void
4457 ; NVPTX3: Function Attrs: convergent noinline nounwind
4458 ; NVPTX3-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_after
4459 ; NVPTX3-SAME: () #[[ATTR1]] {
4460 ; NVPTX3-NEXT: entry:
4461 ; NVPTX3-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
4462 ; NVPTX3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]])
4463 ; NVPTX3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__18, ptr @__omp_outlined__18_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
4464 ; NVPTX3-NEXT: ret void
4467 ; NVPTX3: Function Attrs: convergent noinline norecurse nounwind
4468 ; NVPTX3-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55
4469 ; NVPTX3-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
4470 ; NVPTX3-NEXT: entry:
4471 ; NVPTX3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
4472 ; NVPTX3-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
4473 ; NVPTX3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55_kernel_environment, ptr [[DYN]])
4474 ; NVPTX3-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
4475 ; NVPTX3-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
4476 ; NVPTX3: user_code.entry:
4477 ; NVPTX3-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
4478 ; NVPTX3-NEXT: call void @__omp_outlined__6(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
4479 ; NVPTX3-NEXT: call void @__kmpc_target_deinit()
4480 ; NVPTX3-NEXT: ret void
4481 ; NVPTX3: worker.exit:
4482 ; NVPTX3-NEXT: ret void
4485 ; NVPTX3: Function Attrs: convergent noinline norecurse nounwind
4486 ; NVPTX3-LABEL: define {{[^@]+}}@__omp_outlined__6
4487 ; NVPTX3-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
4488 ; NVPTX3-NEXT: entry:
4489 ; NVPTX3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
4490 ; NVPTX3-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
4491 ; NVPTX3-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
4492 ; NVPTX3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__7, ptr @__omp_outlined__7_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
4493 ; NVPTX3-NEXT: [[CALL:%.*]] = call i32 @unknown() #[[ATTR11]]
4494 ; NVPTX3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__8, ptr @__omp_outlined__8_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
4495 ; NVPTX3-NEXT: ret void
4498 ; NVPTX3: Function Attrs: convergent noinline norecurse nounwind
4499 ; NVPTX3-LABEL: define {{[^@]+}}@__omp_outlined__7
4500 ; NVPTX3-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
4501 ; NVPTX3-NEXT: entry:
4502 ; NVPTX3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
4503 ; NVPTX3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
4504 ; NVPTX3-NEXT: call void @p0() #[[ATTR11]]
4505 ; NVPTX3-NEXT: ret void
4508 ; NVPTX3: Function Attrs: convergent noinline norecurse nounwind
4509 ; NVPTX3-LABEL: define {{[^@]+}}@__omp_outlined__7_wrapper
4510 ; NVPTX3-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
4511 ; NVPTX3-NEXT: entry:
4512 ; NVPTX3-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
4513 ; NVPTX3-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
4514 ; NVPTX3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
4515 ; NVPTX3-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
4516 ; NVPTX3-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
4517 ; NVPTX3-NEXT: call void @__omp_outlined__7(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
4518 ; NVPTX3-NEXT: ret void
4521 ; NVPTX3: Function Attrs: convergent noinline norecurse nounwind
4522 ; NVPTX3-LABEL: define {{[^@]+}}@__omp_outlined__8
4523 ; NVPTX3-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
4524 ; NVPTX3-NEXT: entry:
4525 ; NVPTX3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
4526 ; NVPTX3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
4527 ; NVPTX3-NEXT: call void @p1() #[[ATTR11]]
4528 ; NVPTX3-NEXT: ret void
4531 ; NVPTX3: Function Attrs: convergent noinline norecurse nounwind
4532 ; NVPTX3-LABEL: define {{[^@]+}}@__omp_outlined__8_wrapper
4533 ; NVPTX3-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
4534 ; NVPTX3-NEXT: entry:
4535 ; NVPTX3-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
4536 ; NVPTX3-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
4537 ; NVPTX3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
4538 ; NVPTX3-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
4539 ; NVPTX3-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
4540 ; NVPTX3-NEXT: call void @__omp_outlined__8(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
4541 ; NVPTX3-NEXT: ret void
4544 ; NVPTX3: Function Attrs: convergent noinline norecurse nounwind
4545 ; NVPTX3-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66
4546 ; NVPTX3-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
4547 ; NVPTX3-NEXT: entry:
4548 ; NVPTX3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
4549 ; NVPTX3-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
4550 ; NVPTX3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66_kernel_environment, ptr [[DYN]])
4551 ; NVPTX3-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
4552 ; NVPTX3-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
4553 ; NVPTX3: user_code.entry:
4554 ; NVPTX3-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
4555 ; NVPTX3-NEXT: call void @__omp_outlined__9(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
4556 ; NVPTX3-NEXT: call void @__kmpc_target_deinit()
4557 ; NVPTX3-NEXT: ret void
4558 ; NVPTX3: worker.exit:
4559 ; NVPTX3-NEXT: ret void
4562 ; NVPTX3: Function Attrs: convergent noinline norecurse nounwind
4563 ; NVPTX3-LABEL: define {{[^@]+}}@__omp_outlined__9
4564 ; NVPTX3-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
4565 ; NVPTX3-NEXT: entry:
4566 ; NVPTX3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
4567 ; NVPTX3-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
4568 ; NVPTX3-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
4569 ; NVPTX3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__10, ptr @__omp_outlined__10_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
4570 ; NVPTX3-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
4571 ; NVPTX3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__11, ptr @__omp_outlined__11_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
4572 ; NVPTX3-NEXT: ret void
4575 ; NVPTX3: Function Attrs: convergent noinline norecurse nounwind
4576 ; NVPTX3-LABEL: define {{[^@]+}}@__omp_outlined__10
4577 ; NVPTX3-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
4578 ; NVPTX3-NEXT: entry:
4579 ; NVPTX3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
4580 ; NVPTX3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
4581 ; NVPTX3-NEXT: call void @p0() #[[ATTR11]]
4582 ; NVPTX3-NEXT: ret void
4585 ; NVPTX3: Function Attrs: convergent noinline norecurse nounwind
4586 ; NVPTX3-LABEL: define {{[^@]+}}@__omp_outlined__10_wrapper
4587 ; NVPTX3-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
4588 ; NVPTX3-NEXT: entry:
4589 ; NVPTX3-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
4590 ; NVPTX3-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
4591 ; NVPTX3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
4592 ; NVPTX3-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
4593 ; NVPTX3-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
4594 ; NVPTX3-NEXT: call void @__omp_outlined__10(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
4595 ; NVPTX3-NEXT: ret void
4598 ; NVPTX3: Function Attrs: convergent noinline norecurse nounwind
4599 ; NVPTX3-LABEL: define {{[^@]+}}@__omp_outlined__11
4600 ; NVPTX3-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
4601 ; NVPTX3-NEXT: entry:
4602 ; NVPTX3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
4603 ; NVPTX3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
4604 ; NVPTX3-NEXT: call void @p1() #[[ATTR11]]
4605 ; NVPTX3-NEXT: ret void
4608 ; NVPTX3: Function Attrs: convergent noinline norecurse nounwind
4609 ; NVPTX3-LABEL: define {{[^@]+}}@__omp_outlined__11_wrapper
4610 ; NVPTX3-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
4611 ; NVPTX3-NEXT: entry:
4612 ; NVPTX3-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
4613 ; NVPTX3-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
4614 ; NVPTX3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
4615 ; NVPTX3-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
4616 ; NVPTX3-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
4617 ; NVPTX3-NEXT: call void @__omp_outlined__11(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
4618 ; NVPTX3-NEXT: ret void
4621 ; NVPTX3: Function Attrs: convergent noinline norecurse nounwind
4622 ; NVPTX3-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_pure_l77
4623 ; NVPTX3-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
4624 ; NVPTX3-NEXT: entry:
4625 ; NVPTX3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
4626 ; NVPTX3-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
4627 ; NVPTX3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_pure_l77_kernel_environment, ptr [[DYN]])
4628 ; NVPTX3-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
4629 ; NVPTX3-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
4630 ; NVPTX3: user_code.entry:
4631 ; NVPTX3-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
4632 ; NVPTX3-NEXT: call void @__omp_outlined__12(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
4633 ; NVPTX3-NEXT: call void @__kmpc_target_deinit()
4634 ; NVPTX3-NEXT: ret void
4635 ; NVPTX3: worker.exit:
4636 ; NVPTX3-NEXT: ret void
4639 ; NVPTX3: Function Attrs: convergent noinline norecurse nounwind
4640 ; NVPTX3-LABEL: define {{[^@]+}}@__omp_outlined__12
4641 ; NVPTX3-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
4642 ; NVPTX3-NEXT: entry:
4643 ; NVPTX3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
4644 ; NVPTX3-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
4645 ; NVPTX3-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
4646 ; NVPTX3-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
4647 ; NVPTX3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__13, ptr @__omp_outlined__13_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
4648 ; NVPTX3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__14, ptr @__omp_outlined__14_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
4649 ; NVPTX3-NEXT: ret void
4652 ; NVPTX3: Function Attrs: convergent noinline norecurse nounwind
4653 ; NVPTX3-LABEL: define {{[^@]+}}@__omp_outlined__13
4654 ; NVPTX3-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
4655 ; NVPTX3-NEXT: entry:
4656 ; NVPTX3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
4657 ; NVPTX3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
4658 ; NVPTX3-NEXT: call void @p0() #[[ATTR11]]
4659 ; NVPTX3-NEXT: ret void
4662 ; NVPTX3: Function Attrs: convergent noinline norecurse nounwind
4663 ; NVPTX3-LABEL: define {{[^@]+}}@__omp_outlined__13_wrapper
4664 ; NVPTX3-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
4665 ; NVPTX3-NEXT: entry:
4666 ; NVPTX3-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
4667 ; NVPTX3-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
4668 ; NVPTX3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
4669 ; NVPTX3-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
4670 ; NVPTX3-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
4671 ; NVPTX3-NEXT: call void @__omp_outlined__13(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
4672 ; NVPTX3-NEXT: ret void
4675 ; NVPTX3: Function Attrs: convergent noinline norecurse nounwind
4676 ; NVPTX3-LABEL: define {{[^@]+}}@__omp_outlined__14
4677 ; NVPTX3-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
4678 ; NVPTX3-NEXT: entry:
4679 ; NVPTX3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
4680 ; NVPTX3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
4681 ; NVPTX3-NEXT: call void @p1() #[[ATTR11]]
4682 ; NVPTX3-NEXT: ret void
4685 ; NVPTX3: Function Attrs: convergent noinline norecurse nounwind
4686 ; NVPTX3-LABEL: define {{[^@]+}}@__omp_outlined__14_wrapper
4687 ; NVPTX3-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
4688 ; NVPTX3-NEXT: entry:
4689 ; NVPTX3-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
4690 ; NVPTX3-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
4691 ; NVPTX3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
4692 ; NVPTX3-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
4693 ; NVPTX3-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
4694 ; NVPTX3-NEXT: call void @__omp_outlined__14(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
4695 ; NVPTX3-NEXT: ret void
4698 ; NVPTX3: Function Attrs: convergent noinline norecurse nounwind
4699 ; NVPTX3-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92
4700 ; NVPTX3-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
4701 ; NVPTX3-NEXT: entry:
4702 ; NVPTX3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
4703 ; NVPTX3-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
4704 ; NVPTX3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92_kernel_environment, ptr [[DYN]])
4705 ; NVPTX3-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
4706 ; NVPTX3-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
4707 ; NVPTX3: user_code.entry:
4708 ; NVPTX3-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
4709 ; NVPTX3-NEXT: call void @__omp_outlined__15(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
4710 ; NVPTX3-NEXT: call void @__kmpc_target_deinit()
4711 ; NVPTX3-NEXT: ret void
4712 ; NVPTX3: worker.exit:
4713 ; NVPTX3-NEXT: ret void
4716 ; NVPTX3: Function Attrs: convergent noinline norecurse nounwind
4717 ; NVPTX3-LABEL: define {{[^@]+}}@__omp_outlined__15
4718 ; NVPTX3-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
4719 ; NVPTX3-NEXT: entry:
4720 ; NVPTX3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
4721 ; NVPTX3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
4722 ; NVPTX3-NEXT: [[CALL:%.*]] = call i32 @omp_get_thread_num() #[[ATTR9]]
4723 ; NVPTX3-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after.internalized(i32 [[CALL]]) #[[ATTR9]]
4724 ; NVPTX3-NEXT: ret void
4727 ; NVPTX3: Function Attrs: noinline nounwind
4728 ; NVPTX3-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after.internalized
4729 ; NVPTX3-SAME: (i32 [[A:%.*]]) #[[ATTR6]] {
4730 ; NVPTX3-NEXT: entry:
4731 ; NVPTX3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
4732 ; NVPTX3-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
4733 ; NVPTX3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
4734 ; NVPTX3-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP0]], 0
4735 ; NVPTX3-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
4737 ; NVPTX3-NEXT: br label [[RETURN:%.*]]
4739 ; NVPTX3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4
4740 ; NVPTX3-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP1]], 1
4741 ; NVPTX3-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after.internalized(i32 [[SUB]]) #[[ATTR9]]
4742 ; NVPTX3-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after_after.internalized() #[[ATTR9]]
4743 ; NVPTX3-NEXT: br label [[RETURN]]
4745 ; NVPTX3-NEXT: ret void
4748 ; NVPTX3: Function Attrs: convergent noinline nounwind
4749 ; NVPTX3-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after
4750 ; NVPTX3-SAME: (i32 [[A:%.*]]) #[[ATTR1]] {
4751 ; NVPTX3-NEXT: entry:
4752 ; NVPTX3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
4753 ; NVPTX3-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
4754 ; NVPTX3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
4755 ; NVPTX3-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP0]], 0
4756 ; NVPTX3-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
4758 ; NVPTX3-NEXT: br label [[RETURN:%.*]]
4760 ; NVPTX3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4
4761 ; NVPTX3-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP1]], 1
4762 ; NVPTX3-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after(i32 [[SUB]]) #[[ATTR11]]
4763 ; NVPTX3-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after_after() #[[ATTR11]]
4764 ; NVPTX3-NEXT: br label [[RETURN]]
4766 ; NVPTX3-NEXT: ret void
4769 ; NVPTX3: Function Attrs: convergent noinline norecurse nounwind
4770 ; NVPTX3-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112
4771 ; NVPTX3-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
4772 ; NVPTX3-NEXT: entry:
4773 ; NVPTX3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
4774 ; NVPTX3-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
4775 ; NVPTX3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112_kernel_environment, ptr [[DYN]])
4776 ; NVPTX3-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
4777 ; NVPTX3-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
4778 ; NVPTX3: user_code.entry:
4779 ; NVPTX3-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
4780 ; NVPTX3-NEXT: call void @__omp_outlined__16(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
4781 ; NVPTX3-NEXT: call void @__kmpc_target_deinit()
4782 ; NVPTX3-NEXT: ret void
4783 ; NVPTX3: worker.exit:
4784 ; NVPTX3-NEXT: ret void
4787 ; NVPTX3: Function Attrs: convergent noinline norecurse nounwind
4788 ; NVPTX3-LABEL: define {{[^@]+}}@__omp_outlined__16
4789 ; NVPTX3-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
4790 ; NVPTX3-NEXT: entry:
4791 ; NVPTX3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
4792 ; NVPTX3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
4793 ; NVPTX3-NEXT: call void @weak_callee_empty() #[[ATTR9]]
4794 ; NVPTX3-NEXT: ret void
4797 ; NVPTX3: Function Attrs: convergent noinline nounwind
4798 ; NVPTX3-LABEL: define {{[^@]+}}@weak_callee_empty
4799 ; NVPTX3-SAME: () #[[ATTR1]] {
4800 ; NVPTX3-NEXT: entry:
4801 ; NVPTX3-NEXT: ret void
4804 ; NVPTX3: Function Attrs: convergent noinline norecurse nounwind
4805 ; NVPTX3-LABEL: define {{[^@]+}}@__omp_outlined__17
4806 ; NVPTX3-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
4807 ; NVPTX3-NEXT: entry:
4808 ; NVPTX3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
4809 ; NVPTX3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
4810 ; NVPTX3-NEXT: call void @p0() #[[ATTR11]]
4811 ; NVPTX3-NEXT: ret void
4814 ; NVPTX3: Function Attrs: convergent noinline norecurse nounwind
4815 ; NVPTX3-LABEL: define {{[^@]+}}@__omp_outlined__17_wrapper
4816 ; NVPTX3-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
4817 ; NVPTX3-NEXT: entry:
4818 ; NVPTX3-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
4819 ; NVPTX3-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
4820 ; NVPTX3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
4821 ; NVPTX3-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
4822 ; NVPTX3-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
4823 ; NVPTX3-NEXT: call void @__omp_outlined__17(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
4824 ; NVPTX3-NEXT: ret void
4827 ; NVPTX3: Function Attrs: convergent noinline norecurse nounwind
4828 ; NVPTX3-LABEL: define {{[^@]+}}@__omp_outlined__18
4829 ; NVPTX3-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
4830 ; NVPTX3-NEXT: entry:
4831 ; NVPTX3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
4832 ; NVPTX3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
4833 ; NVPTX3-NEXT: call void @p0() #[[ATTR11]]
4834 ; NVPTX3-NEXT: ret void
4837 ; NVPTX3: Function Attrs: convergent noinline norecurse nounwind
4838 ; NVPTX3-LABEL: define {{[^@]+}}@__omp_outlined__18_wrapper
4839 ; NVPTX3-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
4840 ; NVPTX3-NEXT: entry:
4841 ; NVPTX3-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
4842 ; NVPTX3-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
4843 ; NVPTX3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
4844 ; NVPTX3-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
4845 ; NVPTX3-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
4846 ; NVPTX3-NEXT: call void @__omp_outlined__18(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
4847 ; NVPTX3-NEXT: ret void
4850 ; NVPTX3: Function Attrs: noinline nounwind
4851 ; NVPTX3-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after_after.internalized
4852 ; NVPTX3-SAME: () #[[ATTR6]] {
4853 ; NVPTX3-NEXT: entry:
4854 ; NVPTX3-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
4855 ; NVPTX3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]]) #[[ATTR3]]
4856 ; NVPTX3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__19, ptr @__omp_outlined__19_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
4857 ; NVPTX3-NEXT: ret void
4860 ; NVPTX3: Function Attrs: convergent noinline nounwind
4861 ; NVPTX3-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after_after
4862 ; NVPTX3-SAME: () #[[ATTR1]] {
4863 ; NVPTX3-NEXT: entry:
4864 ; NVPTX3-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
4865 ; NVPTX3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]])
4866 ; NVPTX3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__19, ptr @__omp_outlined__19_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
4867 ; NVPTX3-NEXT: ret void
4870 ; NVPTX3: Function Attrs: convergent noinline norecurse nounwind
4871 ; NVPTX3-LABEL: define {{[^@]+}}@__omp_outlined__19
4872 ; NVPTX3-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
4873 ; NVPTX3-NEXT: entry:
4874 ; NVPTX3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
4875 ; NVPTX3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
4876 ; NVPTX3-NEXT: call void @p0() #[[ATTR11]]
4877 ; NVPTX3-NEXT: ret void
4880 ; NVPTX3: Function Attrs: convergent noinline norecurse nounwind
4881 ; NVPTX3-LABEL: define {{[^@]+}}@__omp_outlined__19_wrapper
4882 ; NVPTX3-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
4883 ; NVPTX3-NEXT: entry:
4884 ; NVPTX3-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
4885 ; NVPTX3-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
4886 ; NVPTX3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
4887 ; NVPTX3-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
4888 ; NVPTX3-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
4889 ; NVPTX3-NEXT: call void @__omp_outlined__19(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
4890 ; NVPTX3-NEXT: ret void
4893 ; AMDGPU1: attributes #[[ATTR0]] = { convergent noinline norecurse nounwind "frame-pointer"="none" "kernel" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4894 ; AMDGPU1: attributes #[[ATTR1]] = { convergent noinline nounwind "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4895 ; AMDGPU1: attributes #[[ATTR2:[0-9]+]] = { convergent "frame-pointer"="none" "llvm.assume"="omp_no_openmp" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4896 ; AMDGPU1: attributes #[[ATTR3]] = { nounwind }
4897 ; AMDGPU1: attributes #[[ATTR4:[0-9]+]] = { convergent "frame-pointer"="none" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4898 ; AMDGPU1: attributes #[[ATTR5:[0-9]+]] = { alwaysinline }
4899 ; AMDGPU1: attributes #[[ATTR6]] = { noinline nounwind "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4900 ; AMDGPU1: attributes #[[ATTR7:[0-9]+]] = { convergent nounwind willreturn memory(read) "frame-pointer"="none" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4901 ; AMDGPU1: attributes #[[ATTR8:[0-9]+]] = { convergent nounwind "frame-pointer"="none" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4902 ; AMDGPU1: attributes #[[ATTR9]] = { convergent nounwind }
4903 ; AMDGPU1: attributes #[[ATTR10]] = { convergent "llvm.assume"="omp_no_openmp" }
4904 ; AMDGPU1: attributes #[[ATTR11]] = { convergent }
4906 ; NVPTX1: attributes #[[ATTR0]] = { convergent noinline norecurse nounwind "frame-pointer"="none" "kernel" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4907 ; NVPTX1: attributes #[[ATTR1]] = { convergent noinline nounwind "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4908 ; NVPTX1: attributes #[[ATTR2:[0-9]+]] = { convergent "frame-pointer"="none" "llvm.assume"="omp_no_openmp" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4909 ; NVPTX1: attributes #[[ATTR3]] = { nounwind }
4910 ; NVPTX1: attributes #[[ATTR4:[0-9]+]] = { convergent "frame-pointer"="none" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4911 ; NVPTX1: attributes #[[ATTR5:[0-9]+]] = { alwaysinline }
4912 ; NVPTX1: attributes #[[ATTR6]] = { noinline nounwind "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4913 ; NVPTX1: attributes #[[ATTR7:[0-9]+]] = { convergent nounwind willreturn memory(read) "frame-pointer"="none" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4914 ; NVPTX1: attributes #[[ATTR8:[0-9]+]] = { convergent nounwind "frame-pointer"="none" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4915 ; NVPTX1: attributes #[[ATTR9]] = { convergent nounwind }
4916 ; NVPTX1: attributes #[[ATTR10]] = { convergent "llvm.assume"="omp_no_openmp" }
4917 ; NVPTX1: attributes #[[ATTR11]] = { convergent }
4919 ; AMDGPU2: attributes #[[ATTR0]] = { convergent noinline norecurse nounwind "frame-pointer"="none" "kernel" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4920 ; AMDGPU2: attributes #[[ATTR1]] = { convergent noinline nounwind "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4921 ; AMDGPU2: attributes #[[ATTR2:[0-9]+]] = { convergent "frame-pointer"="none" "llvm.assume"="omp_no_openmp" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4922 ; AMDGPU2: attributes #[[ATTR3]] = { nounwind }
4923 ; AMDGPU2: attributes #[[ATTR4:[0-9]+]] = { convergent "frame-pointer"="none" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4924 ; AMDGPU2: attributes #[[ATTR5:[0-9]+]] = { alwaysinline }
4925 ; AMDGPU2: attributes #[[ATTR6]] = { noinline nounwind "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4926 ; AMDGPU2: attributes #[[ATTR7:[0-9]+]] = { convergent nounwind willreturn memory(read) "frame-pointer"="none" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4927 ; AMDGPU2: attributes #[[ATTR8:[0-9]+]] = { convergent nounwind "frame-pointer"="none" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4928 ; AMDGPU2: attributes #[[ATTR9]] = { convergent nounwind }
4929 ; AMDGPU2: attributes #[[ATTR10]] = { convergent "llvm.assume"="omp_no_openmp" }
4930 ; AMDGPU2: attributes #[[ATTR11]] = { convergent }
4932 ; AMDGPU3: attributes #[[ATTR0]] = { convergent noinline norecurse nounwind "frame-pointer"="none" "kernel" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4933 ; AMDGPU3: attributes #[[ATTR1]] = { convergent noinline nounwind "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4934 ; AMDGPU3: attributes #[[ATTR2:[0-9]+]] = { convergent "frame-pointer"="none" "llvm.assume"="omp_no_openmp" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4935 ; AMDGPU3: attributes #[[ATTR3]] = { nounwind }
4936 ; AMDGPU3: attributes #[[ATTR4:[0-9]+]] = { convergent "frame-pointer"="none" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4937 ; AMDGPU3: attributes #[[ATTR5:[0-9]+]] = { alwaysinline }
4938 ; AMDGPU3: attributes #[[ATTR6]] = { noinline nounwind "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4939 ; AMDGPU3: attributes #[[ATTR7:[0-9]+]] = { convergent nounwind willreturn memory(read) "frame-pointer"="none" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4940 ; AMDGPU3: attributes #[[ATTR8:[0-9]+]] = { convergent nounwind "frame-pointer"="none" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4941 ; AMDGPU3: attributes #[[ATTR9]] = { convergent nounwind }
4942 ; AMDGPU3: attributes #[[ATTR10]] = { convergent "llvm.assume"="omp_no_openmp" }
4943 ; AMDGPU3: attributes #[[ATTR11]] = { convergent }
4945 ; NVPTX2: attributes #[[ATTR0]] = { convergent noinline norecurse nounwind "frame-pointer"="none" "kernel" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4946 ; NVPTX2: attributes #[[ATTR1]] = { convergent noinline nounwind "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4947 ; NVPTX2: attributes #[[ATTR2:[0-9]+]] = { convergent "frame-pointer"="none" "llvm.assume"="omp_no_openmp" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4948 ; NVPTX2: attributes #[[ATTR3]] = { nounwind }
4949 ; NVPTX2: attributes #[[ATTR4:[0-9]+]] = { convergent "frame-pointer"="none" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4950 ; NVPTX2: attributes #[[ATTR5:[0-9]+]] = { alwaysinline }
4951 ; NVPTX2: attributes #[[ATTR6]] = { noinline nounwind "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4952 ; NVPTX2: attributes #[[ATTR7:[0-9]+]] = { convergent nounwind willreturn memory(read) "frame-pointer"="none" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4953 ; NVPTX2: attributes #[[ATTR8:[0-9]+]] = { convergent nounwind "frame-pointer"="none" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4954 ; NVPTX2: attributes #[[ATTR9]] = { convergent nounwind }
4955 ; NVPTX2: attributes #[[ATTR10]] = { convergent "llvm.assume"="omp_no_openmp" }
4956 ; NVPTX2: attributes #[[ATTR11]] = { convergent }
4958 ; NVPTX3: attributes #[[ATTR0]] = { convergent noinline norecurse nounwind "frame-pointer"="none" "kernel" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4959 ; NVPTX3: attributes #[[ATTR1]] = { convergent noinline nounwind "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4960 ; NVPTX3: attributes #[[ATTR2:[0-9]+]] = { convergent "frame-pointer"="none" "llvm.assume"="omp_no_openmp" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4961 ; NVPTX3: attributes #[[ATTR3]] = { nounwind }
4962 ; NVPTX3: attributes #[[ATTR4:[0-9]+]] = { convergent "frame-pointer"="none" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4963 ; NVPTX3: attributes #[[ATTR5:[0-9]+]] = { alwaysinline }
4964 ; NVPTX3: attributes #[[ATTR6]] = { noinline nounwind "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4965 ; NVPTX3: attributes #[[ATTR7:[0-9]+]] = { convergent nounwind willreturn memory(read) "frame-pointer"="none" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4966 ; NVPTX3: attributes #[[ATTR8:[0-9]+]] = { convergent nounwind "frame-pointer"="none" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4967 ; NVPTX3: attributes #[[ATTR9]] = { convergent nounwind }
4968 ; NVPTX3: attributes #[[ATTR10]] = { convergent "llvm.assume"="omp_no_openmp" }
4969 ; NVPTX3: attributes #[[ATTR11]] = { convergent }
4971 ; AMDGPU1: [[META0:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_interprocedural", i32 39, i32 2}
4972 ; AMDGPU1: [[META1:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_no_openmp_attr", i32 66, i32 4}
4973 ; AMDGPU1: [[META2:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"no_state_machine_needed", i32 14, i32 0}
4974 ; AMDGPU1: [[META3:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_with_fallback", i32 55, i32 3}
4975 ; AMDGPU1: [[META4:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_pure", i32 77, i32 5}
4976 ; AMDGPU1: [[META5:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_interprocedural_nested_recursive", i32 92, i32 6}
4977 ; AMDGPU1: [[META6:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"no_state_machine_weak_callee", i32 112, i32 7}
4978 ; AMDGPU1: [[META7:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine", i32 22, i32 1}
4979 ; AMDGPU1: [[META8:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_no_state_machine_needed_l14, !"kernel", i32 1}
4980 ; AMDGPU1: [[META9:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_l22, !"kernel", i32 1}
4981 ; AMDGPU1: [[META10:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39, !"kernel", i32 1}
4982 ; AMDGPU1: [[META11:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55, !"kernel", i32 1}
4983 ; AMDGPU1: [[META12:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66, !"kernel", i32 1}
4984 ; AMDGPU1: [[META13:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_pure_l77, !"kernel", i32 1}
4985 ; AMDGPU1: [[META14:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92, !"kernel", i32 1}
4986 ; AMDGPU1: [[META15:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112, !"kernel", i32 1}
4987 ; AMDGPU1: [[META16:![0-9]+]] = !{i32 1, !"wchar_size", i32 4}
4988 ; AMDGPU1: [[META17:![0-9]+]] = !{i32 7, !"openmp", i32 50}
4989 ; AMDGPU1: [[META18:![0-9]+]] = !{i32 7, !"openmp-device", i32 50}
4991 ; NVPTX1: [[META0:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_interprocedural", i32 39, i32 2}
4992 ; NVPTX1: [[META1:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_no_openmp_attr", i32 66, i32 4}
4993 ; NVPTX1: [[META2:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"no_state_machine_needed", i32 14, i32 0}
4994 ; NVPTX1: [[META3:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_with_fallback", i32 55, i32 3}
4995 ; NVPTX1: [[META4:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_pure", i32 77, i32 5}
4996 ; NVPTX1: [[META5:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_interprocedural_nested_recursive", i32 92, i32 6}
4997 ; NVPTX1: [[META6:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"no_state_machine_weak_callee", i32 112, i32 7}
4998 ; NVPTX1: [[META7:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine", i32 22, i32 1}
4999 ; NVPTX1: [[META8:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_no_state_machine_needed_l14, !"kernel", i32 1}
5000 ; NVPTX1: [[META9:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_l22, !"kernel", i32 1}
5001 ; NVPTX1: [[META10:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39, !"kernel", i32 1}
5002 ; NVPTX1: [[META11:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55, !"kernel", i32 1}
5003 ; NVPTX1: [[META12:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66, !"kernel", i32 1}
5004 ; NVPTX1: [[META13:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_pure_l77, !"kernel", i32 1}
5005 ; NVPTX1: [[META14:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92, !"kernel", i32 1}
5006 ; NVPTX1: [[META15:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112, !"kernel", i32 1}
5007 ; NVPTX1: [[META16:![0-9]+]] = !{i32 1, !"wchar_size", i32 4}
5008 ; NVPTX1: [[META17:![0-9]+]] = !{i32 7, !"openmp", i32 50}
5009 ; NVPTX1: [[META18:![0-9]+]] = !{i32 7, !"openmp-device", i32 50}
5011 ; AMDGPU2: [[META0:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_interprocedural", i32 39, i32 2}
5012 ; AMDGPU2: [[META1:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_no_openmp_attr", i32 66, i32 4}
5013 ; AMDGPU2: [[META2:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"no_state_machine_needed", i32 14, i32 0}
5014 ; AMDGPU2: [[META3:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_with_fallback", i32 55, i32 3}
5015 ; AMDGPU2: [[META4:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_pure", i32 77, i32 5}
5016 ; AMDGPU2: [[META5:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_interprocedural_nested_recursive", i32 92, i32 6}
5017 ; AMDGPU2: [[META6:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"no_state_machine_weak_callee", i32 112, i32 7}
5018 ; AMDGPU2: [[META7:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine", i32 22, i32 1}
5019 ; AMDGPU2: [[META8:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_no_state_machine_needed_l14, !"kernel", i32 1}
5020 ; AMDGPU2: [[META9:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_l22, !"kernel", i32 1}
5021 ; AMDGPU2: [[META10:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39, !"kernel", i32 1}
5022 ; AMDGPU2: [[META11:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55, !"kernel", i32 1}
5023 ; AMDGPU2: [[META12:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66, !"kernel", i32 1}
5024 ; AMDGPU2: [[META13:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_pure_l77, !"kernel", i32 1}
5025 ; AMDGPU2: [[META14:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92, !"kernel", i32 1}
5026 ; AMDGPU2: [[META15:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112, !"kernel", i32 1}
5027 ; AMDGPU2: [[META16:![0-9]+]] = !{i32 1, !"wchar_size", i32 4}
5028 ; AMDGPU2: [[META17:![0-9]+]] = !{i32 7, !"openmp", i32 50}
5029 ; AMDGPU2: [[META18:![0-9]+]] = !{i32 7, !"openmp-device", i32 50}
5031 ; AMDGPU3: [[META0:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_interprocedural", i32 39, i32 2}
5032 ; AMDGPU3: [[META1:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_no_openmp_attr", i32 66, i32 4}
5033 ; AMDGPU3: [[META2:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"no_state_machine_needed", i32 14, i32 0}
5034 ; AMDGPU3: [[META3:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_with_fallback", i32 55, i32 3}
5035 ; AMDGPU3: [[META4:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_pure", i32 77, i32 5}
5036 ; AMDGPU3: [[META5:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_interprocedural_nested_recursive", i32 92, i32 6}
5037 ; AMDGPU3: [[META6:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"no_state_machine_weak_callee", i32 112, i32 7}
5038 ; AMDGPU3: [[META7:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine", i32 22, i32 1}
5039 ; AMDGPU3: [[META8:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_no_state_machine_needed_l14, !"kernel", i32 1}
5040 ; AMDGPU3: [[META9:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_l22, !"kernel", i32 1}
5041 ; AMDGPU3: [[META10:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39, !"kernel", i32 1}
5042 ; AMDGPU3: [[META11:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55, !"kernel", i32 1}
5043 ; AMDGPU3: [[META12:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66, !"kernel", i32 1}
5044 ; AMDGPU3: [[META13:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_pure_l77, !"kernel", i32 1}
5045 ; AMDGPU3: [[META14:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92, !"kernel", i32 1}
5046 ; AMDGPU3: [[META15:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112, !"kernel", i32 1}
5047 ; AMDGPU3: [[META16:![0-9]+]] = !{i32 1, !"wchar_size", i32 4}
5048 ; AMDGPU3: [[META17:![0-9]+]] = !{i32 7, !"openmp", i32 50}
5049 ; AMDGPU3: [[META18:![0-9]+]] = !{i32 7, !"openmp-device", i32 50}
5051 ; NVPTX2: [[META0:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_interprocedural", i32 39, i32 2}
5052 ; NVPTX2: [[META1:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_no_openmp_attr", i32 66, i32 4}
5053 ; NVPTX2: [[META2:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"no_state_machine_needed", i32 14, i32 0}
5054 ; NVPTX2: [[META3:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_with_fallback", i32 55, i32 3}
5055 ; NVPTX2: [[META4:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_pure", i32 77, i32 5}
5056 ; NVPTX2: [[META5:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_interprocedural_nested_recursive", i32 92, i32 6}
5057 ; NVPTX2: [[META6:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"no_state_machine_weak_callee", i32 112, i32 7}
5058 ; NVPTX2: [[META7:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine", i32 22, i32 1}
5059 ; NVPTX2: [[META8:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_no_state_machine_needed_l14, !"kernel", i32 1}
5060 ; NVPTX2: [[META9:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_l22, !"kernel", i32 1}
5061 ; NVPTX2: [[META10:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39, !"kernel", i32 1}
5062 ; NVPTX2: [[META11:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55, !"kernel", i32 1}
5063 ; NVPTX2: [[META12:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66, !"kernel", i32 1}
5064 ; NVPTX2: [[META13:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_pure_l77, !"kernel", i32 1}
5065 ; NVPTX2: [[META14:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92, !"kernel", i32 1}
5066 ; NVPTX2: [[META15:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112, !"kernel", i32 1}
5067 ; NVPTX2: [[META16:![0-9]+]] = !{i32 1, !"wchar_size", i32 4}
5068 ; NVPTX2: [[META17:![0-9]+]] = !{i32 7, !"openmp", i32 50}
5069 ; NVPTX2: [[META18:![0-9]+]] = !{i32 7, !"openmp-device", i32 50}
5071 ; NVPTX3: [[META0:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_interprocedural", i32 39, i32 2}
5072 ; NVPTX3: [[META1:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_no_openmp_attr", i32 66, i32 4}
5073 ; NVPTX3: [[META2:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"no_state_machine_needed", i32 14, i32 0}
5074 ; NVPTX3: [[META3:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_with_fallback", i32 55, i32 3}
5075 ; NVPTX3: [[META4:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_pure", i32 77, i32 5}
5076 ; NVPTX3: [[META5:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_interprocedural_nested_recursive", i32 92, i32 6}
5077 ; NVPTX3: [[META6:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"no_state_machine_weak_callee", i32 112, i32 7}
5078 ; NVPTX3: [[META7:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine", i32 22, i32 1}
5079 ; NVPTX3: [[META8:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_no_state_machine_needed_l14, !"kernel", i32 1}
5080 ; NVPTX3: [[META9:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_l22, !"kernel", i32 1}
5081 ; NVPTX3: [[META10:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39, !"kernel", i32 1}
5082 ; NVPTX3: [[META11:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55, !"kernel", i32 1}
5083 ; NVPTX3: [[META12:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66, !"kernel", i32 1}
5084 ; NVPTX3: [[META13:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_pure_l77, !"kernel", i32 1}
5085 ; NVPTX3: [[META14:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92, !"kernel", i32 1}
5086 ; NVPTX3: [[META15:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112, !"kernel", i32 1}
5087 ; NVPTX3: [[META16:![0-9]+]] = !{i32 1, !"wchar_size", i32 4}
5088 ; NVPTX3: [[META17:![0-9]+]] = !{i32 7, !"openmp", i32 50}
5089 ; NVPTX3: [[META18:![0-9]+]] = !{i32 7, !"openmp-device", i32 50}