1 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --check-attributes --check-globals --include-generated-funcs
2 ; RUN: opt --mtriple=amdgcn-amd-amdhsa --data-layout=A5 -S -passes=openmp-opt < %s | FileCheck %s --check-prefixes=AMDGPU
3 ; RUN: opt --mtriple=nvptx64-- -S -passes=openmp-opt < %s | FileCheck %s --check-prefixes=NVPTX
4 ; RUN: opt --mtriple=amdgcn-amd-amdhsa --data-layout=A5 -openmp-opt-disable-state-machine-rewrite -S -passes=openmp-opt < %s | FileCheck %s --check-prefixes=AMDGPU-DISABLED
5 ; RUN: opt --mtriple=nvptx64-- -openmp-opt-disable-state-machine-rewrite -S -passes=openmp-opt < %s | FileCheck %s --check-prefixes=NVPTX-DISABLED
10 ;; void unknown_pure(void) __attribute__((pure));
11 ;; [[omp::assume("omp_no_openmp")]] void unknown_no_openmp(void);
14 ;; void no_parallel_region_in_here(void) {
19 ;; void no_state_machine_needed() {
20 ;; #pragma omp target teams
22 ;; no_parallel_region_in_here();
23 ;; unknown_no_openmp();
27 ;; void simple_state_machine() {
28 ;; #pragma omp target teams
30 ;; unknown_no_openmp();
31 ;; #pragma omp parallel
33 ;; no_parallel_region_in_here();
34 ;; #pragma omp parallel
39 ;; void simple_state_machine_interprocedural_after(void);
40 ;; void simple_state_machine_interprocedural_before(void) {
41 ;; #pragma omp parallel
44 ;; void simple_state_machine_interprocedural() {
45 ;; #pragma omp target teams
47 ;; unknown_no_openmp();
48 ;; simple_state_machine_interprocedural_before();
49 ;; no_parallel_region_in_here();
50 ;; #pragma omp parallel
52 ;; simple_state_machine_interprocedural_after();
55 ;; void simple_state_machine_interprocedural_after(void) {
56 ;; #pragma omp parallel
60 ;; void simple_state_machine_with_fallback() {
61 ;; #pragma omp target teams
63 ;; #pragma omp parallel
66 ;; #pragma omp parallel
71 ;; void simple_state_machine_no_openmp_attr() {
72 ;; #pragma omp target teams
74 ;; #pragma omp parallel
76 ;; unknown_no_openmp();
77 ;; #pragma omp parallel
82 ;; void simple_state_machine_pure() {
83 ;; #pragma omp target teams
85 ;; unknown_no_openmp();
86 ;; #pragma omp parallel
89 ;; #pragma omp parallel
94 ;; int omp_get_thread_num();
95 ;; void simple_state_machine_interprocedural_nested_recursive_after(int);
96 ;; void simple_state_machine_interprocedural_nested_recursive_after_after(void);
97 ;; void simple_state_machine_interprocedural_nested_recursive() {
98 ;; #pragma omp target teams
100 ;; simple_state_machine_interprocedural_nested_recursive_after(
101 ;; omp_get_thread_num());
105 ;; void simple_state_machine_interprocedural_nested_recursive_after(int a) {
108 ;; simple_state_machine_interprocedural_nested_recursive_after(a - 1);
109 ;; simple_state_machine_interprocedural_nested_recursive_after_after();
111 ;; void simple_state_machine_interprocedural_nested_recursive_after_after(void) {
112 ;; #pragma omp parallel
116 ;; __attribute__((weak)) void weak_callee_empty(void) {}
117 ;; void no_state_machine_weak_callee() {
118 ;; #pragma omp target teams
119 ;; { weak_callee_empty(); }
; Runtime record types used by the OpenMP device runtime in this test:
; ident_t source-location records and the per-kernel environment
; (configuration byte-flags, ident pointer, dynamic-environment pointer).
122 %struct.ident_t = type { i32, i32, i32, i32, ptr }
123 %struct.KernelEnvironmentTy = type { %struct.ConfigurationEnvironmentTy, ptr, ptr }
124 %struct.ConfigurationEnvironmentTy = type { i8, i8, i8, i32, i32, i32, i32, i32, i32 }
; Location string and ident_t constants passed to the __kmpc_* runtime calls.
126 @0 = private unnamed_addr constant [23 x i8] c";unknown;unknown;0;0;;\00", align 1
127 @1 = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 0, ptr @0 }, align 8
128 @2 = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 2, i32 0, ptr @0 }, align 8
129 @G = external global i32, align 4
130 @3 = private unnamed_addr constant %struct.ident_t { i32 0, i32 322, i32 2, i32 0, ptr @0 }, align 8
; One KernelEnvironmentTy constant per target region below. openmp-opt is
; expected to rewrite the leading configuration flags (see the autogenerated
; AMDGPU/NVPTX CHECK lines at the bottom of the file).
132 @__omp_offloading_14_a36502b_no_state_machine_needed_l14_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @1, ptr null }
133 @__omp_offloading_14_a36502b_simple_state_machine_l22_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @1, ptr null }
134 @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @1, ptr null }
135 @__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @1, ptr null }
136 @__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @1, ptr null }
137 @__omp_offloading_14_a36502b_simple_state_machine_pure_l77_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @1, ptr null }
138 @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @1, ptr null }
139 @__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @1, ptr null }
; Kernel entry for the no_state_machine_needed target region (C line 14):
; the outlined region makes no parallel calls, so no state machine is needed.
141 define weak void @__omp_offloading_14_a36502b_no_state_machine_needed_l14(ptr %dyn) #0 {
143 %.zero.addr = alloca i32, align 4
144 %.threadid_temp. = alloca i32, align 4
145 store i32 0, ptr %.zero.addr, align 4
146 %0 = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_no_state_machine_needed_l14_kernel_environment, ptr %dyn)
; A return value of -1 selects the user-code (main-thread) path.
147 %exec_user_code = icmp eq i32 %0, -1
148 br i1 %exec_user_code, label %user_code.entry, label %worker.exit
150 user_code.entry: ; preds = %entry
151 %1 = call i32 @__kmpc_global_thread_num(ptr @1)
152 store i32 %1, ptr %.threadid_temp., align 4
153 call void @__omp_outlined__(ptr %.threadid_temp., ptr %.zero.addr) #3
154 call void @__kmpc_target_deinit()
157 worker.exit: ; preds = %entry
161 ; Make this a weak definition so the custom state-machine rewrite is still applied, but the optimizer cannot use the body in its reasoning.
162 define weak i32 @__kmpc_target_init(ptr, ptr) {
; Outlined teams region for the l14 kernel: calls only a helper without a
; parallel region and an "omp_no_openmp" external (attribute #8).
166 define internal void @__omp_outlined__(ptr noalias %.global_tid., ptr noalias %.bound_tid.) #0 {
168 %.global_tid..addr = alloca ptr, align 8
169 %.bound_tid..addr = alloca ptr, align 8
170 store ptr %.global_tid., ptr %.global_tid..addr, align 8
171 store ptr %.bound_tid., ptr %.bound_tid..addr, align 8
172 call void @no_parallel_region_in_here() #7
173 call void @unknown_no_openmp() #8
; Helper containing an OpenMP single construct (guarded store to @G) and a
; barrier, but no parallel region — callers need no parallel state machine.
177 define hidden void @no_parallel_region_in_here() #1 {
179 %0 = call i32 @__kmpc_global_thread_num(ptr @2)
180 %1 = call i32 @__kmpc_single(ptr @2, i32 %0)
181 %2 = icmp ne i32 %1, 0
182 br i1 %2, label %omp_if.then, label %omp_if.end
184 omp_if.then: ; preds = %entry
185 store i32 0, ptr @G, align 4
186 call void @__kmpc_end_single(ptr @2, i32 %0)
189 omp_if.end: ; preds = %omp_if.then, %entry
190 call void @__kmpc_barrier(ptr @3, i32 %0)
; External callee; attribute #2 carries "llvm.assume"="omp_no_openmp".
194 declare void @unknown_no_openmp() #2
196 declare i32 @__kmpc_global_thread_num(ptr) #3
198 declare void @__kmpc_target_deinit()
; Kernel entry for the simple_state_machine target region (C line 22): its
; outlined body launches two parallel regions via __kmpc_parallel_51.
200 define weak void @__omp_offloading_14_a36502b_simple_state_machine_l22(ptr %dyn) #0 {
202 %.zero.addr = alloca i32, align 4
203 %.threadid_temp. = alloca i32, align 4
204 store i32 0, ptr %.zero.addr, align 4
205 %0 = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_l22_kernel_environment, ptr %dyn)
206 %exec_user_code = icmp eq i32 %0, -1
207 br i1 %exec_user_code, label %user_code.entry, label %worker.exit
209 user_code.entry: ; preds = %entry
210 %1 = call i32 @__kmpc_global_thread_num(ptr @1)
211 store i32 %1, ptr %.threadid_temp., align 4
212 call void @__omp_outlined__1(ptr %.threadid_temp., ptr %.zero.addr) #3
213 call void @__kmpc_target_deinit()
216 worker.exit: ; preds = %entry
; Teams region for the l22 kernel: two known parallel regions
; (__omp_outlined__2 / __omp_outlined__3) separated by a parallel-free helper,
; so a custom state machine with known callees can be built.
220 define internal void @__omp_outlined__1(ptr noalias %.global_tid., ptr noalias %.bound_tid.) #0 {
222 %.global_tid..addr = alloca ptr, align 8
223 %.bound_tid..addr = alloca ptr, align 8
224 %captured_vars_addrs = alloca [0 x ptr], align 8
225 %captured_vars_addrs1 = alloca [0 x ptr], align 8
226 store ptr %.global_tid., ptr %.global_tid..addr, align 8
227 store ptr %.bound_tid., ptr %.bound_tid..addr, align 8
228 call void @unknown_no_openmp() #8
229 %0 = load ptr, ptr %.global_tid..addr, align 8
230 %1 = load i32, ptr %0, align 4
231 call void @__kmpc_parallel_51(ptr @1, i32 %1, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__2, ptr @__omp_outlined__2_wrapper, ptr %captured_vars_addrs, i64 0)
232 call void @no_parallel_region_in_here() #7
233 call void @__kmpc_parallel_51(ptr @1, i32 %1, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__3, ptr @__omp_outlined__3_wrapper, ptr %captured_vars_addrs1, i64 0)
; First parallel region body of the l22 kernel (calls external @p0 in the
; elided remainder of the body).
237 define internal void @__omp_outlined__2(ptr noalias %.global_tid., ptr noalias %.bound_tid.) #0 {
239 %.global_tid..addr = alloca ptr, align 8
240 %.bound_tid..addr = alloca ptr, align 8
241 store ptr %.global_tid., ptr %.global_tid..addr, align 8
242 store ptr %.bound_tid., ptr %.bound_tid..addr, align 8
247 declare void @p0() #4
; Wrapper invoked by worker threads: fetches shared variables and forwards to
; the parallel region body.
249 define internal void @__omp_outlined__2_wrapper(i16 zeroext %0, i32 %1) #0 {
251 %.addr = alloca i16, align 2
252 %.addr1 = alloca i32, align 4
253 %.zero.addr = alloca i32, align 4
254 %global_args = alloca ptr, align 8
255 store i32 0, ptr %.zero.addr, align 4
256 store i16 %0, ptr %.addr, align 2
257 store i32 %1, ptr %.addr1, align 4
258 call void @__kmpc_get_shared_variables(ptr %global_args)
259 call void @__omp_outlined__2(ptr %.addr1, ptr %.zero.addr) #3
263 declare void @__kmpc_get_shared_variables(ptr)
265 declare void @__kmpc_parallel_51(ptr, i32, i32, i32, i32, ptr, ptr, ptr, i64)
; Second parallel region body of the l22 kernel (calls external @p1).
267 define internal void @__omp_outlined__3(ptr noalias %.global_tid., ptr noalias %.bound_tid.) #0 {
269 %.global_tid..addr = alloca ptr, align 8
270 %.bound_tid..addr = alloca ptr, align 8
271 store ptr %.global_tid., ptr %.global_tid..addr, align 8
272 store ptr %.bound_tid., ptr %.bound_tid..addr, align 8
277 declare void @p1() #4
279 define internal void @__omp_outlined__3_wrapper(i16 zeroext %0, i32 %1) #0 {
281 %.addr = alloca i16, align 2
282 %.addr1 = alloca i32, align 4
283 %.zero.addr = alloca i32, align 4
284 %global_args = alloca ptr, align 8
285 store i32 0, ptr %.zero.addr, align 4
286 store i16 %0, ptr %.addr, align 2
287 store i32 %1, ptr %.addr1, align 4
288 call void @__kmpc_get_shared_variables(ptr %global_args)
289 call void @__omp_outlined__3(ptr %.addr1, ptr %.zero.addr) #3
; Kernel entry for the interprocedural test (C line 39): parallel regions are
; reached both directly and through the before/after helper functions.
293 define weak void @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39(ptr %dyn) #0 {
295 %.zero.addr = alloca i32, align 4
296 %.threadid_temp. = alloca i32, align 4
297 store i32 0, ptr %.zero.addr, align 4
298 %0 = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39_kernel_environment, ptr %dyn)
299 %exec_user_code = icmp eq i32 %0, -1
300 br i1 %exec_user_code, label %user_code.entry, label %worker.exit
302 user_code.entry: ; preds = %entry
303 %1 = call i32 @__kmpc_global_thread_num(ptr @1)
304 store i32 %1, ptr %.threadid_temp., align 4
305 call void @__omp_outlined__4(ptr %.threadid_temp., ptr %.zero.addr) #3
306 call void @__kmpc_target_deinit()
309 worker.exit: ; preds = %entry
; Teams region: helper call, helper call, direct parallel_51, helper call —
; the state machine must account for parallel regions in all three callees.
313 define internal void @__omp_outlined__4(ptr noalias %.global_tid., ptr noalias %.bound_tid.) #0 {
315 %.global_tid..addr = alloca ptr, align 8
316 %.bound_tid..addr = alloca ptr, align 8
317 %captured_vars_addrs = alloca [0 x ptr], align 8
318 store ptr %.global_tid., ptr %.global_tid..addr, align 8
319 store ptr %.bound_tid., ptr %.bound_tid..addr, align 8
320 call void @unknown_no_openmp() #8
321 call void @simple_state_machine_interprocedural_before() #7
322 call void @no_parallel_region_in_here() #7
323 %0 = load ptr, ptr %.global_tid..addr, align 8
324 %1 = load i32, ptr %0, align 4
325 call void @__kmpc_parallel_51(ptr @1, i32 %1, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__5, ptr @__omp_outlined__5_wrapper, ptr %captured_vars_addrs, i64 0)
326 call void @simple_state_machine_interprocedural_after() #7
; Helper launching parallel region __omp_outlined__17, called before the
; kernel's own parallel_51.
330 define hidden void @simple_state_machine_interprocedural_before() #1 {
332 %captured_vars_addrs = alloca [0 x ptr], align 8
333 %0 = call i32 @__kmpc_global_thread_num(ptr @2)
334 call void @__kmpc_parallel_51(ptr @2, i32 %0, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__17, ptr @__omp_outlined__17_wrapper, ptr %captured_vars_addrs, i64 0)
338 define internal void @__omp_outlined__5(ptr noalias %.global_tid., ptr noalias %.bound_tid.) #0 {
340 %.global_tid..addr = alloca ptr, align 8
341 %.bound_tid..addr = alloca ptr, align 8
342 store ptr %.global_tid., ptr %.global_tid..addr, align 8
343 store ptr %.bound_tid., ptr %.bound_tid..addr, align 8
348 define internal void @__omp_outlined__5_wrapper(i16 zeroext %0, i32 %1) #0 {
350 %.addr = alloca i16, align 2
351 %.addr1 = alloca i32, align 4
352 %.zero.addr = alloca i32, align 4
353 %global_args = alloca ptr, align 8
354 store i32 0, ptr %.zero.addr, align 4
355 store i16 %0, ptr %.addr, align 2
356 store i32 %1, ptr %.addr1, align 4
357 call void @__kmpc_get_shared_variables(ptr %global_args)
358 call void @__omp_outlined__5(ptr %.addr1, ptr %.zero.addr) #3
; Helper launching parallel region __omp_outlined__18, called after the
; kernel's own parallel_51.
362 define hidden void @simple_state_machine_interprocedural_after() #1 {
364 %captured_vars_addrs = alloca [0 x ptr], align 8
365 %0 = call i32 @__kmpc_global_thread_num(ptr @2)
366 call void @__kmpc_parallel_51(ptr @2, i32 %0, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__18, ptr @__omp_outlined__18_wrapper, ptr %captured_vars_addrs, i64 0)
; Kernel entry for the fallback test (C line 55): between the two parallel
; regions sits a call to fully opaque @unknown(), which may itself reach
; unknown parallel regions — the state machine needs a fallback path.
370 define weak void @__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55(ptr %dyn) #0 {
372 %.zero.addr = alloca i32, align 4
373 %.threadid_temp. = alloca i32, align 4
374 store i32 0, ptr %.zero.addr, align 4
375 %0 = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55_kernel_environment, ptr %dyn)
376 %exec_user_code = icmp eq i32 %0, -1
377 br i1 %exec_user_code, label %user_code.entry, label %worker.exit
379 user_code.entry: ; preds = %entry
380 %1 = call i32 @__kmpc_global_thread_num(ptr @1)
381 store i32 %1, ptr %.threadid_temp., align 4
382 call void @__omp_outlined__6(ptr %.threadid_temp., ptr %.zero.addr) #3
383 call void @__kmpc_target_deinit()
386 worker.exit: ; preds = %entry
390 define internal void @__omp_outlined__6(ptr noalias %.global_tid., ptr noalias %.bound_tid.) #0 {
392 %.global_tid..addr = alloca ptr, align 8
393 %.bound_tid..addr = alloca ptr, align 8
394 %captured_vars_addrs = alloca [0 x ptr], align 8
395 %captured_vars_addrs1 = alloca [0 x ptr], align 8
396 store ptr %.global_tid., ptr %.global_tid..addr, align 8
397 store ptr %.bound_tid., ptr %.bound_tid..addr, align 8
398 %0 = load ptr, ptr %.global_tid..addr, align 8
399 %1 = load i32, ptr %0, align 4
400 call void @__kmpc_parallel_51(ptr @1, i32 %1, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__7, ptr @__omp_outlined__7_wrapper, ptr %captured_vars_addrs, i64 0)
; Opaque call (no omp_no_openmp assumption) — forces the fallback case.
401 %call = call i32 @unknown() #7
402 call void @__kmpc_parallel_51(ptr @1, i32 %1, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__8, ptr @__omp_outlined__8_wrapper, ptr %captured_vars_addrs1, i64 0)
406 define internal void @__omp_outlined__7(ptr noalias %.global_tid., ptr noalias %.bound_tid.) #0 {
408 %.global_tid..addr = alloca ptr, align 8
409 %.bound_tid..addr = alloca ptr, align 8
410 store ptr %.global_tid., ptr %.global_tid..addr, align 8
411 store ptr %.bound_tid., ptr %.bound_tid..addr, align 8
416 define internal void @__omp_outlined__7_wrapper(i16 zeroext %0, i32 %1) #0 {
418 %.addr = alloca i16, align 2
419 %.addr1 = alloca i32, align 4
420 %.zero.addr = alloca i32, align 4
421 %global_args = alloca ptr, align 8
422 store i32 0, ptr %.zero.addr, align 4
423 store i16 %0, ptr %.addr, align 2
424 store i32 %1, ptr %.addr1, align 4
425 call void @__kmpc_get_shared_variables(ptr %global_args)
426 call void @__omp_outlined__7(ptr %.addr1, ptr %.zero.addr) #3
430 declare i32 @unknown() #4
432 define internal void @__omp_outlined__8(ptr noalias %.global_tid., ptr noalias %.bound_tid.) #0 {
434 %.global_tid..addr = alloca ptr, align 8
435 %.bound_tid..addr = alloca ptr, align 8
436 store ptr %.global_tid., ptr %.global_tid..addr, align 8
437 store ptr %.bound_tid., ptr %.bound_tid..addr, align 8
442 define internal void @__omp_outlined__8_wrapper(i16 zeroext %0, i32 %1) #0 {
444 %.addr = alloca i16, align 2
445 %.addr1 = alloca i32, align 4
446 %.zero.addr = alloca i32, align 4
447 %global_args = alloca ptr, align 8
448 store i32 0, ptr %.zero.addr, align 4
449 store i16 %0, ptr %.addr, align 2
450 store i32 %1, ptr %.addr1, align 4
451 call void @__kmpc_get_shared_variables(ptr %global_args)
452 call void @__omp_outlined__8(ptr %.addr1, ptr %.zero.addr) #3
; Kernel entry for the no_openmp_attr test (C line 66): the call between the
; two parallel regions carries the "omp_no_openmp" assumption (#8), so it
; cannot hide further parallel regions.
456 define weak void @__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66(ptr %dyn) #0 {
458 %.zero.addr = alloca i32, align 4
459 %.threadid_temp. = alloca i32, align 4
460 store i32 0, ptr %.zero.addr, align 4
461 %0 = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66_kernel_environment, ptr %dyn)
462 %exec_user_code = icmp eq i32 %0, -1
463 br i1 %exec_user_code, label %user_code.entry, label %worker.exit
465 user_code.entry: ; preds = %entry
466 %1 = call i32 @__kmpc_global_thread_num(ptr @1)
467 store i32 %1, ptr %.threadid_temp., align 4
468 call void @__omp_outlined__9(ptr %.threadid_temp., ptr %.zero.addr) #3
469 call void @__kmpc_target_deinit()
472 worker.exit: ; preds = %entry
476 define internal void @__omp_outlined__9(ptr noalias %.global_tid., ptr noalias %.bound_tid.) #0 {
478 %.global_tid..addr = alloca ptr, align 8
479 %.bound_tid..addr = alloca ptr, align 8
480 %captured_vars_addrs = alloca [0 x ptr], align 8
481 %captured_vars_addrs1 = alloca [0 x ptr], align 8
482 store ptr %.global_tid., ptr %.global_tid..addr, align 8
483 store ptr %.bound_tid., ptr %.bound_tid..addr, align 8
484 %0 = load ptr, ptr %.global_tid..addr, align 8
485 %1 = load i32, ptr %0, align 4
486 call void @__kmpc_parallel_51(ptr @1, i32 %1, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__10, ptr @__omp_outlined__10_wrapper, ptr %captured_vars_addrs, i64 0)
487 call void @unknown_no_openmp() #8
488 call void @__kmpc_parallel_51(ptr @1, i32 %1, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__11, ptr @__omp_outlined__11_wrapper, ptr %captured_vars_addrs1, i64 0)
492 define internal void @__omp_outlined__10(ptr noalias %.global_tid., ptr noalias %.bound_tid.) #0 {
494 %.global_tid..addr = alloca ptr, align 8
495 %.bound_tid..addr = alloca ptr, align 8
496 store ptr %.global_tid., ptr %.global_tid..addr, align 8
497 store ptr %.bound_tid., ptr %.bound_tid..addr, align 8
502 define internal void @__omp_outlined__10_wrapper(i16 zeroext %0, i32 %1) #0 {
504 %.addr = alloca i16, align 2
505 %.addr1 = alloca i32, align 4
506 %.zero.addr = alloca i32, align 4
507 %global_args = alloca ptr, align 8
508 store i32 0, ptr %.zero.addr, align 4
509 store i16 %0, ptr %.addr, align 2
510 store i32 %1, ptr %.addr1, align 4
511 call void @__kmpc_get_shared_variables(ptr %global_args)
512 call void @__omp_outlined__10(ptr %.addr1, ptr %.zero.addr) #3
516 define internal void @__omp_outlined__11(ptr noalias %.global_tid., ptr noalias %.bound_tid.) #0 {
518 %.global_tid..addr = alloca ptr, align 8
519 %.bound_tid..addr = alloca ptr, align 8
520 store ptr %.global_tid., ptr %.global_tid..addr, align 8
521 store ptr %.bound_tid., ptr %.bound_tid..addr, align 8
526 define internal void @__omp_outlined__11_wrapper(i16 zeroext %0, i32 %1) #0 {
528 %.addr = alloca i16, align 2
529 %.addr1 = alloca i32, align 4
530 %.zero.addr = alloca i32, align 4
531 %global_args = alloca ptr, align 8
532 store i32 0, ptr %.zero.addr, align 4
533 store i16 %0, ptr %.addr, align 2
534 store i32 %1, ptr %.addr1, align 4
535 call void @__kmpc_get_shared_variables(ptr %global_args)
536 call void @__omp_outlined__11(ptr %.addr1, ptr %.zero.addr) #3
; Kernel entry for the pure test (C line 77): the call between the parallel
; regions is @unknown_pure (readonly/willreturn, #9) and therefore cannot
; contain OpenMP runtime calls.
540 define weak void @__omp_offloading_14_a36502b_simple_state_machine_pure_l77(ptr %dyn) #0 {
542 %.zero.addr = alloca i32, align 4
543 %.threadid_temp. = alloca i32, align 4
544 store i32 0, ptr %.zero.addr, align 4
545 %0 = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_pure_l77_kernel_environment, ptr %dyn)
546 %exec_user_code = icmp eq i32 %0, -1
547 br i1 %exec_user_code, label %user_code.entry, label %worker.exit
549 user_code.entry: ; preds = %entry
550 %1 = call i32 @__kmpc_global_thread_num(ptr @1)
551 store i32 %1, ptr %.threadid_temp., align 4
552 call void @__omp_outlined__12(ptr %.threadid_temp., ptr %.zero.addr) #3
553 call void @__kmpc_target_deinit()
556 worker.exit: ; preds = %entry
560 define internal void @__omp_outlined__12(ptr noalias %.global_tid., ptr noalias %.bound_tid.) #0 {
562 %.global_tid..addr = alloca ptr, align 8
563 %.bound_tid..addr = alloca ptr, align 8
564 %captured_vars_addrs = alloca [0 x ptr], align 8
565 %captured_vars_addrs1 = alloca [0 x ptr], align 8
566 store ptr %.global_tid., ptr %.global_tid..addr, align 8
567 store ptr %.bound_tid., ptr %.bound_tid..addr, align 8
568 call void @unknown_no_openmp() #8
569 %0 = load ptr, ptr %.global_tid..addr, align 8
570 %1 = load i32, ptr %0, align 4
571 call void @__kmpc_parallel_51(ptr @1, i32 %1, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__13, ptr @__omp_outlined__13_wrapper, ptr %captured_vars_addrs, i64 0)
572 call void @unknown_pure() #9
573 call void @__kmpc_parallel_51(ptr @1, i32 %1, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__14, ptr @__omp_outlined__14_wrapper, ptr %captured_vars_addrs1, i64 0)
577 define internal void @__omp_outlined__13(ptr noalias %.global_tid., ptr noalias %.bound_tid.) #0 {
579 %.global_tid..addr = alloca ptr, align 8
580 %.bound_tid..addr = alloca ptr, align 8
581 store ptr %.global_tid., ptr %.global_tid..addr, align 8
582 store ptr %.bound_tid., ptr %.bound_tid..addr, align 8
587 define internal void @__omp_outlined__13_wrapper(i16 zeroext %0, i32 %1) #0 {
589 %.addr = alloca i16, align 2
590 %.addr1 = alloca i32, align 4
591 %.zero.addr = alloca i32, align 4
592 %global_args = alloca ptr, align 8
593 store i32 0, ptr %.zero.addr, align 4
594 store i16 %0, ptr %.addr, align 2
595 store i32 %1, ptr %.addr1, align 4
596 call void @__kmpc_get_shared_variables(ptr %global_args)
597 call void @__omp_outlined__13(ptr %.addr1, ptr %.zero.addr) #3
; Pure external (attribute #5: readonly willreturn).
601 declare void @unknown_pure() #5
603 define internal void @__omp_outlined__14(ptr noalias %.global_tid., ptr noalias %.bound_tid.) #0 {
605 %.global_tid..addr = alloca ptr, align 8
606 %.bound_tid..addr = alloca ptr, align 8
607 store ptr %.global_tid., ptr %.global_tid..addr, align 8
608 store ptr %.bound_tid., ptr %.bound_tid..addr, align 8
613 define internal void @__omp_outlined__14_wrapper(i16 zeroext %0, i32 %1) #0 {
615 %.addr = alloca i16, align 2
616 %.addr1 = alloca i32, align 4
617 %.zero.addr = alloca i32, align 4
618 %global_args = alloca ptr, align 8
619 store i32 0, ptr %.zero.addr, align 4
620 store i16 %0, ptr %.addr, align 2
621 store i32 %1, ptr %.addr1, align 4
622 call void @__kmpc_get_shared_variables(ptr %global_args)
623 call void @__omp_outlined__14(ptr %.addr1, ptr %.zero.addr) #3
; Kernel entry for the nested-recursive test (C line 92): the parallel region
; is only reachable through a recursive helper chain.
627 define weak void @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92(ptr %dyn) #0 {
629 %.zero.addr = alloca i32, align 4
630 %.threadid_temp. = alloca i32, align 4
631 store i32 0, ptr %.zero.addr, align 4
632 %0 = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92_kernel_environment, ptr %dyn)
633 %exec_user_code = icmp eq i32 %0, -1
634 br i1 %exec_user_code, label %user_code.entry, label %worker.exit
636 user_code.entry: ; preds = %entry
637 %1 = call i32 @__kmpc_global_thread_num(ptr @1)
638 store i32 %1, ptr %.threadid_temp., align 4
639 call void @__omp_outlined__15(ptr %.threadid_temp., ptr %.zero.addr) #3
640 call void @__kmpc_target_deinit()
643 worker.exit: ; preds = %entry
647 define internal void @__omp_outlined__15(ptr noalias %.global_tid., ptr noalias %.bound_tid.) #0 {
649 %.global_tid..addr = alloca ptr, align 8
650 %.bound_tid..addr = alloca ptr, align 8
651 store ptr %.global_tid., ptr %.global_tid..addr, align 8
652 store ptr %.bound_tid., ptr %.bound_tid..addr, align 8
653 %call = call i32 @omp_get_thread_num() #7
654 call void @simple_state_machine_interprocedural_nested_recursive_after(i32 %call) #7
; Self-recursive helper: counts %a down to 0, then (per the C source above)
; calls the _after_after helper that launches a parallel region.
658 define hidden void @simple_state_machine_interprocedural_nested_recursive_after(i32 %a) #1 {
660 %a.addr = alloca i32, align 4
661 store i32 %a, ptr %a.addr, align 4
662 %0 = load i32, ptr %a.addr, align 4
663 %cmp = icmp eq i32 %0, 0
664 br i1 %cmp, label %if.then, label %if.end
666 if.then: ; preds = %entry
669 if.end: ; preds = %entry
670 %1 = load i32, ptr %a.addr, align 4
671 %sub = sub nsw i32 %1, 1
672 call void @simple_state_machine_interprocedural_nested_recursive_after(i32 %sub) #7
673 call void @simple_state_machine_interprocedural_nested_recursive_after_after() #7
676 return: ; preds = %if.end, %if.then
; Declared variadic here although the C declaration takes no arguments.
680 declare i32 @omp_get_thread_num(...) #4
; Kernel entry for the weak-callee test (C line 112): the outlined region only
; calls @weak_callee_empty, whose weak linkage prevents the optimizer from
; relying on its (empty) body.
682 define weak void @__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112(ptr %dyn) #0 {
684 %.zero.addr = alloca i32, align 4
685 %.threadid_temp. = alloca i32, align 4
686 store i32 0, ptr %.zero.addr, align 4
687 %0 = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112_kernel_environment, ptr %dyn)
688 %exec_user_code = icmp eq i32 %0, -1
689 br i1 %exec_user_code, label %user_code.entry, label %worker.exit
691 user_code.entry: ; preds = %entry
692 %1 = call i32 @__kmpc_global_thread_num(ptr @1)
693 store i32 %1, ptr %.threadid_temp., align 4
694 call void @__omp_outlined__16(ptr %.threadid_temp., ptr %.zero.addr) #3
695 call void @__kmpc_target_deinit()
698 worker.exit: ; preds = %entry
702 define internal void @__omp_outlined__16(ptr noalias %.global_tid., ptr noalias %.bound_tid.) #0 {
704 %.global_tid..addr = alloca ptr, align 8
705 %.bound_tid..addr = alloca ptr, align 8
706 store ptr %.global_tid., ptr %.global_tid..addr, align 8
707 store ptr %.bound_tid., ptr %.bound_tid..addr, align 8
708 call void @weak_callee_empty() #7
; Weak, empty callee — its body may be replaced at link time.
712 define weak hidden void @weak_callee_empty() #1 {
717 declare i32 @__kmpc_single(ptr, i32) #6
719 declare void @__kmpc_end_single(ptr, i32) #6
721 declare void @__kmpc_barrier(ptr, i32) #6
; Parallel region bodies and worker-thread wrappers launched from the
; interprocedural helpers (before: 17, after: 18, after_after: 19).
723 define internal void @__omp_outlined__17(ptr noalias %.global_tid., ptr noalias %.bound_tid.) #0 {
725 %.global_tid..addr = alloca ptr, align 8
726 %.bound_tid..addr = alloca ptr, align 8
727 store ptr %.global_tid., ptr %.global_tid..addr, align 8
728 store ptr %.bound_tid., ptr %.bound_tid..addr, align 8
733 define internal void @__omp_outlined__17_wrapper(i16 zeroext %0, i32 %1) #0 {
735 %.addr = alloca i16, align 2
736 %.addr1 = alloca i32, align 4
737 %.zero.addr = alloca i32, align 4
738 %global_args = alloca ptr, align 8
739 store i32 0, ptr %.zero.addr, align 4
740 store i16 %0, ptr %.addr, align 2
741 store i32 %1, ptr %.addr1, align 4
742 call void @__kmpc_get_shared_variables(ptr %global_args)
743 call void @__omp_outlined__17(ptr %.addr1, ptr %.zero.addr) #3
747 define internal void @__omp_outlined__18(ptr noalias %.global_tid., ptr noalias %.bound_tid.) #0 {
749 %.global_tid..addr = alloca ptr, align 8
750 %.bound_tid..addr = alloca ptr, align 8
751 store ptr %.global_tid., ptr %.global_tid..addr, align 8
752 store ptr %.bound_tid., ptr %.bound_tid..addr, align 8
757 define internal void @__omp_outlined__18_wrapper(i16 zeroext %0, i32 %1) #0 {
759 %.addr = alloca i16, align 2
760 %.addr1 = alloca i32, align 4
761 %.zero.addr = alloca i32, align 4
762 %global_args = alloca ptr, align 8
763 store i32 0, ptr %.zero.addr, align 4
764 store i16 %0, ptr %.addr, align 2
765 store i32 %1, ptr %.addr1, align 4
766 call void @__kmpc_get_shared_variables(ptr %global_args)
767 call void @__omp_outlined__18(ptr %.addr1, ptr %.zero.addr) #3
; Leaf of the recursive-test call chain: launches parallel region 19.
771 define hidden void @simple_state_machine_interprocedural_nested_recursive_after_after() #1 {
773 %captured_vars_addrs = alloca [0 x ptr], align 8
774 %0 = call i32 @__kmpc_global_thread_num(ptr @2)
775 call void @__kmpc_parallel_51(ptr @2, i32 %0, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__19, ptr @__omp_outlined__19_wrapper, ptr %captured_vars_addrs, i64 0)
779 define internal void @__omp_outlined__19(ptr noalias %.global_tid., ptr noalias %.bound_tid.) #0 {
781 %.global_tid..addr = alloca ptr, align 8
782 %.bound_tid..addr = alloca ptr, align 8
783 store ptr %.global_tid., ptr %.global_tid..addr, align 8
784 store ptr %.bound_tid., ptr %.bound_tid..addr, align 8
789 define internal void @__omp_outlined__19_wrapper(i16 zeroext %0, i32 %1) #0 {
791 %.addr = alloca i16, align 2
792 %.addr1 = alloca i32, align 4
793 %.zero.addr = alloca i32, align 4
794 %global_args = alloca ptr, align 8
795 store i32 0, ptr %.zero.addr, align 4
796 store i16 %0, ptr %.addr, align 2
797 store i32 %1, ptr %.addr1, align 4
798 call void @__kmpc_get_shared_variables(ptr %global_args)
799 call void @__omp_outlined__19(ptr %.addr1, ptr %.zero.addr) #3
; Attribute groups: #0 = kernel-side definitions, #1 = device helpers,
; #2/#8 = "omp_no_openmp" assumption carriers, #5/#9 = pure (readonly,
; willreturn) external, #4/#7 = plain convergent externals/call sites.
803 attributes #0 = { convergent noinline norecurse nounwind "kernel" "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
804 attributes #1 = { convergent noinline nounwind "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
805 attributes #2 = { convergent "frame-pointer"="none" "llvm.assume"="omp_no_openmp" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
806 attributes #3 = { nounwind }
807 attributes #4 = { convergent "frame-pointer"="none" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
808 attributes #5 = { convergent nounwind readonly willreturn "frame-pointer"="none" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
809 attributes #6 = { convergent nounwind }
810 attributes #7 = { convergent }
811 attributes #8 = { convergent "llvm.assume"="omp_no_openmp" }
812 attributes #9 = { convergent nounwind readonly willreturn }
; Offload-entry records, nvvm kernel annotations (one per target region
; defined above), and module flags marking this as an OpenMP 5.0 device image.
814 !omp_offload.info = !{!0, !1, !2, !3, !4, !5, !6, !7}
815 !nvvm.annotations = !{!8, !9, !10, !11, !12, !13, !14, !15}
816 !llvm.module.flags = !{!16, !17, !18}
818 !0 = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_interprocedural", i32 39, i32 2}
819 !1 = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_no_openmp_attr", i32 66, i32 4}
820 !2 = !{i32 0, i32 20, i32 171331627, !"no_state_machine_needed", i32 14, i32 0}
821 !3 = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_with_fallback", i32 55, i32 3}
822 !4 = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_pure", i32 77, i32 5}
823 !5 = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_interprocedural_nested_recursive", i32 92, i32 6}
824 !6 = !{i32 0, i32 20, i32 171331627, !"no_state_machine_weak_callee", i32 112, i32 7}
825 !7 = !{i32 0, i32 20, i32 171331627, !"simple_state_machine", i32 22, i32 1}
826 !8 = !{ptr @__omp_offloading_14_a36502b_no_state_machine_needed_l14, !"kernel", i32 1}
827 !9 = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_l22, !"kernel", i32 1}
828 !10 = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39, !"kernel", i32 1}
829 !11 = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55, !"kernel", i32 1}
830 !12 = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66, !"kernel", i32 1}
831 !13 = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_pure_l77, !"kernel", i32 1}
832 !14 = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92, !"kernel", i32 1}
833 !15 = !{ptr @__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112, !"kernel", i32 1}
834 !16 = !{i32 1, !"wchar_size", i32 4}
835 !17 = !{i32 7, !"openmp", i32 50}
836 !18 = !{i32 7, !"openmp-device", i32 50}
838 ; AMDGPU: @[[GLOB0:[0-9]+]] = private unnamed_addr constant [23 x i8] c"
839 ; AMDGPU: @[[GLOB1:[0-9]+]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 0, ptr @[[GLOB0]] }, align 8
840 ; AMDGPU: @[[GLOB2:[0-9]+]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 2, i32 0, ptr @[[GLOB0]] }, align 8
841 ; AMDGPU: @G = external global i32, align 4
842 ; AMDGPU: @[[GLOB3:[0-9]+]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 322, i32 2, i32 0, ptr @[[GLOB0]] }, align 8
843 ; AMDGPU: @__omp_offloading_14_a36502b_no_state_machine_needed_l14_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 0, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
844 ; AMDGPU: @__omp_offloading_14_a36502b_simple_state_machine_l22_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 0, i8 1, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
845 ; AMDGPU: @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 0, i8 1, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
846 ; AMDGPU: @__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 0, i8 1, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
847 ; AMDGPU: @__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 0, i8 1, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
848 ; AMDGPU: @__omp_offloading_14_a36502b_simple_state_machine_pure_l77_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 0, i8 1, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
849 ; AMDGPU: @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 0, i8 1, i8 3, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
850 ; AMDGPU: @__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 0, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
851 ; AMDGPU: @__omp_outlined__2_wrapper.ID = private constant i8 undef
852 ; AMDGPU: @__omp_outlined__3_wrapper.ID = private constant i8 undef
853 ; AMDGPU: @__omp_outlined__5_wrapper.ID = private constant i8 undef
854 ; AMDGPU: @__omp_outlined__7_wrapper.ID = private constant i8 undef
855 ; AMDGPU: @__omp_outlined__8_wrapper.ID = private constant i8 undef
856 ; AMDGPU: @__omp_outlined__10_wrapper.ID = private constant i8 undef
857 ; AMDGPU: @__omp_outlined__11_wrapper.ID = private constant i8 undef
858 ; AMDGPU: @__omp_outlined__13_wrapper.ID = private constant i8 undef
859 ; AMDGPU: @__omp_outlined__14_wrapper.ID = private constant i8 undef
861 ; NVPTX: @[[GLOB0:[0-9]+]] = private unnamed_addr constant [23 x i8] c"
862 ; NVPTX: @[[GLOB1:[0-9]+]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 0, ptr @[[GLOB0]] }, align 8
863 ; NVPTX: @[[GLOB2:[0-9]+]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 2, i32 0, ptr @[[GLOB0]] }, align 8
864 ; NVPTX: @G = external global i32, align 4
865 ; NVPTX: @[[GLOB3:[0-9]+]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 322, i32 2, i32 0, ptr @[[GLOB0]] }, align 8
866 ; NVPTX: @__omp_offloading_14_a36502b_no_state_machine_needed_l14_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 0, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
867 ; NVPTX: @__omp_offloading_14_a36502b_simple_state_machine_l22_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 0, i8 1, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
868 ; NVPTX: @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 0, i8 1, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
869 ; NVPTX: @__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 0, i8 1, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
870 ; NVPTX: @__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 0, i8 1, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
871 ; NVPTX: @__omp_offloading_14_a36502b_simple_state_machine_pure_l77_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 0, i8 1, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
872 ; NVPTX: @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 0, i8 1, i8 3, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
873 ; NVPTX: @__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 0, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
874 ; NVPTX: @__omp_outlined__2_wrapper.ID = private constant i8 undef
875 ; NVPTX: @__omp_outlined__3_wrapper.ID = private constant i8 undef
876 ; NVPTX: @__omp_outlined__5_wrapper.ID = private constant i8 undef
877 ; NVPTX: @__omp_outlined__7_wrapper.ID = private constant i8 undef
878 ; NVPTX: @__omp_outlined__8_wrapper.ID = private constant i8 undef
879 ; NVPTX: @__omp_outlined__10_wrapper.ID = private constant i8 undef
880 ; NVPTX: @__omp_outlined__11_wrapper.ID = private constant i8 undef
881 ; NVPTX: @__omp_outlined__13_wrapper.ID = private constant i8 undef
882 ; NVPTX: @__omp_outlined__14_wrapper.ID = private constant i8 undef
884 ; AMDGPU-DISABLED: @[[GLOB0:[0-9]+]] = private unnamed_addr constant [23 x i8] c"
885 ; AMDGPU-DISABLED: @[[GLOB1:[0-9]+]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 0, ptr @[[GLOB0]] }, align 8
886 ; AMDGPU-DISABLED: @[[GLOB2:[0-9]+]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 2, i32 0, ptr @[[GLOB0]] }, align 8
887 ; AMDGPU-DISABLED: @G = external global i32, align 4
888 ; AMDGPU-DISABLED: @[[GLOB3:[0-9]+]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 322, i32 2, i32 0, ptr @[[GLOB0]] }, align 8
889 ; AMDGPU-DISABLED: @__omp_offloading_14_a36502b_no_state_machine_needed_l14_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
890 ; AMDGPU-DISABLED: @__omp_offloading_14_a36502b_simple_state_machine_l22_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 1, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
891 ; AMDGPU-DISABLED: @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 1, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
892 ; AMDGPU-DISABLED: @__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 1, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
893 ; AMDGPU-DISABLED: @__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 1, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
894 ; AMDGPU-DISABLED: @__omp_offloading_14_a36502b_simple_state_machine_pure_l77_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 1, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
895 ; AMDGPU-DISABLED: @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 1, i8 3, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
896 ; AMDGPU-DISABLED: @__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
898 ; NVPTX-DISABLED: @[[GLOB0:[0-9]+]] = private unnamed_addr constant [23 x i8] c"
899 ; NVPTX-DISABLED: @[[GLOB1:[0-9]+]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 0, ptr @[[GLOB0]] }, align 8
900 ; NVPTX-DISABLED: @[[GLOB2:[0-9]+]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 2, i32 0, ptr @[[GLOB0]] }, align 8
901 ; NVPTX-DISABLED: @G = external global i32, align 4
902 ; NVPTX-DISABLED: @[[GLOB3:[0-9]+]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 322, i32 2, i32 0, ptr @[[GLOB0]] }, align 8
903 ; NVPTX-DISABLED: @__omp_offloading_14_a36502b_no_state_machine_needed_l14_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
904 ; NVPTX-DISABLED: @__omp_offloading_14_a36502b_simple_state_machine_l22_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 1, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
905 ; NVPTX-DISABLED: @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 1, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
906 ; NVPTX-DISABLED: @__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 1, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
907 ; NVPTX-DISABLED: @__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 1, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
908 ; NVPTX-DISABLED: @__omp_offloading_14_a36502b_simple_state_machine_pure_l77_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 1, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
909 ; NVPTX-DISABLED: @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 1, i8 3, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
910 ; NVPTX-DISABLED: @__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
912 ; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
913 ; AMDGPU-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_no_state_machine_needed_l14
914 ; AMDGPU-SAME: (ptr [[DYN:%.*]]) #[[ATTR0:[0-9]+]] {
915 ; AMDGPU-NEXT: entry:
916 ; AMDGPU-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
917 ; AMDGPU-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
918 ; AMDGPU-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_no_state_machine_needed_l14_kernel_environment, ptr [[DYN]])
919 ; AMDGPU-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
920 ; AMDGPU-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
921 ; AMDGPU: user_code.entry:
922 ; AMDGPU-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3:[0-9]+]]
923 ; AMDGPU-NEXT: call void @__omp_outlined__(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
924 ; AMDGPU-NEXT: call void @__kmpc_target_deinit()
925 ; AMDGPU-NEXT: ret void
926 ; AMDGPU: worker.exit:
927 ; AMDGPU-NEXT: ret void
930 ; AMDGPU-LABEL: define {{[^@]+}}@__kmpc_target_init
931 ; AMDGPU-SAME: (ptr [[TMP0:%.*]], ptr [[TMP1:%.*]]) {
932 ; AMDGPU-NEXT: ret i32 0
935 ; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
936 ; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__
937 ; AMDGPU-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
938 ; AMDGPU-NEXT: entry:
939 ; AMDGPU-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
940 ; AMDGPU-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
941 ; AMDGPU-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR9:[0-9]+]]
942 ; AMDGPU-NEXT: call void @unknown_no_openmp() #[[ATTR10:[0-9]+]]
943 ; AMDGPU-NEXT: ret void
946 ; AMDGPU: Function Attrs: convergent noinline nounwind
947 ; AMDGPU-LABEL: define {{[^@]+}}@no_parallel_region_in_here.internalized
948 ; AMDGPU-SAME: () #[[ATTR1:[0-9]+]] {
949 ; AMDGPU-NEXT: entry:
950 ; AMDGPU-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]]) #[[ATTR3]]
951 ; AMDGPU-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_single(ptr @[[GLOB2]], i32 [[TMP0]]) #[[ATTR3]]
952 ; AMDGPU-NEXT: [[TMP2:%.*]] = icmp ne i32 [[TMP1]], 0
953 ; AMDGPU-NEXT: br i1 [[TMP2]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
954 ; AMDGPU: omp_if.then:
955 ; AMDGPU-NEXT: store i32 0, ptr @G, align 4
956 ; AMDGPU-NEXT: call void @__kmpc_end_single(ptr @[[GLOB2]], i32 [[TMP0]]) #[[ATTR3]]
957 ; AMDGPU-NEXT: br label [[OMP_IF_END]]
958 ; AMDGPU: omp_if.end:
959 ; AMDGPU-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3]], i32 [[TMP0]]) #[[ATTR3]]
960 ; AMDGPU-NEXT: ret void
963 ; AMDGPU: Function Attrs: convergent noinline nounwind
964 ; AMDGPU-LABEL: define {{[^@]+}}@no_parallel_region_in_here
965 ; AMDGPU-SAME: () #[[ATTR1]] {
966 ; AMDGPU-NEXT: entry:
967 ; AMDGPU-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]])
968 ; AMDGPU-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_single(ptr @[[GLOB2]], i32 [[TMP0]])
969 ; AMDGPU-NEXT: [[TMP2:%.*]] = icmp ne i32 [[TMP1]], 0
970 ; AMDGPU-NEXT: br i1 [[TMP2]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
971 ; AMDGPU: omp_if.then:
972 ; AMDGPU-NEXT: store i32 0, ptr @G, align 4
973 ; AMDGPU-NEXT: call void @__kmpc_end_single(ptr @[[GLOB2]], i32 [[TMP0]])
974 ; AMDGPU-NEXT: br label [[OMP_IF_END]]
975 ; AMDGPU: omp_if.end:
976 ; AMDGPU-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3]], i32 [[TMP0]])
977 ; AMDGPU-NEXT: ret void
980 ; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
981 ; AMDGPU-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_l22
982 ; AMDGPU-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
983 ; AMDGPU-NEXT: entry:
984 ; AMDGPU-NEXT: [[WORKER_WORK_FN_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
985 ; AMDGPU-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
986 ; AMDGPU-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
987 ; AMDGPU-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_l22_kernel_environment, ptr [[DYN]])
988 ; AMDGPU-NEXT: [[THREAD_IS_WORKER:%.*]] = icmp ne i32 [[TMP0]], -1
989 ; AMDGPU-NEXT: br i1 [[THREAD_IS_WORKER]], label [[IS_WORKER_CHECK:%.*]], label [[THREAD_USER_CODE_CHECK:%.*]]
990 ; AMDGPU: is_worker_check:
991 ; AMDGPU-NEXT: [[BLOCK_HW_SIZE:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
992 ; AMDGPU-NEXT: [[WARP_SIZE:%.*]] = call i32 @__kmpc_get_warp_size()
993 ; AMDGPU-NEXT: [[BLOCK_SIZE:%.*]] = sub i32 [[BLOCK_HW_SIZE]], [[WARP_SIZE]]
994 ; AMDGPU-NEXT: [[THREAD_IS_MAIN_OR_WORKER:%.*]] = icmp slt i32 [[TMP0]], [[BLOCK_SIZE]]
995 ; AMDGPU-NEXT: br i1 [[THREAD_IS_MAIN_OR_WORKER]], label [[WORKER_STATE_MACHINE_BEGIN:%.*]], label [[WORKER_STATE_MACHINE_FINISHED:%.*]]
996 ; AMDGPU: worker_state_machine.begin:
997 ; AMDGPU-NEXT: call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]])
998 ; AMDGPU-NEXT: [[WORKER_WORK_FN_ADDR_GENERIC:%.*]] = addrspacecast ptr addrspace(5) [[WORKER_WORK_FN_ADDR]] to ptr
999 ; AMDGPU-NEXT: [[WORKER_IS_ACTIVE:%.*]] = call i1 @__kmpc_kernel_parallel(ptr [[WORKER_WORK_FN_ADDR_GENERIC]])
1000 ; AMDGPU-NEXT: [[WORKER_WORK_FN:%.*]] = load ptr, ptr [[WORKER_WORK_FN_ADDR_GENERIC]], align 8
1001 ; AMDGPU-NEXT: [[WORKER_IS_DONE:%.*]] = icmp eq ptr [[WORKER_WORK_FN]], null
1002 ; AMDGPU-NEXT: br i1 [[WORKER_IS_DONE]], label [[WORKER_STATE_MACHINE_FINISHED]], label [[WORKER_STATE_MACHINE_IS_ACTIVE_CHECK:%.*]]
1003 ; AMDGPU: worker_state_machine.finished:
1004 ; AMDGPU-NEXT: ret void
1005 ; AMDGPU: worker_state_machine.is_active.check:
1006 ; AMDGPU-NEXT: br i1 [[WORKER_IS_ACTIVE]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK:%.*]], label [[WORKER_STATE_MACHINE_DONE_BARRIER:%.*]]
1007 ; AMDGPU: worker_state_machine.parallel_region.check:
1008 ; AMDGPU-NEXT: [[WORKER_CHECK_PARALLEL_REGION:%.*]] = icmp eq ptr [[WORKER_WORK_FN]], @__omp_outlined__2_wrapper.ID
1009 ; AMDGPU-NEXT: br i1 [[WORKER_CHECK_PARALLEL_REGION]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK1:%.*]]
1010 ; AMDGPU: worker_state_machine.parallel_region.execute:
1011 ; AMDGPU-NEXT: call void @__omp_outlined__2_wrapper(i16 0, i32 [[TMP0]])
1012 ; AMDGPU-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END:%.*]]
1013 ; AMDGPU: worker_state_machine.parallel_region.check1:
1014 ; AMDGPU-NEXT: br i1 true, label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE2:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK3:%.*]]
1015 ; AMDGPU: worker_state_machine.parallel_region.execute2:
1016 ; AMDGPU-NEXT: call void @__omp_outlined__3_wrapper(i16 0, i32 [[TMP0]])
1017 ; AMDGPU-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]]
1018 ; AMDGPU: worker_state_machine.parallel_region.check3:
1019 ; AMDGPU-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]]
1020 ; AMDGPU: worker_state_machine.parallel_region.end:
1021 ; AMDGPU-NEXT: call void @__kmpc_kernel_end_parallel()
1022 ; AMDGPU-NEXT: br label [[WORKER_STATE_MACHINE_DONE_BARRIER]]
1023 ; AMDGPU: worker_state_machine.done.barrier:
1024 ; AMDGPU-NEXT: call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]])
1025 ; AMDGPU-NEXT: br label [[WORKER_STATE_MACHINE_BEGIN]]
1026 ; AMDGPU: thread.user_code.check:
1027 ; AMDGPU-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
1028 ; AMDGPU-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
1029 ; AMDGPU: user_code.entry:
1030 ; AMDGPU-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
1031 ; AMDGPU-NEXT: call void @__omp_outlined__1(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
1032 ; AMDGPU-NEXT: call void @__kmpc_target_deinit()
1033 ; AMDGPU-NEXT: ret void
1034 ; AMDGPU: worker.exit:
1035 ; AMDGPU-NEXT: ret void
1038 ; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
1039 ; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__1
1040 ; AMDGPU-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1041 ; AMDGPU-NEXT: entry:
1042 ; AMDGPU-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1043 ; AMDGPU-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
1044 ; AMDGPU-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
1045 ; AMDGPU-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
1046 ; AMDGPU-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__2, ptr @__omp_outlined__2_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
1047 ; AMDGPU-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR9]]
1048 ; AMDGPU-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__3, ptr @__omp_outlined__3_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
1049 ; AMDGPU-NEXT: ret void
1052 ; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
1053 ; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__2
1054 ; AMDGPU-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1055 ; AMDGPU-NEXT: entry:
1056 ; AMDGPU-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1057 ; AMDGPU-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1058 ; AMDGPU-NEXT: call void @p0() #[[ATTR11:[0-9]+]]
1059 ; AMDGPU-NEXT: ret void
1062 ; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
1063 ; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__2_wrapper
1064 ; AMDGPU-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
1065 ; AMDGPU-NEXT: entry:
1066 ; AMDGPU-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
1067 ; AMDGPU-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
1068 ; AMDGPU-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1069 ; AMDGPU-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
1070 ; AMDGPU-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
1071 ; AMDGPU-NEXT: call void @__omp_outlined__2(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
1072 ; AMDGPU-NEXT: ret void
1075 ; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
1076 ; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__3
1077 ; AMDGPU-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1078 ; AMDGPU-NEXT: entry:
1079 ; AMDGPU-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1080 ; AMDGPU-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1081 ; AMDGPU-NEXT: call void @p1() #[[ATTR11]]
1082 ; AMDGPU-NEXT: ret void
1085 ; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
1086 ; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__3_wrapper
1087 ; AMDGPU-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
1088 ; AMDGPU-NEXT: entry:
1089 ; AMDGPU-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
1090 ; AMDGPU-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
1091 ; AMDGPU-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1092 ; AMDGPU-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
1093 ; AMDGPU-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
1094 ; AMDGPU-NEXT: call void @__omp_outlined__3(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
1095 ; AMDGPU-NEXT: ret void
1098 ; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
1099 ; AMDGPU-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39
1100 ; AMDGPU-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
1101 ; AMDGPU-NEXT: entry:
1102 ; AMDGPU-NEXT: [[WORKER_WORK_FN_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
1103 ; AMDGPU-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1104 ; AMDGPU-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
1105 ; AMDGPU-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39_kernel_environment, ptr [[DYN]])
1106 ; AMDGPU-NEXT: [[THREAD_IS_WORKER:%.*]] = icmp ne i32 [[TMP0]], -1
1107 ; AMDGPU-NEXT: br i1 [[THREAD_IS_WORKER]], label [[IS_WORKER_CHECK:%.*]], label [[THREAD_USER_CODE_CHECK:%.*]]
1108 ; AMDGPU: is_worker_check:
1109 ; AMDGPU-NEXT: [[BLOCK_HW_SIZE:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
1110 ; AMDGPU-NEXT: [[WARP_SIZE:%.*]] = call i32 @__kmpc_get_warp_size()
1111 ; AMDGPU-NEXT: [[BLOCK_SIZE:%.*]] = sub i32 [[BLOCK_HW_SIZE]], [[WARP_SIZE]]
1112 ; AMDGPU-NEXT: [[THREAD_IS_MAIN_OR_WORKER:%.*]] = icmp slt i32 [[TMP0]], [[BLOCK_SIZE]]
1113 ; AMDGPU-NEXT: br i1 [[THREAD_IS_MAIN_OR_WORKER]], label [[WORKER_STATE_MACHINE_BEGIN:%.*]], label [[WORKER_STATE_MACHINE_FINISHED:%.*]]
1114 ; AMDGPU: worker_state_machine.begin:
1115 ; AMDGPU-NEXT: call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]])
1116 ; AMDGPU-NEXT: [[WORKER_WORK_FN_ADDR_GENERIC:%.*]] = addrspacecast ptr addrspace(5) [[WORKER_WORK_FN_ADDR]] to ptr
1117 ; AMDGPU-NEXT: [[WORKER_IS_ACTIVE:%.*]] = call i1 @__kmpc_kernel_parallel(ptr [[WORKER_WORK_FN_ADDR_GENERIC]])
1118 ; AMDGPU-NEXT: [[WORKER_WORK_FN:%.*]] = load ptr, ptr [[WORKER_WORK_FN_ADDR_GENERIC]], align 8
1119 ; AMDGPU-NEXT: [[WORKER_IS_DONE:%.*]] = icmp eq ptr [[WORKER_WORK_FN]], null
1120 ; AMDGPU-NEXT: br i1 [[WORKER_IS_DONE]], label [[WORKER_STATE_MACHINE_FINISHED]], label [[WORKER_STATE_MACHINE_IS_ACTIVE_CHECK:%.*]]
1121 ; AMDGPU: worker_state_machine.finished:
1122 ; AMDGPU-NEXT: ret void
1123 ; AMDGPU: worker_state_machine.is_active.check:
1124 ; AMDGPU-NEXT: br i1 [[WORKER_IS_ACTIVE]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK:%.*]], label [[WORKER_STATE_MACHINE_DONE_BARRIER:%.*]]
1125 ; AMDGPU: worker_state_machine.parallel_region.check:
1126 ; AMDGPU-NEXT: [[WORKER_CHECK_PARALLEL_REGION:%.*]] = icmp eq ptr [[WORKER_WORK_FN]], @__omp_outlined__17_wrapper
1127 ; AMDGPU-NEXT: br i1 [[WORKER_CHECK_PARALLEL_REGION]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK1:%.*]]
1128 ; AMDGPU: worker_state_machine.parallel_region.execute:
1129 ; AMDGPU-NEXT: call void @__omp_outlined__17_wrapper(i16 0, i32 [[TMP0]])
1130 ; AMDGPU-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END:%.*]]
1131 ; AMDGPU: worker_state_machine.parallel_region.check1:
1132 ; AMDGPU-NEXT: [[WORKER_CHECK_PARALLEL_REGION4:%.*]] = icmp eq ptr [[WORKER_WORK_FN]], @__omp_outlined__5_wrapper.ID
1133 ; AMDGPU-NEXT: br i1 [[WORKER_CHECK_PARALLEL_REGION4]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE2:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK3:%.*]]
1134 ; AMDGPU: worker_state_machine.parallel_region.execute2:
1135 ; AMDGPU-NEXT: call void @__omp_outlined__5_wrapper(i16 0, i32 [[TMP0]])
1136 ; AMDGPU-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]]
1137 ; AMDGPU: worker_state_machine.parallel_region.check3:
1138 ; AMDGPU-NEXT: br i1 true, label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE5:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK6:%.*]]
1139 ; AMDGPU: worker_state_machine.parallel_region.execute5:
1140 ; AMDGPU-NEXT: call void @__omp_outlined__18_wrapper(i16 0, i32 [[TMP0]])
1141 ; AMDGPU-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]]
1142 ; AMDGPU: worker_state_machine.parallel_region.check6:
1143 ; AMDGPU-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]]
1144 ; AMDGPU: worker_state_machine.parallel_region.end:
1145 ; AMDGPU-NEXT: call void @__kmpc_kernel_end_parallel()
1146 ; AMDGPU-NEXT: br label [[WORKER_STATE_MACHINE_DONE_BARRIER]]
1147 ; AMDGPU: worker_state_machine.done.barrier:
1148 ; AMDGPU-NEXT: call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]])
1149 ; AMDGPU-NEXT: br label [[WORKER_STATE_MACHINE_BEGIN]]
1150 ; AMDGPU: thread.user_code.check:
1151 ; AMDGPU-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
1152 ; AMDGPU-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
1153 ; AMDGPU: user_code.entry:
1154 ; AMDGPU-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
1155 ; AMDGPU-NEXT: call void @__omp_outlined__4(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
1156 ; AMDGPU-NEXT: call void @__kmpc_target_deinit()
1157 ; AMDGPU-NEXT: ret void
1158 ; AMDGPU: worker.exit:
1159 ; AMDGPU-NEXT: ret void
1162 ; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
1163 ; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__4
1164 ; AMDGPU-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1165 ; AMDGPU-NEXT: entry:
1166 ; AMDGPU-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1167 ; AMDGPU-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
1168 ; AMDGPU-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
1169 ; AMDGPU-NEXT: call void @simple_state_machine_interprocedural_before.internalized() #[[ATTR9]]
1170 ; AMDGPU-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR9]]
1171 ; AMDGPU-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__5, ptr @__omp_outlined__5_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
1172 ; AMDGPU-NEXT: call void @simple_state_machine_interprocedural_after.internalized() #[[ATTR9]]
1173 ; AMDGPU-NEXT: ret void
1176 ; AMDGPU: Function Attrs: noinline nounwind
1177 ; AMDGPU-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_before.internalized
1178 ; AMDGPU-SAME: () #[[ATTR6:[0-9]+]] {
1179 ; AMDGPU-NEXT: entry:
1180 ; AMDGPU-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
1181 ; AMDGPU-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]]) #[[ATTR3]]
1182 ; AMDGPU-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__17, ptr @__omp_outlined__17_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
1183 ; AMDGPU-NEXT: ret void
1186 ; AMDGPU: Function Attrs: convergent noinline nounwind
1187 ; AMDGPU-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_before
1188 ; AMDGPU-SAME: () #[[ATTR1]] {
1189 ; AMDGPU-NEXT: entry:
1190 ; AMDGPU-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
1191 ; AMDGPU-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]])
1192 ; AMDGPU-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__17, ptr @__omp_outlined__17_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
1193 ; AMDGPU-NEXT: ret void
1196 ; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
1197 ; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__5
1198 ; AMDGPU-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1199 ; AMDGPU-NEXT: entry:
1200 ; AMDGPU-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1201 ; AMDGPU-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1202 ; AMDGPU-NEXT: call void @p1() #[[ATTR11]]
1203 ; AMDGPU-NEXT: ret void
1206 ; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
1207 ; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__5_wrapper
1208 ; AMDGPU-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
1209 ; AMDGPU-NEXT: entry:
1210 ; AMDGPU-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
1211 ; AMDGPU-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
1212 ; AMDGPU-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1213 ; AMDGPU-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
1214 ; AMDGPU-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
1215 ; AMDGPU-NEXT: call void @__omp_outlined__5(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
1216 ; AMDGPU-NEXT: ret void
1219 ; AMDGPU: Function Attrs: noinline nounwind
1220 ; AMDGPU-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_after.internalized
1221 ; AMDGPU-SAME: () #[[ATTR6]] {
1222 ; AMDGPU-NEXT: entry:
1223 ; AMDGPU-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
1224 ; AMDGPU-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]]) #[[ATTR3]]
1225 ; AMDGPU-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__18, ptr @__omp_outlined__18_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
1226 ; AMDGPU-NEXT: ret void
1229 ; AMDGPU: Function Attrs: convergent noinline nounwind
1230 ; AMDGPU-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_after
1231 ; AMDGPU-SAME: () #[[ATTR1]] {
1232 ; AMDGPU-NEXT: entry:
1233 ; AMDGPU-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
1234 ; AMDGPU-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]])
1235 ; AMDGPU-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__18, ptr @__omp_outlined__18_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
1236 ; AMDGPU-NEXT: ret void
1239 ; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
1240 ; AMDGPU-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55
1241 ; AMDGPU-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
1242 ; AMDGPU-NEXT: entry:
1243 ; AMDGPU-NEXT: [[WORKER_WORK_FN_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
1244 ; AMDGPU-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1245 ; AMDGPU-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
1246 ; AMDGPU-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55_kernel_environment, ptr [[DYN]])
1247 ; AMDGPU-NEXT: [[THREAD_IS_WORKER:%.*]] = icmp ne i32 [[TMP0]], -1
1248 ; AMDGPU-NEXT: br i1 [[THREAD_IS_WORKER]], label [[IS_WORKER_CHECK:%.*]], label [[THREAD_USER_CODE_CHECK:%.*]]
1249 ; AMDGPU: is_worker_check:
1250 ; AMDGPU-NEXT: [[BLOCK_HW_SIZE:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
1251 ; AMDGPU-NEXT: [[WARP_SIZE:%.*]] = call i32 @__kmpc_get_warp_size()
1252 ; AMDGPU-NEXT: [[BLOCK_SIZE:%.*]] = sub i32 [[BLOCK_HW_SIZE]], [[WARP_SIZE]]
1253 ; AMDGPU-NEXT: [[THREAD_IS_MAIN_OR_WORKER:%.*]] = icmp slt i32 [[TMP0]], [[BLOCK_SIZE]]
1254 ; AMDGPU-NEXT: br i1 [[THREAD_IS_MAIN_OR_WORKER]], label [[WORKER_STATE_MACHINE_BEGIN:%.*]], label [[WORKER_STATE_MACHINE_FINISHED:%.*]]
1255 ; AMDGPU: worker_state_machine.begin:
1256 ; AMDGPU-NEXT: call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]])
1257 ; AMDGPU-NEXT: [[WORKER_WORK_FN_ADDR_GENERIC:%.*]] = addrspacecast ptr addrspace(5) [[WORKER_WORK_FN_ADDR]] to ptr
1258 ; AMDGPU-NEXT: [[WORKER_IS_ACTIVE:%.*]] = call i1 @__kmpc_kernel_parallel(ptr [[WORKER_WORK_FN_ADDR_GENERIC]])
1259 ; AMDGPU-NEXT: [[WORKER_WORK_FN:%.*]] = load ptr, ptr [[WORKER_WORK_FN_ADDR_GENERIC]], align 8
1260 ; AMDGPU-NEXT: [[WORKER_IS_DONE:%.*]] = icmp eq ptr [[WORKER_WORK_FN]], null
1261 ; AMDGPU-NEXT: br i1 [[WORKER_IS_DONE]], label [[WORKER_STATE_MACHINE_FINISHED]], label [[WORKER_STATE_MACHINE_IS_ACTIVE_CHECK:%.*]]
1262 ; AMDGPU: worker_state_machine.finished:
1263 ; AMDGPU-NEXT: ret void
1264 ; AMDGPU: worker_state_machine.is_active.check:
1265 ; AMDGPU-NEXT: br i1 [[WORKER_IS_ACTIVE]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK:%.*]], label [[WORKER_STATE_MACHINE_DONE_BARRIER:%.*]]
1266 ; AMDGPU: worker_state_machine.parallel_region.check:
1267 ; AMDGPU-NEXT: [[WORKER_CHECK_PARALLEL_REGION:%.*]] = icmp eq ptr [[WORKER_WORK_FN]], @__omp_outlined__7_wrapper.ID
1268 ; AMDGPU-NEXT: br i1 [[WORKER_CHECK_PARALLEL_REGION]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK1:%.*]]
1269 ; AMDGPU: worker_state_machine.parallel_region.execute:
1270 ; AMDGPU-NEXT: call void @__omp_outlined__7_wrapper(i16 0, i32 [[TMP0]])
1271 ; AMDGPU-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END:%.*]]
1272 ; AMDGPU: worker_state_machine.parallel_region.check1:
1273 ; AMDGPU-NEXT: [[WORKER_CHECK_PARALLEL_REGION4:%.*]] = icmp eq ptr [[WORKER_WORK_FN]], @__omp_outlined__8_wrapper.ID
1274 ; AMDGPU-NEXT: br i1 [[WORKER_CHECK_PARALLEL_REGION4]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE2:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_FALLBACK_EXECUTE:%.*]]
1275 ; AMDGPU: worker_state_machine.parallel_region.execute2:
1276 ; AMDGPU-NEXT: call void @__omp_outlined__8_wrapper(i16 0, i32 [[TMP0]])
1277 ; AMDGPU-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]]
1278 ; AMDGPU: worker_state_machine.parallel_region.fallback.execute:
1279 ; AMDGPU-NEXT: call void [[WORKER_WORK_FN]](i16 0, i32 [[TMP0]])
1280 ; AMDGPU-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]]
1281 ; AMDGPU: worker_state_machine.parallel_region.end:
1282 ; AMDGPU-NEXT: call void @__kmpc_kernel_end_parallel()
1283 ; AMDGPU-NEXT: br label [[WORKER_STATE_MACHINE_DONE_BARRIER]]
1284 ; AMDGPU: worker_state_machine.done.barrier:
1285 ; AMDGPU-NEXT: call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]])
1286 ; AMDGPU-NEXT: br label [[WORKER_STATE_MACHINE_BEGIN]]
1287 ; AMDGPU: thread.user_code.check:
1288 ; AMDGPU-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
1289 ; AMDGPU-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
1290 ; AMDGPU: user_code.entry:
1291 ; AMDGPU-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
1292 ; AMDGPU-NEXT: call void @__omp_outlined__6(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
1293 ; AMDGPU-NEXT: call void @__kmpc_target_deinit()
1294 ; AMDGPU-NEXT: ret void
1295 ; AMDGPU: worker.exit:
1296 ; AMDGPU-NEXT: ret void
1299 ; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
1300 ; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__6
1301 ; AMDGPU-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1302 ; AMDGPU-NEXT: entry:
1303 ; AMDGPU-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1304 ; AMDGPU-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
1305 ; AMDGPU-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
1306 ; AMDGPU-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__7, ptr @__omp_outlined__7_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
1307 ; AMDGPU-NEXT: [[CALL:%.*]] = call i32 @unknown() #[[ATTR11]]
1308 ; AMDGPU-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__8, ptr @__omp_outlined__8_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
1309 ; AMDGPU-NEXT: ret void
1312 ; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
1313 ; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__7
1314 ; AMDGPU-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1315 ; AMDGPU-NEXT: entry:
1316 ; AMDGPU-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1317 ; AMDGPU-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1318 ; AMDGPU-NEXT: call void @p0() #[[ATTR11]]
1319 ; AMDGPU-NEXT: ret void
1322 ; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
1323 ; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__7_wrapper
1324 ; AMDGPU-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
1325 ; AMDGPU-NEXT: entry:
1326 ; AMDGPU-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
1327 ; AMDGPU-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
1328 ; AMDGPU-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1329 ; AMDGPU-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
1330 ; AMDGPU-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
1331 ; AMDGPU-NEXT: call void @__omp_outlined__7(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
1332 ; AMDGPU-NEXT: ret void
1335 ; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
1336 ; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__8
1337 ; AMDGPU-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1338 ; AMDGPU-NEXT: entry:
1339 ; AMDGPU-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1340 ; AMDGPU-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1341 ; AMDGPU-NEXT: call void @p1() #[[ATTR11]]
1342 ; AMDGPU-NEXT: ret void
1345 ; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
1346 ; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__8_wrapper
1347 ; AMDGPU-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
1348 ; AMDGPU-NEXT: entry:
1349 ; AMDGPU-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
1350 ; AMDGPU-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
1351 ; AMDGPU-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1352 ; AMDGPU-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
1353 ; AMDGPU-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
1354 ; AMDGPU-NEXT: call void @__omp_outlined__8(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
1355 ; AMDGPU-NEXT: ret void
1358 ; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
1359 ; AMDGPU-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66
1360 ; AMDGPU-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
1361 ; AMDGPU-NEXT: entry:
1362 ; AMDGPU-NEXT: [[WORKER_WORK_FN_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
1363 ; AMDGPU-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1364 ; AMDGPU-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
1365 ; AMDGPU-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66_kernel_environment, ptr [[DYN]])
1366 ; AMDGPU-NEXT: [[THREAD_IS_WORKER:%.*]] = icmp ne i32 [[TMP0]], -1
1367 ; AMDGPU-NEXT: br i1 [[THREAD_IS_WORKER]], label [[IS_WORKER_CHECK:%.*]], label [[THREAD_USER_CODE_CHECK:%.*]]
1368 ; AMDGPU: is_worker_check:
1369 ; AMDGPU-NEXT: [[BLOCK_HW_SIZE:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
1370 ; AMDGPU-NEXT: [[WARP_SIZE:%.*]] = call i32 @__kmpc_get_warp_size()
1371 ; AMDGPU-NEXT: [[BLOCK_SIZE:%.*]] = sub i32 [[BLOCK_HW_SIZE]], [[WARP_SIZE]]
1372 ; AMDGPU-NEXT: [[THREAD_IS_MAIN_OR_WORKER:%.*]] = icmp slt i32 [[TMP0]], [[BLOCK_SIZE]]
1373 ; AMDGPU-NEXT: br i1 [[THREAD_IS_MAIN_OR_WORKER]], label [[WORKER_STATE_MACHINE_BEGIN:%.*]], label [[WORKER_STATE_MACHINE_FINISHED:%.*]]
1374 ; AMDGPU: worker_state_machine.begin:
1375 ; AMDGPU-NEXT: call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]])
1376 ; AMDGPU-NEXT: [[WORKER_WORK_FN_ADDR_GENERIC:%.*]] = addrspacecast ptr addrspace(5) [[WORKER_WORK_FN_ADDR]] to ptr
1377 ; AMDGPU-NEXT: [[WORKER_IS_ACTIVE:%.*]] = call i1 @__kmpc_kernel_parallel(ptr [[WORKER_WORK_FN_ADDR_GENERIC]])
1378 ; AMDGPU-NEXT: [[WORKER_WORK_FN:%.*]] = load ptr, ptr [[WORKER_WORK_FN_ADDR_GENERIC]], align 8
1379 ; AMDGPU-NEXT: [[WORKER_IS_DONE:%.*]] = icmp eq ptr [[WORKER_WORK_FN]], null
1380 ; AMDGPU-NEXT: br i1 [[WORKER_IS_DONE]], label [[WORKER_STATE_MACHINE_FINISHED]], label [[WORKER_STATE_MACHINE_IS_ACTIVE_CHECK:%.*]]
1381 ; AMDGPU: worker_state_machine.finished:
1382 ; AMDGPU-NEXT: ret void
1383 ; AMDGPU: worker_state_machine.is_active.check:
1384 ; AMDGPU-NEXT: br i1 [[WORKER_IS_ACTIVE]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK:%.*]], label [[WORKER_STATE_MACHINE_DONE_BARRIER:%.*]]
1385 ; AMDGPU: worker_state_machine.parallel_region.check:
1386 ; AMDGPU-NEXT: [[WORKER_CHECK_PARALLEL_REGION:%.*]] = icmp eq ptr [[WORKER_WORK_FN]], @__omp_outlined__10_wrapper.ID
1387 ; AMDGPU-NEXT: br i1 [[WORKER_CHECK_PARALLEL_REGION]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK1:%.*]]
1388 ; AMDGPU: worker_state_machine.parallel_region.execute:
1389 ; AMDGPU-NEXT: call void @__omp_outlined__10_wrapper(i16 0, i32 [[TMP0]])
1390 ; AMDGPU-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END:%.*]]
1391 ; AMDGPU: worker_state_machine.parallel_region.check1:
1392 ; AMDGPU-NEXT: br i1 true, label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE2:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK3:%.*]]
1393 ; AMDGPU: worker_state_machine.parallel_region.execute2:
1394 ; AMDGPU-NEXT: call void @__omp_outlined__11_wrapper(i16 0, i32 [[TMP0]])
1395 ; AMDGPU-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]]
1396 ; AMDGPU: worker_state_machine.parallel_region.check3:
1397 ; AMDGPU-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]]
1398 ; AMDGPU: worker_state_machine.parallel_region.end:
1399 ; AMDGPU-NEXT: call void @__kmpc_kernel_end_parallel()
1400 ; AMDGPU-NEXT: br label [[WORKER_STATE_MACHINE_DONE_BARRIER]]
1401 ; AMDGPU: worker_state_machine.done.barrier:
1402 ; AMDGPU-NEXT: call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]])
1403 ; AMDGPU-NEXT: br label [[WORKER_STATE_MACHINE_BEGIN]]
1404 ; AMDGPU: thread.user_code.check:
1405 ; AMDGPU-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
1406 ; AMDGPU-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
1407 ; AMDGPU: user_code.entry:
1408 ; AMDGPU-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
1409 ; AMDGPU-NEXT: call void @__omp_outlined__9(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
1410 ; AMDGPU-NEXT: call void @__kmpc_target_deinit()
1411 ; AMDGPU-NEXT: ret void
1412 ; AMDGPU: worker.exit:
1413 ; AMDGPU-NEXT: ret void
1416 ; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
1417 ; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__9
1418 ; AMDGPU-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1419 ; AMDGPU-NEXT: entry:
1420 ; AMDGPU-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1421 ; AMDGPU-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
1422 ; AMDGPU-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
1423 ; AMDGPU-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__10, ptr @__omp_outlined__10_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
1424 ; AMDGPU-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
1425 ; AMDGPU-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__11, ptr @__omp_outlined__11_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
1426 ; AMDGPU-NEXT: ret void
1429 ; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
1430 ; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__10
1431 ; AMDGPU-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1432 ; AMDGPU-NEXT: entry:
1433 ; AMDGPU-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1434 ; AMDGPU-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1435 ; AMDGPU-NEXT: call void @p0() #[[ATTR11]]
1436 ; AMDGPU-NEXT: ret void
1439 ; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
1440 ; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__10_wrapper
1441 ; AMDGPU-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
1442 ; AMDGPU-NEXT: entry:
1443 ; AMDGPU-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
1444 ; AMDGPU-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
1445 ; AMDGPU-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1446 ; AMDGPU-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
1447 ; AMDGPU-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
1448 ; AMDGPU-NEXT: call void @__omp_outlined__10(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
1449 ; AMDGPU-NEXT: ret void
1452 ; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
1453 ; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__11
1454 ; AMDGPU-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1455 ; AMDGPU-NEXT: entry:
1456 ; AMDGPU-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1457 ; AMDGPU-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1458 ; AMDGPU-NEXT: call void @p1() #[[ATTR11]]
1459 ; AMDGPU-NEXT: ret void
1462 ; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
1463 ; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__11_wrapper
1464 ; AMDGPU-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
1465 ; AMDGPU-NEXT: entry:
1466 ; AMDGPU-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
1467 ; AMDGPU-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
1468 ; AMDGPU-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1469 ; AMDGPU-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
1470 ; AMDGPU-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
1471 ; AMDGPU-NEXT: call void @__omp_outlined__11(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
1472 ; AMDGPU-NEXT: ret void
1475 ; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
1476 ; AMDGPU-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_pure_l77
1477 ; AMDGPU-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
1478 ; AMDGPU-NEXT: entry:
1479 ; AMDGPU-NEXT: [[WORKER_WORK_FN_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
1480 ; AMDGPU-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1481 ; AMDGPU-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
1482 ; AMDGPU-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_pure_l77_kernel_environment, ptr [[DYN]])
1483 ; AMDGPU-NEXT: [[THREAD_IS_WORKER:%.*]] = icmp ne i32 [[TMP0]], -1
1484 ; AMDGPU-NEXT: br i1 [[THREAD_IS_WORKER]], label [[IS_WORKER_CHECK:%.*]], label [[THREAD_USER_CODE_CHECK:%.*]]
1485 ; AMDGPU: is_worker_check:
1486 ; AMDGPU-NEXT: [[BLOCK_HW_SIZE:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
1487 ; AMDGPU-NEXT: [[WARP_SIZE:%.*]] = call i32 @__kmpc_get_warp_size()
1488 ; AMDGPU-NEXT: [[BLOCK_SIZE:%.*]] = sub i32 [[BLOCK_HW_SIZE]], [[WARP_SIZE]]
1489 ; AMDGPU-NEXT: [[THREAD_IS_MAIN_OR_WORKER:%.*]] = icmp slt i32 [[TMP0]], [[BLOCK_SIZE]]
1490 ; AMDGPU-NEXT: br i1 [[THREAD_IS_MAIN_OR_WORKER]], label [[WORKER_STATE_MACHINE_BEGIN:%.*]], label [[WORKER_STATE_MACHINE_FINISHED:%.*]]
1491 ; AMDGPU: worker_state_machine.begin:
1492 ; AMDGPU-NEXT: call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]])
1493 ; AMDGPU-NEXT: [[WORKER_WORK_FN_ADDR_GENERIC:%.*]] = addrspacecast ptr addrspace(5) [[WORKER_WORK_FN_ADDR]] to ptr
1494 ; AMDGPU-NEXT: [[WORKER_IS_ACTIVE:%.*]] = call i1 @__kmpc_kernel_parallel(ptr [[WORKER_WORK_FN_ADDR_GENERIC]])
1495 ; AMDGPU-NEXT: [[WORKER_WORK_FN:%.*]] = load ptr, ptr [[WORKER_WORK_FN_ADDR_GENERIC]], align 8
1496 ; AMDGPU-NEXT: [[WORKER_IS_DONE:%.*]] = icmp eq ptr [[WORKER_WORK_FN]], null
1497 ; AMDGPU-NEXT: br i1 [[WORKER_IS_DONE]], label [[WORKER_STATE_MACHINE_FINISHED]], label [[WORKER_STATE_MACHINE_IS_ACTIVE_CHECK:%.*]]
1498 ; AMDGPU: worker_state_machine.finished:
1499 ; AMDGPU-NEXT: ret void
1500 ; AMDGPU: worker_state_machine.is_active.check:
1501 ; AMDGPU-NEXT: br i1 [[WORKER_IS_ACTIVE]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK:%.*]], label [[WORKER_STATE_MACHINE_DONE_BARRIER:%.*]]
1502 ; AMDGPU: worker_state_machine.parallel_region.check:
1503 ; AMDGPU-NEXT: [[WORKER_CHECK_PARALLEL_REGION:%.*]] = icmp eq ptr [[WORKER_WORK_FN]], @__omp_outlined__13_wrapper.ID
1504 ; AMDGPU-NEXT: br i1 [[WORKER_CHECK_PARALLEL_REGION]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK1:%.*]]
1505 ; AMDGPU: worker_state_machine.parallel_region.execute:
1506 ; AMDGPU-NEXT: call void @__omp_outlined__13_wrapper(i16 0, i32 [[TMP0]])
1507 ; AMDGPU-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END:%.*]]
1508 ; AMDGPU: worker_state_machine.parallel_region.check1:
1509 ; AMDGPU-NEXT: br i1 true, label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE2:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK3:%.*]]
1510 ; AMDGPU: worker_state_machine.parallel_region.execute2:
1511 ; AMDGPU-NEXT: call void @__omp_outlined__14_wrapper(i16 0, i32 [[TMP0]])
1512 ; AMDGPU-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]]
1513 ; AMDGPU: worker_state_machine.parallel_region.check3:
1514 ; AMDGPU-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]]
1515 ; AMDGPU: worker_state_machine.parallel_region.end:
1516 ; AMDGPU-NEXT: call void @__kmpc_kernel_end_parallel()
1517 ; AMDGPU-NEXT: br label [[WORKER_STATE_MACHINE_DONE_BARRIER]]
1518 ; AMDGPU: worker_state_machine.done.barrier:
1519 ; AMDGPU-NEXT: call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]])
1520 ; AMDGPU-NEXT: br label [[WORKER_STATE_MACHINE_BEGIN]]
1521 ; AMDGPU: thread.user_code.check:
1522 ; AMDGPU-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
1523 ; AMDGPU-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
1524 ; AMDGPU: user_code.entry:
1525 ; AMDGPU-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
1526 ; AMDGPU-NEXT: call void @__omp_outlined__12(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
1527 ; AMDGPU-NEXT: call void @__kmpc_target_deinit()
1528 ; AMDGPU-NEXT: ret void
1529 ; AMDGPU: worker.exit:
1530 ; AMDGPU-NEXT: ret void
1533 ; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
1534 ; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__12
1535 ; AMDGPU-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1536 ; AMDGPU-NEXT: entry:
1537 ; AMDGPU-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1538 ; AMDGPU-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
1539 ; AMDGPU-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
1540 ; AMDGPU-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
1541 ; AMDGPU-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__13, ptr @__omp_outlined__13_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
1542 ; AMDGPU-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__14, ptr @__omp_outlined__14_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
1543 ; AMDGPU-NEXT: ret void
1546 ; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
1547 ; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__13
1548 ; AMDGPU-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1549 ; AMDGPU-NEXT: entry:
1550 ; AMDGPU-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1551 ; AMDGPU-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1552 ; AMDGPU-NEXT: call void @p0() #[[ATTR11]]
1553 ; AMDGPU-NEXT: ret void
1556 ; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
1557 ; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__13_wrapper
1558 ; AMDGPU-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
1559 ; AMDGPU-NEXT: entry:
1560 ; AMDGPU-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
1561 ; AMDGPU-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
1562 ; AMDGPU-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1563 ; AMDGPU-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
1564 ; AMDGPU-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
1565 ; AMDGPU-NEXT: call void @__omp_outlined__13(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
1566 ; AMDGPU-NEXT: ret void
1569 ; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
1570 ; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__14
1571 ; AMDGPU-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1572 ; AMDGPU-NEXT: entry:
1573 ; AMDGPU-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1574 ; AMDGPU-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1575 ; AMDGPU-NEXT: call void @p1() #[[ATTR11]]
1576 ; AMDGPU-NEXT: ret void
1579 ; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
1580 ; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__14_wrapper
1581 ; AMDGPU-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
1582 ; AMDGPU-NEXT: entry:
1583 ; AMDGPU-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
1584 ; AMDGPU-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
1585 ; AMDGPU-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1586 ; AMDGPU-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
1587 ; AMDGPU-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
1588 ; AMDGPU-NEXT: call void @__omp_outlined__14(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
1589 ; AMDGPU-NEXT: ret void
1592 ; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
1593 ; AMDGPU-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92
1594 ; AMDGPU-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
1595 ; AMDGPU-NEXT: entry:
1596 ; AMDGPU-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1597 ; AMDGPU-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
1598 ; AMDGPU-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92_kernel_environment, ptr [[DYN]])
1599 ; AMDGPU-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
1600 ; AMDGPU-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
1601 ; AMDGPU: user_code.entry:
1602 ; AMDGPU-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
1603 ; AMDGPU-NEXT: call void @__omp_outlined__15(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
1604 ; AMDGPU-NEXT: call void @__kmpc_target_deinit()
1605 ; AMDGPU-NEXT: ret void
1606 ; AMDGPU: worker.exit:
1607 ; AMDGPU-NEXT: ret void
1610 ; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
1611 ; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__15
1612 ; AMDGPU-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1613 ; AMDGPU-NEXT: entry:
1614 ; AMDGPU-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1615 ; AMDGPU-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1616 ; AMDGPU-NEXT: [[CALL:%.*]] = call i32 @omp_get_thread_num() #[[ATTR9]]
1617 ; AMDGPU-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after.internalized(i32 [[CALL]]) #[[ATTR9]]
1618 ; AMDGPU-NEXT: ret void
1621 ; AMDGPU: Function Attrs: noinline nounwind
1622 ; AMDGPU-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after.internalized
1623 ; AMDGPU-SAME: (i32 [[A:%.*]]) #[[ATTR6]] {
1624 ; AMDGPU-NEXT: entry:
1625 ; AMDGPU-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
1626 ; AMDGPU-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
1627 ; AMDGPU-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
1628 ; AMDGPU-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP0]], 0
1629 ; AMDGPU-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
1631 ; AMDGPU-NEXT: br label [[RETURN:%.*]]
1633 ; AMDGPU-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4
1634 ; AMDGPU-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP1]], 1
1635 ; AMDGPU-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after.internalized(i32 [[SUB]]) #[[ATTR9]]
1636 ; AMDGPU-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after_after.internalized() #[[ATTR9]]
1637 ; AMDGPU-NEXT: br label [[RETURN]]
1639 ; AMDGPU-NEXT: ret void
1642 ; AMDGPU: Function Attrs: convergent noinline nounwind
1643 ; AMDGPU-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after
1644 ; AMDGPU-SAME: (i32 [[A:%.*]]) #[[ATTR1]] {
1645 ; AMDGPU-NEXT: entry:
1646 ; AMDGPU-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
1647 ; AMDGPU-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
1648 ; AMDGPU-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
1649 ; AMDGPU-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP0]], 0
1650 ; AMDGPU-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
1652 ; AMDGPU-NEXT: br label [[RETURN:%.*]]
1654 ; AMDGPU-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4
1655 ; AMDGPU-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP1]], 1
1656 ; AMDGPU-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after(i32 [[SUB]]) #[[ATTR11]]
1657 ; AMDGPU-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after_after() #[[ATTR11]]
1658 ; AMDGPU-NEXT: br label [[RETURN]]
1660 ; AMDGPU-NEXT: ret void
1663 ; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
1664 ; AMDGPU-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112
1665 ; AMDGPU-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
1666 ; AMDGPU-NEXT: entry:
1667 ; AMDGPU-NEXT: [[WORKER_WORK_FN_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
1668 ; AMDGPU-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1669 ; AMDGPU-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
1670 ; AMDGPU-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112_kernel_environment, ptr [[DYN]])
1671 ; AMDGPU-NEXT: [[THREAD_IS_WORKER:%.*]] = icmp ne i32 [[TMP0]], -1
1672 ; AMDGPU-NEXT: br i1 [[THREAD_IS_WORKER]], label [[IS_WORKER_CHECK:%.*]], label [[THREAD_USER_CODE_CHECK:%.*]]
1673 ; AMDGPU: is_worker_check:
1674 ; AMDGPU-NEXT: [[BLOCK_HW_SIZE:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
1675 ; AMDGPU-NEXT: [[WARP_SIZE:%.*]] = call i32 @__kmpc_get_warp_size()
1676 ; AMDGPU-NEXT: [[BLOCK_SIZE:%.*]] = sub i32 [[BLOCK_HW_SIZE]], [[WARP_SIZE]]
1677 ; AMDGPU-NEXT: [[THREAD_IS_MAIN_OR_WORKER:%.*]] = icmp slt i32 [[TMP0]], [[BLOCK_SIZE]]
1678 ; AMDGPU-NEXT: br i1 [[THREAD_IS_MAIN_OR_WORKER]], label [[WORKER_STATE_MACHINE_BEGIN:%.*]], label [[WORKER_STATE_MACHINE_FINISHED:%.*]]
1679 ; AMDGPU: worker_state_machine.begin:
1680 ; AMDGPU-NEXT: call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]])
1681 ; AMDGPU-NEXT: [[WORKER_WORK_FN_ADDR_GENERIC:%.*]] = addrspacecast ptr addrspace(5) [[WORKER_WORK_FN_ADDR]] to ptr
1682 ; AMDGPU-NEXT: [[WORKER_IS_ACTIVE:%.*]] = call i1 @__kmpc_kernel_parallel(ptr [[WORKER_WORK_FN_ADDR_GENERIC]])
1683 ; AMDGPU-NEXT: [[WORKER_WORK_FN:%.*]] = load ptr, ptr [[WORKER_WORK_FN_ADDR_GENERIC]], align 8
1684 ; AMDGPU-NEXT: [[WORKER_IS_DONE:%.*]] = icmp eq ptr [[WORKER_WORK_FN]], null
1685 ; AMDGPU-NEXT: br i1 [[WORKER_IS_DONE]], label [[WORKER_STATE_MACHINE_FINISHED]], label [[WORKER_STATE_MACHINE_IS_ACTIVE_CHECK:%.*]]
1686 ; AMDGPU: worker_state_machine.finished:
1687 ; AMDGPU-NEXT: ret void
1688 ; AMDGPU: worker_state_machine.is_active.check:
1689 ; AMDGPU-NEXT: br i1 [[WORKER_IS_ACTIVE]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_FALLBACK_EXECUTE:%.*]], label [[WORKER_STATE_MACHINE_DONE_BARRIER:%.*]]
1690 ; AMDGPU: worker_state_machine.parallel_region.fallback.execute:
1691 ; AMDGPU-NEXT: call void [[WORKER_WORK_FN]](i16 0, i32 [[TMP0]])
1692 ; AMDGPU-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END:%.*]]
1693 ; AMDGPU: worker_state_machine.parallel_region.end:
1694 ; AMDGPU-NEXT: call void @__kmpc_kernel_end_parallel()
1695 ; AMDGPU-NEXT: br label [[WORKER_STATE_MACHINE_DONE_BARRIER]]
1696 ; AMDGPU: worker_state_machine.done.barrier:
1697 ; AMDGPU-NEXT: call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]])
1698 ; AMDGPU-NEXT: br label [[WORKER_STATE_MACHINE_BEGIN]]
1699 ; AMDGPU: thread.user_code.check:
1700 ; AMDGPU-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
1701 ; AMDGPU-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
1702 ; AMDGPU: user_code.entry:
1703 ; AMDGPU-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
1704 ; AMDGPU-NEXT: call void @__omp_outlined__16(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
1705 ; AMDGPU-NEXT: call void @__kmpc_target_deinit()
1706 ; AMDGPU-NEXT: ret void
1707 ; AMDGPU: worker.exit:
1708 ; AMDGPU-NEXT: ret void
1711 ; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
1712 ; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__16
1713 ; AMDGPU-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1714 ; AMDGPU-NEXT: entry:
1715 ; AMDGPU-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1716 ; AMDGPU-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1717 ; AMDGPU-NEXT: call void @weak_callee_empty() #[[ATTR9]]
1718 ; AMDGPU-NEXT: ret void
1721 ; AMDGPU: Function Attrs: convergent noinline nounwind
1722 ; AMDGPU-LABEL: define {{[^@]+}}@weak_callee_empty
1723 ; AMDGPU-SAME: () #[[ATTR1]] {
1724 ; AMDGPU-NEXT: entry:
1725 ; AMDGPU-NEXT: ret void
1728 ; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
1729 ; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__17
1730 ; AMDGPU-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1731 ; AMDGPU-NEXT: entry:
1732 ; AMDGPU-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1733 ; AMDGPU-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1734 ; AMDGPU-NEXT: call void @p0() #[[ATTR11]]
1735 ; AMDGPU-NEXT: ret void
1738 ; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
1739 ; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__17_wrapper
1740 ; AMDGPU-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
1741 ; AMDGPU-NEXT: entry:
1742 ; AMDGPU-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
1743 ; AMDGPU-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
1744 ; AMDGPU-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1745 ; AMDGPU-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
1746 ; AMDGPU-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
1747 ; AMDGPU-NEXT: call void @__omp_outlined__17(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
1748 ; AMDGPU-NEXT: ret void
1751 ; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
1752 ; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__18
1753 ; AMDGPU-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1754 ; AMDGPU-NEXT: entry:
1755 ; AMDGPU-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1756 ; AMDGPU-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1757 ; AMDGPU-NEXT: call void @p0() #[[ATTR11]]
1758 ; AMDGPU-NEXT: ret void
1761 ; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
1762 ; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__18_wrapper
1763 ; AMDGPU-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
1764 ; AMDGPU-NEXT: entry:
1765 ; AMDGPU-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
1766 ; AMDGPU-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
1767 ; AMDGPU-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1768 ; AMDGPU-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
1769 ; AMDGPU-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
1770 ; AMDGPU-NEXT: call void @__omp_outlined__18(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
1771 ; AMDGPU-NEXT: ret void
1774 ; AMDGPU: Function Attrs: noinline nounwind
1775 ; AMDGPU-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after_after.internalized
1776 ; AMDGPU-SAME: () #[[ATTR6]] {
1777 ; AMDGPU-NEXT: entry:
1778 ; AMDGPU-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
1779 ; AMDGPU-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]]) #[[ATTR3]]
1780 ; AMDGPU-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__19, ptr @__omp_outlined__19_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
1781 ; AMDGPU-NEXT: ret void
1784 ; AMDGPU: Function Attrs: convergent noinline nounwind
1785 ; AMDGPU-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after_after
1786 ; AMDGPU-SAME: () #[[ATTR1]] {
1787 ; AMDGPU-NEXT: entry:
1788 ; AMDGPU-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
1789 ; AMDGPU-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]])
1790 ; AMDGPU-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__19, ptr @__omp_outlined__19_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
1791 ; AMDGPU-NEXT: ret void
1794 ; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
1795 ; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__19
1796 ; AMDGPU-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1797 ; AMDGPU-NEXT: entry:
1798 ; AMDGPU-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1799 ; AMDGPU-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1800 ; AMDGPU-NEXT: call void @p0() #[[ATTR11]]
1801 ; AMDGPU-NEXT: ret void
1804 ; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
1805 ; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__19_wrapper
1806 ; AMDGPU-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
1807 ; AMDGPU-NEXT: entry:
1808 ; AMDGPU-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
1809 ; AMDGPU-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
1810 ; AMDGPU-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1811 ; AMDGPU-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
1812 ; AMDGPU-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
1813 ; AMDGPU-NEXT: call void @__omp_outlined__19(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
1814 ; AMDGPU-NEXT: ret void
1817 ; NVPTX: Function Attrs: convergent noinline norecurse nounwind
1818 ; NVPTX-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_no_state_machine_needed_l14
1819 ; NVPTX-SAME: (ptr [[DYN:%.*]]) #[[ATTR0:[0-9]+]] {
1820 ; NVPTX-NEXT: entry:
1821 ; NVPTX-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1822 ; NVPTX-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
1823 ; NVPTX-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_no_state_machine_needed_l14_kernel_environment, ptr [[DYN]])
1824 ; NVPTX-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
1825 ; NVPTX-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
1826 ; NVPTX: user_code.entry:
1827 ; NVPTX-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3:[0-9]+]]
1828 ; NVPTX-NEXT: call void @__omp_outlined__(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
1829 ; NVPTX-NEXT: call void @__kmpc_target_deinit()
1830 ; NVPTX-NEXT: ret void
1831 ; NVPTX: worker.exit:
1832 ; NVPTX-NEXT: ret void
1835 ; NVPTX-LABEL: define {{[^@]+}}@__kmpc_target_init
1836 ; NVPTX-SAME: (ptr [[TMP0:%.*]], ptr [[TMP1:%.*]]) {
1837 ; NVPTX-NEXT: ret i32 0
1840 ; NVPTX: Function Attrs: convergent noinline norecurse nounwind
1841 ; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__
1842 ; NVPTX-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1843 ; NVPTX-NEXT: entry:
1844 ; NVPTX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1845 ; NVPTX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1846 ; NVPTX-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR9:[0-9]+]]
1847 ; NVPTX-NEXT: call void @unknown_no_openmp() #[[ATTR10:[0-9]+]]
1848 ; NVPTX-NEXT: ret void
1851 ; NVPTX: Function Attrs: convergent noinline nounwind
1852 ; NVPTX-LABEL: define {{[^@]+}}@no_parallel_region_in_here.internalized
1853 ; NVPTX-SAME: () #[[ATTR1:[0-9]+]] {
1854 ; NVPTX-NEXT: entry:
1855 ; NVPTX-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]]) #[[ATTR3]]
1856 ; NVPTX-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_single(ptr @[[GLOB2]], i32 [[TMP0]]) #[[ATTR3]]
1857 ; NVPTX-NEXT: [[TMP2:%.*]] = icmp ne i32 [[TMP1]], 0
1858 ; NVPTX-NEXT: br i1 [[TMP2]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
1859 ; NVPTX: omp_if.then:
1860 ; NVPTX-NEXT: store i32 0, ptr @G, align 4
1861 ; NVPTX-NEXT: call void @__kmpc_end_single(ptr @[[GLOB2]], i32 [[TMP0]]) #[[ATTR3]]
1862 ; NVPTX-NEXT: br label [[OMP_IF_END]]
1863 ; NVPTX: omp_if.end:
1864 ; NVPTX-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3]], i32 [[TMP0]]) #[[ATTR3]]
1865 ; NVPTX-NEXT: ret void
1868 ; NVPTX: Function Attrs: convergent noinline nounwind
1869 ; NVPTX-LABEL: define {{[^@]+}}@no_parallel_region_in_here
1870 ; NVPTX-SAME: () #[[ATTR1]] {
1871 ; NVPTX-NEXT: entry:
1872 ; NVPTX-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]])
1873 ; NVPTX-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_single(ptr @[[GLOB2]], i32 [[TMP0]])
1874 ; NVPTX-NEXT: [[TMP2:%.*]] = icmp ne i32 [[TMP1]], 0
1875 ; NVPTX-NEXT: br i1 [[TMP2]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
1876 ; NVPTX: omp_if.then:
1877 ; NVPTX-NEXT: store i32 0, ptr @G, align 4
1878 ; NVPTX-NEXT: call void @__kmpc_end_single(ptr @[[GLOB2]], i32 [[TMP0]])
1879 ; NVPTX-NEXT: br label [[OMP_IF_END]]
1880 ; NVPTX: omp_if.end:
1881 ; NVPTX-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3]], i32 [[TMP0]])
1882 ; NVPTX-NEXT: ret void
1885 ; NVPTX: Function Attrs: convergent noinline norecurse nounwind
1886 ; NVPTX-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_l22
1887 ; NVPTX-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
1888 ; NVPTX-NEXT: entry:
1889 ; NVPTX-NEXT: [[WORKER_WORK_FN_ADDR:%.*]] = alloca ptr, align 8
1890 ; NVPTX-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1891 ; NVPTX-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
1892 ; NVPTX-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_l22_kernel_environment, ptr [[DYN]])
1893 ; NVPTX-NEXT: [[THREAD_IS_WORKER:%.*]] = icmp ne i32 [[TMP0]], -1
1894 ; NVPTX-NEXT: br i1 [[THREAD_IS_WORKER]], label [[IS_WORKER_CHECK:%.*]], label [[THREAD_USER_CODE_CHECK:%.*]]
1895 ; NVPTX: is_worker_check:
1896 ; NVPTX-NEXT: [[BLOCK_HW_SIZE:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
1897 ; NVPTX-NEXT: [[WARP_SIZE:%.*]] = call i32 @__kmpc_get_warp_size()
1898 ; NVPTX-NEXT: [[BLOCK_SIZE:%.*]] = sub i32 [[BLOCK_HW_SIZE]], [[WARP_SIZE]]
1899 ; NVPTX-NEXT: [[THREAD_IS_MAIN_OR_WORKER:%.*]] = icmp slt i32 [[TMP0]], [[BLOCK_SIZE]]
1900 ; NVPTX-NEXT: br i1 [[THREAD_IS_MAIN_OR_WORKER]], label [[WORKER_STATE_MACHINE_BEGIN:%.*]], label [[WORKER_STATE_MACHINE_FINISHED:%.*]]
1901 ; NVPTX: worker_state_machine.begin:
1902 ; NVPTX-NEXT: call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]])
1903 ; NVPTX-NEXT: [[WORKER_IS_ACTIVE:%.*]] = call i1 @__kmpc_kernel_parallel(ptr [[WORKER_WORK_FN_ADDR]])
1904 ; NVPTX-NEXT: [[WORKER_WORK_FN:%.*]] = load ptr, ptr [[WORKER_WORK_FN_ADDR]], align 8
1905 ; NVPTX-NEXT: [[WORKER_IS_DONE:%.*]] = icmp eq ptr [[WORKER_WORK_FN]], null
1906 ; NVPTX-NEXT: br i1 [[WORKER_IS_DONE]], label [[WORKER_STATE_MACHINE_FINISHED]], label [[WORKER_STATE_MACHINE_IS_ACTIVE_CHECK:%.*]]
1907 ; NVPTX: worker_state_machine.finished:
1908 ; NVPTX-NEXT: ret void
1909 ; NVPTX: worker_state_machine.is_active.check:
1910 ; NVPTX-NEXT: br i1 [[WORKER_IS_ACTIVE]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK:%.*]], label [[WORKER_STATE_MACHINE_DONE_BARRIER:%.*]]
1911 ; NVPTX: worker_state_machine.parallel_region.check:
1912 ; NVPTX-NEXT: [[WORKER_CHECK_PARALLEL_REGION:%.*]] = icmp eq ptr [[WORKER_WORK_FN]], @__omp_outlined__2_wrapper.ID
1913 ; NVPTX-NEXT: br i1 [[WORKER_CHECK_PARALLEL_REGION]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK1:%.*]]
1914 ; NVPTX: worker_state_machine.parallel_region.execute:
1915 ; NVPTX-NEXT: call void @__omp_outlined__2_wrapper(i16 0, i32 [[TMP0]])
1916 ; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END:%.*]]
1917 ; NVPTX: worker_state_machine.parallel_region.check1:
1918 ; NVPTX-NEXT: br i1 true, label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE2:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK3:%.*]]
1919 ; NVPTX: worker_state_machine.parallel_region.execute2:
1920 ; NVPTX-NEXT: call void @__omp_outlined__3_wrapper(i16 0, i32 [[TMP0]])
1921 ; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]]
1922 ; NVPTX: worker_state_machine.parallel_region.check3:
1923 ; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]]
1924 ; NVPTX: worker_state_machine.parallel_region.end:
1925 ; NVPTX-NEXT: call void @__kmpc_kernel_end_parallel()
1926 ; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_DONE_BARRIER]]
1927 ; NVPTX: worker_state_machine.done.barrier:
1928 ; NVPTX-NEXT: call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]])
1929 ; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_BEGIN]]
1930 ; NVPTX: thread.user_code.check:
1931 ; NVPTX-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
1932 ; NVPTX-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
1933 ; NVPTX: user_code.entry:
1934 ; NVPTX-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
1935 ; NVPTX-NEXT: call void @__omp_outlined__1(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
1936 ; NVPTX-NEXT: call void @__kmpc_target_deinit()
1937 ; NVPTX-NEXT: ret void
1938 ; NVPTX: worker.exit:
1939 ; NVPTX-NEXT: ret void
1942 ; NVPTX: Function Attrs: convergent noinline norecurse nounwind
1943 ; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__1
1944 ; NVPTX-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1945 ; NVPTX-NEXT: entry:
1946 ; NVPTX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1947 ; NVPTX-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
1948 ; NVPTX-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
1949 ; NVPTX-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
1950 ; NVPTX-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__2, ptr @__omp_outlined__2_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
1951 ; NVPTX-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR9]]
1952 ; NVPTX-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__3, ptr @__omp_outlined__3_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
1953 ; NVPTX-NEXT: ret void
1956 ; NVPTX: Function Attrs: convergent noinline norecurse nounwind
1957 ; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__2
1958 ; NVPTX-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1959 ; NVPTX-NEXT: entry:
1960 ; NVPTX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1961 ; NVPTX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1962 ; NVPTX-NEXT: call void @p0() #[[ATTR11:[0-9]+]]
1963 ; NVPTX-NEXT: ret void
1966 ; NVPTX: Function Attrs: convergent noinline norecurse nounwind
1967 ; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__2_wrapper
1968 ; NVPTX-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
1969 ; NVPTX-NEXT: entry:
1970 ; NVPTX-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
1971 ; NVPTX-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
1972 ; NVPTX-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1973 ; NVPTX-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
1974 ; NVPTX-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
1975 ; NVPTX-NEXT: call void @__omp_outlined__2(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
1976 ; NVPTX-NEXT: ret void
1979 ; NVPTX: Function Attrs: convergent noinline norecurse nounwind
1980 ; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__3
1981 ; NVPTX-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1982 ; NVPTX-NEXT: entry:
1983 ; NVPTX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1984 ; NVPTX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1985 ; NVPTX-NEXT: call void @p1() #[[ATTR11]]
1986 ; NVPTX-NEXT: ret void
1989 ; NVPTX: Function Attrs: convergent noinline norecurse nounwind
1990 ; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__3_wrapper
1991 ; NVPTX-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
1992 ; NVPTX-NEXT: entry:
1993 ; NVPTX-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
1994 ; NVPTX-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
1995 ; NVPTX-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
1996 ; NVPTX-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
1997 ; NVPTX-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
1998 ; NVPTX-NEXT: call void @__omp_outlined__3(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
1999 ; NVPTX-NEXT: ret void
2002 ; NVPTX: Function Attrs: convergent noinline norecurse nounwind
2003 ; NVPTX-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39
2004 ; NVPTX-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
2005 ; NVPTX-NEXT: entry:
2006 ; NVPTX-NEXT: [[WORKER_WORK_FN_ADDR:%.*]] = alloca ptr, align 8
2007 ; NVPTX-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2008 ; NVPTX-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
2009 ; NVPTX-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39_kernel_environment, ptr [[DYN]])
2010 ; NVPTX-NEXT: [[THREAD_IS_WORKER:%.*]] = icmp ne i32 [[TMP0]], -1
2011 ; NVPTX-NEXT: br i1 [[THREAD_IS_WORKER]], label [[IS_WORKER_CHECK:%.*]], label [[THREAD_USER_CODE_CHECK:%.*]]
2012 ; NVPTX: is_worker_check:
2013 ; NVPTX-NEXT: [[BLOCK_HW_SIZE:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
2014 ; NVPTX-NEXT: [[WARP_SIZE:%.*]] = call i32 @__kmpc_get_warp_size()
2015 ; NVPTX-NEXT: [[BLOCK_SIZE:%.*]] = sub i32 [[BLOCK_HW_SIZE]], [[WARP_SIZE]]
2016 ; NVPTX-NEXT: [[THREAD_IS_MAIN_OR_WORKER:%.*]] = icmp slt i32 [[TMP0]], [[BLOCK_SIZE]]
2017 ; NVPTX-NEXT: br i1 [[THREAD_IS_MAIN_OR_WORKER]], label [[WORKER_STATE_MACHINE_BEGIN:%.*]], label [[WORKER_STATE_MACHINE_FINISHED:%.*]]
2018 ; NVPTX: worker_state_machine.begin:
2019 ; NVPTX-NEXT: call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]])
2020 ; NVPTX-NEXT: [[WORKER_IS_ACTIVE:%.*]] = call i1 @__kmpc_kernel_parallel(ptr [[WORKER_WORK_FN_ADDR]])
2021 ; NVPTX-NEXT: [[WORKER_WORK_FN:%.*]] = load ptr, ptr [[WORKER_WORK_FN_ADDR]], align 8
2022 ; NVPTX-NEXT: [[WORKER_IS_DONE:%.*]] = icmp eq ptr [[WORKER_WORK_FN]], null
2023 ; NVPTX-NEXT: br i1 [[WORKER_IS_DONE]], label [[WORKER_STATE_MACHINE_FINISHED]], label [[WORKER_STATE_MACHINE_IS_ACTIVE_CHECK:%.*]]
2024 ; NVPTX: worker_state_machine.finished:
2025 ; NVPTX-NEXT: ret void
2026 ; NVPTX: worker_state_machine.is_active.check:
2027 ; NVPTX-NEXT: br i1 [[WORKER_IS_ACTIVE]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK:%.*]], label [[WORKER_STATE_MACHINE_DONE_BARRIER:%.*]]
2028 ; NVPTX: worker_state_machine.parallel_region.check:
2029 ; NVPTX-NEXT: [[WORKER_CHECK_PARALLEL_REGION:%.*]] = icmp eq ptr [[WORKER_WORK_FN]], @__omp_outlined__17_wrapper
2030 ; NVPTX-NEXT: br i1 [[WORKER_CHECK_PARALLEL_REGION]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK1:%.*]]
2031 ; NVPTX: worker_state_machine.parallel_region.execute:
2032 ; NVPTX-NEXT: call void @__omp_outlined__17_wrapper(i16 0, i32 [[TMP0]])
2033 ; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END:%.*]]
2034 ; NVPTX: worker_state_machine.parallel_region.check1:
2035 ; NVPTX-NEXT: [[WORKER_CHECK_PARALLEL_REGION4:%.*]] = icmp eq ptr [[WORKER_WORK_FN]], @__omp_outlined__5_wrapper.ID
2036 ; NVPTX-NEXT: br i1 [[WORKER_CHECK_PARALLEL_REGION4]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE2:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK3:%.*]]
2037 ; NVPTX: worker_state_machine.parallel_region.execute2:
2038 ; NVPTX-NEXT: call void @__omp_outlined__5_wrapper(i16 0, i32 [[TMP0]])
2039 ; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]]
2040 ; NVPTX: worker_state_machine.parallel_region.check3:
2041 ; NVPTX-NEXT: br i1 true, label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE5:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK6:%.*]]
2042 ; NVPTX: worker_state_machine.parallel_region.execute5:
2043 ; NVPTX-NEXT: call void @__omp_outlined__18_wrapper(i16 0, i32 [[TMP0]])
2044 ; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]]
2045 ; NVPTX: worker_state_machine.parallel_region.check6:
2046 ; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]]
2047 ; NVPTX: worker_state_machine.parallel_region.end:
2048 ; NVPTX-NEXT: call void @__kmpc_kernel_end_parallel()
2049 ; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_DONE_BARRIER]]
2050 ; NVPTX: worker_state_machine.done.barrier:
2051 ; NVPTX-NEXT: call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]])
2052 ; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_BEGIN]]
2053 ; NVPTX: thread.user_code.check:
2054 ; NVPTX-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
2055 ; NVPTX-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
2056 ; NVPTX: user_code.entry:
2057 ; NVPTX-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
2058 ; NVPTX-NEXT: call void @__omp_outlined__4(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
2059 ; NVPTX-NEXT: call void @__kmpc_target_deinit()
2060 ; NVPTX-NEXT: ret void
2061 ; NVPTX: worker.exit:
2062 ; NVPTX-NEXT: ret void
2065 ; NVPTX: Function Attrs: convergent noinline norecurse nounwind
2066 ; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__4
2067 ; NVPTX-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2068 ; NVPTX-NEXT: entry:
2069 ; NVPTX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2070 ; NVPTX-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
2071 ; NVPTX-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
2072 ; NVPTX-NEXT: call void @simple_state_machine_interprocedural_before.internalized() #[[ATTR9]]
2073 ; NVPTX-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR9]]
2074 ; NVPTX-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__5, ptr @__omp_outlined__5_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
2075 ; NVPTX-NEXT: call void @simple_state_machine_interprocedural_after.internalized() #[[ATTR9]]
2076 ; NVPTX-NEXT: ret void
2079 ; NVPTX: Function Attrs: noinline nounwind
2080 ; NVPTX-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_before.internalized
2081 ; NVPTX-SAME: () #[[ATTR6:[0-9]+]] {
2082 ; NVPTX-NEXT: entry:
2083 ; NVPTX-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
2084 ; NVPTX-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]]) #[[ATTR3]]
2085 ; NVPTX-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__17, ptr @__omp_outlined__17_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
2086 ; NVPTX-NEXT: ret void
2089 ; NVPTX: Function Attrs: convergent noinline nounwind
2090 ; NVPTX-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_before
2091 ; NVPTX-SAME: () #[[ATTR1]] {
2092 ; NVPTX-NEXT: entry:
2093 ; NVPTX-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
2094 ; NVPTX-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]])
2095 ; NVPTX-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__17, ptr @__omp_outlined__17_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
2096 ; NVPTX-NEXT: ret void
2099 ; NVPTX: Function Attrs: convergent noinline norecurse nounwind
2100 ; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__5
2101 ; NVPTX-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2102 ; NVPTX-NEXT: entry:
2103 ; NVPTX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2104 ; NVPTX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2105 ; NVPTX-NEXT: call void @p1() #[[ATTR11]]
2106 ; NVPTX-NEXT: ret void
2109 ; NVPTX: Function Attrs: convergent noinline norecurse nounwind
2110 ; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__5_wrapper
2111 ; NVPTX-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
2112 ; NVPTX-NEXT: entry:
2113 ; NVPTX-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
2114 ; NVPTX-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
2115 ; NVPTX-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2116 ; NVPTX-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
2117 ; NVPTX-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
2118 ; NVPTX-NEXT: call void @__omp_outlined__5(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
2119 ; NVPTX-NEXT: ret void
2122 ; NVPTX: Function Attrs: noinline nounwind
2123 ; NVPTX-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_after.internalized
2124 ; NVPTX-SAME: () #[[ATTR6]] {
2125 ; NVPTX-NEXT: entry:
2126 ; NVPTX-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
2127 ; NVPTX-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]]) #[[ATTR3]]
2128 ; NVPTX-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__18, ptr @__omp_outlined__18_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
2129 ; NVPTX-NEXT: ret void
2132 ; NVPTX: Function Attrs: convergent noinline nounwind
2133 ; NVPTX-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_after
2134 ; NVPTX-SAME: () #[[ATTR1]] {
2135 ; NVPTX-NEXT: entry:
2136 ; NVPTX-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
2137 ; NVPTX-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]])
2138 ; NVPTX-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__18, ptr @__omp_outlined__18_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
2139 ; NVPTX-NEXT: ret void
2142 ; NVPTX: Function Attrs: convergent noinline norecurse nounwind
2143 ; NVPTX-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55
2144 ; NVPTX-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
2145 ; NVPTX-NEXT: entry:
2146 ; NVPTX-NEXT: [[WORKER_WORK_FN_ADDR:%.*]] = alloca ptr, align 8
2147 ; NVPTX-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2148 ; NVPTX-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
2149 ; NVPTX-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55_kernel_environment, ptr [[DYN]])
2150 ; NVPTX-NEXT: [[THREAD_IS_WORKER:%.*]] = icmp ne i32 [[TMP0]], -1
2151 ; NVPTX-NEXT: br i1 [[THREAD_IS_WORKER]], label [[IS_WORKER_CHECK:%.*]], label [[THREAD_USER_CODE_CHECK:%.*]]
2152 ; NVPTX: is_worker_check:
2153 ; NVPTX-NEXT: [[BLOCK_HW_SIZE:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
2154 ; NVPTX-NEXT: [[WARP_SIZE:%.*]] = call i32 @__kmpc_get_warp_size()
2155 ; NVPTX-NEXT: [[BLOCK_SIZE:%.*]] = sub i32 [[BLOCK_HW_SIZE]], [[WARP_SIZE]]
2156 ; NVPTX-NEXT: [[THREAD_IS_MAIN_OR_WORKER:%.*]] = icmp slt i32 [[TMP0]], [[BLOCK_SIZE]]
2157 ; NVPTX-NEXT: br i1 [[THREAD_IS_MAIN_OR_WORKER]], label [[WORKER_STATE_MACHINE_BEGIN:%.*]], label [[WORKER_STATE_MACHINE_FINISHED:%.*]]
2158 ; NVPTX: worker_state_machine.begin:
2159 ; NVPTX-NEXT: call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]])
2160 ; NVPTX-NEXT: [[WORKER_IS_ACTIVE:%.*]] = call i1 @__kmpc_kernel_parallel(ptr [[WORKER_WORK_FN_ADDR]])
2161 ; NVPTX-NEXT: [[WORKER_WORK_FN:%.*]] = load ptr, ptr [[WORKER_WORK_FN_ADDR]], align 8
2162 ; NVPTX-NEXT: [[WORKER_IS_DONE:%.*]] = icmp eq ptr [[WORKER_WORK_FN]], null
2163 ; NVPTX-NEXT: br i1 [[WORKER_IS_DONE]], label [[WORKER_STATE_MACHINE_FINISHED]], label [[WORKER_STATE_MACHINE_IS_ACTIVE_CHECK:%.*]]
2164 ; NVPTX: worker_state_machine.finished:
2165 ; NVPTX-NEXT: ret void
2166 ; NVPTX: worker_state_machine.is_active.check:
2167 ; NVPTX-NEXT: br i1 [[WORKER_IS_ACTIVE]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK:%.*]], label [[WORKER_STATE_MACHINE_DONE_BARRIER:%.*]]
2168 ; NVPTX: worker_state_machine.parallel_region.check:
2169 ; NVPTX-NEXT: [[WORKER_CHECK_PARALLEL_REGION:%.*]] = icmp eq ptr [[WORKER_WORK_FN]], @__omp_outlined__7_wrapper.ID
2170 ; NVPTX-NEXT: br i1 [[WORKER_CHECK_PARALLEL_REGION]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK1:%.*]]
2171 ; NVPTX: worker_state_machine.parallel_region.execute:
2172 ; NVPTX-NEXT: call void @__omp_outlined__7_wrapper(i16 0, i32 [[TMP0]])
2173 ; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END:%.*]]
2174 ; NVPTX: worker_state_machine.parallel_region.check1:
2175 ; NVPTX-NEXT: [[WORKER_CHECK_PARALLEL_REGION4:%.*]] = icmp eq ptr [[WORKER_WORK_FN]], @__omp_outlined__8_wrapper.ID
2176 ; NVPTX-NEXT: br i1 [[WORKER_CHECK_PARALLEL_REGION4]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE2:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_FALLBACK_EXECUTE:%.*]]
2177 ; NVPTX: worker_state_machine.parallel_region.execute2:
2178 ; NVPTX-NEXT: call void @__omp_outlined__8_wrapper(i16 0, i32 [[TMP0]])
2179 ; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]]
2180 ; NVPTX: worker_state_machine.parallel_region.fallback.execute:
2181 ; NVPTX-NEXT: call void [[WORKER_WORK_FN]](i16 0, i32 [[TMP0]])
2182 ; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]]
2183 ; NVPTX: worker_state_machine.parallel_region.end:
2184 ; NVPTX-NEXT: call void @__kmpc_kernel_end_parallel()
2185 ; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_DONE_BARRIER]]
2186 ; NVPTX: worker_state_machine.done.barrier:
2187 ; NVPTX-NEXT: call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]])
2188 ; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_BEGIN]]
2189 ; NVPTX: thread.user_code.check:
2190 ; NVPTX-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
2191 ; NVPTX-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
2192 ; NVPTX: user_code.entry:
2193 ; NVPTX-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
2194 ; NVPTX-NEXT: call void @__omp_outlined__6(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
2195 ; NVPTX-NEXT: call void @__kmpc_target_deinit()
2196 ; NVPTX-NEXT: ret void
2197 ; NVPTX: worker.exit:
2198 ; NVPTX-NEXT: ret void
2201 ; NVPTX: Function Attrs: convergent noinline norecurse nounwind
2202 ; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__6
2203 ; NVPTX-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2204 ; NVPTX-NEXT: entry:
2205 ; NVPTX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2206 ; NVPTX-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
2207 ; NVPTX-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
2208 ; NVPTX-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__7, ptr @__omp_outlined__7_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
2209 ; NVPTX-NEXT: [[CALL:%.*]] = call i32 @unknown() #[[ATTR11]]
2210 ; NVPTX-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__8, ptr @__omp_outlined__8_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
2211 ; NVPTX-NEXT: ret void
2214 ; NVPTX: Function Attrs: convergent noinline norecurse nounwind
2215 ; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__7
2216 ; NVPTX-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2217 ; NVPTX-NEXT: entry:
2218 ; NVPTX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2219 ; NVPTX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2220 ; NVPTX-NEXT: call void @p0() #[[ATTR11]]
2221 ; NVPTX-NEXT: ret void
2224 ; NVPTX: Function Attrs: convergent noinline norecurse nounwind
2225 ; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__7_wrapper
2226 ; NVPTX-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
2227 ; NVPTX-NEXT: entry:
2228 ; NVPTX-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
2229 ; NVPTX-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
2230 ; NVPTX-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2231 ; NVPTX-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
2232 ; NVPTX-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
2233 ; NVPTX-NEXT: call void @__omp_outlined__7(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
2234 ; NVPTX-NEXT: ret void
2237 ; NVPTX: Function Attrs: convergent noinline norecurse nounwind
2238 ; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__8
2239 ; NVPTX-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2240 ; NVPTX-NEXT: entry:
2241 ; NVPTX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2242 ; NVPTX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2243 ; NVPTX-NEXT: call void @p1() #[[ATTR11]]
2244 ; NVPTX-NEXT: ret void
2247 ; NVPTX: Function Attrs: convergent noinline norecurse nounwind
2248 ; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__8_wrapper
2249 ; NVPTX-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
2250 ; NVPTX-NEXT: entry:
2251 ; NVPTX-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
2252 ; NVPTX-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
2253 ; NVPTX-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2254 ; NVPTX-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
2255 ; NVPTX-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
2256 ; NVPTX-NEXT: call void @__omp_outlined__8(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
2257 ; NVPTX-NEXT: ret void
2260 ; NVPTX: Function Attrs: convergent noinline norecurse nounwind
2261 ; NVPTX-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66
2262 ; NVPTX-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
2263 ; NVPTX-NEXT: entry:
2264 ; NVPTX-NEXT: [[WORKER_WORK_FN_ADDR:%.*]] = alloca ptr, align 8
2265 ; NVPTX-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2266 ; NVPTX-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
2267 ; NVPTX-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66_kernel_environment, ptr [[DYN]])
2268 ; NVPTX-NEXT: [[THREAD_IS_WORKER:%.*]] = icmp ne i32 [[TMP0]], -1
2269 ; NVPTX-NEXT: br i1 [[THREAD_IS_WORKER]], label [[IS_WORKER_CHECK:%.*]], label [[THREAD_USER_CODE_CHECK:%.*]]
2270 ; NVPTX: is_worker_check:
2271 ; NVPTX-NEXT: [[BLOCK_HW_SIZE:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
2272 ; NVPTX-NEXT: [[WARP_SIZE:%.*]] = call i32 @__kmpc_get_warp_size()
2273 ; NVPTX-NEXT: [[BLOCK_SIZE:%.*]] = sub i32 [[BLOCK_HW_SIZE]], [[WARP_SIZE]]
2274 ; NVPTX-NEXT: [[THREAD_IS_MAIN_OR_WORKER:%.*]] = icmp slt i32 [[TMP0]], [[BLOCK_SIZE]]
2275 ; NVPTX-NEXT: br i1 [[THREAD_IS_MAIN_OR_WORKER]], label [[WORKER_STATE_MACHINE_BEGIN:%.*]], label [[WORKER_STATE_MACHINE_FINISHED:%.*]]
2276 ; NVPTX: worker_state_machine.begin:
2277 ; NVPTX-NEXT: call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]])
2278 ; NVPTX-NEXT: [[WORKER_IS_ACTIVE:%.*]] = call i1 @__kmpc_kernel_parallel(ptr [[WORKER_WORK_FN_ADDR]])
2279 ; NVPTX-NEXT: [[WORKER_WORK_FN:%.*]] = load ptr, ptr [[WORKER_WORK_FN_ADDR]], align 8
2280 ; NVPTX-NEXT: [[WORKER_IS_DONE:%.*]] = icmp eq ptr [[WORKER_WORK_FN]], null
2281 ; NVPTX-NEXT: br i1 [[WORKER_IS_DONE]], label [[WORKER_STATE_MACHINE_FINISHED]], label [[WORKER_STATE_MACHINE_IS_ACTIVE_CHECK:%.*]]
2282 ; NVPTX: worker_state_machine.finished:
2283 ; NVPTX-NEXT: ret void
2284 ; NVPTX: worker_state_machine.is_active.check:
2285 ; NVPTX-NEXT: br i1 [[WORKER_IS_ACTIVE]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK:%.*]], label [[WORKER_STATE_MACHINE_DONE_BARRIER:%.*]]
2286 ; NVPTX: worker_state_machine.parallel_region.check:
2287 ; NVPTX-NEXT: [[WORKER_CHECK_PARALLEL_REGION:%.*]] = icmp eq ptr [[WORKER_WORK_FN]], @__omp_outlined__10_wrapper.ID
2288 ; NVPTX-NEXT: br i1 [[WORKER_CHECK_PARALLEL_REGION]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK1:%.*]]
2289 ; NVPTX: worker_state_machine.parallel_region.execute:
2290 ; NVPTX-NEXT: call void @__omp_outlined__10_wrapper(i16 0, i32 [[TMP0]])
2291 ; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END:%.*]]
2292 ; NVPTX: worker_state_machine.parallel_region.check1:
2293 ; NVPTX-NEXT: br i1 true, label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE2:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK3:%.*]]
2294 ; NVPTX: worker_state_machine.parallel_region.execute2:
2295 ; NVPTX-NEXT: call void @__omp_outlined__11_wrapper(i16 0, i32 [[TMP0]])
2296 ; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]]
2297 ; NVPTX: worker_state_machine.parallel_region.check3:
2298 ; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]]
2299 ; NVPTX: worker_state_machine.parallel_region.end:
2300 ; NVPTX-NEXT: call void @__kmpc_kernel_end_parallel()
2301 ; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_DONE_BARRIER]]
2302 ; NVPTX: worker_state_machine.done.barrier:
2303 ; NVPTX-NEXT: call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]])
2304 ; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_BEGIN]]
2305 ; NVPTX: thread.user_code.check:
2306 ; NVPTX-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
2307 ; NVPTX-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
2308 ; NVPTX: user_code.entry:
2309 ; NVPTX-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
2310 ; NVPTX-NEXT: call void @__omp_outlined__9(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
2311 ; NVPTX-NEXT: call void @__kmpc_target_deinit()
2312 ; NVPTX-NEXT: ret void
2313 ; NVPTX: worker.exit:
2314 ; NVPTX-NEXT: ret void
2317 ; NVPTX: Function Attrs: convergent noinline norecurse nounwind
2318 ; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__9
2319 ; NVPTX-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2320 ; NVPTX-NEXT: entry:
2321 ; NVPTX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2322 ; NVPTX-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
2323 ; NVPTX-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
2324 ; NVPTX-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__10, ptr @__omp_outlined__10_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
2325 ; NVPTX-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
2326 ; NVPTX-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__11, ptr @__omp_outlined__11_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
2327 ; NVPTX-NEXT: ret void
2330 ; NVPTX: Function Attrs: convergent noinline norecurse nounwind
2331 ; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__10
2332 ; NVPTX-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2333 ; NVPTX-NEXT: entry:
2334 ; NVPTX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2335 ; NVPTX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2336 ; NVPTX-NEXT: call void @p0() #[[ATTR11]]
2337 ; NVPTX-NEXT: ret void
2340 ; NVPTX: Function Attrs: convergent noinline norecurse nounwind
2341 ; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__10_wrapper
2342 ; NVPTX-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
2343 ; NVPTX-NEXT: entry:
2344 ; NVPTX-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
2345 ; NVPTX-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
2346 ; NVPTX-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2347 ; NVPTX-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
2348 ; NVPTX-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
2349 ; NVPTX-NEXT: call void @__omp_outlined__10(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
2350 ; NVPTX-NEXT: ret void
2353 ; NVPTX: Function Attrs: convergent noinline norecurse nounwind
2354 ; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__11
2355 ; NVPTX-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2356 ; NVPTX-NEXT: entry:
2357 ; NVPTX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2358 ; NVPTX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2359 ; NVPTX-NEXT: call void @p1() #[[ATTR11]]
2360 ; NVPTX-NEXT: ret void
2363 ; NVPTX: Function Attrs: convergent noinline norecurse nounwind
2364 ; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__11_wrapper
2365 ; NVPTX-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
2366 ; NVPTX-NEXT: entry:
2367 ; NVPTX-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
2368 ; NVPTX-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
2369 ; NVPTX-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2370 ; NVPTX-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
2371 ; NVPTX-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
2372 ; NVPTX-NEXT: call void @__omp_outlined__11(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
2373 ; NVPTX-NEXT: ret void
2376 ; NVPTX: Function Attrs: convergent noinline norecurse nounwind
2377 ; NVPTX-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_pure_l77
2378 ; NVPTX-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
2379 ; NVPTX-NEXT: entry:
2380 ; NVPTX-NEXT: [[WORKER_WORK_FN_ADDR:%.*]] = alloca ptr, align 8
2381 ; NVPTX-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2382 ; NVPTX-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
2383 ; NVPTX-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_pure_l77_kernel_environment, ptr [[DYN]])
2384 ; NVPTX-NEXT: [[THREAD_IS_WORKER:%.*]] = icmp ne i32 [[TMP0]], -1
2385 ; NVPTX-NEXT: br i1 [[THREAD_IS_WORKER]], label [[IS_WORKER_CHECK:%.*]], label [[THREAD_USER_CODE_CHECK:%.*]]
2386 ; NVPTX: is_worker_check:
2387 ; NVPTX-NEXT: [[BLOCK_HW_SIZE:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
2388 ; NVPTX-NEXT: [[WARP_SIZE:%.*]] = call i32 @__kmpc_get_warp_size()
2389 ; NVPTX-NEXT: [[BLOCK_SIZE:%.*]] = sub i32 [[BLOCK_HW_SIZE]], [[WARP_SIZE]]
2390 ; NVPTX-NEXT: [[THREAD_IS_MAIN_OR_WORKER:%.*]] = icmp slt i32 [[TMP0]], [[BLOCK_SIZE]]
2391 ; NVPTX-NEXT: br i1 [[THREAD_IS_MAIN_OR_WORKER]], label [[WORKER_STATE_MACHINE_BEGIN:%.*]], label [[WORKER_STATE_MACHINE_FINISHED:%.*]]
2392 ; NVPTX: worker_state_machine.begin:
2393 ; NVPTX-NEXT: call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]])
2394 ; NVPTX-NEXT: [[WORKER_IS_ACTIVE:%.*]] = call i1 @__kmpc_kernel_parallel(ptr [[WORKER_WORK_FN_ADDR]])
2395 ; NVPTX-NEXT: [[WORKER_WORK_FN:%.*]] = load ptr, ptr [[WORKER_WORK_FN_ADDR]], align 8
2396 ; NVPTX-NEXT: [[WORKER_IS_DONE:%.*]] = icmp eq ptr [[WORKER_WORK_FN]], null
2397 ; NVPTX-NEXT: br i1 [[WORKER_IS_DONE]], label [[WORKER_STATE_MACHINE_FINISHED]], label [[WORKER_STATE_MACHINE_IS_ACTIVE_CHECK:%.*]]
2398 ; NVPTX: worker_state_machine.finished:
2399 ; NVPTX-NEXT: ret void
2400 ; NVPTX: worker_state_machine.is_active.check:
2401 ; NVPTX-NEXT: br i1 [[WORKER_IS_ACTIVE]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK:%.*]], label [[WORKER_STATE_MACHINE_DONE_BARRIER:%.*]]
2402 ; NVPTX: worker_state_machine.parallel_region.check:
2403 ; NVPTX-NEXT: [[WORKER_CHECK_PARALLEL_REGION:%.*]] = icmp eq ptr [[WORKER_WORK_FN]], @__omp_outlined__13_wrapper.ID
2404 ; NVPTX-NEXT: br i1 [[WORKER_CHECK_PARALLEL_REGION]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK1:%.*]]
2405 ; NVPTX: worker_state_machine.parallel_region.execute:
2406 ; NVPTX-NEXT: call void @__omp_outlined__13_wrapper(i16 0, i32 [[TMP0]])
2407 ; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END:%.*]]
2408 ; NVPTX: worker_state_machine.parallel_region.check1:
2409 ; NVPTX-NEXT: br i1 true, label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE2:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK3:%.*]]
2410 ; NVPTX: worker_state_machine.parallel_region.execute2:
2411 ; NVPTX-NEXT: call void @__omp_outlined__14_wrapper(i16 0, i32 [[TMP0]])
2412 ; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]]
2413 ; NVPTX: worker_state_machine.parallel_region.check3:
2414 ; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]]
2415 ; NVPTX: worker_state_machine.parallel_region.end:
2416 ; NVPTX-NEXT: call void @__kmpc_kernel_end_parallel()
2417 ; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_DONE_BARRIER]]
2418 ; NVPTX: worker_state_machine.done.barrier:
2419 ; NVPTX-NEXT: call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]])
2420 ; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_BEGIN]]
2421 ; NVPTX: thread.user_code.check:
2422 ; NVPTX-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
2423 ; NVPTX-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
2424 ; NVPTX: user_code.entry:
2425 ; NVPTX-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
2426 ; NVPTX-NEXT: call void @__omp_outlined__12(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
2427 ; NVPTX-NEXT: call void @__kmpc_target_deinit()
2428 ; NVPTX-NEXT: ret void
2429 ; NVPTX: worker.exit:
2430 ; NVPTX-NEXT: ret void
2433 ; NVPTX: Function Attrs: convergent noinline norecurse nounwind
2434 ; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__12
2435 ; NVPTX-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2436 ; NVPTX-NEXT: entry:
2437 ; NVPTX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2438 ; NVPTX-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
2439 ; NVPTX-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
2440 ; NVPTX-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
2441 ; NVPTX-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__13, ptr @__omp_outlined__13_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
2442 ; NVPTX-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__14, ptr @__omp_outlined__14_wrapper.ID, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
2443 ; NVPTX-NEXT: ret void
2446 ; NVPTX: Function Attrs: convergent noinline norecurse nounwind
2447 ; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__13
2448 ; NVPTX-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2449 ; NVPTX-NEXT: entry:
2450 ; NVPTX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2451 ; NVPTX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2452 ; NVPTX-NEXT: call void @p0() #[[ATTR11]]
2453 ; NVPTX-NEXT: ret void
2456 ; NVPTX: Function Attrs: convergent noinline norecurse nounwind
2457 ; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__13_wrapper
2458 ; NVPTX-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
2459 ; NVPTX-NEXT: entry:
2460 ; NVPTX-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
2461 ; NVPTX-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
2462 ; NVPTX-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2463 ; NVPTX-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
2464 ; NVPTX-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
2465 ; NVPTX-NEXT: call void @__omp_outlined__13(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
2466 ; NVPTX-NEXT: ret void
2469 ; NVPTX: Function Attrs: convergent noinline norecurse nounwind
2470 ; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__14
2471 ; NVPTX-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2472 ; NVPTX-NEXT: entry:
2473 ; NVPTX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2474 ; NVPTX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2475 ; NVPTX-NEXT: call void @p1() #[[ATTR11]]
2476 ; NVPTX-NEXT: ret void
2479 ; NVPTX: Function Attrs: convergent noinline norecurse nounwind
2480 ; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__14_wrapper
2481 ; NVPTX-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
2482 ; NVPTX-NEXT: entry:
2483 ; NVPTX-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
2484 ; NVPTX-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
2485 ; NVPTX-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2486 ; NVPTX-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
2487 ; NVPTX-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
2488 ; NVPTX-NEXT: call void @__omp_outlined__14(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
2489 ; NVPTX-NEXT: ret void
2492 ; NVPTX: Function Attrs: convergent noinline norecurse nounwind
2493 ; NVPTX-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92
2494 ; NVPTX-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
2495 ; NVPTX-NEXT: entry:
2496 ; NVPTX-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2497 ; NVPTX-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
2498 ; NVPTX-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92_kernel_environment, ptr [[DYN]])
2499 ; NVPTX-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
2500 ; NVPTX-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
2501 ; NVPTX: user_code.entry:
2502 ; NVPTX-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
2503 ; NVPTX-NEXT: call void @__omp_outlined__15(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
2504 ; NVPTX-NEXT: call void @__kmpc_target_deinit()
2505 ; NVPTX-NEXT: ret void
2506 ; NVPTX: worker.exit:
2507 ; NVPTX-NEXT: ret void
2510 ; NVPTX: Function Attrs: convergent noinline norecurse nounwind
2511 ; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__15
2512 ; NVPTX-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2513 ; NVPTX-NEXT: entry:
2514 ; NVPTX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2515 ; NVPTX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2516 ; NVPTX-NEXT: [[CALL:%.*]] = call i32 @omp_get_thread_num() #[[ATTR9]]
2517 ; NVPTX-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after.internalized(i32 [[CALL]]) #[[ATTR9]]
2518 ; NVPTX-NEXT: ret void
2521 ; NVPTX: Function Attrs: noinline nounwind
2522 ; NVPTX-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after.internalized
2523 ; NVPTX-SAME: (i32 [[A:%.*]]) #[[ATTR6]] {
2524 ; NVPTX-NEXT: entry:
2525 ; NVPTX-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
2526 ; NVPTX-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
2527 ; NVPTX-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
2528 ; NVPTX-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP0]], 0
2529 ; NVPTX-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
2531 ; NVPTX-NEXT: br label [[RETURN:%.*]]
2533 ; NVPTX-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4
2534 ; NVPTX-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP1]], 1
2535 ; NVPTX-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after.internalized(i32 [[SUB]]) #[[ATTR9]]
2536 ; NVPTX-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after_after.internalized() #[[ATTR9]]
2537 ; NVPTX-NEXT: br label [[RETURN]]
2539 ; NVPTX-NEXT: ret void
2542 ; NVPTX: Function Attrs: convergent noinline nounwind
2543 ; NVPTX-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after
2544 ; NVPTX-SAME: (i32 [[A:%.*]]) #[[ATTR1]] {
2545 ; NVPTX-NEXT: entry:
2546 ; NVPTX-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
2547 ; NVPTX-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
2548 ; NVPTX-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
2549 ; NVPTX-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP0]], 0
2550 ; NVPTX-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
2552 ; NVPTX-NEXT: br label [[RETURN:%.*]]
2554 ; NVPTX-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4
2555 ; NVPTX-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP1]], 1
2556 ; NVPTX-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after(i32 [[SUB]]) #[[ATTR11]]
2557 ; NVPTX-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after_after() #[[ATTR11]]
2558 ; NVPTX-NEXT: br label [[RETURN]]
2560 ; NVPTX-NEXT: ret void
2563 ; NVPTX: Function Attrs: convergent noinline norecurse nounwind
2564 ; NVPTX-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112
2565 ; NVPTX-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
2566 ; NVPTX-NEXT: entry:
2567 ; NVPTX-NEXT: [[WORKER_WORK_FN_ADDR:%.*]] = alloca ptr, align 8
2568 ; NVPTX-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2569 ; NVPTX-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
2570 ; NVPTX-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112_kernel_environment, ptr [[DYN]])
2571 ; NVPTX-NEXT: [[THREAD_IS_WORKER:%.*]] = icmp ne i32 [[TMP0]], -1
2572 ; NVPTX-NEXT: br i1 [[THREAD_IS_WORKER]], label [[IS_WORKER_CHECK:%.*]], label [[THREAD_USER_CODE_CHECK:%.*]]
2573 ; NVPTX: is_worker_check:
2574 ; NVPTX-NEXT: [[BLOCK_HW_SIZE:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
2575 ; NVPTX-NEXT: [[WARP_SIZE:%.*]] = call i32 @__kmpc_get_warp_size()
2576 ; NVPTX-NEXT: [[BLOCK_SIZE:%.*]] = sub i32 [[BLOCK_HW_SIZE]], [[WARP_SIZE]]
2577 ; NVPTX-NEXT: [[THREAD_IS_MAIN_OR_WORKER:%.*]] = icmp slt i32 [[TMP0]], [[BLOCK_SIZE]]
2578 ; NVPTX-NEXT: br i1 [[THREAD_IS_MAIN_OR_WORKER]], label [[WORKER_STATE_MACHINE_BEGIN:%.*]], label [[WORKER_STATE_MACHINE_FINISHED:%.*]]
2579 ; NVPTX: worker_state_machine.begin:
2580 ; NVPTX-NEXT: call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]])
2581 ; NVPTX-NEXT: [[WORKER_IS_ACTIVE:%.*]] = call i1 @__kmpc_kernel_parallel(ptr [[WORKER_WORK_FN_ADDR]])
2582 ; NVPTX-NEXT: [[WORKER_WORK_FN:%.*]] = load ptr, ptr [[WORKER_WORK_FN_ADDR]], align 8
2583 ; NVPTX-NEXT: [[WORKER_IS_DONE:%.*]] = icmp eq ptr [[WORKER_WORK_FN]], null
2584 ; NVPTX-NEXT: br i1 [[WORKER_IS_DONE]], label [[WORKER_STATE_MACHINE_FINISHED]], label [[WORKER_STATE_MACHINE_IS_ACTIVE_CHECK:%.*]]
2585 ; NVPTX: worker_state_machine.finished:
2586 ; NVPTX-NEXT: ret void
2587 ; NVPTX: worker_state_machine.is_active.check:
2588 ; NVPTX-NEXT: br i1 [[WORKER_IS_ACTIVE]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_FALLBACK_EXECUTE:%.*]], label [[WORKER_STATE_MACHINE_DONE_BARRIER:%.*]]
2589 ; NVPTX: worker_state_machine.parallel_region.fallback.execute:
2590 ; NVPTX-NEXT: call void [[WORKER_WORK_FN]](i16 0, i32 [[TMP0]])
2591 ; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END:%.*]]
2592 ; NVPTX: worker_state_machine.parallel_region.end:
2593 ; NVPTX-NEXT: call void @__kmpc_kernel_end_parallel()
2594 ; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_DONE_BARRIER]]
2595 ; NVPTX: worker_state_machine.done.barrier:
2596 ; NVPTX-NEXT: call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]])
2597 ; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_BEGIN]]
2598 ; NVPTX: thread.user_code.check:
2599 ; NVPTX-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
2600 ; NVPTX-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
2601 ; NVPTX: user_code.entry:
2602 ; NVPTX-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
2603 ; NVPTX-NEXT: call void @__omp_outlined__16(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
2604 ; NVPTX-NEXT: call void @__kmpc_target_deinit()
2605 ; NVPTX-NEXT: ret void
2606 ; NVPTX: worker.exit:
2607 ; NVPTX-NEXT: ret void
2610 ; NVPTX: Function Attrs: convergent noinline norecurse nounwind
2611 ; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__16
2612 ; NVPTX-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2613 ; NVPTX-NEXT: entry:
2614 ; NVPTX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2615 ; NVPTX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2616 ; NVPTX-NEXT: call void @weak_callee_empty() #[[ATTR9]]
2617 ; NVPTX-NEXT: ret void
2620 ; NVPTX: Function Attrs: convergent noinline nounwind
2621 ; NVPTX-LABEL: define {{[^@]+}}@weak_callee_empty
2622 ; NVPTX-SAME: () #[[ATTR1]] {
2623 ; NVPTX-NEXT: entry:
2624 ; NVPTX-NEXT: ret void
2627 ; NVPTX: Function Attrs: convergent noinline norecurse nounwind
2628 ; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__17
2629 ; NVPTX-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2630 ; NVPTX-NEXT: entry:
2631 ; NVPTX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2632 ; NVPTX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2633 ; NVPTX-NEXT: call void @p0() #[[ATTR11]]
2634 ; NVPTX-NEXT: ret void
2637 ; NVPTX: Function Attrs: convergent noinline norecurse nounwind
2638 ; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__17_wrapper
2639 ; NVPTX-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
2640 ; NVPTX-NEXT: entry:
2641 ; NVPTX-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
2642 ; NVPTX-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
2643 ; NVPTX-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2644 ; NVPTX-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
2645 ; NVPTX-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
2646 ; NVPTX-NEXT: call void @__omp_outlined__17(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
2647 ; NVPTX-NEXT: ret void
2650 ; NVPTX: Function Attrs: convergent noinline norecurse nounwind
2651 ; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__18
2652 ; NVPTX-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2653 ; NVPTX-NEXT: entry:
2654 ; NVPTX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2655 ; NVPTX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2656 ; NVPTX-NEXT: call void @p0() #[[ATTR11]]
2657 ; NVPTX-NEXT: ret void
2660 ; NVPTX: Function Attrs: convergent noinline norecurse nounwind
2661 ; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__18_wrapper
2662 ; NVPTX-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
2663 ; NVPTX-NEXT: entry:
2664 ; NVPTX-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
2665 ; NVPTX-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
2666 ; NVPTX-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2667 ; NVPTX-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
2668 ; NVPTX-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
2669 ; NVPTX-NEXT: call void @__omp_outlined__18(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
2670 ; NVPTX-NEXT: ret void
2673 ; NVPTX: Function Attrs: noinline nounwind
2674 ; NVPTX-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after_after.internalized
2675 ; NVPTX-SAME: () #[[ATTR6]] {
2676 ; NVPTX-NEXT: entry:
2677 ; NVPTX-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
2678 ; NVPTX-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]]) #[[ATTR3]]
2679 ; NVPTX-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__19, ptr @__omp_outlined__19_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
2680 ; NVPTX-NEXT: ret void
2683 ; NVPTX: Function Attrs: convergent noinline nounwind
2684 ; NVPTX-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after_after
2685 ; NVPTX-SAME: () #[[ATTR1]] {
2686 ; NVPTX-NEXT: entry:
2687 ; NVPTX-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
2688 ; NVPTX-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]])
2689 ; NVPTX-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__19, ptr @__omp_outlined__19_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
2690 ; NVPTX-NEXT: ret void
2693 ; NVPTX: Function Attrs: convergent noinline norecurse nounwind
2694 ; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__19
2695 ; NVPTX-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2696 ; NVPTX-NEXT: entry:
2697 ; NVPTX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2698 ; NVPTX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2699 ; NVPTX-NEXT: call void @p0() #[[ATTR11]]
2700 ; NVPTX-NEXT: ret void
2703 ; NVPTX: Function Attrs: convergent noinline norecurse nounwind
2704 ; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__19_wrapper
2705 ; NVPTX-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
2706 ; NVPTX-NEXT: entry:
2707 ; NVPTX-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
2708 ; NVPTX-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
2709 ; NVPTX-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2710 ; NVPTX-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
2711 ; NVPTX-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
2712 ; NVPTX-NEXT: call void @__omp_outlined__19(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
2713 ; NVPTX-NEXT: ret void
2716 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
2717 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_no_state_machine_needed_l14
2718 ; AMDGPU-DISABLED-SAME: (ptr [[DYN:%.*]]) #[[ATTR0:[0-9]+]] {
2719 ; AMDGPU-DISABLED-NEXT: entry:
2720 ; AMDGPU-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2721 ; AMDGPU-DISABLED-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
2722 ; AMDGPU-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_no_state_machine_needed_l14_kernel_environment, ptr [[DYN]])
2723 ; AMDGPU-DISABLED-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
2724 ; AMDGPU-DISABLED-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
2725 ; AMDGPU-DISABLED: user_code.entry:
2726 ; AMDGPU-DISABLED-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3:[0-9]+]]
2727 ; AMDGPU-DISABLED-NEXT: call void @__omp_outlined__(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
2728 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_target_deinit()
2729 ; AMDGPU-DISABLED-NEXT: ret void
2730 ; AMDGPU-DISABLED: worker.exit:
2731 ; AMDGPU-DISABLED-NEXT: ret void
2734 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__kmpc_target_init
2735 ; AMDGPU-DISABLED-SAME: (ptr [[TMP0:%.*]], ptr [[TMP1:%.*]]) {
2736 ; AMDGPU-DISABLED-NEXT: ret i32 0
2739 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
2740 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__
2741 ; AMDGPU-DISABLED-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2742 ; AMDGPU-DISABLED-NEXT: entry:
2743 ; AMDGPU-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2744 ; AMDGPU-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2745 ; AMDGPU-DISABLED-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR9:[0-9]+]]
2746 ; AMDGPU-DISABLED-NEXT: call void @unknown_no_openmp() #[[ATTR10:[0-9]+]]
2747 ; AMDGPU-DISABLED-NEXT: ret void
2750 ; AMDGPU-DISABLED: Function Attrs: convergent noinline nounwind
2751 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@no_parallel_region_in_here.internalized
2752 ; AMDGPU-DISABLED-SAME: () #[[ATTR1:[0-9]+]] {
2753 ; AMDGPU-DISABLED-NEXT: entry:
2754 ; AMDGPU-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]]) #[[ATTR3]]
2755 ; AMDGPU-DISABLED-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_single(ptr @[[GLOB2]], i32 [[TMP0]]) #[[ATTR3]]
2756 ; AMDGPU-DISABLED-NEXT: [[TMP2:%.*]] = icmp ne i32 [[TMP1]], 0
2757 ; AMDGPU-DISABLED-NEXT: br i1 [[TMP2]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
2758 ; AMDGPU-DISABLED: omp_if.then:
2759 ; AMDGPU-DISABLED-NEXT: store i32 0, ptr @G, align 4
2760 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_end_single(ptr @[[GLOB2]], i32 [[TMP0]]) #[[ATTR3]]
2761 ; AMDGPU-DISABLED-NEXT: br label [[OMP_IF_END]]
2762 ; AMDGPU-DISABLED: omp_if.end:
2763 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3]], i32 [[TMP0]]) #[[ATTR3]]
2764 ; AMDGPU-DISABLED-NEXT: ret void
2767 ; AMDGPU-DISABLED: Function Attrs: convergent noinline nounwind
2768 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@no_parallel_region_in_here
2769 ; AMDGPU-DISABLED-SAME: () #[[ATTR1]] {
2770 ; AMDGPU-DISABLED-NEXT: entry:
2771 ; AMDGPU-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]])
2772 ; AMDGPU-DISABLED-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_single(ptr @[[GLOB2]], i32 [[TMP0]])
2773 ; AMDGPU-DISABLED-NEXT: [[TMP2:%.*]] = icmp ne i32 [[TMP1]], 0
2774 ; AMDGPU-DISABLED-NEXT: br i1 [[TMP2]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
2775 ; AMDGPU-DISABLED: omp_if.then:
2776 ; AMDGPU-DISABLED-NEXT: store i32 0, ptr @G, align 4
2777 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_end_single(ptr @[[GLOB2]], i32 [[TMP0]])
2778 ; AMDGPU-DISABLED-NEXT: br label [[OMP_IF_END]]
2779 ; AMDGPU-DISABLED: omp_if.end:
2780 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3]], i32 [[TMP0]])
2781 ; AMDGPU-DISABLED-NEXT: ret void
2784 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
2785 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_l22
2786 ; AMDGPU-DISABLED-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
2787 ; AMDGPU-DISABLED-NEXT: entry:
2788 ; AMDGPU-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2789 ; AMDGPU-DISABLED-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
2790 ; AMDGPU-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_l22_kernel_environment, ptr [[DYN]])
2791 ; AMDGPU-DISABLED-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
2792 ; AMDGPU-DISABLED-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
2793 ; AMDGPU-DISABLED: user_code.entry:
2794 ; AMDGPU-DISABLED-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
2795 ; AMDGPU-DISABLED-NEXT: call void @__omp_outlined__1(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
2796 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_target_deinit()
2797 ; AMDGPU-DISABLED-NEXT: ret void
2798 ; AMDGPU-DISABLED: worker.exit:
2799 ; AMDGPU-DISABLED-NEXT: ret void
2802 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
2803 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__1
2804 ; AMDGPU-DISABLED-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2805 ; AMDGPU-DISABLED-NEXT: entry:
2806 ; AMDGPU-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2807 ; AMDGPU-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
2808 ; AMDGPU-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
2809 ; AMDGPU-DISABLED-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
2810 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__2, ptr @__omp_outlined__2_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
2811 ; AMDGPU-DISABLED-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR9]]
2812 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__3, ptr @__omp_outlined__3_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
2813 ; AMDGPU-DISABLED-NEXT: ret void
2816 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
2817 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__2
2818 ; AMDGPU-DISABLED-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2819 ; AMDGPU-DISABLED-NEXT: entry:
2820 ; AMDGPU-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2821 ; AMDGPU-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2822 ; AMDGPU-DISABLED-NEXT: call void @p0() #[[ATTR11:[0-9]+]]
2823 ; AMDGPU-DISABLED-NEXT: ret void
2826 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
2827 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__2_wrapper
2828 ; AMDGPU-DISABLED-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
2829 ; AMDGPU-DISABLED-NEXT: entry:
2830 ; AMDGPU-DISABLED-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
2831 ; AMDGPU-DISABLED-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
2832 ; AMDGPU-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2833 ; AMDGPU-DISABLED-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
2834 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
2835 ; AMDGPU-DISABLED-NEXT: call void @__omp_outlined__2(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
2836 ; AMDGPU-DISABLED-NEXT: ret void
2839 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
2840 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__3
2841 ; AMDGPU-DISABLED-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2842 ; AMDGPU-DISABLED-NEXT: entry:
2843 ; AMDGPU-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2844 ; AMDGPU-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2845 ; AMDGPU-DISABLED-NEXT: call void @p1() #[[ATTR11]]
2846 ; AMDGPU-DISABLED-NEXT: ret void
2849 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
2850 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__3_wrapper
2851 ; AMDGPU-DISABLED-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
2852 ; AMDGPU-DISABLED-NEXT: entry:
2853 ; AMDGPU-DISABLED-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
2854 ; AMDGPU-DISABLED-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
2855 ; AMDGPU-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2856 ; AMDGPU-DISABLED-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
2857 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
2858 ; AMDGPU-DISABLED-NEXT: call void @__omp_outlined__3(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
2859 ; AMDGPU-DISABLED-NEXT: ret void
2862 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
2863 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39
2864 ; AMDGPU-DISABLED-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
2865 ; AMDGPU-DISABLED-NEXT: entry:
2866 ; AMDGPU-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2867 ; AMDGPU-DISABLED-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
2868 ; AMDGPU-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39_kernel_environment, ptr [[DYN]])
2869 ; AMDGPU-DISABLED-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
2870 ; AMDGPU-DISABLED-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
2871 ; AMDGPU-DISABLED: user_code.entry:
2872 ; AMDGPU-DISABLED-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
2873 ; AMDGPU-DISABLED-NEXT: call void @__omp_outlined__4(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
2874 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_target_deinit()
2875 ; AMDGPU-DISABLED-NEXT: ret void
2876 ; AMDGPU-DISABLED: worker.exit:
2877 ; AMDGPU-DISABLED-NEXT: ret void
2880 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
2881 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__4
2882 ; AMDGPU-DISABLED-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2883 ; AMDGPU-DISABLED-NEXT: entry:
2884 ; AMDGPU-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2885 ; AMDGPU-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
2886 ; AMDGPU-DISABLED-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
2887 ; AMDGPU-DISABLED-NEXT: call void @simple_state_machine_interprocedural_before.internalized() #[[ATTR9]]
2888 ; AMDGPU-DISABLED-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR9]]
2889 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__5, ptr @__omp_outlined__5_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
2890 ; AMDGPU-DISABLED-NEXT: call void @simple_state_machine_interprocedural_after.internalized() #[[ATTR9]]
2891 ; AMDGPU-DISABLED-NEXT: ret void
2894 ; AMDGPU-DISABLED: Function Attrs: noinline nounwind
2895 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_before.internalized
2896 ; AMDGPU-DISABLED-SAME: () #[[ATTR6:[0-9]+]] {
2897 ; AMDGPU-DISABLED-NEXT: entry:
2898 ; AMDGPU-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
2899 ; AMDGPU-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]]) #[[ATTR3]]
2900 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__17, ptr @__omp_outlined__17_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
2901 ; AMDGPU-DISABLED-NEXT: ret void
2904 ; AMDGPU-DISABLED: Function Attrs: convergent noinline nounwind
2905 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_before
2906 ; AMDGPU-DISABLED-SAME: () #[[ATTR1]] {
2907 ; AMDGPU-DISABLED-NEXT: entry:
2908 ; AMDGPU-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
2909 ; AMDGPU-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]])
2910 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__17, ptr @__omp_outlined__17_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
2911 ; AMDGPU-DISABLED-NEXT: ret void
2914 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
2915 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__5
2916 ; AMDGPU-DISABLED-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2917 ; AMDGPU-DISABLED-NEXT: entry:
2918 ; AMDGPU-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2919 ; AMDGPU-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2920 ; AMDGPU-DISABLED-NEXT: call void @p1() #[[ATTR11]]
2921 ; AMDGPU-DISABLED-NEXT: ret void
2924 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
2925 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__5_wrapper
2926 ; AMDGPU-DISABLED-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
2927 ; AMDGPU-DISABLED-NEXT: entry:
2928 ; AMDGPU-DISABLED-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
2929 ; AMDGPU-DISABLED-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
2930 ; AMDGPU-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2931 ; AMDGPU-DISABLED-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
2932 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
2933 ; AMDGPU-DISABLED-NEXT: call void @__omp_outlined__5(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
2934 ; AMDGPU-DISABLED-NEXT: ret void
2937 ; AMDGPU-DISABLED: Function Attrs: noinline nounwind
2938 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_after.internalized
2939 ; AMDGPU-DISABLED-SAME: () #[[ATTR6]] {
2940 ; AMDGPU-DISABLED-NEXT: entry:
2941 ; AMDGPU-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
2942 ; AMDGPU-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]]) #[[ATTR3]]
2943 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__18, ptr @__omp_outlined__18_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
2944 ; AMDGPU-DISABLED-NEXT: ret void
2947 ; AMDGPU-DISABLED: Function Attrs: convergent noinline nounwind
2948 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_after
2949 ; AMDGPU-DISABLED-SAME: () #[[ATTR1]] {
2950 ; AMDGPU-DISABLED-NEXT: entry:
2951 ; AMDGPU-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
2952 ; AMDGPU-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]])
2953 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__18, ptr @__omp_outlined__18_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
2954 ; AMDGPU-DISABLED-NEXT: ret void
2957 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
2958 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55
2959 ; AMDGPU-DISABLED-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
2960 ; AMDGPU-DISABLED-NEXT: entry:
2961 ; AMDGPU-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2962 ; AMDGPU-DISABLED-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
2963 ; AMDGPU-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55_kernel_environment, ptr [[DYN]])
2964 ; AMDGPU-DISABLED-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
2965 ; AMDGPU-DISABLED-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
2966 ; AMDGPU-DISABLED: user_code.entry:
2967 ; AMDGPU-DISABLED-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
2968 ; AMDGPU-DISABLED-NEXT: call void @__omp_outlined__6(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
2969 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_target_deinit()
2970 ; AMDGPU-DISABLED-NEXT: ret void
2971 ; AMDGPU-DISABLED: worker.exit:
2972 ; AMDGPU-DISABLED-NEXT: ret void
2975 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
2976 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__6
2977 ; AMDGPU-DISABLED-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2978 ; AMDGPU-DISABLED-NEXT: entry:
2979 ; AMDGPU-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2980 ; AMDGPU-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
2981 ; AMDGPU-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
2982 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__7, ptr @__omp_outlined__7_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
2983 ; AMDGPU-DISABLED-NEXT: [[CALL:%.*]] = call i32 @unknown() #[[ATTR11]]
2984 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__8, ptr @__omp_outlined__8_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
2985 ; AMDGPU-DISABLED-NEXT: ret void
2988 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
2989 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__7
2990 ; AMDGPU-DISABLED-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2991 ; AMDGPU-DISABLED-NEXT: entry:
2992 ; AMDGPU-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2993 ; AMDGPU-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2994 ; AMDGPU-DISABLED-NEXT: call void @p0() #[[ATTR11]]
2995 ; AMDGPU-DISABLED-NEXT: ret void
2998 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
2999 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__7_wrapper
3000 ; AMDGPU-DISABLED-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
3001 ; AMDGPU-DISABLED-NEXT: entry:
3002 ; AMDGPU-DISABLED-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
3003 ; AMDGPU-DISABLED-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
3004 ; AMDGPU-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3005 ; AMDGPU-DISABLED-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
3006 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
3007 ; AMDGPU-DISABLED-NEXT: call void @__omp_outlined__7(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3008 ; AMDGPU-DISABLED-NEXT: ret void
3011 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3012 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__8
3013 ; AMDGPU-DISABLED-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3014 ; AMDGPU-DISABLED-NEXT: entry:
3015 ; AMDGPU-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
3016 ; AMDGPU-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3017 ; AMDGPU-DISABLED-NEXT: call void @p1() #[[ATTR11]]
3018 ; AMDGPU-DISABLED-NEXT: ret void
3021 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3022 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__8_wrapper
3023 ; AMDGPU-DISABLED-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
3024 ; AMDGPU-DISABLED-NEXT: entry:
3025 ; AMDGPU-DISABLED-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
3026 ; AMDGPU-DISABLED-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
3027 ; AMDGPU-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3028 ; AMDGPU-DISABLED-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
3029 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
3030 ; AMDGPU-DISABLED-NEXT: call void @__omp_outlined__8(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3031 ; AMDGPU-DISABLED-NEXT: ret void
3034 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3035 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66
3036 ; AMDGPU-DISABLED-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
3037 ; AMDGPU-DISABLED-NEXT: entry:
3038 ; AMDGPU-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3039 ; AMDGPU-DISABLED-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
3040 ; AMDGPU-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66_kernel_environment, ptr [[DYN]])
3041 ; AMDGPU-DISABLED-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
3042 ; AMDGPU-DISABLED-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
3043 ; AMDGPU-DISABLED: user_code.entry:
3044 ; AMDGPU-DISABLED-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
3045 ; AMDGPU-DISABLED-NEXT: call void @__omp_outlined__9(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3046 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_target_deinit()
3047 ; AMDGPU-DISABLED-NEXT: ret void
3048 ; AMDGPU-DISABLED: worker.exit:
3049 ; AMDGPU-DISABLED-NEXT: ret void
3052 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3053 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__9
3054 ; AMDGPU-DISABLED-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3055 ; AMDGPU-DISABLED-NEXT: entry:
3056 ; AMDGPU-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3057 ; AMDGPU-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
3058 ; AMDGPU-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
3059 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__10, ptr @__omp_outlined__10_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
3060 ; AMDGPU-DISABLED-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
3061 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__11, ptr @__omp_outlined__11_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
3062 ; AMDGPU-DISABLED-NEXT: ret void
3065 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3066 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__10
3067 ; AMDGPU-DISABLED-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3068 ; AMDGPU-DISABLED-NEXT: entry:
3069 ; AMDGPU-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
3070 ; AMDGPU-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3071 ; AMDGPU-DISABLED-NEXT: call void @p0() #[[ATTR11]]
3072 ; AMDGPU-DISABLED-NEXT: ret void
3075 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3076 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__10_wrapper
3077 ; AMDGPU-DISABLED-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
3078 ; AMDGPU-DISABLED-NEXT: entry:
3079 ; AMDGPU-DISABLED-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
3080 ; AMDGPU-DISABLED-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
3081 ; AMDGPU-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3082 ; AMDGPU-DISABLED-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
3083 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
3084 ; AMDGPU-DISABLED-NEXT: call void @__omp_outlined__10(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3085 ; AMDGPU-DISABLED-NEXT: ret void
3088 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3089 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__11
3090 ; AMDGPU-DISABLED-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3091 ; AMDGPU-DISABLED-NEXT: entry:
3092 ; AMDGPU-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
3093 ; AMDGPU-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3094 ; AMDGPU-DISABLED-NEXT: call void @p1() #[[ATTR11]]
3095 ; AMDGPU-DISABLED-NEXT: ret void
3098 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3099 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__11_wrapper
3100 ; AMDGPU-DISABLED-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
3101 ; AMDGPU-DISABLED-NEXT: entry:
3102 ; AMDGPU-DISABLED-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
3103 ; AMDGPU-DISABLED-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
3104 ; AMDGPU-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3105 ; AMDGPU-DISABLED-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
3106 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
3107 ; AMDGPU-DISABLED-NEXT: call void @__omp_outlined__11(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3108 ; AMDGPU-DISABLED-NEXT: ret void
3111 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3112 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_pure_l77
3113 ; AMDGPU-DISABLED-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
3114 ; AMDGPU-DISABLED-NEXT: entry:
3115 ; AMDGPU-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3116 ; AMDGPU-DISABLED-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
3117 ; AMDGPU-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_pure_l77_kernel_environment, ptr [[DYN]])
3118 ; AMDGPU-DISABLED-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
3119 ; AMDGPU-DISABLED-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
3120 ; AMDGPU-DISABLED: user_code.entry:
3121 ; AMDGPU-DISABLED-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
3122 ; AMDGPU-DISABLED-NEXT: call void @__omp_outlined__12(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3123 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_target_deinit()
3124 ; AMDGPU-DISABLED-NEXT: ret void
3125 ; AMDGPU-DISABLED: worker.exit:
3126 ; AMDGPU-DISABLED-NEXT: ret void
3129 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3130 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__12
3131 ; AMDGPU-DISABLED-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3132 ; AMDGPU-DISABLED-NEXT: entry:
3133 ; AMDGPU-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3134 ; AMDGPU-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
3135 ; AMDGPU-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
3136 ; AMDGPU-DISABLED-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
3137 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__13, ptr @__omp_outlined__13_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
3138 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__14, ptr @__omp_outlined__14_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
3139 ; AMDGPU-DISABLED-NEXT: ret void
3142 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3143 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__13
3144 ; AMDGPU-DISABLED-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3145 ; AMDGPU-DISABLED-NEXT: entry:
3146 ; AMDGPU-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
3147 ; AMDGPU-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3148 ; AMDGPU-DISABLED-NEXT: call void @p0() #[[ATTR11]]
3149 ; AMDGPU-DISABLED-NEXT: ret void
3152 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3153 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__13_wrapper
3154 ; AMDGPU-DISABLED-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
3155 ; AMDGPU-DISABLED-NEXT: entry:
3156 ; AMDGPU-DISABLED-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
3157 ; AMDGPU-DISABLED-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
3158 ; AMDGPU-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3159 ; AMDGPU-DISABLED-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
3160 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
3161 ; AMDGPU-DISABLED-NEXT: call void @__omp_outlined__13(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3162 ; AMDGPU-DISABLED-NEXT: ret void
3165 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3166 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__14
3167 ; AMDGPU-DISABLED-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3168 ; AMDGPU-DISABLED-NEXT: entry:
3169 ; AMDGPU-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
3170 ; AMDGPU-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3171 ; AMDGPU-DISABLED-NEXT: call void @p1() #[[ATTR11]]
3172 ; AMDGPU-DISABLED-NEXT: ret void
3175 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3176 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__14_wrapper
3177 ; AMDGPU-DISABLED-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
3178 ; AMDGPU-DISABLED-NEXT: entry:
3179 ; AMDGPU-DISABLED-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
3180 ; AMDGPU-DISABLED-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
3181 ; AMDGPU-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3182 ; AMDGPU-DISABLED-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
3183 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
3184 ; AMDGPU-DISABLED-NEXT: call void @__omp_outlined__14(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3185 ; AMDGPU-DISABLED-NEXT: ret void
3188 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3189 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92
3190 ; AMDGPU-DISABLED-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
3191 ; AMDGPU-DISABLED-NEXT: entry:
3192 ; AMDGPU-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3193 ; AMDGPU-DISABLED-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
3194 ; AMDGPU-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92_kernel_environment, ptr [[DYN]])
3195 ; AMDGPU-DISABLED-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
3196 ; AMDGPU-DISABLED-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
3197 ; AMDGPU-DISABLED: user_code.entry:
3198 ; AMDGPU-DISABLED-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
3199 ; AMDGPU-DISABLED-NEXT: call void @__omp_outlined__15(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3200 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_target_deinit()
3201 ; AMDGPU-DISABLED-NEXT: ret void
3202 ; AMDGPU-DISABLED: worker.exit:
3203 ; AMDGPU-DISABLED-NEXT: ret void
3206 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3207 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__15
3208 ; AMDGPU-DISABLED-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3209 ; AMDGPU-DISABLED-NEXT: entry:
3210 ; AMDGPU-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
3211 ; AMDGPU-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3212 ; AMDGPU-DISABLED-NEXT: [[CALL:%.*]] = call i32 @omp_get_thread_num() #[[ATTR9]]
3213 ; AMDGPU-DISABLED-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after.internalized(i32 [[CALL]]) #[[ATTR9]]
3214 ; AMDGPU-DISABLED-NEXT: ret void
3217 ; AMDGPU-DISABLED: Function Attrs: noinline nounwind
3218 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after.internalized
3219 ; AMDGPU-DISABLED-SAME: (i32 [[A:%.*]]) #[[ATTR6]] {
3220 ; AMDGPU-DISABLED-NEXT: entry:
3221 ; AMDGPU-DISABLED-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
3222 ; AMDGPU-DISABLED-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
3223 ; AMDGPU-DISABLED-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
3224 ; AMDGPU-DISABLED-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP0]], 0
3225 ; AMDGPU-DISABLED-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
3226 ; AMDGPU-DISABLED: if.then:
3227 ; AMDGPU-DISABLED-NEXT: br label [[RETURN:%.*]]
3228 ; AMDGPU-DISABLED: if.end:
3229 ; AMDGPU-DISABLED-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4
3230 ; AMDGPU-DISABLED-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP1]], 1
3231 ; AMDGPU-DISABLED-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after.internalized(i32 [[SUB]]) #[[ATTR9]]
3232 ; AMDGPU-DISABLED-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after_after.internalized() #[[ATTR9]]
3233 ; AMDGPU-DISABLED-NEXT: br label [[RETURN]]
3234 ; AMDGPU-DISABLED: return:
3235 ; AMDGPU-DISABLED-NEXT: ret void
3238 ; AMDGPU-DISABLED: Function Attrs: convergent noinline nounwind
3239 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after
3240 ; AMDGPU-DISABLED-SAME: (i32 [[A:%.*]]) #[[ATTR1]] {
3241 ; AMDGPU-DISABLED-NEXT: entry:
3242 ; AMDGPU-DISABLED-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
3243 ; AMDGPU-DISABLED-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
3244 ; AMDGPU-DISABLED-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
3245 ; AMDGPU-DISABLED-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP0]], 0
3246 ; AMDGPU-DISABLED-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
3247 ; AMDGPU-DISABLED: if.then:
3248 ; AMDGPU-DISABLED-NEXT: br label [[RETURN:%.*]]
3249 ; AMDGPU-DISABLED: if.end:
3250 ; AMDGPU-DISABLED-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4
3251 ; AMDGPU-DISABLED-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP1]], 1
3252 ; AMDGPU-DISABLED-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after(i32 [[SUB]]) #[[ATTR11]]
3253 ; AMDGPU-DISABLED-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after_after() #[[ATTR11]]
3254 ; AMDGPU-DISABLED-NEXT: br label [[RETURN]]
3255 ; AMDGPU-DISABLED: return:
3256 ; AMDGPU-DISABLED-NEXT: ret void
3259 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3260 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112
3261 ; AMDGPU-DISABLED-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
3262 ; AMDGPU-DISABLED-NEXT: entry:
3263 ; AMDGPU-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3264 ; AMDGPU-DISABLED-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
3265 ; AMDGPU-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112_kernel_environment, ptr [[DYN]])
3266 ; AMDGPU-DISABLED-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
3267 ; AMDGPU-DISABLED-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
3268 ; AMDGPU-DISABLED: user_code.entry:
3269 ; AMDGPU-DISABLED-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
3270 ; AMDGPU-DISABLED-NEXT: call void @__omp_outlined__16(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3271 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_target_deinit()
3272 ; AMDGPU-DISABLED-NEXT: ret void
3273 ; AMDGPU-DISABLED: worker.exit:
3274 ; AMDGPU-DISABLED-NEXT: ret void
3277 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3278 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__16
3279 ; AMDGPU-DISABLED-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3280 ; AMDGPU-DISABLED-NEXT: entry:
3281 ; AMDGPU-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
3282 ; AMDGPU-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3283 ; AMDGPU-DISABLED-NEXT: call void @weak_callee_empty() #[[ATTR9]]
3284 ; AMDGPU-DISABLED-NEXT: ret void
3287 ; AMDGPU-DISABLED: Function Attrs: convergent noinline nounwind
3288 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@weak_callee_empty
3289 ; AMDGPU-DISABLED-SAME: () #[[ATTR1]] {
3290 ; AMDGPU-DISABLED-NEXT: entry:
3291 ; AMDGPU-DISABLED-NEXT: ret void
3294 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3295 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__17
3296 ; AMDGPU-DISABLED-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3297 ; AMDGPU-DISABLED-NEXT: entry:
3298 ; AMDGPU-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
3299 ; AMDGPU-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3300 ; AMDGPU-DISABLED-NEXT: call void @p0() #[[ATTR11]]
3301 ; AMDGPU-DISABLED-NEXT: ret void
3304 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3305 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__17_wrapper
3306 ; AMDGPU-DISABLED-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
3307 ; AMDGPU-DISABLED-NEXT: entry:
3308 ; AMDGPU-DISABLED-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
3309 ; AMDGPU-DISABLED-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
3310 ; AMDGPU-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3311 ; AMDGPU-DISABLED-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
3312 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
3313 ; AMDGPU-DISABLED-NEXT: call void @__omp_outlined__17(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3314 ; AMDGPU-DISABLED-NEXT: ret void
3317 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3318 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__18
3319 ; AMDGPU-DISABLED-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3320 ; AMDGPU-DISABLED-NEXT: entry:
3321 ; AMDGPU-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
3322 ; AMDGPU-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3323 ; AMDGPU-DISABLED-NEXT: call void @p0() #[[ATTR11]]
3324 ; AMDGPU-DISABLED-NEXT: ret void
3327 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3328 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__18_wrapper
3329 ; AMDGPU-DISABLED-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
3330 ; AMDGPU-DISABLED-NEXT: entry:
3331 ; AMDGPU-DISABLED-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
3332 ; AMDGPU-DISABLED-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
3333 ; AMDGPU-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3334 ; AMDGPU-DISABLED-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
3335 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
3336 ; AMDGPU-DISABLED-NEXT: call void @__omp_outlined__18(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3337 ; AMDGPU-DISABLED-NEXT: ret void
3340 ; AMDGPU-DISABLED: Function Attrs: noinline nounwind
3341 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after_after.internalized
3342 ; AMDGPU-DISABLED-SAME: () #[[ATTR6]] {
3343 ; AMDGPU-DISABLED-NEXT: entry:
3344 ; AMDGPU-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
3345 ; AMDGPU-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]]) #[[ATTR3]]
3346 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__19, ptr @__omp_outlined__19_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
3347 ; AMDGPU-DISABLED-NEXT: ret void
3350 ; AMDGPU-DISABLED: Function Attrs: convergent noinline nounwind
3351 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after_after
3352 ; AMDGPU-DISABLED-SAME: () #[[ATTR1]] {
3353 ; AMDGPU-DISABLED-NEXT: entry:
3354 ; AMDGPU-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
3355 ; AMDGPU-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]])
3356 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__19, ptr @__omp_outlined__19_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
3357 ; AMDGPU-DISABLED-NEXT: ret void
3360 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3361 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__19
3362 ; AMDGPU-DISABLED-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3363 ; AMDGPU-DISABLED-NEXT: entry:
3364 ; AMDGPU-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
3365 ; AMDGPU-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3366 ; AMDGPU-DISABLED-NEXT: call void @p0() #[[ATTR11]]
3367 ; AMDGPU-DISABLED-NEXT: ret void
3370 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3371 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__19_wrapper
3372 ; AMDGPU-DISABLED-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
3373 ; AMDGPU-DISABLED-NEXT: entry:
3374 ; AMDGPU-DISABLED-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
3375 ; AMDGPU-DISABLED-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
3376 ; AMDGPU-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3377 ; AMDGPU-DISABLED-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
3378 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
3379 ; AMDGPU-DISABLED-NEXT: call void @__omp_outlined__19(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3380 ; AMDGPU-DISABLED-NEXT: ret void
3383 ; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3384 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_no_state_machine_needed_l14
3385 ; NVPTX-DISABLED-SAME: (ptr [[DYN:%.*]]) #[[ATTR0:[0-9]+]] {
3386 ; NVPTX-DISABLED-NEXT: entry:
3387 ; NVPTX-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3388 ; NVPTX-DISABLED-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
3389 ; NVPTX-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_no_state_machine_needed_l14_kernel_environment, ptr [[DYN]])
3390 ; NVPTX-DISABLED-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
3391 ; NVPTX-DISABLED-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
3392 ; NVPTX-DISABLED: user_code.entry:
3393 ; NVPTX-DISABLED-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3:[0-9]+]]
3394 ; NVPTX-DISABLED-NEXT: call void @__omp_outlined__(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3395 ; NVPTX-DISABLED-NEXT: call void @__kmpc_target_deinit()
3396 ; NVPTX-DISABLED-NEXT: ret void
3397 ; NVPTX-DISABLED: worker.exit:
3398 ; NVPTX-DISABLED-NEXT: ret void
3401 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__kmpc_target_init
3402 ; NVPTX-DISABLED-SAME: (ptr [[TMP0:%.*]], ptr [[TMP1:%.*]]) {
3403 ; NVPTX-DISABLED-NEXT: ret i32 0
3406 ; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3407 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__
3408 ; NVPTX-DISABLED-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3409 ; NVPTX-DISABLED-NEXT: entry:
3410 ; NVPTX-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
3411 ; NVPTX-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3412 ; NVPTX-DISABLED-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR9:[0-9]+]]
3413 ; NVPTX-DISABLED-NEXT: call void @unknown_no_openmp() #[[ATTR10:[0-9]+]]
3414 ; NVPTX-DISABLED-NEXT: ret void
3417 ; NVPTX-DISABLED: Function Attrs: convergent noinline nounwind
3418 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@no_parallel_region_in_here.internalized
3419 ; NVPTX-DISABLED-SAME: () #[[ATTR1:[0-9]+]] {
3420 ; NVPTX-DISABLED-NEXT: entry:
3421 ; NVPTX-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]]) #[[ATTR3]]
3422 ; NVPTX-DISABLED-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_single(ptr @[[GLOB2]], i32 [[TMP0]]) #[[ATTR3]]
3423 ; NVPTX-DISABLED-NEXT: [[TMP2:%.*]] = icmp ne i32 [[TMP1]], 0
3424 ; NVPTX-DISABLED-NEXT: br i1 [[TMP2]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
3425 ; NVPTX-DISABLED: omp_if.then:
3426 ; NVPTX-DISABLED-NEXT: store i32 0, ptr @G, align 4
3427 ; NVPTX-DISABLED-NEXT: call void @__kmpc_end_single(ptr @[[GLOB2]], i32 [[TMP0]]) #[[ATTR3]]
3428 ; NVPTX-DISABLED-NEXT: br label [[OMP_IF_END]]
3429 ; NVPTX-DISABLED: omp_if.end:
3430 ; NVPTX-DISABLED-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3]], i32 [[TMP0]]) #[[ATTR3]]
3431 ; NVPTX-DISABLED-NEXT: ret void
3434 ; NVPTX-DISABLED: Function Attrs: convergent noinline nounwind
3435 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@no_parallel_region_in_here
3436 ; NVPTX-DISABLED-SAME: () #[[ATTR1]] {
3437 ; NVPTX-DISABLED-NEXT: entry:
3438 ; NVPTX-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]])
3439 ; NVPTX-DISABLED-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_single(ptr @[[GLOB2]], i32 [[TMP0]])
3440 ; NVPTX-DISABLED-NEXT: [[TMP2:%.*]] = icmp ne i32 [[TMP1]], 0
3441 ; NVPTX-DISABLED-NEXT: br i1 [[TMP2]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
3442 ; NVPTX-DISABLED: omp_if.then:
3443 ; NVPTX-DISABLED-NEXT: store i32 0, ptr @G, align 4
3444 ; NVPTX-DISABLED-NEXT: call void @__kmpc_end_single(ptr @[[GLOB2]], i32 [[TMP0]])
3445 ; NVPTX-DISABLED-NEXT: br label [[OMP_IF_END]]
3446 ; NVPTX-DISABLED: omp_if.end:
3447 ; NVPTX-DISABLED-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3]], i32 [[TMP0]])
3448 ; NVPTX-DISABLED-NEXT: ret void
3451 ; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3452 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_l22
3453 ; NVPTX-DISABLED-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
3454 ; NVPTX-DISABLED-NEXT: entry:
3455 ; NVPTX-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3456 ; NVPTX-DISABLED-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
3457 ; NVPTX-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_l22_kernel_environment, ptr [[DYN]])
3458 ; NVPTX-DISABLED-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
3459 ; NVPTX-DISABLED-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
3460 ; NVPTX-DISABLED: user_code.entry:
3461 ; NVPTX-DISABLED-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
3462 ; NVPTX-DISABLED-NEXT: call void @__omp_outlined__1(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3463 ; NVPTX-DISABLED-NEXT: call void @__kmpc_target_deinit()
3464 ; NVPTX-DISABLED-NEXT: ret void
3465 ; NVPTX-DISABLED: worker.exit:
3466 ; NVPTX-DISABLED-NEXT: ret void
3469 ; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3470 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__1
3471 ; NVPTX-DISABLED-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3472 ; NVPTX-DISABLED-NEXT: entry:
3473 ; NVPTX-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3474 ; NVPTX-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
3475 ; NVPTX-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
3476 ; NVPTX-DISABLED-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
3477 ; NVPTX-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__2, ptr @__omp_outlined__2_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
3478 ; NVPTX-DISABLED-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR9]]
3479 ; NVPTX-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__3, ptr @__omp_outlined__3_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
3480 ; NVPTX-DISABLED-NEXT: ret void
3483 ; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3484 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__2
3485 ; NVPTX-DISABLED-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3486 ; NVPTX-DISABLED-NEXT: entry:
3487 ; NVPTX-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
3488 ; NVPTX-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3489 ; NVPTX-DISABLED-NEXT: call void @p0() #[[ATTR11:[0-9]+]]
3490 ; NVPTX-DISABLED-NEXT: ret void
3493 ; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3494 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__2_wrapper
3495 ; NVPTX-DISABLED-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
3496 ; NVPTX-DISABLED-NEXT: entry:
3497 ; NVPTX-DISABLED-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
3498 ; NVPTX-DISABLED-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
3499 ; NVPTX-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3500 ; NVPTX-DISABLED-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
3501 ; NVPTX-DISABLED-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
3502 ; NVPTX-DISABLED-NEXT: call void @__omp_outlined__2(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3503 ; NVPTX-DISABLED-NEXT: ret void
3506 ; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3507 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__3
3508 ; NVPTX-DISABLED-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3509 ; NVPTX-DISABLED-NEXT: entry:
3510 ; NVPTX-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
3511 ; NVPTX-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3512 ; NVPTX-DISABLED-NEXT: call void @p1() #[[ATTR11]]
3513 ; NVPTX-DISABLED-NEXT: ret void
3516 ; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3517 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__3_wrapper
3518 ; NVPTX-DISABLED-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
3519 ; NVPTX-DISABLED-NEXT: entry:
3520 ; NVPTX-DISABLED-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
3521 ; NVPTX-DISABLED-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
3522 ; NVPTX-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3523 ; NVPTX-DISABLED-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
3524 ; NVPTX-DISABLED-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
3525 ; NVPTX-DISABLED-NEXT: call void @__omp_outlined__3(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3526 ; NVPTX-DISABLED-NEXT: ret void
3529 ; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3530 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39
3531 ; NVPTX-DISABLED-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
3532 ; NVPTX-DISABLED-NEXT: entry:
3533 ; NVPTX-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3534 ; NVPTX-DISABLED-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
3535 ; NVPTX-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39_kernel_environment, ptr [[DYN]])
3536 ; NVPTX-DISABLED-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
3537 ; NVPTX-DISABLED-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
3538 ; NVPTX-DISABLED: user_code.entry:
3539 ; NVPTX-DISABLED-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
3540 ; NVPTX-DISABLED-NEXT: call void @__omp_outlined__4(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3541 ; NVPTX-DISABLED-NEXT: call void @__kmpc_target_deinit()
3542 ; NVPTX-DISABLED-NEXT: ret void
3543 ; NVPTX-DISABLED: worker.exit:
3544 ; NVPTX-DISABLED-NEXT: ret void
3547 ; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3548 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__4
3549 ; NVPTX-DISABLED-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3550 ; NVPTX-DISABLED-NEXT: entry:
3551 ; NVPTX-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3552 ; NVPTX-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
3553 ; NVPTX-DISABLED-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
3554 ; NVPTX-DISABLED-NEXT: call void @simple_state_machine_interprocedural_before.internalized() #[[ATTR9]]
3555 ; NVPTX-DISABLED-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR9]]
3556 ; NVPTX-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__5, ptr @__omp_outlined__5_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
3557 ; NVPTX-DISABLED-NEXT: call void @simple_state_machine_interprocedural_after.internalized() #[[ATTR9]]
3558 ; NVPTX-DISABLED-NEXT: ret void
3561 ; NVPTX-DISABLED: Function Attrs: noinline nounwind
3562 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_before.internalized
3563 ; NVPTX-DISABLED-SAME: () #[[ATTR6:[0-9]+]] {
3564 ; NVPTX-DISABLED-NEXT: entry:
3565 ; NVPTX-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
3566 ; NVPTX-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]]) #[[ATTR3]]
3567 ; NVPTX-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__17, ptr @__omp_outlined__17_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
3568 ; NVPTX-DISABLED-NEXT: ret void
3571 ; NVPTX-DISABLED: Function Attrs: convergent noinline nounwind
3572 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_before
3573 ; NVPTX-DISABLED-SAME: () #[[ATTR1]] {
3574 ; NVPTX-DISABLED-NEXT: entry:
3575 ; NVPTX-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
3576 ; NVPTX-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]])
3577 ; NVPTX-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__17, ptr @__omp_outlined__17_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
3578 ; NVPTX-DISABLED-NEXT: ret void
3581 ; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3582 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__5
3583 ; NVPTX-DISABLED-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3584 ; NVPTX-DISABLED-NEXT: entry:
3585 ; NVPTX-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
3586 ; NVPTX-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3587 ; NVPTX-DISABLED-NEXT: call void @p1() #[[ATTR11]]
3588 ; NVPTX-DISABLED-NEXT: ret void
3591 ; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3592 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__5_wrapper
3593 ; NVPTX-DISABLED-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
3594 ; NVPTX-DISABLED-NEXT: entry:
3595 ; NVPTX-DISABLED-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
3596 ; NVPTX-DISABLED-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
3597 ; NVPTX-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3598 ; NVPTX-DISABLED-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
3599 ; NVPTX-DISABLED-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
3600 ; NVPTX-DISABLED-NEXT: call void @__omp_outlined__5(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3601 ; NVPTX-DISABLED-NEXT: ret void
3604 ; NVPTX-DISABLED: Function Attrs: noinline nounwind
3605 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_after.internalized
3606 ; NVPTX-DISABLED-SAME: () #[[ATTR6]] {
3607 ; NVPTX-DISABLED-NEXT: entry:
3608 ; NVPTX-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
3609 ; NVPTX-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]]) #[[ATTR3]]
3610 ; NVPTX-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__18, ptr @__omp_outlined__18_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
3611 ; NVPTX-DISABLED-NEXT: ret void
3614 ; NVPTX-DISABLED: Function Attrs: convergent noinline nounwind
3615 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_after
3616 ; NVPTX-DISABLED-SAME: () #[[ATTR1]] {
3617 ; NVPTX-DISABLED-NEXT: entry:
3618 ; NVPTX-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
3619 ; NVPTX-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]])
3620 ; NVPTX-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__18, ptr @__omp_outlined__18_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
3621 ; NVPTX-DISABLED-NEXT: ret void
3624 ; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3625 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55
3626 ; NVPTX-DISABLED-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
3627 ; NVPTX-DISABLED-NEXT: entry:
3628 ; NVPTX-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3629 ; NVPTX-DISABLED-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
3630 ; NVPTX-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55_kernel_environment, ptr [[DYN]])
3631 ; NVPTX-DISABLED-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
3632 ; NVPTX-DISABLED-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
3633 ; NVPTX-DISABLED: user_code.entry:
3634 ; NVPTX-DISABLED-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
3635 ; NVPTX-DISABLED-NEXT: call void @__omp_outlined__6(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3636 ; NVPTX-DISABLED-NEXT: call void @__kmpc_target_deinit()
3637 ; NVPTX-DISABLED-NEXT: ret void
3638 ; NVPTX-DISABLED: worker.exit:
3639 ; NVPTX-DISABLED-NEXT: ret void
3642 ; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3643 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__6
3644 ; NVPTX-DISABLED-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3645 ; NVPTX-DISABLED-NEXT: entry:
3646 ; NVPTX-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3647 ; NVPTX-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
3648 ; NVPTX-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
3649 ; NVPTX-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__7, ptr @__omp_outlined__7_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
3650 ; NVPTX-DISABLED-NEXT: [[CALL:%.*]] = call i32 @unknown() #[[ATTR11]]
3651 ; NVPTX-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__8, ptr @__omp_outlined__8_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
3652 ; NVPTX-DISABLED-NEXT: ret void
3655 ; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3656 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__7
3657 ; NVPTX-DISABLED-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3658 ; NVPTX-DISABLED-NEXT: entry:
3659 ; NVPTX-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
3660 ; NVPTX-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3661 ; NVPTX-DISABLED-NEXT: call void @p0() #[[ATTR11]]
3662 ; NVPTX-DISABLED-NEXT: ret void
3665 ; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3666 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__7_wrapper
3667 ; NVPTX-DISABLED-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
3668 ; NVPTX-DISABLED-NEXT: entry:
3669 ; NVPTX-DISABLED-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
3670 ; NVPTX-DISABLED-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
3671 ; NVPTX-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3672 ; NVPTX-DISABLED-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
3673 ; NVPTX-DISABLED-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
3674 ; NVPTX-DISABLED-NEXT: call void @__omp_outlined__7(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3675 ; NVPTX-DISABLED-NEXT: ret void
3678 ; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3679 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__8
3680 ; NVPTX-DISABLED-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3681 ; NVPTX-DISABLED-NEXT: entry:
3682 ; NVPTX-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
3683 ; NVPTX-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3684 ; NVPTX-DISABLED-NEXT: call void @p1() #[[ATTR11]]
3685 ; NVPTX-DISABLED-NEXT: ret void
3688 ; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3689 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__8_wrapper
3690 ; NVPTX-DISABLED-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
3691 ; NVPTX-DISABLED-NEXT: entry:
3692 ; NVPTX-DISABLED-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
3693 ; NVPTX-DISABLED-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
3694 ; NVPTX-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3695 ; NVPTX-DISABLED-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
3696 ; NVPTX-DISABLED-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
3697 ; NVPTX-DISABLED-NEXT: call void @__omp_outlined__8(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3698 ; NVPTX-DISABLED-NEXT: ret void
3701 ; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3702 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66
3703 ; NVPTX-DISABLED-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
3704 ; NVPTX-DISABLED-NEXT: entry:
3705 ; NVPTX-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3706 ; NVPTX-DISABLED-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
3707 ; NVPTX-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66_kernel_environment, ptr [[DYN]])
3708 ; NVPTX-DISABLED-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
3709 ; NVPTX-DISABLED-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
3710 ; NVPTX-DISABLED: user_code.entry:
3711 ; NVPTX-DISABLED-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
3712 ; NVPTX-DISABLED-NEXT: call void @__omp_outlined__9(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3713 ; NVPTX-DISABLED-NEXT: call void @__kmpc_target_deinit()
3714 ; NVPTX-DISABLED-NEXT: ret void
3715 ; NVPTX-DISABLED: worker.exit:
3716 ; NVPTX-DISABLED-NEXT: ret void
3719 ; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3720 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__9
3721 ; NVPTX-DISABLED-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3722 ; NVPTX-DISABLED-NEXT: entry:
3723 ; NVPTX-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3724 ; NVPTX-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
3725 ; NVPTX-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
3726 ; NVPTX-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__10, ptr @__omp_outlined__10_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
3727 ; NVPTX-DISABLED-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
3728 ; NVPTX-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__11, ptr @__omp_outlined__11_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
3729 ; NVPTX-DISABLED-NEXT: ret void
3732 ; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3733 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__10
3734 ; NVPTX-DISABLED-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3735 ; NVPTX-DISABLED-NEXT: entry:
3736 ; NVPTX-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
3737 ; NVPTX-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3738 ; NVPTX-DISABLED-NEXT: call void @p0() #[[ATTR11]]
3739 ; NVPTX-DISABLED-NEXT: ret void
3742 ; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3743 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__10_wrapper
3744 ; NVPTX-DISABLED-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
3745 ; NVPTX-DISABLED-NEXT: entry:
3746 ; NVPTX-DISABLED-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
3747 ; NVPTX-DISABLED-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
3748 ; NVPTX-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3749 ; NVPTX-DISABLED-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
3750 ; NVPTX-DISABLED-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
3751 ; NVPTX-DISABLED-NEXT: call void @__omp_outlined__10(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3752 ; NVPTX-DISABLED-NEXT: ret void
3755 ; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3756 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__11
3757 ; NVPTX-DISABLED-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3758 ; NVPTX-DISABLED-NEXT: entry:
3759 ; NVPTX-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
3760 ; NVPTX-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3761 ; NVPTX-DISABLED-NEXT: call void @p1() #[[ATTR11]]
3762 ; NVPTX-DISABLED-NEXT: ret void
3765 ; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3766 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__11_wrapper
3767 ; NVPTX-DISABLED-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
3768 ; NVPTX-DISABLED-NEXT: entry:
3769 ; NVPTX-DISABLED-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
3770 ; NVPTX-DISABLED-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
3771 ; NVPTX-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3772 ; NVPTX-DISABLED-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
3773 ; NVPTX-DISABLED-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
3774 ; NVPTX-DISABLED-NEXT: call void @__omp_outlined__11(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3775 ; NVPTX-DISABLED-NEXT: ret void
3778 ; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3779 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_pure_l77
3780 ; NVPTX-DISABLED-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
3781 ; NVPTX-DISABLED-NEXT: entry:
3782 ; NVPTX-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3783 ; NVPTX-DISABLED-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
3784 ; NVPTX-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_pure_l77_kernel_environment, ptr [[DYN]])
3785 ; NVPTX-DISABLED-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
3786 ; NVPTX-DISABLED-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
3787 ; NVPTX-DISABLED: user_code.entry:
3788 ; NVPTX-DISABLED-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
3789 ; NVPTX-DISABLED-NEXT: call void @__omp_outlined__12(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3790 ; NVPTX-DISABLED-NEXT: call void @__kmpc_target_deinit()
3791 ; NVPTX-DISABLED-NEXT: ret void
3792 ; NVPTX-DISABLED: worker.exit:
3793 ; NVPTX-DISABLED-NEXT: ret void
3796 ; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3797 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__12
3798 ; NVPTX-DISABLED-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3799 ; NVPTX-DISABLED-NEXT: entry:
3800 ; NVPTX-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3801 ; NVPTX-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
3802 ; NVPTX-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x ptr], align 8
3803 ; NVPTX-DISABLED-NEXT: call void @unknown_no_openmp() #[[ATTR10]]
3804 ; NVPTX-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__13, ptr @__omp_outlined__13_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
3805 ; NVPTX-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 undef, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__14, ptr @__omp_outlined__14_wrapper, ptr [[CAPTURED_VARS_ADDRS1]], i64 0)
3806 ; NVPTX-DISABLED-NEXT: ret void
3809 ; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3810 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__13
3811 ; NVPTX-DISABLED-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3812 ; NVPTX-DISABLED-NEXT: entry:
3813 ; NVPTX-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
3814 ; NVPTX-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3815 ; NVPTX-DISABLED-NEXT: call void @p0() #[[ATTR11]]
3816 ; NVPTX-DISABLED-NEXT: ret void
3819 ; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3820 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__13_wrapper
3821 ; NVPTX-DISABLED-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
3822 ; NVPTX-DISABLED-NEXT: entry:
3823 ; NVPTX-DISABLED-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
3824 ; NVPTX-DISABLED-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
3825 ; NVPTX-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3826 ; NVPTX-DISABLED-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
3827 ; NVPTX-DISABLED-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
3828 ; NVPTX-DISABLED-NEXT: call void @__omp_outlined__13(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3829 ; NVPTX-DISABLED-NEXT: ret void
3832 ; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3833 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__14
3834 ; NVPTX-DISABLED-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3835 ; NVPTX-DISABLED-NEXT: entry:
3836 ; NVPTX-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
3837 ; NVPTX-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3838 ; NVPTX-DISABLED-NEXT: call void @p1() #[[ATTR11]]
3839 ; NVPTX-DISABLED-NEXT: ret void
3842 ; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3843 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__14_wrapper
3844 ; NVPTX-DISABLED-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
3845 ; NVPTX-DISABLED-NEXT: entry:
3846 ; NVPTX-DISABLED-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
3847 ; NVPTX-DISABLED-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
3848 ; NVPTX-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3849 ; NVPTX-DISABLED-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
3850 ; NVPTX-DISABLED-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
3851 ; NVPTX-DISABLED-NEXT: call void @__omp_outlined__14(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3852 ; NVPTX-DISABLED-NEXT: ret void
3855 ; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3856 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92
3857 ; NVPTX-DISABLED-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
3858 ; NVPTX-DISABLED-NEXT: entry:
3859 ; NVPTX-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3860 ; NVPTX-DISABLED-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
3861 ; NVPTX-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92_kernel_environment, ptr [[DYN]])
3862 ; NVPTX-DISABLED-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
3863 ; NVPTX-DISABLED-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
3864 ; NVPTX-DISABLED: user_code.entry:
3865 ; NVPTX-DISABLED-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
3866 ; NVPTX-DISABLED-NEXT: call void @__omp_outlined__15(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3867 ; NVPTX-DISABLED-NEXT: call void @__kmpc_target_deinit()
3868 ; NVPTX-DISABLED-NEXT: ret void
3869 ; NVPTX-DISABLED: worker.exit:
3870 ; NVPTX-DISABLED-NEXT: ret void
3873 ; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3874 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__15
3875 ; NVPTX-DISABLED-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3876 ; NVPTX-DISABLED-NEXT: entry:
3877 ; NVPTX-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
3878 ; NVPTX-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3879 ; NVPTX-DISABLED-NEXT: [[CALL:%.*]] = call i32 @omp_get_thread_num() #[[ATTR9]]
3880 ; NVPTX-DISABLED-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after.internalized(i32 [[CALL]]) #[[ATTR9]]
3881 ; NVPTX-DISABLED-NEXT: ret void
3884 ; NVPTX-DISABLED: Function Attrs: noinline nounwind
3885 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after.internalized
3886 ; NVPTX-DISABLED-SAME: (i32 [[A:%.*]]) #[[ATTR6]] {
3887 ; NVPTX-DISABLED-NEXT: entry:
3888 ; NVPTX-DISABLED-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
3889 ; NVPTX-DISABLED-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
3890 ; NVPTX-DISABLED-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
3891 ; NVPTX-DISABLED-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP0]], 0
3892 ; NVPTX-DISABLED-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
3893 ; NVPTX-DISABLED: if.then:
3894 ; NVPTX-DISABLED-NEXT: br label [[RETURN:%.*]]
3895 ; NVPTX-DISABLED: if.end:
3896 ; NVPTX-DISABLED-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4
3897 ; NVPTX-DISABLED-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP1]], 1
3898 ; NVPTX-DISABLED-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after.internalized(i32 [[SUB]]) #[[ATTR9]]
3899 ; NVPTX-DISABLED-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after_after.internalized() #[[ATTR9]]
3900 ; NVPTX-DISABLED-NEXT: br label [[RETURN]]
3901 ; NVPTX-DISABLED: return:
3902 ; NVPTX-DISABLED-NEXT: ret void
3905 ; NVPTX-DISABLED: Function Attrs: convergent noinline nounwind
3906 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after
3907 ; NVPTX-DISABLED-SAME: (i32 [[A:%.*]]) #[[ATTR1]] {
3908 ; NVPTX-DISABLED-NEXT: entry:
3909 ; NVPTX-DISABLED-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
3910 ; NVPTX-DISABLED-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
3911 ; NVPTX-DISABLED-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
3912 ; NVPTX-DISABLED-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP0]], 0
3913 ; NVPTX-DISABLED-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
3914 ; NVPTX-DISABLED: if.then:
3915 ; NVPTX-DISABLED-NEXT: br label [[RETURN:%.*]]
3916 ; NVPTX-DISABLED: if.end:
3917 ; NVPTX-DISABLED-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4
3918 ; NVPTX-DISABLED-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP1]], 1
3919 ; NVPTX-DISABLED-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after(i32 [[SUB]]) #[[ATTR11]]
3920 ; NVPTX-DISABLED-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after_after() #[[ATTR11]]
3921 ; NVPTX-DISABLED-NEXT: br label [[RETURN]]
3922 ; NVPTX-DISABLED: return:
3923 ; NVPTX-DISABLED-NEXT: ret void
3926 ; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3927 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112
3928 ; NVPTX-DISABLED-SAME: (ptr [[DYN:%.*]]) #[[ATTR0]] {
3929 ; NVPTX-DISABLED-NEXT: entry:
3930 ; NVPTX-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3931 ; NVPTX-DISABLED-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
3932 ; NVPTX-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112_kernel_environment, ptr [[DYN]])
3933 ; NVPTX-DISABLED-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
3934 ; NVPTX-DISABLED-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
3935 ; NVPTX-DISABLED: user_code.entry:
3936 ; NVPTX-DISABLED-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR3]]
3937 ; NVPTX-DISABLED-NEXT: call void @__omp_outlined__16(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3938 ; NVPTX-DISABLED-NEXT: call void @__kmpc_target_deinit()
3939 ; NVPTX-DISABLED-NEXT: ret void
3940 ; NVPTX-DISABLED: worker.exit:
3941 ; NVPTX-DISABLED-NEXT: ret void
3944 ; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3945 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__16
3946 ; NVPTX-DISABLED-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3947 ; NVPTX-DISABLED-NEXT: entry:
3948 ; NVPTX-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
3949 ; NVPTX-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3950 ; NVPTX-DISABLED-NEXT: call void @weak_callee_empty() #[[ATTR9]]
3951 ; NVPTX-DISABLED-NEXT: ret void
3954 ; NVPTX-DISABLED: Function Attrs: convergent noinline nounwind
3955 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@weak_callee_empty
3956 ; NVPTX-DISABLED-SAME: () #[[ATTR1]] {
3957 ; NVPTX-DISABLED-NEXT: entry:
3958 ; NVPTX-DISABLED-NEXT: ret void
3961 ; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3962 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__17
3963 ; NVPTX-DISABLED-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3964 ; NVPTX-DISABLED-NEXT: entry:
3965 ; NVPTX-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
3966 ; NVPTX-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3967 ; NVPTX-DISABLED-NEXT: call void @p0() #[[ATTR11]]
3968 ; NVPTX-DISABLED-NEXT: ret void
3971 ; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3972 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__17_wrapper
3973 ; NVPTX-DISABLED-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
3974 ; NVPTX-DISABLED-NEXT: entry:
3975 ; NVPTX-DISABLED-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
3976 ; NVPTX-DISABLED-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
3977 ; NVPTX-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3978 ; NVPTX-DISABLED-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
3979 ; NVPTX-DISABLED-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
3980 ; NVPTX-DISABLED-NEXT: call void @__omp_outlined__17(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
3981 ; NVPTX-DISABLED-NEXT: ret void
3984 ; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3985 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__18
3986 ; NVPTX-DISABLED-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3987 ; NVPTX-DISABLED-NEXT: entry:
3988 ; NVPTX-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
3989 ; NVPTX-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3990 ; NVPTX-DISABLED-NEXT: call void @p0() #[[ATTR11]]
3991 ; NVPTX-DISABLED-NEXT: ret void
3994 ; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3995 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__18_wrapper
3996 ; NVPTX-DISABLED-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
3997 ; NVPTX-DISABLED-NEXT: entry:
3998 ; NVPTX-DISABLED-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
3999 ; NVPTX-DISABLED-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
4000 ; NVPTX-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
4001 ; NVPTX-DISABLED-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
4002 ; NVPTX-DISABLED-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
4003 ; NVPTX-DISABLED-NEXT: call void @__omp_outlined__18(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
4004 ; NVPTX-DISABLED-NEXT: ret void
4007 ; NVPTX-DISABLED: Function Attrs: noinline nounwind
4008 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after_after.internalized
4009 ; NVPTX-DISABLED-SAME: () #[[ATTR6]] {
4010 ; NVPTX-DISABLED-NEXT: entry:
4011 ; NVPTX-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
4012 ; NVPTX-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]]) #[[ATTR3]]
4013 ; NVPTX-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__19, ptr @__omp_outlined__19_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
4014 ; NVPTX-DISABLED-NEXT: ret void
4017 ; NVPTX-DISABLED: Function Attrs: convergent noinline nounwind
4018 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after_after
4019 ; NVPTX-DISABLED-SAME: () #[[ATTR1]] {
4020 ; NVPTX-DISABLED-NEXT: entry:
4021 ; NVPTX-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
4022 ; NVPTX-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2]])
4023 ; NVPTX-DISABLED-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__19, ptr @__omp_outlined__19_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
4024 ; NVPTX-DISABLED-NEXT: ret void
4027 ; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
4028 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__19
4029 ; NVPTX-DISABLED-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
4030 ; NVPTX-DISABLED-NEXT: entry:
4031 ; NVPTX-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
4032 ; NVPTX-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
4033 ; NVPTX-DISABLED-NEXT: call void @p0() #[[ATTR11]]
4034 ; NVPTX-DISABLED-NEXT: ret void
4037 ; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
4038 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__19_wrapper
4039 ; NVPTX-DISABLED-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
4040 ; NVPTX-DISABLED-NEXT: entry:
4041 ; NVPTX-DISABLED-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
4042 ; NVPTX-DISABLED-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
4043 ; NVPTX-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
4044 ; NVPTX-DISABLED-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
4045 ; NVPTX-DISABLED-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
4046 ; NVPTX-DISABLED-NEXT: call void @__omp_outlined__19(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR3]]
4047 ; NVPTX-DISABLED-NEXT: ret void
4050 ; AMDGPU: attributes #[[ATTR0]] = { convergent noinline norecurse nounwind "frame-pointer"="none" "kernel" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4051 ; AMDGPU: attributes #[[ATTR1]] = { convergent noinline nounwind "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4052 ; AMDGPU: attributes #[[ATTR2:[0-9]+]] = { convergent "frame-pointer"="none" "llvm.assume"="omp_no_openmp" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4053 ; AMDGPU: attributes #[[ATTR3]] = { nounwind }
4054 ; AMDGPU: attributes #[[ATTR4:[0-9]+]] = { convergent "frame-pointer"="none" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4055 ; AMDGPU: attributes #[[ATTR5:[0-9]+]] = { alwaysinline }
4056 ; AMDGPU: attributes #[[ATTR6]] = { noinline nounwind "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4057 ; AMDGPU: attributes #[[ATTR7:[0-9]+]] = { convergent nounwind willreturn memory(read) "frame-pointer"="none" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4058 ; AMDGPU: attributes #[[ATTR8:[0-9]+]] = { convergent nounwind "frame-pointer"="none" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4059 ; AMDGPU: attributes #[[ATTR9]] = { convergent nounwind }
4060 ; AMDGPU: attributes #[[ATTR10]] = { convergent "llvm.assume"="omp_no_openmp" }
4061 ; AMDGPU: attributes #[[ATTR11]] = { convergent }
4063 ; NVPTX: attributes #[[ATTR0]] = { convergent noinline norecurse nounwind "frame-pointer"="none" "kernel" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4064 ; NVPTX: attributes #[[ATTR1]] = { convergent noinline nounwind "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4065 ; NVPTX: attributes #[[ATTR2:[0-9]+]] = { convergent "frame-pointer"="none" "llvm.assume"="omp_no_openmp" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4066 ; NVPTX: attributes #[[ATTR3]] = { nounwind }
4067 ; NVPTX: attributes #[[ATTR4:[0-9]+]] = { convergent "frame-pointer"="none" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4068 ; NVPTX: attributes #[[ATTR5:[0-9]+]] = { alwaysinline }
4069 ; NVPTX: attributes #[[ATTR6]] = { noinline nounwind "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4070 ; NVPTX: attributes #[[ATTR7:[0-9]+]] = { convergent nounwind willreturn memory(read) "frame-pointer"="none" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4071 ; NVPTX: attributes #[[ATTR8:[0-9]+]] = { convergent nounwind "frame-pointer"="none" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4072 ; NVPTX: attributes #[[ATTR9]] = { convergent nounwind }
4073 ; NVPTX: attributes #[[ATTR10]] = { convergent "llvm.assume"="omp_no_openmp" }
4074 ; NVPTX: attributes #[[ATTR11]] = { convergent }
4076 ; AMDGPU-DISABLED: attributes #[[ATTR0]] = { convergent noinline norecurse nounwind "frame-pointer"="none" "kernel" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4077 ; AMDGPU-DISABLED: attributes #[[ATTR1]] = { convergent noinline nounwind "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4078 ; AMDGPU-DISABLED: attributes #[[ATTR2:[0-9]+]] = { convergent "frame-pointer"="none" "llvm.assume"="omp_no_openmp" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4079 ; AMDGPU-DISABLED: attributes #[[ATTR3]] = { nounwind }
4080 ; AMDGPU-DISABLED: attributes #[[ATTR4:[0-9]+]] = { convergent "frame-pointer"="none" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4081 ; AMDGPU-DISABLED: attributes #[[ATTR5:[0-9]+]] = { alwaysinline }
4082 ; AMDGPU-DISABLED: attributes #[[ATTR6]] = { noinline nounwind "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4083 ; AMDGPU-DISABLED: attributes #[[ATTR7:[0-9]+]] = { convergent nounwind willreturn memory(read) "frame-pointer"="none" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4084 ; AMDGPU-DISABLED: attributes #[[ATTR8:[0-9]+]] = { convergent nounwind "frame-pointer"="none" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4085 ; AMDGPU-DISABLED: attributes #[[ATTR9]] = { convergent nounwind }
4086 ; AMDGPU-DISABLED: attributes #[[ATTR10]] = { convergent "llvm.assume"="omp_no_openmp" }
4087 ; AMDGPU-DISABLED: attributes #[[ATTR11]] = { convergent }
4089 ; NVPTX-DISABLED: attributes #[[ATTR0]] = { convergent noinline norecurse nounwind "frame-pointer"="none" "kernel" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4090 ; NVPTX-DISABLED: attributes #[[ATTR1]] = { convergent noinline nounwind "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4091 ; NVPTX-DISABLED: attributes #[[ATTR2:[0-9]+]] = { convergent "frame-pointer"="none" "llvm.assume"="omp_no_openmp" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4092 ; NVPTX-DISABLED: attributes #[[ATTR3]] = { nounwind }
4093 ; NVPTX-DISABLED: attributes #[[ATTR4:[0-9]+]] = { convergent "frame-pointer"="none" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4094 ; NVPTX-DISABLED: attributes #[[ATTR5:[0-9]+]] = { alwaysinline }
4095 ; NVPTX-DISABLED: attributes #[[ATTR6]] = { noinline nounwind "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4096 ; NVPTX-DISABLED: attributes #[[ATTR7:[0-9]+]] = { convergent nounwind willreturn memory(read) "frame-pointer"="none" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4097 ; NVPTX-DISABLED: attributes #[[ATTR8:[0-9]+]] = { convergent nounwind "frame-pointer"="none" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
4098 ; NVPTX-DISABLED: attributes #[[ATTR9]] = { convergent nounwind }
4099 ; NVPTX-DISABLED: attributes #[[ATTR10]] = { convergent "llvm.assume"="omp_no_openmp" }
4100 ; NVPTX-DISABLED: attributes #[[ATTR11]] = { convergent }
4102 ; AMDGPU: [[META0:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_interprocedural", i32 39, i32 2}
4103 ; AMDGPU: [[META1:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_no_openmp_attr", i32 66, i32 4}
4104 ; AMDGPU: [[META2:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"no_state_machine_needed", i32 14, i32 0}
4105 ; AMDGPU: [[META3:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_with_fallback", i32 55, i32 3}
4106 ; AMDGPU: [[META4:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_pure", i32 77, i32 5}
4107 ; AMDGPU: [[META5:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_interprocedural_nested_recursive", i32 92, i32 6}
4108 ; AMDGPU: [[META6:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"no_state_machine_weak_callee", i32 112, i32 7}
4109 ; AMDGPU: [[META7:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine", i32 22, i32 1}
4110 ; AMDGPU: [[META8:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_no_state_machine_needed_l14, !"kernel", i32 1}
4111 ; AMDGPU: [[META9:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_l22, !"kernel", i32 1}
4112 ; AMDGPU: [[META10:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39, !"kernel", i32 1}
4113 ; AMDGPU: [[META11:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55, !"kernel", i32 1}
4114 ; AMDGPU: [[META12:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66, !"kernel", i32 1}
4115 ; AMDGPU: [[META13:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_pure_l77, !"kernel", i32 1}
4116 ; AMDGPU: [[META14:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92, !"kernel", i32 1}
4117 ; AMDGPU: [[META15:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112, !"kernel", i32 1}
4118 ; AMDGPU: [[META16:![0-9]+]] = !{i32 1, !"wchar_size", i32 4}
4119 ; AMDGPU: [[META17:![0-9]+]] = !{i32 7, !"openmp", i32 50}
4120 ; AMDGPU: [[META18:![0-9]+]] = !{i32 7, !"openmp-device", i32 50}
4122 ; NVPTX: [[META0:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_interprocedural", i32 39, i32 2}
4123 ; NVPTX: [[META1:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_no_openmp_attr", i32 66, i32 4}
4124 ; NVPTX: [[META2:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"no_state_machine_needed", i32 14, i32 0}
4125 ; NVPTX: [[META3:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_with_fallback", i32 55, i32 3}
4126 ; NVPTX: [[META4:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_pure", i32 77, i32 5}
4127 ; NVPTX: [[META5:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_interprocedural_nested_recursive", i32 92, i32 6}
4128 ; NVPTX: [[META6:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"no_state_machine_weak_callee", i32 112, i32 7}
4129 ; NVPTX: [[META7:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine", i32 22, i32 1}
4130 ; NVPTX: [[META8:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_no_state_machine_needed_l14, !"kernel", i32 1}
4131 ; NVPTX: [[META9:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_l22, !"kernel", i32 1}
4132 ; NVPTX: [[META10:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39, !"kernel", i32 1}
4133 ; NVPTX: [[META11:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55, !"kernel", i32 1}
4134 ; NVPTX: [[META12:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66, !"kernel", i32 1}
4135 ; NVPTX: [[META13:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_pure_l77, !"kernel", i32 1}
4136 ; NVPTX: [[META14:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92, !"kernel", i32 1}
4137 ; NVPTX: [[META15:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112, !"kernel", i32 1}
4138 ; NVPTX: [[META16:![0-9]+]] = !{i32 1, !"wchar_size", i32 4}
4139 ; NVPTX: [[META17:![0-9]+]] = !{i32 7, !"openmp", i32 50}
4140 ; NVPTX: [[META18:![0-9]+]] = !{i32 7, !"openmp-device", i32 50}
4142 ; AMDGPU-DISABLED: [[META0:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_interprocedural", i32 39, i32 2}
4143 ; AMDGPU-DISABLED: [[META1:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_no_openmp_attr", i32 66, i32 4}
4144 ; AMDGPU-DISABLED: [[META2:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"no_state_machine_needed", i32 14, i32 0}
4145 ; AMDGPU-DISABLED: [[META3:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_with_fallback", i32 55, i32 3}
4146 ; AMDGPU-DISABLED: [[META4:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_pure", i32 77, i32 5}
4147 ; AMDGPU-DISABLED: [[META5:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_interprocedural_nested_recursive", i32 92, i32 6}
4148 ; AMDGPU-DISABLED: [[META6:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"no_state_machine_weak_callee", i32 112, i32 7}
4149 ; AMDGPU-DISABLED: [[META7:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine", i32 22, i32 1}
4150 ; AMDGPU-DISABLED: [[META8:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_no_state_machine_needed_l14, !"kernel", i32 1}
4151 ; AMDGPU-DISABLED: [[META9:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_l22, !"kernel", i32 1}
4152 ; AMDGPU-DISABLED: [[META10:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39, !"kernel", i32 1}
4153 ; AMDGPU-DISABLED: [[META11:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55, !"kernel", i32 1}
4154 ; AMDGPU-DISABLED: [[META12:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66, !"kernel", i32 1}
4155 ; AMDGPU-DISABLED: [[META13:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_pure_l77, !"kernel", i32 1}
4156 ; AMDGPU-DISABLED: [[META14:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92, !"kernel", i32 1}
4157 ; AMDGPU-DISABLED: [[META15:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112, !"kernel", i32 1}
4158 ; AMDGPU-DISABLED: [[META16:![0-9]+]] = !{i32 1, !"wchar_size", i32 4}
4159 ; AMDGPU-DISABLED: [[META17:![0-9]+]] = !{i32 7, !"openmp", i32 50}
4160 ; AMDGPU-DISABLED: [[META18:![0-9]+]] = !{i32 7, !"openmp-device", i32 50}
4162 ; NVPTX-DISABLED: [[META0:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_interprocedural", i32 39, i32 2}
4163 ; NVPTX-DISABLED: [[META1:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_no_openmp_attr", i32 66, i32 4}
4164 ; NVPTX-DISABLED: [[META2:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"no_state_machine_needed", i32 14, i32 0}
4165 ; NVPTX-DISABLED: [[META3:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_with_fallback", i32 55, i32 3}
4166 ; NVPTX-DISABLED: [[META4:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_pure", i32 77, i32 5}
4167 ; NVPTX-DISABLED: [[META5:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_interprocedural_nested_recursive", i32 92, i32 6}
4168 ; NVPTX-DISABLED: [[META6:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"no_state_machine_weak_callee", i32 112, i32 7}
4169 ; NVPTX-DISABLED: [[META7:![0-9]+]] = !{i32 0, i32 20, i32 171331627, !"simple_state_machine", i32 22, i32 1}
4170 ; NVPTX-DISABLED: [[META8:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_no_state_machine_needed_l14, !"kernel", i32 1}
4171 ; NVPTX-DISABLED: [[META9:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_l22, !"kernel", i32 1}
4172 ; NVPTX-DISABLED: [[META10:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39, !"kernel", i32 1}
4173 ; NVPTX-DISABLED: [[META11:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55, !"kernel", i32 1}
4174 ; NVPTX-DISABLED: [[META12:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66, !"kernel", i32 1}
4175 ; NVPTX-DISABLED: [[META13:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_pure_l77, !"kernel", i32 1}
4176 ; NVPTX-DISABLED: [[META14:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92, !"kernel", i32 1}
4177 ; NVPTX-DISABLED: [[META15:![0-9]+]] = !{ptr @__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112, !"kernel", i32 1}
4178 ; NVPTX-DISABLED: [[META16:![0-9]+]] = !{i32 1, !"wchar_size", i32 4}
4179 ; NVPTX-DISABLED: [[META17:![0-9]+]] = !{i32 7, !"openmp", i32 50}
4180 ; NVPTX-DISABLED: [[META18:![0-9]+]] = !{i32 7, !"openmp-device", i32 50}