; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --check-attributes --include-generated-funcs
; RUN: opt --mtriple=amdgcn-amd-amdhsa --data-layout=A5 -S -passes=openmp-opt < %s | FileCheck %s --check-prefixes=AMDGPU
; RUN: opt --mtriple=nvptx64-- -S -passes=openmp-opt < %s | FileCheck %s --check-prefixes=NVPTX
; RUN: opt --mtriple=amdgcn-amd-amdhsa --data-layout=A5 -openmp-opt-disable-state-machine-rewrite -S -passes=openmp-opt < %s | FileCheck %s --check-prefixes=AMDGPU-DISABLED
; RUN: opt --mtriple=nvptx64-- -openmp-opt-disable-state-machine-rewrite -S -passes=openmp-opt < %s | FileCheck %s --check-prefixes=NVPTX-DISABLED
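; Check that custom state machines are built for the generic-mode kernels
; below: openmp-opt replaces the runtime's generic worker loop with one
; specialized to the parallel regions it proved reachable, comparing the
; work function against the known wrapper functions directly. The
; *-DISABLED prefixes run the same input with
; -openmp-opt-disable-state-machine-rewrite and expect the original kernels.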

;; void p0(void);
;; void p1(void);
;; int unknown(void);
;; void unknown_pure(void) __attribute__((pure));
;; void unknown_no_openmp(void) __attribute__((assume("omp_no_openmp")));
;;
;; int G;
;; void no_parallel_region_in_here(void) {
;; #pragma omp single
;;   G = 0;
;; }
;;
;; void no_state_machine_needed() {
;; #pragma omp target teams
;;   {
;;     no_parallel_region_in_here();
;;     unknown_no_openmp();
;;   }
;; }
;;
;; void simple_state_machine() {
;; #pragma omp target teams
;;   {
;;     unknown_no_openmp();
;; #pragma omp parallel
;;     { p0(); }
;;     no_parallel_region_in_here();
;; #pragma omp parallel
;;     { p1(); }
;;   }
;; }
;;
;; void simple_state_machine_interprocedural_after(void);
;; void simple_state_machine_interprocedural_before(void) {
;; #pragma omp parallel
;;   { p0(); }
;; }
;; void simple_state_machine_interprocedural() {
;; #pragma omp target teams
;;   {
;;     unknown_no_openmp();
;;     simple_state_machine_interprocedural_before();
;;     no_parallel_region_in_here();
;; #pragma omp parallel
;;     { p1(); }
;;     simple_state_machine_interprocedural_after();
;;   }
;; }
;; void simple_state_machine_interprocedural_after(void) {
;; #pragma omp parallel
;;   { p0(); }
;; }
;;
;; void simple_state_machine_with_fallback() {
;; #pragma omp target teams
;;   {
;; #pragma omp parallel
;;     { p0(); }
;;     unknown();
;; #pragma omp parallel
;;     { p1(); }
;;   }
;; }
;;
;; void simple_state_machine_no_openmp_attr() {
;; #pragma omp target teams
;;   {
;; #pragma omp parallel
;;     { p0(); }
;;     unknown_no_openmp();
;; #pragma omp parallel
;;     { p1(); }
;;   }
;; }
;;
;; void simple_state_machine_pure() {
;; #pragma omp target teams
;;   {
;;     unknown_no_openmp();
;; #pragma omp parallel
;;     { p0(); }
;;     unknown_pure();
;; #pragma omp parallel
;;     { p1(); }
;;   }
;; }
;;
;; int omp_get_thread_num();
;; void simple_state_machine_interprocedural_nested_recursive_after(int);
;; void simple_state_machine_interprocedural_nested_recursive_after_after(void);
;; void simple_state_machine_interprocedural_nested_recursive() {
;; #pragma omp target teams
;;   {
;;     simple_state_machine_interprocedural_nested_recursive_after(
;;         omp_get_thread_num());
;;   }
;; }
;;
;; void simple_state_machine_interprocedural_nested_recursive_after(int a) {
;;   if (a == 0)
;;     return;
;;   simple_state_machine_interprocedural_nested_recursive_after(a - 1);
;;   simple_state_machine_interprocedural_nested_recursive_after_after();
;; }
;; void simple_state_machine_interprocedural_nested_recursive_after_after(void) {
;; #pragma omp parallel
;;   { p0(); }
;; }
;;
;; __attribute__((weak)) void weak_callee_empty(void) {}
;; void no_state_machine_weak_callee() {
;; #pragma omp target teams
;;   { weak_callee_empty(); }
;; }
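; Each `target teams` region above becomes one __omp_offloading_*_l<line>
; kernel below; each `parallel` region becomes an __omp_outlined__N function
; plus an __omp_outlined__N_wrapper that workers invoke from the state machine.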

%struct.ident_t = type { i32, i32, i32, i32, i8* }

@0 = private unnamed_addr constant [23 x i8] c";unknown;unknown;0;0;;\00", align 1
@1 = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 0, i8* getelementptr inbounds ([23 x i8], [23 x i8]* @0, i32 0, i32 0) }, align 8
@__omp_offloading_14_a36502b_no_state_machine_needed_l14_exec_mode = weak constant i8 1
@__omp_offloading_14_a36502b_simple_state_machine_l22_exec_mode = weak constant i8 1
@__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39_exec_mode = weak constant i8 1
@__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55_exec_mode = weak constant i8 1
@__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66_exec_mode = weak constant i8 1
@__omp_offloading_14_a36502b_simple_state_machine_pure_l77_exec_mode = weak constant i8 1
@__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92_exec_mode = weak constant i8 1
@__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112_exec_mode = weak constant i8 1
@2 = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 2, i32 0, i8* getelementptr inbounds ([23 x i8], [23 x i8]* @0, i32 0, i32 0) }, align 8
@G = external global i32, align 4
@3 = private unnamed_addr constant %struct.ident_t { i32 0, i32 322, i32 2, i32 0, i8* getelementptr inbounds ([23 x i8], [23 x i8]* @0, i32 0, i32 0) }, align 8
@llvm.compiler.used = appending global [8 x i8*] [i8* @__omp_offloading_14_a36502b_no_state_machine_needed_l14_exec_mode, i8* @__omp_offloading_14_a36502b_simple_state_machine_l22_exec_mode, i8* @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39_exec_mode, i8* @__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55_exec_mode, i8* @__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66_exec_mode, i8* @__omp_offloading_14_a36502b_simple_state_machine_pure_l77_exec_mode, i8* @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92_exec_mode, i8* @__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112_exec_mode], section "llvm.metadata"
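; An exec_mode of 1 marks every kernel here as generic-mode (non-SPMD); only
; generic-mode kernels carry a worker state machine for the rewrite to act on.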

define weak void @__omp_offloading_14_a36502b_no_state_machine_needed_l14() #0 {
entry:
  %.zero.addr = alloca i32, align 4
  %.threadid_temp. = alloca i32, align 4
  store i32 0, i32* %.zero.addr, align 4
  %0 = call i32 @__kmpc_target_init(%struct.ident_t* @1, i8 1, i1 true, i1 true)
  %exec_user_code = icmp eq i32 %0, -1
  br i1 %exec_user_code, label %user_code.entry, label %worker.exit

user_code.entry:                                  ; preds = %entry
  %1 = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1)
  store i32 %1, i32* %.threadid_temp., align 4
  call void @__omp_outlined__(i32* %.threadid_temp., i32* %.zero.addr) #3
  call void @__kmpc_target_deinit(%struct.ident_t* @1, i8 1, i1 true)
  ret void

worker.exit:                                      ; preds = %entry
  ret void
}
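; No parallel region is reachable from this kernel, so the expected rewrite
; is simply __kmpc_target_init with UseGenericStateMachine=false (the
; `i1 false` in the check lines below) rather than a custom state machine.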

declare i32 @__kmpc_target_init(%struct.ident_t*, i8, i1, i1)

define internal void @__omp_outlined__(i32* noalias %.global_tid., i32* noalias %.bound_tid.) #0 {
entry:
  %.global_tid..addr = alloca i32*, align 8
  %.bound_tid..addr = alloca i32*, align 8
  store i32* %.global_tid., i32** %.global_tid..addr, align 8
  store i32* %.bound_tid., i32** %.bound_tid..addr, align 8
  call void @no_parallel_region_in_here() #7
  call void @unknown_no_openmp() #8
  ret void
}

define hidden void @no_parallel_region_in_here() #1 {
entry:
  %0 = call i32 @__kmpc_global_thread_num(%struct.ident_t* @2)
  %1 = call i32 @__kmpc_single(%struct.ident_t* @2, i32 %0)
  %2 = icmp ne i32 %1, 0
  br i1 %2, label %omp_if.then, label %omp_if.end

omp_if.then:                                      ; preds = %entry
  store i32 0, i32* @G, align 4
  call void @__kmpc_end_single(%struct.ident_t* @2, i32 %0)
  br label %omp_if.end

omp_if.end:                                       ; preds = %omp_if.then, %entry
  call void @__kmpc_barrier(%struct.ident_t* @3, i32 %0)
  ret void
}
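; The `#pragma omp single` above lowers to a __kmpc_single/__kmpc_end_single
; guard around the store to @G plus an implicit barrier; no __kmpc_parallel_51
; call is reachable from here, so this callee cannot force a state machine on
; its callers.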

declare void @unknown_no_openmp() #2

declare i32 @__kmpc_global_thread_num(%struct.ident_t*) #3

declare void @__kmpc_target_deinit(%struct.ident_t*, i8, i1)

define weak void @__omp_offloading_14_a36502b_simple_state_machine_l22() #0 {
entry:
  %.zero.addr = alloca i32, align 4
  %.threadid_temp. = alloca i32, align 4
  store i32 0, i32* %.zero.addr, align 4
  %0 = call i32 @__kmpc_target_init(%struct.ident_t* @1, i8 1, i1 true, i1 true)
  %exec_user_code = icmp eq i32 %0, -1
  br i1 %exec_user_code, label %user_code.entry, label %worker.exit

user_code.entry:                                  ; preds = %entry
  %1 = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1)
  store i32 %1, i32* %.threadid_temp., align 4
  call void @__omp_outlined__1(i32* %.threadid_temp., i32* %.zero.addr) #3
  call void @__kmpc_target_deinit(%struct.ident_t* @1, i8 1, i1 true)
  ret void

worker.exit:                                      ; preds = %entry
  ret void
}

define internal void @__omp_outlined__1(i32* noalias %.global_tid., i32* noalias %.bound_tid.) #0 {
entry:
  %.global_tid..addr = alloca i32*, align 8
  %.bound_tid..addr = alloca i32*, align 8
  %captured_vars_addrs = alloca [0 x i8*], align 8
  %captured_vars_addrs1 = alloca [0 x i8*], align 8
  store i32* %.global_tid., i32** %.global_tid..addr, align 8
  store i32* %.bound_tid., i32** %.bound_tid..addr, align 8
  call void @unknown_no_openmp() #8
  %0 = load i32*, i32** %.global_tid..addr, align 8
  %1 = load i32, i32* %0, align 4
  %2 = bitcast [0 x i8*]* %captured_vars_addrs to i8**
  call void @__kmpc_parallel_51(%struct.ident_t* @1, i32 %1, i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__2 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__2_wrapper to i8*), i8** %2, i64 0)
  call void @no_parallel_region_in_here() #7
  %3 = bitcast [0 x i8*]* %captured_vars_addrs1 to i8**
  call void @__kmpc_parallel_51(%struct.ident_t* @1, i32 %1, i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__3 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__3_wrapper to i8*), i8** %3, i64 0)
  ret void
}

define internal void @__omp_outlined__2(i32* noalias %.global_tid., i32* noalias %.bound_tid.) #0 {
entry:
  %.global_tid..addr = alloca i32*, align 8
  %.bound_tid..addr = alloca i32*, align 8
  store i32* %.global_tid., i32** %.global_tid..addr, align 8
  store i32* %.bound_tid., i32** %.bound_tid..addr, align 8
  call void @p0() #7
  ret void
}

declare void @p0() #4

define internal void @__omp_outlined__2_wrapper(i16 zeroext %0, i32 %1) #0 {
entry:
  %.addr = alloca i16, align 2
  %.addr1 = alloca i32, align 4
  %.zero.addr = alloca i32, align 4
  %global_args = alloca i8**, align 8
  store i32 0, i32* %.zero.addr, align 4
  store i16 %0, i16* %.addr, align 2
  store i32 %1, i32* %.addr1, align 4
  call void @__kmpc_get_shared_variables(i8*** %global_args)
  call void @__omp_outlined__2(i32* %.addr1, i32* %.zero.addr) #3
  ret void
}

declare void @__kmpc_get_shared_variables(i8***)

declare void @__kmpc_parallel_51(%struct.ident_t*, i32, i32, i32, i32, i8*, i8*, i8**, i64)
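; The arguments to __kmpc_parallel_51 are, in order: source location, global
; thread id, if-clause condition, num_threads, proc_bind, the outlined
; parallel region, its wrapper for workers, the captured-variable array, and
; the number of captured variables. The state machine rewrite keys on the
; wrapper argument.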

define internal void @__omp_outlined__3(i32* noalias %.global_tid., i32* noalias %.bound_tid.) #0 {
entry:
  %.global_tid..addr = alloca i32*, align 8
  %.bound_tid..addr = alloca i32*, align 8
  store i32* %.global_tid., i32** %.global_tid..addr, align 8
  store i32* %.bound_tid., i32** %.bound_tid..addr, align 8
  call void @p1() #7
  ret void
}

declare void @p1() #4

define internal void @__omp_outlined__3_wrapper(i16 zeroext %0, i32 %1) #0 {
entry:
  %.addr = alloca i16, align 2
  %.addr1 = alloca i32, align 4
  %.zero.addr = alloca i32, align 4
  %global_args = alloca i8**, align 8
  store i32 0, i32* %.zero.addr, align 4
  store i16 %0, i16* %.addr, align 2
  store i32 %1, i32* %.addr1, align 4
  call void @__kmpc_get_shared_variables(i8*** %global_args)
  call void @__omp_outlined__3(i32* %.addr1, i32* %.zero.addr) #3
  ret void
}

define weak void @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39() #0 {
entry:
  %.zero.addr = alloca i32, align 4
  %.threadid_temp. = alloca i32, align 4
  store i32 0, i32* %.zero.addr, align 4
  %0 = call i32 @__kmpc_target_init(%struct.ident_t* @1, i8 1, i1 true, i1 true)
  %exec_user_code = icmp eq i32 %0, -1
  br i1 %exec_user_code, label %user_code.entry, label %worker.exit

user_code.entry:                                  ; preds = %entry
  %1 = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1)
  store i32 %1, i32* %.threadid_temp., align 4
  call void @__omp_outlined__4(i32* %.threadid_temp., i32* %.zero.addr) #3
  call void @__kmpc_target_deinit(%struct.ident_t* @1, i8 1, i1 true)
  ret void

worker.exit:                                      ; preds = %entry
  ret void
}

define internal void @__omp_outlined__4(i32* noalias %.global_tid., i32* noalias %.bound_tid.) #0 {
entry:
  %.global_tid..addr = alloca i32*, align 8
  %.bound_tid..addr = alloca i32*, align 8
  %captured_vars_addrs = alloca [0 x i8*], align 8
  store i32* %.global_tid., i32** %.global_tid..addr, align 8
  store i32* %.bound_tid., i32** %.bound_tid..addr, align 8
  call void @unknown_no_openmp() #8
  call void @simple_state_machine_interprocedural_before() #7
  call void @no_parallel_region_in_here() #7
  %0 = load i32*, i32** %.global_tid..addr, align 8
  %1 = load i32, i32* %0, align 4
  %2 = bitcast [0 x i8*]* %captured_vars_addrs to i8**
  call void @__kmpc_parallel_51(%struct.ident_t* @1, i32 %1, i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__5 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__5_wrapper to i8*), i8** %2, i64 0)
  call void @simple_state_machine_interprocedural_after() #7
  ret void
}

define hidden void @simple_state_machine_interprocedural_before() #1 {
entry:
  %captured_vars_addrs = alloca [0 x i8*], align 8
  %0 = call i32 @__kmpc_global_thread_num(%struct.ident_t* @2)
  %1 = bitcast [0 x i8*]* %captured_vars_addrs to i8**
  call void @__kmpc_parallel_51(%struct.ident_t* @2, i32 %0, i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__17 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__17_wrapper to i8*), i8** %1, i64 0)
  ret void
}

define internal void @__omp_outlined__5(i32* noalias %.global_tid., i32* noalias %.bound_tid.) #0 {
entry:
  %.global_tid..addr = alloca i32*, align 8
  %.bound_tid..addr = alloca i32*, align 8
  store i32* %.global_tid., i32** %.global_tid..addr, align 8
  store i32* %.bound_tid., i32** %.bound_tid..addr, align 8
  call void @p1() #7
  ret void
}

define internal void @__omp_outlined__5_wrapper(i16 zeroext %0, i32 %1) #0 {
entry:
  %.addr = alloca i16, align 2
  %.addr1 = alloca i32, align 4
  %.zero.addr = alloca i32, align 4
  %global_args = alloca i8**, align 8
  store i32 0, i32* %.zero.addr, align 4
  store i16 %0, i16* %.addr, align 2
  store i32 %1, i32* %.addr1, align 4
  call void @__kmpc_get_shared_variables(i8*** %global_args)
  call void @__omp_outlined__5(i32* %.addr1, i32* %.zero.addr) #3
  ret void
}

define hidden void @simple_state_machine_interprocedural_after() #1 {
entry:
  %captured_vars_addrs = alloca [0 x i8*], align 8
  %0 = call i32 @__kmpc_global_thread_num(%struct.ident_t* @2)
  %1 = bitcast [0 x i8*]* %captured_vars_addrs to i8**
  call void @__kmpc_parallel_51(%struct.ident_t* @2, i32 %0, i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__18 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__18_wrapper to i8*), i8** %1, i64 0)
  ret void
}

define weak void @__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55() #0 {
entry:
  %.zero.addr = alloca i32, align 4
  %.threadid_temp. = alloca i32, align 4
  store i32 0, i32* %.zero.addr, align 4
  %0 = call i32 @__kmpc_target_init(%struct.ident_t* @1, i8 1, i1 true, i1 true)
  %exec_user_code = icmp eq i32 %0, -1
  br i1 %exec_user_code, label %user_code.entry, label %worker.exit

user_code.entry:                                  ; preds = %entry
  %1 = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1)
  store i32 %1, i32* %.threadid_temp., align 4
  call void @__omp_outlined__6(i32* %.threadid_temp., i32* %.zero.addr) #3
  call void @__kmpc_target_deinit(%struct.ident_t* @1, i8 1, i1 true)
  ret void

worker.exit:                                      ; preds = %entry
  ret void
}

define internal void @__omp_outlined__6(i32* noalias %.global_tid., i32* noalias %.bound_tid.) #0 {
entry:
  %.global_tid..addr = alloca i32*, align 8
  %.bound_tid..addr = alloca i32*, align 8
  %captured_vars_addrs = alloca [0 x i8*], align 8
  %captured_vars_addrs1 = alloca [0 x i8*], align 8
  store i32* %.global_tid., i32** %.global_tid..addr, align 8
  store i32* %.bound_tid., i32** %.bound_tid..addr, align 8
  %0 = load i32*, i32** %.global_tid..addr, align 8
  %1 = load i32, i32* %0, align 4
  %2 = bitcast [0 x i8*]* %captured_vars_addrs to i8**
  call void @__kmpc_parallel_51(%struct.ident_t* @1, i32 %1, i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__7 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__7_wrapper to i8*), i8** %2, i64 0)
  %call = call i32 @unknown() #7
  %3 = bitcast [0 x i8*]* %captured_vars_addrs1 to i8**
  call void @__kmpc_parallel_51(%struct.ident_t* @1, i32 %1, i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__8 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__8_wrapper to i8*), i8** %3, i64 0)
  ret void
}

define internal void @__omp_outlined__7(i32* noalias %.global_tid., i32* noalias %.bound_tid.) #0 {
entry:
  %.global_tid..addr = alloca i32*, align 8
  %.bound_tid..addr = alloca i32*, align 8
  store i32* %.global_tid., i32** %.global_tid..addr, align 8
  store i32* %.bound_tid., i32** %.bound_tid..addr, align 8
  call void @p0() #7
  ret void
}

define internal void @__omp_outlined__7_wrapper(i16 zeroext %0, i32 %1) #0 {
entry:
  %.addr = alloca i16, align 2
  %.addr1 = alloca i32, align 4
  %.zero.addr = alloca i32, align 4
  %global_args = alloca i8**, align 8
  store i32 0, i32* %.zero.addr, align 4
  store i16 %0, i16* %.addr, align 2
  store i32 %1, i32* %.addr1, align 4
  call void @__kmpc_get_shared_variables(i8*** %global_args)
  call void @__omp_outlined__7(i32* %.addr1, i32* %.zero.addr) #3
  ret void
}

declare i32 @unknown() #4

define internal void @__omp_outlined__8(i32* noalias %.global_tid., i32* noalias %.bound_tid.) #0 {
entry:
  %.global_tid..addr = alloca i32*, align 8
  %.bound_tid..addr = alloca i32*, align 8
  store i32* %.global_tid., i32** %.global_tid..addr, align 8
  store i32* %.bound_tid., i32** %.bound_tid..addr, align 8
  call void @p1() #7
  ret void
}

define internal void @__omp_outlined__8_wrapper(i16 zeroext %0, i32 %1) #0 {
entry:
  %.addr = alloca i16, align 2
  %.addr1 = alloca i32, align 4
  %.zero.addr = alloca i32, align 4
  %global_args = alloca i8**, align 8
  store i32 0, i32* %.zero.addr, align 4
  store i16 %0, i16* %.addr, align 2
  store i32 %1, i32* %.addr1, align 4
  call void @__kmpc_get_shared_variables(i8*** %global_args)
  call void @__omp_outlined__8(i32* %.addr1, i32* %.zero.addr) #3
  ret void
}

define weak void @__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66() #0 {
entry:
  %.zero.addr = alloca i32, align 4
  %.threadid_temp. = alloca i32, align 4
  store i32 0, i32* %.zero.addr, align 4
  %0 = call i32 @__kmpc_target_init(%struct.ident_t* @1, i8 1, i1 true, i1 true)
  %exec_user_code = icmp eq i32 %0, -1
  br i1 %exec_user_code, label %user_code.entry, label %worker.exit

user_code.entry:                                  ; preds = %entry
  %1 = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1)
  store i32 %1, i32* %.threadid_temp., align 4
  call void @__omp_outlined__9(i32* %.threadid_temp., i32* %.zero.addr) #3
  call void @__kmpc_target_deinit(%struct.ident_t* @1, i8 1, i1 true)
  ret void

worker.exit:                                      ; preds = %entry
  ret void
}

define internal void @__omp_outlined__9(i32* noalias %.global_tid., i32* noalias %.bound_tid.) #0 {
entry:
  %.global_tid..addr = alloca i32*, align 8
  %.bound_tid..addr = alloca i32*, align 8
  %captured_vars_addrs = alloca [0 x i8*], align 8
  %captured_vars_addrs1 = alloca [0 x i8*], align 8
  store i32* %.global_tid., i32** %.global_tid..addr, align 8
  store i32* %.bound_tid., i32** %.bound_tid..addr, align 8
  %0 = load i32*, i32** %.global_tid..addr, align 8
  %1 = load i32, i32* %0, align 4
  %2 = bitcast [0 x i8*]* %captured_vars_addrs to i8**
  call void @__kmpc_parallel_51(%struct.ident_t* @1, i32 %1, i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__10 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__10_wrapper to i8*), i8** %2, i64 0)
  call void @unknown_no_openmp() #8
  %3 = bitcast [0 x i8*]* %captured_vars_addrs1 to i8**
  call void @__kmpc_parallel_51(%struct.ident_t* @1, i32 %1, i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__11 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__11_wrapper to i8*), i8** %3, i64 0)
  ret void
}

define internal void @__omp_outlined__10(i32* noalias %.global_tid., i32* noalias %.bound_tid.) #0 {
entry:
  %.global_tid..addr = alloca i32*, align 8
  %.bound_tid..addr = alloca i32*, align 8
  store i32* %.global_tid., i32** %.global_tid..addr, align 8
  store i32* %.bound_tid., i32** %.bound_tid..addr, align 8
  call void @p0() #7
  ret void
}

define internal void @__omp_outlined__10_wrapper(i16 zeroext %0, i32 %1) #0 {
entry:
  %.addr = alloca i16, align 2
  %.addr1 = alloca i32, align 4
  %.zero.addr = alloca i32, align 4
  %global_args = alloca i8**, align 8
  store i32 0, i32* %.zero.addr, align 4
  store i16 %0, i16* %.addr, align 2
  store i32 %1, i32* %.addr1, align 4
  call void @__kmpc_get_shared_variables(i8*** %global_args)
  call void @__omp_outlined__10(i32* %.addr1, i32* %.zero.addr) #3
  ret void
}

define internal void @__omp_outlined__11(i32* noalias %.global_tid., i32* noalias %.bound_tid.) #0 {
entry:
  %.global_tid..addr = alloca i32*, align 8
  %.bound_tid..addr = alloca i32*, align 8
  store i32* %.global_tid., i32** %.global_tid..addr, align 8
  store i32* %.bound_tid., i32** %.bound_tid..addr, align 8
  call void @p1() #7
  ret void
}

define internal void @__omp_outlined__11_wrapper(i16 zeroext %0, i32 %1) #0 {
entry:
  %.addr = alloca i16, align 2
  %.addr1 = alloca i32, align 4
  %.zero.addr = alloca i32, align 4
  %global_args = alloca i8**, align 8
  store i32 0, i32* %.zero.addr, align 4
  store i16 %0, i16* %.addr, align 2
  store i32 %1, i32* %.addr1, align 4
  call void @__kmpc_get_shared_variables(i8*** %global_args)
  call void @__omp_outlined__11(i32* %.addr1, i32* %.zero.addr) #3
  ret void
}

define weak void @__omp_offloading_14_a36502b_simple_state_machine_pure_l77() #0 {
entry:
  %.zero.addr = alloca i32, align 4
  %.threadid_temp. = alloca i32, align 4
  store i32 0, i32* %.zero.addr, align 4
  %0 = call i32 @__kmpc_target_init(%struct.ident_t* @1, i8 1, i1 true, i1 true)
  %exec_user_code = icmp eq i32 %0, -1
  br i1 %exec_user_code, label %user_code.entry, label %worker.exit

user_code.entry:                                  ; preds = %entry
  %1 = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1)
  store i32 %1, i32* %.threadid_temp., align 4
  call void @__omp_outlined__12(i32* %.threadid_temp., i32* %.zero.addr) #3
  call void @__kmpc_target_deinit(%struct.ident_t* @1, i8 1, i1 true)
  ret void

worker.exit:                                      ; preds = %entry
  ret void
}

define internal void @__omp_outlined__12(i32* noalias %.global_tid., i32* noalias %.bound_tid.) #0 {
entry:
  %.global_tid..addr = alloca i32*, align 8
  %.bound_tid..addr = alloca i32*, align 8
  %captured_vars_addrs = alloca [0 x i8*], align 8
  %captured_vars_addrs1 = alloca [0 x i8*], align 8
  store i32* %.global_tid., i32** %.global_tid..addr, align 8
  store i32* %.bound_tid., i32** %.bound_tid..addr, align 8
  call void @unknown_no_openmp() #8
  %0 = load i32*, i32** %.global_tid..addr, align 8
  %1 = load i32, i32* %0, align 4
  %2 = bitcast [0 x i8*]* %captured_vars_addrs to i8**
  call void @__kmpc_parallel_51(%struct.ident_t* @1, i32 %1, i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__13 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__13_wrapper to i8*), i8** %2, i64 0)
  call void @unknown_pure() #9
  %3 = bitcast [0 x i8*]* %captured_vars_addrs1 to i8**
  call void @__kmpc_parallel_51(%struct.ident_t* @1, i32 %1, i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__14 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__14_wrapper to i8*), i8** %3, i64 0)
  ret void
}

define internal void @__omp_outlined__13(i32* noalias %.global_tid., i32* noalias %.bound_tid.) #0 {
entry:
  %.global_tid..addr = alloca i32*, align 8
  %.bound_tid..addr = alloca i32*, align 8
  store i32* %.global_tid., i32** %.global_tid..addr, align 8
  store i32* %.bound_tid., i32** %.bound_tid..addr, align 8
  call void @p0() #7
  ret void
}

define internal void @__omp_outlined__13_wrapper(i16 zeroext %0, i32 %1) #0 {
entry:
  %.addr = alloca i16, align 2
  %.addr1 = alloca i32, align 4
  %.zero.addr = alloca i32, align 4
  %global_args = alloca i8**, align 8
  store i32 0, i32* %.zero.addr, align 4
  store i16 %0, i16* %.addr, align 2
  store i32 %1, i32* %.addr1, align 4
  call void @__kmpc_get_shared_variables(i8*** %global_args)
  call void @__omp_outlined__13(i32* %.addr1, i32* %.zero.addr) #3
  ret void
}

declare void @unknown_pure() #5

define internal void @__omp_outlined__14(i32* noalias %.global_tid., i32* noalias %.bound_tid.) #0 {
entry:
  %.global_tid..addr = alloca i32*, align 8
  %.bound_tid..addr = alloca i32*, align 8
  store i32* %.global_tid., i32** %.global_tid..addr, align 8
  store i32* %.bound_tid., i32** %.bound_tid..addr, align 8
  call void @p1() #7
  ret void
}

define internal void @__omp_outlined__14_wrapper(i16 zeroext %0, i32 %1) #0 {
entry:
  %.addr = alloca i16, align 2
  %.addr1 = alloca i32, align 4
  %.zero.addr = alloca i32, align 4
  %global_args = alloca i8**, align 8
  store i32 0, i32* %.zero.addr, align 4
  store i16 %0, i16* %.addr, align 2
  store i32 %1, i32* %.addr1, align 4
  call void @__kmpc_get_shared_variables(i8*** %global_args)
  call void @__omp_outlined__14(i32* %.addr1, i32* %.zero.addr) #3
  ret void
}

define weak void @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92() #0 {
entry:
  %.zero.addr = alloca i32, align 4
  %.threadid_temp. = alloca i32, align 4
  store i32 0, i32* %.zero.addr, align 4
  %0 = call i32 @__kmpc_target_init(%struct.ident_t* @1, i8 1, i1 true, i1 true)
  %exec_user_code = icmp eq i32 %0, -1
  br i1 %exec_user_code, label %user_code.entry, label %worker.exit

user_code.entry:                                  ; preds = %entry
  %1 = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1)
  store i32 %1, i32* %.threadid_temp., align 4
  call void @__omp_outlined__15(i32* %.threadid_temp., i32* %.zero.addr) #3
  call void @__kmpc_target_deinit(%struct.ident_t* @1, i8 1, i1 true)
  ret void

worker.exit:                                      ; preds = %entry
  ret void
}

define internal void @__omp_outlined__15(i32* noalias %.global_tid., i32* noalias %.bound_tid.) #0 {
entry:
  %.global_tid..addr = alloca i32*, align 8
  %.bound_tid..addr = alloca i32*, align 8
  store i32* %.global_tid., i32** %.global_tid..addr, align 8
  store i32* %.bound_tid., i32** %.bound_tid..addr, align 8
  %call = call i32 bitcast (i32 (...)* @omp_get_thread_num to i32 ()*)() #7
  call void @simple_state_machine_interprocedural_nested_recursive_after(i32 %call) #7
  ret void
}

define hidden void @simple_state_machine_interprocedural_nested_recursive_after(i32 %a) #1 {
entry:
  %a.addr = alloca i32, align 4
  store i32 %a, i32* %a.addr, align 4
  %0 = load i32, i32* %a.addr, align 4
  %cmp = icmp eq i32 %0, 0
  br i1 %cmp, label %if.then, label %if.end

if.then:                                          ; preds = %entry
  br label %return

if.end:                                           ; preds = %entry
  %1 = load i32, i32* %a.addr, align 4
  %sub = sub nsw i32 %1, 1
  call void @simple_state_machine_interprocedural_nested_recursive_after(i32 %sub) #7
  call void @simple_state_machine_interprocedural_nested_recursive_after_after() #7
  br label %return

return:                                           ; preds = %if.end, %if.then
  ret void
}

declare i32 @omp_get_thread_num(...) #4
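; Because simple_state_machine_interprocedural_nested_recursive_after
; recurses before reaching ..._after_after, the parallel region inside the
; latter is only reachable interprocedurally; the rewrite still has to
; account for it in the kernel's state machine.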

define weak void @__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112() #0 {
entry:
  %.zero.addr = alloca i32, align 4
  %.threadid_temp. = alloca i32, align 4
  store i32 0, i32* %.zero.addr, align 4
  %0 = call i32 @__kmpc_target_init(%struct.ident_t* @1, i8 1, i1 true, i1 true)
  %exec_user_code = icmp eq i32 %0, -1
  br i1 %exec_user_code, label %user_code.entry, label %worker.exit

user_code.entry:                                  ; preds = %entry
  %1 = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1)
  store i32 %1, i32* %.threadid_temp., align 4
  call void @__omp_outlined__16(i32* %.threadid_temp., i32* %.zero.addr) #3
  call void @__kmpc_target_deinit(%struct.ident_t* @1, i8 1, i1 true)
  ret void

worker.exit:                                      ; preds = %entry
  ret void
}

define internal void @__omp_outlined__16(i32* noalias %.global_tid., i32* noalias %.bound_tid.) #0 {
entry:
  %.global_tid..addr = alloca i32*, align 8
  %.bound_tid..addr = alloca i32*, align 8
  store i32* %.global_tid., i32** %.global_tid..addr, align 8
  store i32* %.bound_tid., i32** %.bound_tid..addr, align 8
  call void @weak_callee_empty() #7
  ret void
}

define weak hidden void @weak_callee_empty() #1 {
entry:
  ret void
}
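; Note the weak linkage above: this definition could be replaced at link
; time, so the test exercises how the rewrite treats a callee whose empty
; body is not guaranteed to be final.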

declare i32 @__kmpc_single(%struct.ident_t*, i32) #6

declare void @__kmpc_end_single(%struct.ident_t*, i32) #6

declare void @__kmpc_barrier(%struct.ident_t*, i32) #6

define internal void @__omp_outlined__17(i32* noalias %.global_tid., i32* noalias %.bound_tid.) #0 {
entry:
  %.global_tid..addr = alloca i32*, align 8
  %.bound_tid..addr = alloca i32*, align 8
  store i32* %.global_tid., i32** %.global_tid..addr, align 8
  store i32* %.bound_tid., i32** %.bound_tid..addr, align 8
  call void @p0() #7
  ret void
}

define internal void @__omp_outlined__17_wrapper(i16 zeroext %0, i32 %1) #0 {
entry:
  %.addr = alloca i16, align 2
  %.addr1 = alloca i32, align 4
  %.zero.addr = alloca i32, align 4
  %global_args = alloca i8**, align 8
  store i32 0, i32* %.zero.addr, align 4
  store i16 %0, i16* %.addr, align 2
  store i32 %1, i32* %.addr1, align 4
  call void @__kmpc_get_shared_variables(i8*** %global_args)
  call void @__omp_outlined__17(i32* %.addr1, i32* %.zero.addr) #3
  ret void
}

define internal void @__omp_outlined__18(i32* noalias %.global_tid., i32* noalias %.bound_tid.) #0 {
entry:
  %.global_tid..addr = alloca i32*, align 8
  %.bound_tid..addr = alloca i32*, align 8
  store i32* %.global_tid., i32** %.global_tid..addr, align 8
  store i32* %.bound_tid., i32** %.bound_tid..addr, align 8
  call void @p0() #7
  ret void
}

define internal void @__omp_outlined__18_wrapper(i16 zeroext %0, i32 %1) #0 {
entry:
  %.addr = alloca i16, align 2
  %.addr1 = alloca i32, align 4
  %.zero.addr = alloca i32, align 4
  %global_args = alloca i8**, align 8
  store i32 0, i32* %.zero.addr, align 4
  store i16 %0, i16* %.addr, align 2
  store i32 %1, i32* %.addr1, align 4
  call void @__kmpc_get_shared_variables(i8*** %global_args)
  call void @__omp_outlined__18(i32* %.addr1, i32* %.zero.addr) #3
  ret void
}

define hidden void @simple_state_machine_interprocedural_nested_recursive_after_after() #1 {
entry:
  %captured_vars_addrs = alloca [0 x i8*], align 8
  %0 = call i32 @__kmpc_global_thread_num(%struct.ident_t* @2)
  %1 = bitcast [0 x i8*]* %captured_vars_addrs to i8**
  call void @__kmpc_parallel_51(%struct.ident_t* @2, i32 %0, i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__19 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__19_wrapper to i8*), i8** %1, i64 0)
  ret void
}

define internal void @__omp_outlined__19(i32* noalias %.global_tid., i32* noalias %.bound_tid.) #0 {
entry:
  %.global_tid..addr = alloca i32*, align 8
  %.bound_tid..addr = alloca i32*, align 8
  store i32* %.global_tid., i32** %.global_tid..addr, align 8
  store i32* %.bound_tid., i32** %.bound_tid..addr, align 8
  call void @p0() #7
  ret void
}

define internal void @__omp_outlined__19_wrapper(i16 zeroext %0, i32 %1) #0 {
entry:
  %.addr = alloca i16, align 2
  %.addr1 = alloca i32, align 4
  %.zero.addr = alloca i32, align 4
  %global_args = alloca i8**, align 8
  store i32 0, i32* %.zero.addr, align 4
  store i16 %0, i16* %.addr, align 2
  store i32 %1, i32* %.addr1, align 4
  call void @__kmpc_get_shared_variables(i8*** %global_args)
  call void @__omp_outlined__19(i32* %.addr1, i32* %.zero.addr) #3
  ret void
}

attributes #0 = { convergent noinline norecurse nounwind "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
attributes #1 = { convergent noinline nounwind "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
attributes #2 = { convergent "frame-pointer"="none" "llvm.assume"="omp_no_openmp" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
attributes #3 = { nounwind }
attributes #4 = { convergent "frame-pointer"="none" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
attributes #5 = { convergent nounwind readonly willreturn "frame-pointer"="none" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+ptx32,+sm_20" }
attributes #6 = { convergent nounwind }
attributes #7 = { convergent }
attributes #8 = { convergent "llvm.assume"="omp_no_openmp" }
attributes #9 = { convergent nounwind readonly willreturn }
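; Attributes #2 and #8 carry "llvm.assume"="omp_no_openmp", the IR encoding
; of __attribute__((assume("omp_no_openmp"))): calls so marked are known not
; to reach any OpenMP runtime entry point, so they cannot hide a parallel
; region from the analysis.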

!omp_offload.info = !{!0, !1, !2, !3, !4, !5, !6, !7}
!nvvm.annotations = !{!8, !9, !10, !11, !12, !13, !14, !15}
!llvm.module.flags = !{!16, !17, !18}

!0 = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_interprocedural", i32 39, i32 2}
!1 = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_no_openmp_attr", i32 66, i32 4}
!2 = !{i32 0, i32 20, i32 171331627, !"no_state_machine_needed", i32 14, i32 0}
!3 = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_with_fallback", i32 55, i32 3}
!4 = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_pure", i32 77, i32 5}
!5 = !{i32 0, i32 20, i32 171331627, !"simple_state_machine_interprocedural_nested_recursive", i32 92, i32 6}
!6 = !{i32 0, i32 20, i32 171331627, !"no_state_machine_weak_callee", i32 112, i32 7}
!7 = !{i32 0, i32 20, i32 171331627, !"simple_state_machine", i32 22, i32 1}
!8 = !{void ()* @__omp_offloading_14_a36502b_no_state_machine_needed_l14, !"kernel", i32 1}
!9 = !{void ()* @__omp_offloading_14_a36502b_simple_state_machine_l22, !"kernel", i32 1}
!10 = !{void ()* @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39, !"kernel", i32 1}
!11 = !{void ()* @__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55, !"kernel", i32 1}
!12 = !{void ()* @__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66, !"kernel", i32 1}
!13 = !{void ()* @__omp_offloading_14_a36502b_simple_state_machine_pure_l77, !"kernel", i32 1}
!14 = !{void ()* @__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92, !"kernel", i32 1}
!15 = !{void ()* @__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112, !"kernel", i32 1}
!16 = !{i32 1, !"wchar_size", i32 4}
!17 = !{i32 7, !"openmp", i32 50}
!18 = !{i32 7, !"openmp-device", i32 50}
; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
; AMDGPU-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_no_state_machine_needed_l14
; AMDGPU-SAME: () #[[ATTR0:[0-9]+]] {
; AMDGPU-NEXT:  entry:
; AMDGPU-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; AMDGPU-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
; AMDGPU-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1:[0-9]+]], i8 1, i1 false, i1 true)
; AMDGPU-NEXT:    [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
; AMDGPU-NEXT:    br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; AMDGPU:       user_code.entry:
; AMDGPU-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) #[[ATTR3:[0-9]+]]
; AMDGPU-NEXT:    call void @__omp_outlined__(i32* noalias nocapture noundef nonnull readnone align 4 dereferenceable(4) [[DOTTHREADID_TEMP_]], i32* noalias nocapture noundef nonnull readnone align 4 dereferenceable(4) [[DOTZERO_ADDR]]) #[[ATTR3]]
; AMDGPU-NEXT:    call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
; AMDGPU-NEXT:    ret void
; AMDGPU:       worker.exit:
; AMDGPU-NEXT:    ret void
;
;
; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__
; AMDGPU-SAME: (i32* noalias nocapture nofree nonnull readnone align 4 dereferenceable(4) [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree nonnull readnone align 4 dereferenceable(4) [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; AMDGPU-NEXT:  entry:
; AMDGPU-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; AMDGPU-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; AMDGPU-NEXT:    call void @no_parallel_region_in_here.internalized() #[[ATTR8:[0-9]+]]
; AMDGPU-NEXT:    call void @unknown_no_openmp() #[[ATTR9:[0-9]+]]
; AMDGPU-NEXT:    ret void
;
;
; AMDGPU: Function Attrs: convergent noinline nounwind
; AMDGPU-LABEL: define {{[^@]+}}@no_parallel_region_in_here.internalized
; AMDGPU-SAME: () #[[ATTR1:[0-9]+]] {
; AMDGPU-NEXT:  entry:
; AMDGPU-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* noundef @[[GLOB2:[0-9]+]]) #[[ATTR3]]
; AMDGPU-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_single(%struct.ident_t* noundef @[[GLOB2]], i32 [[TMP0]]) #[[ATTR3]]
; AMDGPU-NEXT:    [[TMP2:%.*]] = icmp ne i32 [[TMP1]], 0
; AMDGPU-NEXT:    br i1 [[TMP2]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
; AMDGPU:       omp_if.then:
; AMDGPU-NEXT:    store i32 0, i32* @G, align 4
; AMDGPU-NEXT:    call void @__kmpc_end_single(%struct.ident_t* noundef @[[GLOB2]], i32 [[TMP0]]) #[[ATTR3]]
; AMDGPU-NEXT:    br label [[OMP_IF_END]]
; AMDGPU:       omp_if.end:
; AMDGPU-NEXT:    call void @__kmpc_barrier(%struct.ident_t* noundef @[[GLOB3:[0-9]+]], i32 [[TMP0]]) #[[ATTR3]]
; AMDGPU-NEXT:    ret void
;
;
; AMDGPU: Function Attrs: convergent noinline nounwind
; AMDGPU-LABEL: define {{[^@]+}}@no_parallel_region_in_here
; AMDGPU-SAME: () #[[ATTR1]] {
; AMDGPU-NEXT:  entry:
; AMDGPU-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
; AMDGPU-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_single(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]])
; AMDGPU-NEXT:    [[TMP2:%.*]] = icmp ne i32 [[TMP1]], 0
; AMDGPU-NEXT:    br i1 [[TMP2]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
; AMDGPU:       omp_if.then:
; AMDGPU-NEXT:    store i32 0, i32* @G, align 4
; AMDGPU-NEXT:    call void @__kmpc_end_single(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]])
; AMDGPU-NEXT:    br label [[OMP_IF_END]]
; AMDGPU:       omp_if.end:
; AMDGPU-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3]], i32 [[TMP0]])
; AMDGPU-NEXT:    ret void
;
;
; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
; AMDGPU-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_l22
; AMDGPU-SAME: () #[[ATTR0]] {
; AMDGPU-NEXT:  entry:
; AMDGPU-NEXT:    [[WORKER_WORK_FN_ADDR:%.*]] = alloca i8*, align 8, addrspace(5)
; AMDGPU-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; AMDGPU-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
; AMDGPU-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 false, i1 true)
; AMDGPU-NEXT:    [[BLOCK_HW_SIZE:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
; AMDGPU-NEXT:    [[WARP_SIZE:%.*]] = call i32 @__kmpc_get_warp_size()
; AMDGPU-NEXT:    [[BLOCK_SIZE:%.*]] = sub i32 [[BLOCK_HW_SIZE]], [[WARP_SIZE]]
; AMDGPU-NEXT:    [[THREAD_IS_MAIN_OR_WORKER:%.*]] = icmp slt i32 [[TMP0]], [[BLOCK_SIZE]]
; AMDGPU-NEXT:    br i1 [[THREAD_IS_MAIN_OR_WORKER]], label [[IS_WORKER_CHECK:%.*]], label [[WORKER_STATE_MACHINE_FINISHED:%.*]]
; AMDGPU:       is_worker_check:
; AMDGPU-NEXT:    [[THREAD_IS_WORKER:%.*]] = icmp ne i32 [[TMP0]], -1
; AMDGPU-NEXT:    br i1 [[THREAD_IS_WORKER]], label [[WORKER_STATE_MACHINE_BEGIN:%.*]], label [[THREAD_USER_CODE_CHECK:%.*]]
; AMDGPU:       worker_state_machine.begin:
; AMDGPU-NEXT:    call void @__kmpc_barrier_simple_generic(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
; AMDGPU-NEXT:    [[WORKER_WORK_FN_ADDR_GENERIC:%.*]] = addrspacecast i8* addrspace(5)* [[WORKER_WORK_FN_ADDR]] to i8**
; AMDGPU-NEXT:    [[WORKER_IS_ACTIVE:%.*]] = call i1 @__kmpc_kernel_parallel(i8** [[WORKER_WORK_FN_ADDR_GENERIC]])
; AMDGPU-NEXT:    [[WORKER_WORK_FN:%.*]] = load i8*, i8** [[WORKER_WORK_FN_ADDR_GENERIC]], align 8
; AMDGPU-NEXT:    [[WORKER_WORK_FN_ADDR_CAST:%.*]] = bitcast i8* [[WORKER_WORK_FN]] to void (i16, i32)*
; AMDGPU-NEXT:    [[WORKER_IS_DONE:%.*]] = icmp eq i8* [[WORKER_WORK_FN]], null
; AMDGPU-NEXT:    br i1 [[WORKER_IS_DONE]], label [[WORKER_STATE_MACHINE_FINISHED]], label [[WORKER_STATE_MACHINE_IS_ACTIVE_CHECK:%.*]]
; AMDGPU:       worker_state_machine.finished:
; AMDGPU-NEXT:    ret void
; AMDGPU:       worker_state_machine.is_active.check:
; AMDGPU-NEXT:    br i1 [[WORKER_IS_ACTIVE]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK:%.*]], label [[WORKER_STATE_MACHINE_DONE_BARRIER:%.*]]
; AMDGPU:       worker_state_machine.parallel_region.check:
; AMDGPU-NEXT:    [[WORKER_CHECK_PARALLEL_REGION:%.*]] = icmp eq void (i16, i32)* [[WORKER_WORK_FN_ADDR_CAST]], bitcast (i8* @__omp_outlined__2_wrapper.ID to void (i16, i32)*)
; AMDGPU-NEXT:    br i1 [[WORKER_CHECK_PARALLEL_REGION]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK1:%.*]]
; AMDGPU:       worker_state_machine.parallel_region.execute:
; AMDGPU-NEXT:    call void @__omp_outlined__2_wrapper(i16 0, i32 [[TMP0]])
; AMDGPU-NEXT:    br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END:%.*]]
; AMDGPU:       worker_state_machine.parallel_region.check1:
; AMDGPU-NEXT:    br i1 true, label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE2:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK3:%.*]]
; AMDGPU:       worker_state_machine.parallel_region.execute2:
; AMDGPU-NEXT:    call void @__omp_outlined__3_wrapper(i16 0, i32 [[TMP0]])
; AMDGPU-NEXT:    br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]]
; AMDGPU:       worker_state_machine.parallel_region.check3:
; AMDGPU-NEXT:    br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]]
; AMDGPU:       worker_state_machine.parallel_region.end:
; AMDGPU-NEXT:    call void @__kmpc_kernel_end_parallel()
; AMDGPU-NEXT:    br label [[WORKER_STATE_MACHINE_DONE_BARRIER]]
; AMDGPU:       worker_state_machine.done.barrier:
; AMDGPU-NEXT:    call void @__kmpc_barrier_simple_generic(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
; AMDGPU-NEXT:    br label [[WORKER_STATE_MACHINE_BEGIN]]
; AMDGPU:       thread.user_code.check:
; AMDGPU-NEXT:    [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
; AMDGPU-NEXT:    br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; AMDGPU:       user_code.entry:
; AMDGPU-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) #[[ATTR3]]
; AMDGPU-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
; AMDGPU-NEXT:    call void @__omp_outlined__1(i32* noalias nocapture noundef nonnull readonly align 4 dereferenceable(4) [[DOTTHREADID_TEMP_]], i32* noalias nocapture noundef nonnull readnone align 4 dereferenceable(4) [[DOTZERO_ADDR]]) #[[ATTR3]]
; AMDGPU-NEXT:    call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
; AMDGPU-NEXT:    ret void
; AMDGPU:       worker.exit:
; AMDGPU-NEXT:    ret void
;
;
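; The block above is the custom state machine: workers wait on
; __kmpc_barrier_simple_generic, obtain their work function through
; __kmpc_kernel_parallel, and compare it directly against the known wrapper
; IDs. The final `br i1 true` for the last region shows the generic
; indirect-call fallback was proven dead.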
; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__1
; AMDGPU-SAME: (i32* noalias nocapture nofree noundef nonnull readonly align 4 dereferenceable(4) [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree nonnull readnone align 4 dereferenceable(4) [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; AMDGPU-NEXT:  entry:
; AMDGPU-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; AMDGPU-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; AMDGPU-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
; AMDGPU-NEXT:    [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x i8*], align 8
; AMDGPU-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
; AMDGPU-NEXT:    call void @unknown_no_openmp() #[[ATTR9]]
; AMDGPU-NEXT:    [[TMP0:%.*]] = load i32, i32* [[DOTGLOBAL_TID_]], align 4
; AMDGPU-NEXT:    [[TMP1:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
; AMDGPU-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* noundef @[[GLOB1]], i32 [[TMP0]], i32 noundef 1, i32 noundef -1, i32 noundef -1, i8* noundef bitcast (void (i32*, i32*)* @__omp_outlined__2 to i8*), i8* noundef @__omp_outlined__2_wrapper.ID, i8** noundef [[TMP1]], i64 noundef 0)
; AMDGPU-NEXT:    call void @no_parallel_region_in_here.internalized() #[[ATTR8]]
; AMDGPU-NEXT:    [[TMP2:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS1]] to i8**
; AMDGPU-NEXT:    call void @__kmpc_parallel_51(%struct.ident_t* noundef @[[GLOB1]], i32 [[TMP0]], i32 noundef 1, i32 noundef -1, i32 noundef -1, i8* noundef bitcast (void (i32*, i32*)* @__omp_outlined__3 to i8*), i8* noundef @__omp_outlined__3_wrapper.ID, i8** noundef [[TMP2]], i64 noundef 0)
; AMDGPU-NEXT:    ret void
;
;
; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__2
; AMDGPU-SAME: (i32* noalias nocapture nofree readnone [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree readnone [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; AMDGPU-NEXT:  entry:
; AMDGPU-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; AMDGPU-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; AMDGPU-NEXT:    call void @p0() #[[ATTR10:[0-9]+]]
; AMDGPU-NEXT:    ret void
;
;
; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__2_wrapper
; AMDGPU-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
; AMDGPU-NEXT:  entry:
; AMDGPU-NEXT:    [[DOTADDR:%.*]] = alloca i16, align 2
; AMDGPU-NEXT:    [[DOTADDR1:%.*]] = alloca i32, align 4
; AMDGPU-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; AMDGPU-NEXT:    [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
; AMDGPU-NEXT:    call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
; AMDGPU-NEXT:    call void @__omp_outlined__2(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR3]]
; AMDGPU-NEXT:    ret void
;
;
; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__3
; AMDGPU-SAME: (i32* noalias nocapture nofree readnone [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree readnone [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; AMDGPU-NEXT:  entry:
; AMDGPU-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; AMDGPU-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; AMDGPU-NEXT:    call void @p1() #[[ATTR10]]
; AMDGPU-NEXT:    ret void
;
;
; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__3_wrapper
; AMDGPU-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
; AMDGPU-NEXT:  entry:
; AMDGPU-NEXT:    [[DOTADDR:%.*]] = alloca i16, align 2
; AMDGPU-NEXT:    [[DOTADDR1:%.*]] = alloca i32, align 4
; AMDGPU-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; AMDGPU-NEXT:    [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
; AMDGPU-NEXT:    call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
; AMDGPU-NEXT:    call void @__omp_outlined__3(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR3]]
; AMDGPU-NEXT:    ret void
;
;
; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
; AMDGPU-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39
; AMDGPU-SAME: () #[[ATTR0]] {
; AMDGPU-NEXT:  entry:
; AMDGPU-NEXT:    [[WORKER_WORK_FN_ADDR:%.*]] = alloca i8*, align 8, addrspace(5)
; AMDGPU-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; AMDGPU-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
; AMDGPU-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 false, i1 true)
; AMDGPU-NEXT:    [[BLOCK_HW_SIZE:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
; AMDGPU-NEXT:    [[WARP_SIZE:%.*]] = call i32 @__kmpc_get_warp_size()
; AMDGPU-NEXT:    [[BLOCK_SIZE:%.*]] = sub i32 [[BLOCK_HW_SIZE]], [[WARP_SIZE]]
; AMDGPU-NEXT:    [[THREAD_IS_MAIN_OR_WORKER:%.*]] = icmp slt i32 [[TMP0]], [[BLOCK_SIZE]]
; AMDGPU-NEXT:    br i1 [[THREAD_IS_MAIN_OR_WORKER]], label [[IS_WORKER_CHECK:%.*]], label [[WORKER_STATE_MACHINE_FINISHED:%.*]]
; AMDGPU:       is_worker_check:
; AMDGPU-NEXT:    [[THREAD_IS_WORKER:%.*]] = icmp ne i32 [[TMP0]], -1
; AMDGPU-NEXT:    br i1 [[THREAD_IS_WORKER]], label [[WORKER_STATE_MACHINE_BEGIN:%.*]], label [[THREAD_USER_CODE_CHECK:%.*]]
; AMDGPU:       worker_state_machine.begin:
; AMDGPU-NEXT:    call void @__kmpc_barrier_simple_generic(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
; AMDGPU-NEXT:    [[WORKER_WORK_FN_ADDR_GENERIC:%.*]] = addrspacecast i8* addrspace(5)* [[WORKER_WORK_FN_ADDR]] to i8**
; AMDGPU-NEXT:    [[WORKER_IS_ACTIVE:%.*]] = call i1 @__kmpc_kernel_parallel(i8** [[WORKER_WORK_FN_ADDR_GENERIC]])
; AMDGPU-NEXT:    [[WORKER_WORK_FN:%.*]] = load i8*, i8** [[WORKER_WORK_FN_ADDR_GENERIC]], align 8
; AMDGPU-NEXT:    [[WORKER_WORK_FN_ADDR_CAST:%.*]] = bitcast i8* [[WORKER_WORK_FN]] to void (i16, i32)*
; AMDGPU-NEXT:    [[WORKER_IS_DONE:%.*]] = icmp eq i8* [[WORKER_WORK_FN]], null
; AMDGPU-NEXT:    br i1 [[WORKER_IS_DONE]], label [[WORKER_STATE_MACHINE_FINISHED]], label [[WORKER_STATE_MACHINE_IS_ACTIVE_CHECK:%.*]]
; AMDGPU:       worker_state_machine.finished:
; AMDGPU-NEXT:    ret void
; AMDGPU:       worker_state_machine.is_active.check:
; AMDGPU-NEXT:    br i1 [[WORKER_IS_ACTIVE]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK:%.*]], label [[WORKER_STATE_MACHINE_DONE_BARRIER:%.*]]
; AMDGPU:       worker_state_machine.parallel_region.check:
; AMDGPU-NEXT:    [[WORKER_CHECK_PARALLEL_REGION:%.*]] = icmp eq void (i16, i32)* [[WORKER_WORK_FN_ADDR_CAST]], @__omp_outlined__17_wrapper
; AMDGPU-NEXT:    br i1 [[WORKER_CHECK_PARALLEL_REGION]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK1:%.*]]
; AMDGPU:       worker_state_machine.parallel_region.execute:
; AMDGPU-NEXT:    call void @__omp_outlined__17_wrapper(i16 0, i32 [[TMP0]])
; AMDGPU-NEXT:    br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END:%.*]]
; AMDGPU:       worker_state_machine.parallel_region.check1:
; AMDGPU-NEXT:    [[WORKER_CHECK_PARALLEL_REGION4:%.*]] = icmp eq void (i16, i32)* [[WORKER_WORK_FN_ADDR_CAST]], bitcast (i8* @__omp_outlined__5_wrapper.ID to void (i16, i32)*)
; AMDGPU-NEXT:    br i1 [[WORKER_CHECK_PARALLEL_REGION4]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE2:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK3:%.*]]
; AMDGPU:       worker_state_machine.parallel_region.execute2:
; AMDGPU-NEXT:    call void @__omp_outlined__5_wrapper(i16 0, i32 [[TMP0]])
; AMDGPU-NEXT:    br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]]
; AMDGPU:       worker_state_machine.parallel_region.check3:
; AMDGPU-NEXT:    br i1 true, label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE5:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK6:%.*]]
; AMDGPU:       worker_state_machine.parallel_region.execute5:
; AMDGPU-NEXT:    call void @__omp_outlined__18_wrapper(i16 0, i32 [[TMP0]])
; AMDGPU-NEXT:    br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]]
; AMDGPU:       worker_state_machine.parallel_region.check6:
; AMDGPU-NEXT:    br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]]
; AMDGPU:       worker_state_machine.parallel_region.end:
; AMDGPU-NEXT:    call void @__kmpc_kernel_end_parallel()
; AMDGPU-NEXT:    br label [[WORKER_STATE_MACHINE_DONE_BARRIER]]
; AMDGPU:       worker_state_machine.done.barrier:
; AMDGPU-NEXT:    call void @__kmpc_barrier_simple_generic(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
; AMDGPU-NEXT:    br label [[WORKER_STATE_MACHINE_BEGIN]]
; AMDGPU:       thread.user_code.check:
; AMDGPU-NEXT:    [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
; AMDGPU-NEXT:    br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; AMDGPU:       user_code.entry:
; AMDGPU-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) #[[ATTR3]]
; AMDGPU-NEXT:    store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
; AMDGPU-NEXT:    call void @__omp_outlined__4(i32* noalias nocapture noundef nonnull readonly align 4 dereferenceable(4) [[DOTTHREADID_TEMP_]], i32* noalias nocapture noundef nonnull readnone align 4 dereferenceable(4) [[DOTZERO_ADDR]]) #[[ATTR3]]
; AMDGPU-NEXT:    call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
; AMDGPU-NEXT:    ret void
; AMDGPU:       worker.exit:
; AMDGPU-NEXT:    ret void
;
;
1098 ; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
1099 ; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__4
1100 ; AMDGPU-SAME: (i32* noalias nocapture nofree noundef nonnull readonly align 4 dereferenceable(4) [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree nonnull readnone align 4 dereferenceable(4) [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
1101 ; AMDGPU-NEXT: entry:
1102 ; AMDGPU-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1103 ; AMDGPU-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1104 ; AMDGPU-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
1105 ; AMDGPU-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1106 ; AMDGPU-NEXT: call void @unknown_no_openmp() #[[ATTR9]]
1107 ; AMDGPU-NEXT: call void @simple_state_machine_interprocedural_before.internalized() #[[ATTR8]]
1108 ; AMDGPU-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR8]]
1109 ; AMDGPU-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTGLOBAL_TID_]], align 4
1110 ; AMDGPU-NEXT: [[TMP1:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
; AMDGPU-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* noundef @[[GLOB1]], i32 [[TMP0]], i32 noundef 1, i32 noundef -1, i32 noundef -1, i8* noundef bitcast (void (i32*, i32*)* @__omp_outlined__5 to i8*), i8* noundef @__omp_outlined__5_wrapper.ID, i8** noundef [[TMP1]], i64 noundef 0)
; AMDGPU-NEXT: call void @simple_state_machine_interprocedural_after.internalized() #[[ATTR8]]
; AMDGPU-NEXT: ret void
;
;
; AMDGPU: Function Attrs: convergent noinline nounwind
; AMDGPU-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_before.internalized
; AMDGPU-SAME: () #[[ATTR1]] {
; AMDGPU-NEXT: entry:
; AMDGPU-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
; AMDGPU-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* noundef @[[GLOB2]]) #[[ATTR3]]
; AMDGPU-NEXT: [[TMP1:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
; AMDGPU-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* noundef @[[GLOB2]], i32 [[TMP0]], i32 noundef 1, i32 noundef -1, i32 noundef -1, i8* noundef bitcast (void (i32*, i32*)* @__omp_outlined__17 to i8*), i8* noundef bitcast (void (i16, i32)* @__omp_outlined__17_wrapper to i8*), i8** noundef [[TMP1]], i64 noundef 0)
; AMDGPU-NEXT: ret void
;
;
; AMDGPU: Function Attrs: convergent noinline nounwind
; AMDGPU-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_before
; AMDGPU-SAME: () #[[ATTR1]] {
; AMDGPU-NEXT: entry:
; AMDGPU-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
; AMDGPU-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
; AMDGPU-NEXT: [[TMP1:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
; AMDGPU-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__17 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__17_wrapper to i8*), i8** [[TMP1]], i64 0)
; AMDGPU-NEXT: ret void
;
;
; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__5
; AMDGPU-SAME: (i32* noalias nocapture nofree readnone [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree readnone [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; AMDGPU-NEXT: entry:
; AMDGPU-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; AMDGPU-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; AMDGPU-NEXT: call void @p1() #[[ATTR10]]
; AMDGPU-NEXT: ret void
;
;
; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__5_wrapper
; AMDGPU-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
; AMDGPU-NEXT: entry:
; AMDGPU-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
; AMDGPU-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
; AMDGPU-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; AMDGPU-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
; AMDGPU-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
; AMDGPU-NEXT: call void @__omp_outlined__5(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR3]]
; AMDGPU-NEXT: ret void
;
;
; AMDGPU: Function Attrs: convergent noinline nounwind
; AMDGPU-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_after.internalized
; AMDGPU-SAME: () #[[ATTR1]] {
; AMDGPU-NEXT: entry:
; AMDGPU-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
; AMDGPU-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* noundef @[[GLOB2]]) #[[ATTR3]]
; AMDGPU-NEXT: [[TMP1:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
; AMDGPU-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* noundef @[[GLOB2]], i32 [[TMP0]], i32 noundef 1, i32 noundef -1, i32 noundef -1, i8* noundef bitcast (void (i32*, i32*)* @__omp_outlined__18 to i8*), i8* noundef bitcast (void (i16, i32)* @__omp_outlined__18_wrapper to i8*), i8** noundef [[TMP1]], i64 noundef 0)
; AMDGPU-NEXT: ret void
;
;
; AMDGPU: Function Attrs: convergent noinline nounwind
; AMDGPU-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_after
; AMDGPU-SAME: () #[[ATTR1]] {
; AMDGPU-NEXT: entry:
; AMDGPU-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
; AMDGPU-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
; AMDGPU-NEXT: [[TMP1:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
; AMDGPU-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__18 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__18_wrapper to i8*), i8** [[TMP1]], i64 0)
; AMDGPU-NEXT: ret void
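;;
;; Note the pairing above: for each callee reached from a kernel, OpenMP-opt
;; keeps an ".internalized" copy next to the original definition. Only the
;; internal copy picks up the deduced attributes (noundef arguments, the
;; #[[ATTR3]] callsite attribute); the externally visible definition stays
;; untouched because outside callers may still reach it.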
;
;
; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
; AMDGPU-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55
; AMDGPU-SAME: () #[[ATTR0]] {
; AMDGPU-NEXT: entry:
; AMDGPU-NEXT: [[WORKER_WORK_FN_ADDR:%.*]] = alloca i8*, align 8, addrspace(5)
; AMDGPU-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; AMDGPU-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
; AMDGPU-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 false, i1 true)
; AMDGPU-NEXT: [[BLOCK_HW_SIZE:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
; AMDGPU-NEXT: [[WARP_SIZE:%.*]] = call i32 @__kmpc_get_warp_size()
; AMDGPU-NEXT: [[BLOCK_SIZE:%.*]] = sub i32 [[BLOCK_HW_SIZE]], [[WARP_SIZE]]
; AMDGPU-NEXT: [[THREAD_IS_MAIN_OR_WORKER:%.*]] = icmp slt i32 [[TMP0]], [[BLOCK_SIZE]]
; AMDGPU-NEXT: br i1 [[THREAD_IS_MAIN_OR_WORKER]], label [[IS_WORKER_CHECK:%.*]], label [[WORKER_STATE_MACHINE_FINISHED:%.*]]
; AMDGPU: is_worker_check:
; AMDGPU-NEXT: [[THREAD_IS_WORKER:%.*]] = icmp ne i32 [[TMP0]], -1
; AMDGPU-NEXT: br i1 [[THREAD_IS_WORKER]], label [[WORKER_STATE_MACHINE_BEGIN:%.*]], label [[THREAD_USER_CODE_CHECK:%.*]]
; AMDGPU: worker_state_machine.begin:
; AMDGPU-NEXT: call void @__kmpc_barrier_simple_generic(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
; AMDGPU-NEXT: [[WORKER_WORK_FN_ADDR_GENERIC:%.*]] = addrspacecast i8* addrspace(5)* [[WORKER_WORK_FN_ADDR]] to i8**
; AMDGPU-NEXT: [[WORKER_IS_ACTIVE:%.*]] = call i1 @__kmpc_kernel_parallel(i8** [[WORKER_WORK_FN_ADDR_GENERIC]])
; AMDGPU-NEXT: [[WORKER_WORK_FN:%.*]] = load i8*, i8** [[WORKER_WORK_FN_ADDR_GENERIC]], align 8
; AMDGPU-NEXT: [[WORKER_WORK_FN_ADDR_CAST:%.*]] = bitcast i8* [[WORKER_WORK_FN]] to void (i16, i32)*
; AMDGPU-NEXT: [[WORKER_IS_DONE:%.*]] = icmp eq i8* [[WORKER_WORK_FN]], null
; AMDGPU-NEXT: br i1 [[WORKER_IS_DONE]], label [[WORKER_STATE_MACHINE_FINISHED]], label [[WORKER_STATE_MACHINE_IS_ACTIVE_CHECK:%.*]]
; AMDGPU: worker_state_machine.finished:
; AMDGPU-NEXT: ret void
; AMDGPU: worker_state_machine.is_active.check:
; AMDGPU-NEXT: br i1 [[WORKER_IS_ACTIVE]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK:%.*]], label [[WORKER_STATE_MACHINE_DONE_BARRIER:%.*]]
; AMDGPU: worker_state_machine.parallel_region.check:
; AMDGPU-NEXT: [[WORKER_CHECK_PARALLEL_REGION:%.*]] = icmp eq void (i16, i32)* [[WORKER_WORK_FN_ADDR_CAST]], bitcast (i8* @__omp_outlined__7_wrapper.ID to void (i16, i32)*)
; AMDGPU-NEXT: br i1 [[WORKER_CHECK_PARALLEL_REGION]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK1:%.*]]
; AMDGPU: worker_state_machine.parallel_region.execute:
; AMDGPU-NEXT: call void @__omp_outlined__7_wrapper(i16 0, i32 [[TMP0]])
; AMDGPU-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END:%.*]]
; AMDGPU: worker_state_machine.parallel_region.check1:
; AMDGPU-NEXT: [[WORKER_CHECK_PARALLEL_REGION4:%.*]] = icmp eq void (i16, i32)* [[WORKER_WORK_FN_ADDR_CAST]], bitcast (i8* @__omp_outlined__8_wrapper.ID to void (i16, i32)*)
; AMDGPU-NEXT: br i1 [[WORKER_CHECK_PARALLEL_REGION4]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE2:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_FALLBACK_EXECUTE:%.*]]
; AMDGPU: worker_state_machine.parallel_region.execute2:
; AMDGPU-NEXT: call void @__omp_outlined__8_wrapper(i16 0, i32 [[TMP0]])
; AMDGPU-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]]
; AMDGPU: worker_state_machine.parallel_region.fallback.execute:
; AMDGPU-NEXT: call void [[WORKER_WORK_FN_ADDR_CAST]](i16 0, i32 [[TMP0]])
; AMDGPU-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]]
; AMDGPU: worker_state_machine.parallel_region.end:
; AMDGPU-NEXT: call void @__kmpc_kernel_end_parallel()
; AMDGPU-NEXT: br label [[WORKER_STATE_MACHINE_DONE_BARRIER]]
; AMDGPU: worker_state_machine.done.barrier:
; AMDGPU-NEXT: call void @__kmpc_barrier_simple_generic(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
; AMDGPU-NEXT: br label [[WORKER_STATE_MACHINE_BEGIN]]
; AMDGPU: thread.user_code.check:
; AMDGPU-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
; AMDGPU-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; AMDGPU: user_code.entry:
; AMDGPU-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) #[[ATTR3]]
; AMDGPU-NEXT: store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
; AMDGPU-NEXT: call void @__omp_outlined__6(i32* noalias nocapture noundef nonnull readonly align 4 dereferenceable(4) [[DOTTHREADID_TEMP_]], i32* noalias nocapture noundef nonnull readnone align 4 dereferenceable(4) [[DOTZERO_ADDR]]) #[[ATTR3]]
; AMDGPU-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
; AMDGPU-NEXT: ret void
; AMDGPU: worker.exit:
; AMDGPU-NEXT: ret void
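;;
;; The kernel above carries the rewritten "custom" worker state machine. As a
;; rough C-like sketch of the loop it implements (illustrative only; `tid`
;; stands for the value returned by __kmpc_target_init, all other names
;; mirror the check lines):
;;
;;   while (true) {
;;     __kmpc_barrier_simple_generic(&ident, tid);        // worker_state_machine.begin
;;     void (*work_fn)(short, int);
;;     bool active = __kmpc_kernel_parallel((void **)&work_fn);
;;     if (work_fn == NULL)                               // worker_state_machine.finished
;;       return;
;;     if (active) {
;;       if (work_fn == __omp_outlined__7_wrapper.ID)     // known region 1
;;         __omp_outlined__7_wrapper(0, tid);
;;       else if (work_fn == __omp_outlined__8_wrapper.ID) // known region 2
;;         __omp_outlined__8_wrapper(0, tid);
;;       else
;;         work_fn(0, tid);                               // indirect fallback
;;       __kmpc_kernel_end_parallel();
;;     }
;;     __kmpc_barrier_simple_generic(&ident, tid);        // done.barrier
;;   }
;;
;; The indirect fallback call survives in this kernel because
;; __omp_outlined__6 also calls the opaque @unknown(), which may reach
;; parallel regions the pass cannot enumerate.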
;
;
; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__6
; AMDGPU-SAME: (i32* noalias nocapture nofree noundef nonnull readonly align 4 dereferenceable(4) [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree nonnull readnone align 4 dereferenceable(4) [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; AMDGPU-NEXT: entry:
; AMDGPU-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; AMDGPU-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; AMDGPU-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
; AMDGPU-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x i8*], align 8
; AMDGPU-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
; AMDGPU-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTGLOBAL_TID_]], align 4
; AMDGPU-NEXT: [[TMP1:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
; AMDGPU-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* noundef @[[GLOB1]], i32 [[TMP0]], i32 noundef 1, i32 noundef -1, i32 noundef -1, i8* noundef bitcast (void (i32*, i32*)* @__omp_outlined__7 to i8*), i8* noundef @__omp_outlined__7_wrapper.ID, i8** noundef [[TMP1]], i64 noundef 0)
; AMDGPU-NEXT: [[CALL:%.*]] = call i32 @unknown() #[[ATTR10]]
; AMDGPU-NEXT: [[TMP2:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS1]] to i8**
; AMDGPU-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* noundef @[[GLOB1]], i32 [[TMP0]], i32 noundef 1, i32 noundef -1, i32 noundef -1, i8* noundef bitcast (void (i32*, i32*)* @__omp_outlined__8 to i8*), i8* noundef @__omp_outlined__8_wrapper.ID, i8** noundef [[TMP2]], i64 noundef 0)
; AMDGPU-NEXT: ret void
;
;
; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__7
; AMDGPU-SAME: (i32* noalias nocapture nofree readnone [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree readnone [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; AMDGPU-NEXT: entry:
; AMDGPU-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; AMDGPU-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; AMDGPU-NEXT: call void @p0() #[[ATTR10]]
; AMDGPU-NEXT: ret void
;
;
; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__7_wrapper
; AMDGPU-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
; AMDGPU-NEXT: entry:
; AMDGPU-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
; AMDGPU-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
; AMDGPU-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; AMDGPU-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
; AMDGPU-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
; AMDGPU-NEXT: call void @__omp_outlined__7(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR3]]
; AMDGPU-NEXT: ret void
;
;
; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__8
; AMDGPU-SAME: (i32* noalias nocapture nofree readnone [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree readnone [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; AMDGPU-NEXT: entry:
; AMDGPU-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; AMDGPU-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; AMDGPU-NEXT: call void @p1() #[[ATTR10]]
; AMDGPU-NEXT: ret void
;
;
; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__8_wrapper
; AMDGPU-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
; AMDGPU-NEXT: entry:
; AMDGPU-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
; AMDGPU-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
; AMDGPU-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; AMDGPU-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
; AMDGPU-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
; AMDGPU-NEXT: call void @__omp_outlined__8(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR3]]
; AMDGPU-NEXT: ret void
;
;
; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
; AMDGPU-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66
; AMDGPU-SAME: () #[[ATTR0]] {
; AMDGPU-NEXT: entry:
; AMDGPU-NEXT: [[WORKER_WORK_FN_ADDR:%.*]] = alloca i8*, align 8, addrspace(5)
; AMDGPU-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; AMDGPU-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
; AMDGPU-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 false, i1 true)
; AMDGPU-NEXT: [[BLOCK_HW_SIZE:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
; AMDGPU-NEXT: [[WARP_SIZE:%.*]] = call i32 @__kmpc_get_warp_size()
; AMDGPU-NEXT: [[BLOCK_SIZE:%.*]] = sub i32 [[BLOCK_HW_SIZE]], [[WARP_SIZE]]
; AMDGPU-NEXT: [[THREAD_IS_MAIN_OR_WORKER:%.*]] = icmp slt i32 [[TMP0]], [[BLOCK_SIZE]]
; AMDGPU-NEXT: br i1 [[THREAD_IS_MAIN_OR_WORKER]], label [[IS_WORKER_CHECK:%.*]], label [[WORKER_STATE_MACHINE_FINISHED:%.*]]
; AMDGPU: is_worker_check:
; AMDGPU-NEXT: [[THREAD_IS_WORKER:%.*]] = icmp ne i32 [[TMP0]], -1
; AMDGPU-NEXT: br i1 [[THREAD_IS_WORKER]], label [[WORKER_STATE_MACHINE_BEGIN:%.*]], label [[THREAD_USER_CODE_CHECK:%.*]]
; AMDGPU: worker_state_machine.begin:
; AMDGPU-NEXT: call void @__kmpc_barrier_simple_generic(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
; AMDGPU-NEXT: [[WORKER_WORK_FN_ADDR_GENERIC:%.*]] = addrspacecast i8* addrspace(5)* [[WORKER_WORK_FN_ADDR]] to i8**
; AMDGPU-NEXT: [[WORKER_IS_ACTIVE:%.*]] = call i1 @__kmpc_kernel_parallel(i8** [[WORKER_WORK_FN_ADDR_GENERIC]])
; AMDGPU-NEXT: [[WORKER_WORK_FN:%.*]] = load i8*, i8** [[WORKER_WORK_FN_ADDR_GENERIC]], align 8
; AMDGPU-NEXT: [[WORKER_WORK_FN_ADDR_CAST:%.*]] = bitcast i8* [[WORKER_WORK_FN]] to void (i16, i32)*
; AMDGPU-NEXT: [[WORKER_IS_DONE:%.*]] = icmp eq i8* [[WORKER_WORK_FN]], null
; AMDGPU-NEXT: br i1 [[WORKER_IS_DONE]], label [[WORKER_STATE_MACHINE_FINISHED]], label [[WORKER_STATE_MACHINE_IS_ACTIVE_CHECK:%.*]]
; AMDGPU: worker_state_machine.finished:
; AMDGPU-NEXT: ret void
; AMDGPU: worker_state_machine.is_active.check:
; AMDGPU-NEXT: br i1 [[WORKER_IS_ACTIVE]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK:%.*]], label [[WORKER_STATE_MACHINE_DONE_BARRIER:%.*]]
; AMDGPU: worker_state_machine.parallel_region.check:
; AMDGPU-NEXT: [[WORKER_CHECK_PARALLEL_REGION:%.*]] = icmp eq void (i16, i32)* [[WORKER_WORK_FN_ADDR_CAST]], bitcast (i8* @__omp_outlined__10_wrapper.ID to void (i16, i32)*)
; AMDGPU-NEXT: br i1 [[WORKER_CHECK_PARALLEL_REGION]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK1:%.*]]
; AMDGPU: worker_state_machine.parallel_region.execute:
; AMDGPU-NEXT: call void @__omp_outlined__10_wrapper(i16 0, i32 [[TMP0]])
; AMDGPU-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END:%.*]]
; AMDGPU: worker_state_machine.parallel_region.check1:
; AMDGPU-NEXT: br i1 true, label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE2:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK3:%.*]]
; AMDGPU: worker_state_machine.parallel_region.execute2:
; AMDGPU-NEXT: call void @__omp_outlined__11_wrapper(i16 0, i32 [[TMP0]])
; AMDGPU-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]]
; AMDGPU: worker_state_machine.parallel_region.check3:
; AMDGPU-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]]
; AMDGPU: worker_state_machine.parallel_region.end:
; AMDGPU-NEXT: call void @__kmpc_kernel_end_parallel()
; AMDGPU-NEXT: br label [[WORKER_STATE_MACHINE_DONE_BARRIER]]
; AMDGPU: worker_state_machine.done.barrier:
; AMDGPU-NEXT: call void @__kmpc_barrier_simple_generic(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
; AMDGPU-NEXT: br label [[WORKER_STATE_MACHINE_BEGIN]]
; AMDGPU: thread.user_code.check:
; AMDGPU-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
; AMDGPU-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; AMDGPU: user_code.entry:
; AMDGPU-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) #[[ATTR3]]
; AMDGPU-NEXT: store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
; AMDGPU-NEXT: call void @__omp_outlined__9(i32* noalias nocapture noundef nonnull readonly align 4 dereferenceable(4) [[DOTTHREADID_TEMP_]], i32* noalias nocapture noundef nonnull readnone align 4 dereferenceable(4) [[DOTZERO_ADDR]]) #[[ATTR3]]
; AMDGPU-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
; AMDGPU-NEXT: ret void
; AMDGPU: worker.exit:
; AMDGPU-NEXT: ret void
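;;
;; In contrast to the fallback variant above, every parallel region in this
;; kernel is known, so the final region check folds to "br i1 true" and the
;; state machine dispatches only to the two known wrappers; no indirect
;; fallback call remains.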
;
;
; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__9
; AMDGPU-SAME: (i32* noalias nocapture nofree noundef nonnull readonly align 4 dereferenceable(4) [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree nonnull readnone align 4 dereferenceable(4) [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; AMDGPU-NEXT: entry:
; AMDGPU-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; AMDGPU-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; AMDGPU-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
; AMDGPU-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x i8*], align 8
; AMDGPU-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
; AMDGPU-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTGLOBAL_TID_]], align 4
; AMDGPU-NEXT: [[TMP1:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
; AMDGPU-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* noundef @[[GLOB1]], i32 [[TMP0]], i32 noundef 1, i32 noundef -1, i32 noundef -1, i8* noundef bitcast (void (i32*, i32*)* @__omp_outlined__10 to i8*), i8* noundef @__omp_outlined__10_wrapper.ID, i8** noundef [[TMP1]], i64 noundef 0)
; AMDGPU-NEXT: call void @unknown_no_openmp() #[[ATTR9]]
; AMDGPU-NEXT: [[TMP2:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS1]] to i8**
; AMDGPU-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* noundef @[[GLOB1]], i32 [[TMP0]], i32 noundef 1, i32 noundef -1, i32 noundef -1, i8* noundef bitcast (void (i32*, i32*)* @__omp_outlined__11 to i8*), i8* noundef @__omp_outlined__11_wrapper.ID, i8** noundef [[TMP2]], i64 noundef 0)
; AMDGPU-NEXT: ret void
;
;
; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__10
; AMDGPU-SAME: (i32* noalias nocapture nofree readnone [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree readnone [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; AMDGPU-NEXT: entry:
; AMDGPU-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; AMDGPU-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; AMDGPU-NEXT: call void @p0() #[[ATTR10]]
; AMDGPU-NEXT: ret void
;
;
; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__10_wrapper
; AMDGPU-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
; AMDGPU-NEXT: entry:
; AMDGPU-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
; AMDGPU-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
; AMDGPU-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; AMDGPU-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
; AMDGPU-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
; AMDGPU-NEXT: call void @__omp_outlined__10(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR3]]
; AMDGPU-NEXT: ret void
;
;
; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__11
; AMDGPU-SAME: (i32* noalias nocapture nofree readnone [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree readnone [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; AMDGPU-NEXT: entry:
; AMDGPU-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; AMDGPU-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; AMDGPU-NEXT: call void @p1() #[[ATTR10]]
; AMDGPU-NEXT: ret void
;
;
; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__11_wrapper
; AMDGPU-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
; AMDGPU-NEXT: entry:
; AMDGPU-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
; AMDGPU-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
; AMDGPU-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; AMDGPU-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
; AMDGPU-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
; AMDGPU-NEXT: call void @__omp_outlined__11(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR3]]
; AMDGPU-NEXT: ret void
;
;
; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
; AMDGPU-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_pure_l77
; AMDGPU-SAME: () #[[ATTR0]] {
; AMDGPU-NEXT: entry:
; AMDGPU-NEXT: [[WORKER_WORK_FN_ADDR:%.*]] = alloca i8*, align 8, addrspace(5)
; AMDGPU-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; AMDGPU-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
; AMDGPU-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 false, i1 true)
; AMDGPU-NEXT: [[BLOCK_HW_SIZE:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
; AMDGPU-NEXT: [[WARP_SIZE:%.*]] = call i32 @__kmpc_get_warp_size()
; AMDGPU-NEXT: [[BLOCK_SIZE:%.*]] = sub i32 [[BLOCK_HW_SIZE]], [[WARP_SIZE]]
; AMDGPU-NEXT: [[THREAD_IS_MAIN_OR_WORKER:%.*]] = icmp slt i32 [[TMP0]], [[BLOCK_SIZE]]
; AMDGPU-NEXT: br i1 [[THREAD_IS_MAIN_OR_WORKER]], label [[IS_WORKER_CHECK:%.*]], label [[WORKER_STATE_MACHINE_FINISHED:%.*]]
; AMDGPU: is_worker_check:
; AMDGPU-NEXT: [[THREAD_IS_WORKER:%.*]] = icmp ne i32 [[TMP0]], -1
; AMDGPU-NEXT: br i1 [[THREAD_IS_WORKER]], label [[WORKER_STATE_MACHINE_BEGIN:%.*]], label [[THREAD_USER_CODE_CHECK:%.*]]
; AMDGPU: worker_state_machine.begin:
; AMDGPU-NEXT: call void @__kmpc_barrier_simple_generic(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
; AMDGPU-NEXT: [[WORKER_WORK_FN_ADDR_GENERIC:%.*]] = addrspacecast i8* addrspace(5)* [[WORKER_WORK_FN_ADDR]] to i8**
; AMDGPU-NEXT: [[WORKER_IS_ACTIVE:%.*]] = call i1 @__kmpc_kernel_parallel(i8** [[WORKER_WORK_FN_ADDR_GENERIC]])
; AMDGPU-NEXT: [[WORKER_WORK_FN:%.*]] = load i8*, i8** [[WORKER_WORK_FN_ADDR_GENERIC]], align 8
; AMDGPU-NEXT: [[WORKER_WORK_FN_ADDR_CAST:%.*]] = bitcast i8* [[WORKER_WORK_FN]] to void (i16, i32)*
; AMDGPU-NEXT: [[WORKER_IS_DONE:%.*]] = icmp eq i8* [[WORKER_WORK_FN]], null
; AMDGPU-NEXT: br i1 [[WORKER_IS_DONE]], label [[WORKER_STATE_MACHINE_FINISHED]], label [[WORKER_STATE_MACHINE_IS_ACTIVE_CHECK:%.*]]
; AMDGPU: worker_state_machine.finished:
; AMDGPU-NEXT: ret void
; AMDGPU: worker_state_machine.is_active.check:
; AMDGPU-NEXT: br i1 [[WORKER_IS_ACTIVE]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK:%.*]], label [[WORKER_STATE_MACHINE_DONE_BARRIER:%.*]]
; AMDGPU: worker_state_machine.parallel_region.check:
; AMDGPU-NEXT: [[WORKER_CHECK_PARALLEL_REGION:%.*]] = icmp eq void (i16, i32)* [[WORKER_WORK_FN_ADDR_CAST]], bitcast (i8* @__omp_outlined__13_wrapper.ID to void (i16, i32)*)
; AMDGPU-NEXT: br i1 [[WORKER_CHECK_PARALLEL_REGION]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK1:%.*]]
; AMDGPU: worker_state_machine.parallel_region.execute:
; AMDGPU-NEXT: call void @__omp_outlined__13_wrapper(i16 0, i32 [[TMP0]])
; AMDGPU-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END:%.*]]
; AMDGPU: worker_state_machine.parallel_region.check1:
; AMDGPU-NEXT: br i1 true, label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE2:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK3:%.*]]
; AMDGPU: worker_state_machine.parallel_region.execute2:
; AMDGPU-NEXT: call void @__omp_outlined__14_wrapper(i16 0, i32 [[TMP0]])
; AMDGPU-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]]
; AMDGPU: worker_state_machine.parallel_region.check3:
; AMDGPU-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]]
; AMDGPU: worker_state_machine.parallel_region.end:
; AMDGPU-NEXT: call void @__kmpc_kernel_end_parallel()
; AMDGPU-NEXT: br label [[WORKER_STATE_MACHINE_DONE_BARRIER]]
; AMDGPU: worker_state_machine.done.barrier:
; AMDGPU-NEXT: call void @__kmpc_barrier_simple_generic(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
; AMDGPU-NEXT: br label [[WORKER_STATE_MACHINE_BEGIN]]
; AMDGPU: thread.user_code.check:
; AMDGPU-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
; AMDGPU-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; AMDGPU: user_code.entry:
; AMDGPU-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) #[[ATTR3]]
; AMDGPU-NEXT: store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
; AMDGPU-NEXT: call void @__omp_outlined__12(i32* noalias nocapture noundef nonnull readonly align 4 dereferenceable(4) [[DOTTHREADID_TEMP_]], i32* noalias nocapture noundef nonnull readnone align 4 dereferenceable(4) [[DOTZERO_ADDR]]) #[[ATTR3]]
; AMDGPU-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
; AMDGPU-NEXT: ret void
; AMDGPU: worker.exit:
; AMDGPU-NEXT: ret void
;
;
; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__12
; AMDGPU-SAME: (i32* noalias nocapture nofree noundef nonnull readonly align 4 dereferenceable(4) [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree nonnull readnone align 4 dereferenceable(4) [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; AMDGPU-NEXT: entry:
; AMDGPU-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; AMDGPU-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; AMDGPU-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
; AMDGPU-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x i8*], align 8
; AMDGPU-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
; AMDGPU-NEXT: call void @unknown_no_openmp() #[[ATTR9]]
; AMDGPU-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTGLOBAL_TID_]], align 4
; AMDGPU-NEXT: [[TMP1:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
; AMDGPU-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* noundef @[[GLOB1]], i32 [[TMP0]], i32 noundef 1, i32 noundef -1, i32 noundef -1, i8* noundef bitcast (void (i32*, i32*)* @__omp_outlined__13 to i8*), i8* noundef @__omp_outlined__13_wrapper.ID, i8** noundef [[TMP1]], i64 noundef 0)
; AMDGPU-NEXT: [[TMP2:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS1]] to i8**
; AMDGPU-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* noundef @[[GLOB1]], i32 [[TMP0]], i32 noundef 1, i32 noundef -1, i32 noundef -1, i8* noundef bitcast (void (i32*, i32*)* @__omp_outlined__14 to i8*), i8* noundef @__omp_outlined__14_wrapper.ID, i8** noundef [[TMP2]], i64 noundef 0)
; AMDGPU-NEXT: ret void
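;;
;; The call to unknown_pure() from the original source no longer appears in
;; __omp_outlined__12 above: a side-effect-free call whose result is unused
;; is dead, so only unknown_no_openmp() and the two parallel regions survive.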
;
;
; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__13
; AMDGPU-SAME: (i32* noalias nocapture nofree readnone [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree readnone [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; AMDGPU-NEXT: entry:
; AMDGPU-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; AMDGPU-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; AMDGPU-NEXT: call void @p0() #[[ATTR10]]
; AMDGPU-NEXT: ret void
;
;
; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__13_wrapper
; AMDGPU-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
; AMDGPU-NEXT: entry:
; AMDGPU-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
; AMDGPU-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
; AMDGPU-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; AMDGPU-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
; AMDGPU-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
; AMDGPU-NEXT: call void @__omp_outlined__13(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR3]]
; AMDGPU-NEXT: ret void
;
;
; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__14
; AMDGPU-SAME: (i32* noalias nocapture nofree readnone [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree readnone [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; AMDGPU-NEXT: entry:
; AMDGPU-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; AMDGPU-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; AMDGPU-NEXT: call void @p1() #[[ATTR10]]
; AMDGPU-NEXT: ret void
;
;
; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__14_wrapper
; AMDGPU-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
; AMDGPU-NEXT: entry:
; AMDGPU-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
; AMDGPU-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
; AMDGPU-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; AMDGPU-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
; AMDGPU-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
; AMDGPU-NEXT: call void @__omp_outlined__14(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR3]]
; AMDGPU-NEXT: ret void
;
;
; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
; AMDGPU-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92
; AMDGPU-SAME: () #[[ATTR0]] {
; AMDGPU-NEXT: entry:
; AMDGPU-NEXT: [[WORKER_WORK_FN_ADDR:%.*]] = alloca i8*, align 8, addrspace(5)
; AMDGPU-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; AMDGPU-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
; AMDGPU-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 false, i1 true)
; AMDGPU-NEXT: [[BLOCK_HW_SIZE:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
; AMDGPU-NEXT: [[WARP_SIZE:%.*]] = call i32 @__kmpc_get_warp_size()
; AMDGPU-NEXT: [[BLOCK_SIZE:%.*]] = sub i32 [[BLOCK_HW_SIZE]], [[WARP_SIZE]]
; AMDGPU-NEXT: [[THREAD_IS_MAIN_OR_WORKER:%.*]] = icmp slt i32 [[TMP0]], [[BLOCK_SIZE]]
; AMDGPU-NEXT: br i1 [[THREAD_IS_MAIN_OR_WORKER]], label [[IS_WORKER_CHECK:%.*]], label [[WORKER_STATE_MACHINE_FINISHED:%.*]]
; AMDGPU: is_worker_check:
; AMDGPU-NEXT: [[THREAD_IS_WORKER:%.*]] = icmp ne i32 [[TMP0]], -1
; AMDGPU-NEXT: br i1 [[THREAD_IS_WORKER]], label [[WORKER_STATE_MACHINE_BEGIN:%.*]], label [[THREAD_USER_CODE_CHECK:%.*]]
; AMDGPU: worker_state_machine.begin:
; AMDGPU-NEXT: call void @__kmpc_barrier_simple_generic(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
; AMDGPU-NEXT: [[WORKER_WORK_FN_ADDR_GENERIC:%.*]] = addrspacecast i8* addrspace(5)* [[WORKER_WORK_FN_ADDR]] to i8**
; AMDGPU-NEXT: [[WORKER_IS_ACTIVE:%.*]] = call i1 @__kmpc_kernel_parallel(i8** [[WORKER_WORK_FN_ADDR_GENERIC]])
; AMDGPU-NEXT: [[WORKER_WORK_FN:%.*]] = load i8*, i8** [[WORKER_WORK_FN_ADDR_GENERIC]], align 8
; AMDGPU-NEXT: [[WORKER_WORK_FN_ADDR_CAST:%.*]] = bitcast i8* [[WORKER_WORK_FN]] to void (i16, i32)*
; AMDGPU-NEXT: [[WORKER_IS_DONE:%.*]] = icmp eq i8* [[WORKER_WORK_FN]], null
; AMDGPU-NEXT: br i1 [[WORKER_IS_DONE]], label [[WORKER_STATE_MACHINE_FINISHED]], label [[WORKER_STATE_MACHINE_IS_ACTIVE_CHECK:%.*]]
; AMDGPU: worker_state_machine.finished:
; AMDGPU-NEXT: ret void
; AMDGPU: worker_state_machine.is_active.check:
; AMDGPU-NEXT: br i1 [[WORKER_IS_ACTIVE]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK:%.*]], label [[WORKER_STATE_MACHINE_DONE_BARRIER:%.*]]
; AMDGPU: worker_state_machine.parallel_region.check:
; AMDGPU-NEXT: [[WORKER_CHECK_PARALLEL_REGION:%.*]] = icmp eq void (i16, i32)* [[WORKER_WORK_FN_ADDR_CAST]], @__omp_outlined__19_wrapper
; AMDGPU-NEXT: br i1 [[WORKER_CHECK_PARALLEL_REGION]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_FALLBACK_EXECUTE:%.*]]
; AMDGPU: worker_state_machine.parallel_region.execute:
; AMDGPU-NEXT: call void @__omp_outlined__19_wrapper(i16 0, i32 [[TMP0]])
; AMDGPU-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END:%.*]]
; AMDGPU: worker_state_machine.parallel_region.fallback.execute:
; AMDGPU-NEXT: call void [[WORKER_WORK_FN_ADDR_CAST]](i16 0, i32 [[TMP0]])
; AMDGPU-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]]
; AMDGPU: worker_state_machine.parallel_region.end:
; AMDGPU-NEXT: call void @__kmpc_kernel_end_parallel()
; AMDGPU-NEXT: br label [[WORKER_STATE_MACHINE_DONE_BARRIER]]
; AMDGPU: worker_state_machine.done.barrier:
; AMDGPU-NEXT: call void @__kmpc_barrier_simple_generic(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
; AMDGPU-NEXT: br label [[WORKER_STATE_MACHINE_BEGIN]]
; AMDGPU: thread.user_code.check:
; AMDGPU-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
; AMDGPU-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; AMDGPU: user_code.entry:
; AMDGPU-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) #[[ATTR3]]
; AMDGPU-NEXT: call void @__omp_outlined__15(i32* noalias nocapture noundef nonnull readnone align 4 dereferenceable(4) [[DOTTHREADID_TEMP_]], i32* noalias nocapture noundef nonnull readnone align 4 dereferenceable(4) [[DOTZERO_ADDR]]) #[[ATTR3]]
; AMDGPU-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
; AMDGPU-NEXT: ret void
; AMDGPU: worker.exit:
; AMDGPU-NEXT: ret void
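;;
;; Here the known-region test compares against @__omp_outlined__19_wrapper
;; itself rather than a private ".ID" token: the region is also reachable
;; through the externally visible recursive callees, so the function pointer
;; passed to __kmpc_parallel_51 cannot be rewritten, and an indirect fallback
;; branch is kept.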
;
;
; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__15
; AMDGPU-SAME: (i32* noalias nocapture nofree nonnull readnone align 4 dereferenceable(4) [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree nonnull readnone align 4 dereferenceable(4) [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; AMDGPU-NEXT: entry:
; AMDGPU-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; AMDGPU-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; AMDGPU-NEXT: [[CALL:%.*]] = call i32 bitcast (i32 (...)* @omp_get_thread_num to i32 ()*)() #[[ATTR10]]
; AMDGPU-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after.internalized(i32 [[CALL]]) #[[ATTR8]]
; AMDGPU-NEXT: ret void
;
;
; AMDGPU: Function Attrs: convergent noinline nounwind
; AMDGPU-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after.internalized
; AMDGPU-SAME: (i32 [[A:%.*]]) #[[ATTR1]] {
; AMDGPU-NEXT: entry:
; AMDGPU-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
; AMDGPU-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
; AMDGPU-NEXT: [[CMP:%.*]] = icmp eq i32 [[A]], 0
; AMDGPU-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
; AMDGPU: if.then:
; AMDGPU-NEXT: br label [[RETURN:%.*]]
; AMDGPU: if.end:
; AMDGPU-NEXT: [[SUB:%.*]] = sub nsw i32 [[A]], 1
; AMDGPU-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after.internalized(i32 [[SUB]]) #[[ATTR8]]
; AMDGPU-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after_after.internalized() #[[ATTR8]]
; AMDGPU-NEXT: br label [[RETURN]]
; AMDGPU: return:
; AMDGPU-NEXT: ret void
;
;
; AMDGPU: Function Attrs: convergent noinline nounwind
; AMDGPU-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after
; AMDGPU-SAME: (i32 [[A:%.*]]) #[[ATTR1]] {
; AMDGPU-NEXT: entry:
; AMDGPU-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
; AMDGPU-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
; AMDGPU-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
; AMDGPU-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP0]], 0
; AMDGPU-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
; AMDGPU: if.then:
; AMDGPU-NEXT: br label [[RETURN:%.*]]
; AMDGPU: if.end:
; AMDGPU-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
; AMDGPU-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP1]], 1
; AMDGPU-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after(i32 [[SUB]]) #[[ATTR10]]
; AMDGPU-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after_after() #[[ATTR10]]
; AMDGPU-NEXT: br label [[RETURN]]
; AMDGPU: return:
; AMDGPU-NEXT: ret void
;
;
; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
; AMDGPU-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112
; AMDGPU-SAME: () #[[ATTR0]] {
; AMDGPU-NEXT: entry:
; AMDGPU-NEXT: [[WORKER_WORK_FN_ADDR:%.*]] = alloca i8*, align 8, addrspace(5)
; AMDGPU-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; AMDGPU-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
; AMDGPU-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 false, i1 true)
; AMDGPU-NEXT: [[BLOCK_HW_SIZE:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
; AMDGPU-NEXT: [[WARP_SIZE:%.*]] = call i32 @__kmpc_get_warp_size()
; AMDGPU-NEXT: [[BLOCK_SIZE:%.*]] = sub i32 [[BLOCK_HW_SIZE]], [[WARP_SIZE]]
; AMDGPU-NEXT: [[THREAD_IS_MAIN_OR_WORKER:%.*]] = icmp slt i32 [[TMP0]], [[BLOCK_SIZE]]
; AMDGPU-NEXT: br i1 [[THREAD_IS_MAIN_OR_WORKER]], label [[IS_WORKER_CHECK:%.*]], label [[WORKER_STATE_MACHINE_FINISHED:%.*]]
; AMDGPU: is_worker_check:
; AMDGPU-NEXT: [[THREAD_IS_WORKER:%.*]] = icmp ne i32 [[TMP0]], -1
; AMDGPU-NEXT: br i1 [[THREAD_IS_WORKER]], label [[WORKER_STATE_MACHINE_BEGIN:%.*]], label [[THREAD_USER_CODE_CHECK:%.*]]
; AMDGPU: worker_state_machine.begin:
; AMDGPU-NEXT: call void @__kmpc_barrier_simple_generic(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
; AMDGPU-NEXT: [[WORKER_WORK_FN_ADDR_GENERIC:%.*]] = addrspacecast i8* addrspace(5)* [[WORKER_WORK_FN_ADDR]] to i8**
; AMDGPU-NEXT: [[WORKER_IS_ACTIVE:%.*]] = call i1 @__kmpc_kernel_parallel(i8** [[WORKER_WORK_FN_ADDR_GENERIC]])
; AMDGPU-NEXT: [[WORKER_WORK_FN:%.*]] = load i8*, i8** [[WORKER_WORK_FN_ADDR_GENERIC]], align 8
; AMDGPU-NEXT: [[WORKER_WORK_FN_ADDR_CAST:%.*]] = bitcast i8* [[WORKER_WORK_FN]] to void (i16, i32)*
; AMDGPU-NEXT: [[WORKER_IS_DONE:%.*]] = icmp eq i8* [[WORKER_WORK_FN]], null
; AMDGPU-NEXT: br i1 [[WORKER_IS_DONE]], label [[WORKER_STATE_MACHINE_FINISHED]], label [[WORKER_STATE_MACHINE_IS_ACTIVE_CHECK:%.*]]
; AMDGPU: worker_state_machine.finished:
; AMDGPU-NEXT: ret void
; AMDGPU: worker_state_machine.is_active.check:
; AMDGPU-NEXT: br i1 [[WORKER_IS_ACTIVE]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_FALLBACK_EXECUTE:%.*]], label [[WORKER_STATE_MACHINE_DONE_BARRIER:%.*]]
; AMDGPU: worker_state_machine.parallel_region.fallback.execute:
; AMDGPU-NEXT: call void [[WORKER_WORK_FN_ADDR_CAST]](i16 0, i32 [[TMP0]])
; AMDGPU-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END:%.*]]
; AMDGPU: worker_state_machine.parallel_region.end:
; AMDGPU-NEXT: call void @__kmpc_kernel_end_parallel()
; AMDGPU-NEXT: br label [[WORKER_STATE_MACHINE_DONE_BARRIER]]
; AMDGPU: worker_state_machine.done.barrier:
; AMDGPU-NEXT: call void @__kmpc_barrier_simple_generic(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
; AMDGPU-NEXT: br label [[WORKER_STATE_MACHINE_BEGIN]]
; AMDGPU: thread.user_code.check:
; AMDGPU-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
; AMDGPU-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; AMDGPU: user_code.entry:
; AMDGPU-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) #[[ATTR3]]
; AMDGPU-NEXT: call void @__omp_outlined__16(i32* noalias nocapture noundef nonnull readnone align 4 dereferenceable(4) [[DOTTHREADID_TEMP_]], i32* noalias nocapture noundef nonnull readnone align 4 dereferenceable(4) [[DOTZERO_ADDR]]) #[[ATTR3]]
; AMDGPU-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
; AMDGPU-NEXT: ret void
; AMDGPU: worker.exit:
; AMDGPU-NEXT: ret void
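;;
;; weak_callee_empty() is weak and may be replaced at link time, so no
;; parallel region is known here: the state machine degenerates to a single
;; indirect fallback call.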
;
;
; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__16
; AMDGPU-SAME: (i32* noalias nocapture nofree nonnull readnone align 4 dereferenceable(4) [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree nonnull readnone align 4 dereferenceable(4) [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; AMDGPU-NEXT: entry:
; AMDGPU-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; AMDGPU-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; AMDGPU-NEXT: call void @weak_callee_empty() #[[ATTR8]]
; AMDGPU-NEXT: ret void
;
;
; AMDGPU: Function Attrs: convergent noinline nounwind
; AMDGPU-LABEL: define {{[^@]+}}@weak_callee_empty
; AMDGPU-SAME: () #[[ATTR1]] {
; AMDGPU-NEXT: entry:
; AMDGPU-NEXT: ret void
;
;
; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__17
; AMDGPU-SAME: (i32* noalias nocapture nofree readnone [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree readnone [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; AMDGPU-NEXT: entry:
; AMDGPU-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; AMDGPU-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; AMDGPU-NEXT: call void @p0() #[[ATTR10]]
; AMDGPU-NEXT: ret void
;
;
; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__17_wrapper
; AMDGPU-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
; AMDGPU-NEXT: entry:
; AMDGPU-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
; AMDGPU-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
; AMDGPU-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; AMDGPU-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
; AMDGPU-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
; AMDGPU-NEXT: call void @__omp_outlined__17(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR3]]
; AMDGPU-NEXT: ret void
;
;
; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__18
; AMDGPU-SAME: (i32* noalias nocapture nofree readnone [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree readnone [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; AMDGPU-NEXT: entry:
; AMDGPU-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; AMDGPU-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; AMDGPU-NEXT: call void @p0() #[[ATTR10]]
; AMDGPU-NEXT: ret void
;
;
; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__18_wrapper
; AMDGPU-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
; AMDGPU-NEXT: entry:
; AMDGPU-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
; AMDGPU-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
; AMDGPU-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; AMDGPU-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
; AMDGPU-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
; AMDGPU-NEXT: call void @__omp_outlined__18(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR3]]
; AMDGPU-NEXT: ret void
;
;
; AMDGPU: Function Attrs: convergent noinline nounwind
; AMDGPU-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after_after.internalized
; AMDGPU-SAME: () #[[ATTR1]] {
; AMDGPU-NEXT: entry:
; AMDGPU-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
; AMDGPU-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* noundef @[[GLOB2]]) #[[ATTR3]]
; AMDGPU-NEXT: [[TMP1:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
; AMDGPU-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* noundef @[[GLOB2]], i32 [[TMP0]], i32 noundef 1, i32 noundef -1, i32 noundef -1, i8* noundef bitcast (void (i32*, i32*)* @__omp_outlined__19 to i8*), i8* noundef bitcast (void (i16, i32)* @__omp_outlined__19_wrapper to i8*), i8** noundef [[TMP1]], i64 noundef 0)
; AMDGPU-NEXT: ret void
;
;
; AMDGPU: Function Attrs: convergent noinline nounwind
; AMDGPU-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after_after
; AMDGPU-SAME: () #[[ATTR1]] {
; AMDGPU-NEXT: entry:
; AMDGPU-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
; AMDGPU-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
; AMDGPU-NEXT: [[TMP1:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
; AMDGPU-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__19 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__19_wrapper to i8*), i8** [[TMP1]], i64 0)
; AMDGPU-NEXT: ret void
;
;
; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__19
; AMDGPU-SAME: (i32* noalias nocapture nofree readnone [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree readnone [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; AMDGPU-NEXT: entry:
; AMDGPU-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; AMDGPU-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; AMDGPU-NEXT: call void @p0() #[[ATTR10]]
; AMDGPU-NEXT: ret void
;
;
; AMDGPU: Function Attrs: convergent noinline norecurse nounwind
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__19_wrapper
; AMDGPU-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
; AMDGPU-NEXT: entry:
; AMDGPU-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
; AMDGPU-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
; AMDGPU-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; AMDGPU-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
; AMDGPU-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
; AMDGPU-NEXT: call void @__omp_outlined__19(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR3]]
; AMDGPU-NEXT: ret void
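;;
;; The NVPTX check lines below mirror the AMDGPU ones; the main difference is
;; the worker work-function slot, which is a plain generic alloca with no
;; addrspace(5) allocation or addrspacecast.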
;
;
; NVPTX: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_no_state_machine_needed_l14
; NVPTX-SAME: () #[[ATTR0:[0-9]+]] {
; NVPTX-NEXT: entry:
; NVPTX-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; NVPTX-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
; NVPTX-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1:[0-9]+]], i8 1, i1 false, i1 true)
; NVPTX-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
; NVPTX-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; NVPTX: user_code.entry:
; NVPTX-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) #[[ATTR3:[0-9]+]]
; NVPTX-NEXT: call void @__omp_outlined__(i32* noalias nocapture noundef nonnull readnone align 4 dereferenceable(4) [[DOTTHREADID_TEMP_]], i32* noalias nocapture noundef nonnull readnone align 4 dereferenceable(4) [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
; NVPTX-NEXT: ret void
; NVPTX: worker.exit:
; NVPTX-NEXT: ret void
;
;
; NVPTX: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__
; NVPTX-SAME: (i32* noalias nocapture nofree nonnull readnone align 4 dereferenceable(4) [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree nonnull readnone align 4 dereferenceable(4) [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX-NEXT: entry:
; NVPTX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR8:[0-9]+]]
; NVPTX-NEXT: call void @unknown_no_openmp() #[[ATTR9:[0-9]+]]
; NVPTX-NEXT: ret void
;
;
; NVPTX: Function Attrs: convergent noinline nounwind
; NVPTX-LABEL: define {{[^@]+}}@no_parallel_region_in_here.internalized
; NVPTX-SAME: () #[[ATTR1:[0-9]+]] {
; NVPTX-NEXT: entry:
; NVPTX-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* noundef @[[GLOB2:[0-9]+]]) #[[ATTR3]]
; NVPTX-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_single(%struct.ident_t* noundef @[[GLOB2]], i32 [[TMP0]]) #[[ATTR3]]
; NVPTX-NEXT: [[TMP2:%.*]] = icmp ne i32 [[TMP1]], 0
; NVPTX-NEXT: br i1 [[TMP2]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
; NVPTX: omp_if.then:
; NVPTX-NEXT: store i32 0, i32* @G, align 4
; NVPTX-NEXT: call void @__kmpc_end_single(%struct.ident_t* noundef @[[GLOB2]], i32 [[TMP0]]) #[[ATTR3]]
; NVPTX-NEXT: br label [[OMP_IF_END]]
; NVPTX: omp_if.end:
; NVPTX-NEXT: call void @__kmpc_barrier(%struct.ident_t* noundef @[[GLOB3:[0-9]+]], i32 [[TMP0]]) #[[ATTR3]]
; NVPTX-NEXT: ret void
;
;
; NVPTX: Function Attrs: convergent noinline nounwind
; NVPTX-LABEL: define {{[^@]+}}@no_parallel_region_in_here
; NVPTX-SAME: () #[[ATTR1]] {
; NVPTX-NEXT: entry:
; NVPTX-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
; NVPTX-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_single(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]])
; NVPTX-NEXT: [[TMP2:%.*]] = icmp ne i32 [[TMP1]], 0
; NVPTX-NEXT: br i1 [[TMP2]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
; NVPTX: omp_if.then:
; NVPTX-NEXT: store i32 0, i32* @G, align 4
; NVPTX-NEXT: call void @__kmpc_end_single(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]])
; NVPTX-NEXT: br label [[OMP_IF_END]]
; NVPTX: omp_if.end:
; NVPTX-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3]], i32 [[TMP0]])
; NVPTX-NEXT: ret void
;
;
; NVPTX: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_l22
; NVPTX-SAME: () #[[ATTR0]] {
; NVPTX-NEXT: entry:
; NVPTX-NEXT: [[WORKER_WORK_FN_ADDR:%.*]] = alloca i8*, align 8
; NVPTX-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; NVPTX-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
; NVPTX-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 false, i1 true)
; NVPTX-NEXT: [[BLOCK_HW_SIZE:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
; NVPTX-NEXT: [[WARP_SIZE:%.*]] = call i32 @__kmpc_get_warp_size()
; NVPTX-NEXT: [[BLOCK_SIZE:%.*]] = sub i32 [[BLOCK_HW_SIZE]], [[WARP_SIZE]]
; NVPTX-NEXT: [[THREAD_IS_MAIN_OR_WORKER:%.*]] = icmp slt i32 [[TMP0]], [[BLOCK_SIZE]]
; NVPTX-NEXT: br i1 [[THREAD_IS_MAIN_OR_WORKER]], label [[IS_WORKER_CHECK:%.*]], label [[WORKER_STATE_MACHINE_FINISHED:%.*]]
; NVPTX: is_worker_check:
; NVPTX-NEXT: [[THREAD_IS_WORKER:%.*]] = icmp ne i32 [[TMP0]], -1
; NVPTX-NEXT: br i1 [[THREAD_IS_WORKER]], label [[WORKER_STATE_MACHINE_BEGIN:%.*]], label [[THREAD_USER_CODE_CHECK:%.*]]
; NVPTX: worker_state_machine.begin:
; NVPTX-NEXT: call void @__kmpc_barrier_simple_generic(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
; NVPTX-NEXT: [[WORKER_IS_ACTIVE:%.*]] = call i1 @__kmpc_kernel_parallel(i8** [[WORKER_WORK_FN_ADDR]])
; NVPTX-NEXT: [[WORKER_WORK_FN:%.*]] = load i8*, i8** [[WORKER_WORK_FN_ADDR]], align 8
; NVPTX-NEXT: [[WORKER_WORK_FN_ADDR_CAST:%.*]] = bitcast i8* [[WORKER_WORK_FN]] to void (i16, i32)*
; NVPTX-NEXT: [[WORKER_IS_DONE:%.*]] = icmp eq i8* [[WORKER_WORK_FN]], null
; NVPTX-NEXT: br i1 [[WORKER_IS_DONE]], label [[WORKER_STATE_MACHINE_FINISHED]], label [[WORKER_STATE_MACHINE_IS_ACTIVE_CHECK:%.*]]
; NVPTX: worker_state_machine.finished:
; NVPTX-NEXT: ret void
; NVPTX: worker_state_machine.is_active.check:
; NVPTX-NEXT: br i1 [[WORKER_IS_ACTIVE]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK:%.*]], label [[WORKER_STATE_MACHINE_DONE_BARRIER:%.*]]
; NVPTX: worker_state_machine.parallel_region.check:
; NVPTX-NEXT: [[WORKER_CHECK_PARALLEL_REGION:%.*]] = icmp eq void (i16, i32)* [[WORKER_WORK_FN_ADDR_CAST]], bitcast (i8* @__omp_outlined__2_wrapper.ID to void (i16, i32)*)
; NVPTX-NEXT: br i1 [[WORKER_CHECK_PARALLEL_REGION]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK1:%.*]]
; NVPTX: worker_state_machine.parallel_region.execute:
; NVPTX-NEXT: call void @__omp_outlined__2_wrapper(i16 0, i32 [[TMP0]])
; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END:%.*]]
; NVPTX: worker_state_machine.parallel_region.check1:
; NVPTX-NEXT: br i1 true, label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE2:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK3:%.*]]
; NVPTX: worker_state_machine.parallel_region.execute2:
; NVPTX-NEXT: call void @__omp_outlined__3_wrapper(i16 0, i32 [[TMP0]])
; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]]
; NVPTX: worker_state_machine.parallel_region.check3:
; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]]
; NVPTX: worker_state_machine.parallel_region.end:
; NVPTX-NEXT: call void @__kmpc_kernel_end_parallel()
; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_DONE_BARRIER]]
; NVPTX: worker_state_machine.done.barrier:
; NVPTX-NEXT: call void @__kmpc_barrier_simple_generic(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_BEGIN]]
; NVPTX: thread.user_code.check:
; NVPTX-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
; NVPTX-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; NVPTX: user_code.entry:
; NVPTX-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) #[[ATTR3]]
; NVPTX-NEXT: store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
; NVPTX-NEXT: call void @__omp_outlined__1(i32* noalias nocapture noundef nonnull readonly align 4 dereferenceable(4) [[DOTTHREADID_TEMP_]], i32* noalias nocapture noundef nonnull readnone align 4 dereferenceable(4) [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
; NVPTX-NEXT: ret void
; NVPTX: worker.exit:
; NVPTX-NEXT: ret void
;
;
; NVPTX: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__1
; NVPTX-SAME: (i32* noalias nocapture nofree noundef nonnull readonly align 4 dereferenceable(4) [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree nonnull readnone align 4 dereferenceable(4) [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX-NEXT: entry:
; NVPTX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
; NVPTX-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x i8*], align 8
; NVPTX-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
; NVPTX-NEXT: call void @unknown_no_openmp() #[[ATTR9]]
; NVPTX-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTGLOBAL_TID_]], align 4
; NVPTX-NEXT: [[TMP1:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
; NVPTX-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* noundef @[[GLOB1]], i32 [[TMP0]], i32 noundef 1, i32 noundef -1, i32 noundef -1, i8* noundef bitcast (void (i32*, i32*)* @__omp_outlined__2 to i8*), i8* noundef @__omp_outlined__2_wrapper.ID, i8** noundef [[TMP1]], i64 noundef 0)
; NVPTX-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR8]]
; NVPTX-NEXT: [[TMP2:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS1]] to i8**
; NVPTX-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* noundef @[[GLOB1]], i32 [[TMP0]], i32 noundef 1, i32 noundef -1, i32 noundef -1, i8* noundef bitcast (void (i32*, i32*)* @__omp_outlined__3 to i8*), i8* noundef @__omp_outlined__3_wrapper.ID, i8** noundef [[TMP2]], i64 noundef 0)
; NVPTX-NEXT: ret void
;
;
; NVPTX: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__2
; NVPTX-SAME: (i32* noalias nocapture nofree readnone [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree readnone [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX-NEXT: entry:
; NVPTX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-NEXT: call void @p0() #[[ATTR10:[0-9]+]]
; NVPTX-NEXT: ret void
;
;
; NVPTX: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__2_wrapper
; NVPTX-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
; NVPTX-NEXT: entry:
; NVPTX-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
; NVPTX-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
; NVPTX-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; NVPTX-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
; NVPTX-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
; NVPTX-NEXT: call void @__omp_outlined__2(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX-NEXT: ret void
;
;
; NVPTX: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__3
; NVPTX-SAME: (i32* noalias nocapture nofree readnone [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree readnone [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX-NEXT: entry:
; NVPTX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-NEXT: call void @p1() #[[ATTR10]]
; NVPTX-NEXT: ret void
;
;
; NVPTX: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__3_wrapper
; NVPTX-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
; NVPTX-NEXT: entry:
; NVPTX-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
; NVPTX-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
; NVPTX-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; NVPTX-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
; NVPTX-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
; NVPTX-NEXT: call void @__omp_outlined__3(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX-NEXT: ret void
;
;
; NVPTX: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39
; NVPTX-SAME: () #[[ATTR0]] {
; NVPTX-NEXT: entry:
; NVPTX-NEXT: [[WORKER_WORK_FN_ADDR:%.*]] = alloca i8*, align 8
; NVPTX-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; NVPTX-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
; NVPTX-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 false, i1 true)
; NVPTX-NEXT: [[BLOCK_HW_SIZE:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
; NVPTX-NEXT: [[WARP_SIZE:%.*]] = call i32 @__kmpc_get_warp_size()
; NVPTX-NEXT: [[BLOCK_SIZE:%.*]] = sub i32 [[BLOCK_HW_SIZE]], [[WARP_SIZE]]
; NVPTX-NEXT: [[THREAD_IS_MAIN_OR_WORKER:%.*]] = icmp slt i32 [[TMP0]], [[BLOCK_SIZE]]
; NVPTX-NEXT: br i1 [[THREAD_IS_MAIN_OR_WORKER]], label [[IS_WORKER_CHECK:%.*]], label [[WORKER_STATE_MACHINE_FINISHED:%.*]]
; NVPTX: is_worker_check:
; NVPTX-NEXT: [[THREAD_IS_WORKER:%.*]] = icmp ne i32 [[TMP0]], -1
; NVPTX-NEXT: br i1 [[THREAD_IS_WORKER]], label [[WORKER_STATE_MACHINE_BEGIN:%.*]], label [[THREAD_USER_CODE_CHECK:%.*]]
; NVPTX: worker_state_machine.begin:
; NVPTX-NEXT: call void @__kmpc_barrier_simple_generic(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
; NVPTX-NEXT: [[WORKER_IS_ACTIVE:%.*]] = call i1 @__kmpc_kernel_parallel(i8** [[WORKER_WORK_FN_ADDR]])
; NVPTX-NEXT: [[WORKER_WORK_FN:%.*]] = load i8*, i8** [[WORKER_WORK_FN_ADDR]], align 8
; NVPTX-NEXT: [[WORKER_WORK_FN_ADDR_CAST:%.*]] = bitcast i8* [[WORKER_WORK_FN]] to void (i16, i32)*
; NVPTX-NEXT: [[WORKER_IS_DONE:%.*]] = icmp eq i8* [[WORKER_WORK_FN]], null
; NVPTX-NEXT: br i1 [[WORKER_IS_DONE]], label [[WORKER_STATE_MACHINE_FINISHED]], label [[WORKER_STATE_MACHINE_IS_ACTIVE_CHECK:%.*]]
; NVPTX: worker_state_machine.finished:
; NVPTX-NEXT: ret void
; NVPTX: worker_state_machine.is_active.check:
; NVPTX-NEXT: br i1 [[WORKER_IS_ACTIVE]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK:%.*]], label [[WORKER_STATE_MACHINE_DONE_BARRIER:%.*]]
2034 ; NVPTX: worker_state_machine.parallel_region.check:
2035 ; NVPTX-NEXT: [[WORKER_CHECK_PARALLEL_REGION:%.*]] = icmp eq void (i16, i32)* [[WORKER_WORK_FN_ADDR_CAST]], @__omp_outlined__17_wrapper
2036 ; NVPTX-NEXT: br i1 [[WORKER_CHECK_PARALLEL_REGION]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK1:%.*]]
2037 ; NVPTX: worker_state_machine.parallel_region.execute:
2038 ; NVPTX-NEXT: call void @__omp_outlined__17_wrapper(i16 0, i32 [[TMP0]])
2039 ; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END:%.*]]
2040 ; NVPTX: worker_state_machine.parallel_region.check1:
2041 ; NVPTX-NEXT: [[WORKER_CHECK_PARALLEL_REGION4:%.*]] = icmp eq void (i16, i32)* [[WORKER_WORK_FN_ADDR_CAST]], bitcast (i8* @__omp_outlined__5_wrapper.ID to void (i16, i32)*)
2042 ; NVPTX-NEXT: br i1 [[WORKER_CHECK_PARALLEL_REGION4]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE2:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK3:%.*]]
2043 ; NVPTX: worker_state_machine.parallel_region.execute2:
2044 ; NVPTX-NEXT: call void @__omp_outlined__5_wrapper(i16 0, i32 [[TMP0]])
2045 ; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]]
2046 ; NVPTX: worker_state_machine.parallel_region.check3:
2047 ; NVPTX-NEXT: br i1 true, label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE5:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK6:%.*]]
2048 ; NVPTX: worker_state_machine.parallel_region.execute5:
2049 ; NVPTX-NEXT: call void @__omp_outlined__18_wrapper(i16 0, i32 [[TMP0]])
2050 ; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]]
2051 ; NVPTX: worker_state_machine.parallel_region.check6:
2052 ; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]]
2053 ; NVPTX: worker_state_machine.parallel_region.end:
2054 ; NVPTX-NEXT: call void @__kmpc_kernel_end_parallel()
2055 ; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_DONE_BARRIER]]
2056 ; NVPTX: worker_state_machine.done.barrier:
2057 ; NVPTX-NEXT: call void @__kmpc_barrier_simple_generic(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
2058 ; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_BEGIN]]
2059 ; NVPTX: thread.user_code.check:
2060 ; NVPTX-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
2061 ; NVPTX-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
2062 ; NVPTX: user_code.entry:
2063 ; NVPTX-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) #[[ATTR3]]
2064 ; NVPTX-NEXT: store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
2065 ; NVPTX-NEXT: call void @__omp_outlined__4(i32* noalias nocapture noundef nonnull readonly align 4 dereferenceable(4) [[DOTTHREADID_TEMP_]], i32* noalias nocapture noundef nonnull readnone align 4 dereferenceable(4) [[DOTZERO_ADDR]]) #[[ATTR3]]
2066 ; NVPTX-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
2067 ; NVPTX-NEXT: ret void
2068 ; NVPTX: worker.exit:
2069 ; NVPTX-NEXT: ret void
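; NOTE: Editor's sketch, not test input: the custom state machine checked
; above (and in the kernels below) has the shape of this C-like loop. The
; names `known_wrapper_1`, `known_wrapper_2`, and `loc` are placeholders.
;;
;; extern void __kmpc_barrier_simple_generic(void *loc, int tid);
;; extern int __kmpc_kernel_parallel(void **work_fn);
;; extern void __kmpc_kernel_end_parallel(void);
;; extern void known_wrapper_1(short, int), known_wrapper_2(short, int);
;;
;; static void worker_state_machine(void *loc, int tid) {
;;   void (*work_fn)(short, int);
;;   for (;;) {
;;     __kmpc_barrier_simple_generic(loc, tid);       // wait for work
;;     int active = __kmpc_kernel_parallel((void **)&work_fn);
;;     if (!work_fn)                                   // kernel is done
;;       return;
;;     if (active) {
;;       if (work_fn == known_wrapper_1)               // direct, inlinable
;;         known_wrapper_1(0, tid);
;;       else if (work_fn == known_wrapper_2)
;;         known_wrapper_2(0, tid);
;;       else
;;         work_fn(0, tid);                            // indirect fallback
;;       __kmpc_kernel_end_parallel();
;;     }
;;     __kmpc_barrier_simple_generic(loc, tid);        // done barrier
;;   }
;; }
;
; In the kernel above, @__omp_outlined__17_wrapper is compared as a plain
; function pointer (its address also escapes through the non-internalized
; @simple_state_machine_interprocedural_before), whereas the purely local
; region is identified via its @__omp_outlined__5_wrapper.ID proxy. Since
; every reachable parallel region is known, the last comparison folds to
; `br i1 true` and no indirect fallback remains.
;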
; NVPTX: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__4
; NVPTX-SAME: (i32* noalias nocapture nofree noundef nonnull readonly align 4 dereferenceable(4) [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree nonnull readnone align 4 dereferenceable(4) [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX-NEXT: entry:
; NVPTX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
; NVPTX-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
; NVPTX-NEXT: call void @unknown_no_openmp() #[[ATTR9]]
; NVPTX-NEXT: call void @simple_state_machine_interprocedural_before.internalized() #[[ATTR8]]
; NVPTX-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR8]]
; NVPTX-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTGLOBAL_TID_]], align 4
; NVPTX-NEXT: [[TMP1:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
; NVPTX-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* noundef @[[GLOB1]], i32 [[TMP0]], i32 noundef 1, i32 noundef -1, i32 noundef -1, i8* noundef bitcast (void (i32*, i32*)* @__omp_outlined__5 to i8*), i8* noundef @__omp_outlined__5_wrapper.ID, i8** noundef [[TMP1]], i64 noundef 0)
; NVPTX-NEXT: call void @simple_state_machine_interprocedural_after.internalized() #[[ATTR8]]
; NVPTX-NEXT: ret void
;
;
; NVPTX: Function Attrs: convergent noinline nounwind
; NVPTX-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_before.internalized
; NVPTX-SAME: () #[[ATTR1]] {
; NVPTX-NEXT: entry:
; NVPTX-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
; NVPTX-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* noundef @[[GLOB2]]) #[[ATTR3]]
; NVPTX-NEXT: [[TMP1:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
; NVPTX-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* noundef @[[GLOB2]], i32 [[TMP0]], i32 noundef 1, i32 noundef -1, i32 noundef -1, i8* noundef bitcast (void (i32*, i32*)* @__omp_outlined__17 to i8*), i8* noundef bitcast (void (i16, i32)* @__omp_outlined__17_wrapper to i8*), i8** noundef [[TMP1]], i64 noundef 0)
; NVPTX-NEXT: ret void
;
;
; NVPTX: Function Attrs: convergent noinline nounwind
; NVPTX-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_before
; NVPTX-SAME: () #[[ATTR1]] {
; NVPTX-NEXT: entry:
; NVPTX-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
; NVPTX-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
; NVPTX-NEXT: [[TMP1:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
; NVPTX-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__17 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__17_wrapper to i8*), i8** [[TMP1]], i64 0)
; NVPTX-NEXT: ret void
;
;
; NVPTX: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__5
; NVPTX-SAME: (i32* noalias nocapture nofree readnone [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree readnone [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX-NEXT: entry:
; NVPTX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-NEXT: call void @p1() #[[ATTR10]]
; NVPTX-NEXT: ret void
;
;
; NVPTX: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__5_wrapper
; NVPTX-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
; NVPTX-NEXT: entry:
; NVPTX-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
; NVPTX-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
; NVPTX-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; NVPTX-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
; NVPTX-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
; NVPTX-NEXT: call void @__omp_outlined__5(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX-NEXT: ret void
;
;
; NVPTX: Function Attrs: convergent noinline nounwind
; NVPTX-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_after.internalized
; NVPTX-SAME: () #[[ATTR1]] {
; NVPTX-NEXT: entry:
; NVPTX-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
; NVPTX-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* noundef @[[GLOB2]]) #[[ATTR3]]
; NVPTX-NEXT: [[TMP1:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
; NVPTX-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* noundef @[[GLOB2]], i32 [[TMP0]], i32 noundef 1, i32 noundef -1, i32 noundef -1, i8* noundef bitcast (void (i32*, i32*)* @__omp_outlined__18 to i8*), i8* noundef bitcast (void (i16, i32)* @__omp_outlined__18_wrapper to i8*), i8** noundef [[TMP1]], i64 noundef 0)
; NVPTX-NEXT: ret void
;
;
; NVPTX: Function Attrs: convergent noinline nounwind
; NVPTX-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_after
; NVPTX-SAME: () #[[ATTR1]] {
; NVPTX-NEXT: entry:
; NVPTX-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
; NVPTX-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
; NVPTX-NEXT: [[TMP1:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
; NVPTX-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__18 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__18_wrapper to i8*), i8** [[TMP1]], i64 0)
; NVPTX-NEXT: ret void
;
;
; NVPTX: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55
; NVPTX-SAME: () #[[ATTR0]] {
; NVPTX-NEXT: entry:
; NVPTX-NEXT: [[WORKER_WORK_FN_ADDR:%.*]] = alloca i8*, align 8
; NVPTX-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; NVPTX-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
; NVPTX-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 false, i1 true)
; NVPTX-NEXT: [[BLOCK_HW_SIZE:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
; NVPTX-NEXT: [[WARP_SIZE:%.*]] = call i32 @__kmpc_get_warp_size()
; NVPTX-NEXT: [[BLOCK_SIZE:%.*]] = sub i32 [[BLOCK_HW_SIZE]], [[WARP_SIZE]]
; NVPTX-NEXT: [[THREAD_IS_MAIN_OR_WORKER:%.*]] = icmp slt i32 [[TMP0]], [[BLOCK_SIZE]]
; NVPTX-NEXT: br i1 [[THREAD_IS_MAIN_OR_WORKER]], label [[IS_WORKER_CHECK:%.*]], label [[WORKER_STATE_MACHINE_FINISHED:%.*]]
; NVPTX: is_worker_check:
; NVPTX-NEXT: [[THREAD_IS_WORKER:%.*]] = icmp ne i32 [[TMP0]], -1
; NVPTX-NEXT: br i1 [[THREAD_IS_WORKER]], label [[WORKER_STATE_MACHINE_BEGIN:%.*]], label [[THREAD_USER_CODE_CHECK:%.*]]
; NVPTX: worker_state_machine.begin:
; NVPTX-NEXT: call void @__kmpc_barrier_simple_generic(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
; NVPTX-NEXT: [[WORKER_IS_ACTIVE:%.*]] = call i1 @__kmpc_kernel_parallel(i8** [[WORKER_WORK_FN_ADDR]])
; NVPTX-NEXT: [[WORKER_WORK_FN:%.*]] = load i8*, i8** [[WORKER_WORK_FN_ADDR]], align 8
; NVPTX-NEXT: [[WORKER_WORK_FN_ADDR_CAST:%.*]] = bitcast i8* [[WORKER_WORK_FN]] to void (i16, i32)*
; NVPTX-NEXT: [[WORKER_IS_DONE:%.*]] = icmp eq i8* [[WORKER_WORK_FN]], null
; NVPTX-NEXT: br i1 [[WORKER_IS_DONE]], label [[WORKER_STATE_MACHINE_FINISHED]], label [[WORKER_STATE_MACHINE_IS_ACTIVE_CHECK:%.*]]
; NVPTX: worker_state_machine.finished:
; NVPTX-NEXT: ret void
; NVPTX: worker_state_machine.is_active.check:
; NVPTX-NEXT: br i1 [[WORKER_IS_ACTIVE]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK:%.*]], label [[WORKER_STATE_MACHINE_DONE_BARRIER:%.*]]
; NVPTX: worker_state_machine.parallel_region.check:
; NVPTX-NEXT: [[WORKER_CHECK_PARALLEL_REGION:%.*]] = icmp eq void (i16, i32)* [[WORKER_WORK_FN_ADDR_CAST]], bitcast (i8* @__omp_outlined__7_wrapper.ID to void (i16, i32)*)
; NVPTX-NEXT: br i1 [[WORKER_CHECK_PARALLEL_REGION]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK1:%.*]]
; NVPTX: worker_state_machine.parallel_region.execute:
; NVPTX-NEXT: call void @__omp_outlined__7_wrapper(i16 0, i32 [[TMP0]])
; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END:%.*]]
; NVPTX: worker_state_machine.parallel_region.check1:
; NVPTX-NEXT: [[WORKER_CHECK_PARALLEL_REGION4:%.*]] = icmp eq void (i16, i32)* [[WORKER_WORK_FN_ADDR_CAST]], bitcast (i8* @__omp_outlined__8_wrapper.ID to void (i16, i32)*)
; NVPTX-NEXT: br i1 [[WORKER_CHECK_PARALLEL_REGION4]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE2:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_FALLBACK_EXECUTE:%.*]]
; NVPTX: worker_state_machine.parallel_region.execute2:
; NVPTX-NEXT: call void @__omp_outlined__8_wrapper(i16 0, i32 [[TMP0]])
; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]]
; NVPTX: worker_state_machine.parallel_region.fallback.execute:
; NVPTX-NEXT: call void [[WORKER_WORK_FN_ADDR_CAST]](i16 0, i32 [[TMP0]])
; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]]
; NVPTX: worker_state_machine.parallel_region.end:
; NVPTX-NEXT: call void @__kmpc_kernel_end_parallel()
; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_DONE_BARRIER]]
; NVPTX: worker_state_machine.done.barrier:
; NVPTX-NEXT: call void @__kmpc_barrier_simple_generic(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_BEGIN]]
; NVPTX: thread.user_code.check:
; NVPTX-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
; NVPTX-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; NVPTX: user_code.entry:
; NVPTX-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) #[[ATTR3]]
; NVPTX-NEXT: store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
; NVPTX-NEXT: call void @__omp_outlined__6(i32* noalias nocapture noundef nonnull readonly align 4 dereferenceable(4) [[DOTTHREADID_TEMP_]], i32* noalias nocapture noundef nonnull readnone align 4 dereferenceable(4) [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
; NVPTX-NEXT: ret void
; NVPTX: worker.exit:
; NVPTX-NEXT: ret void
;
;
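; NOTE: This kernel calls @unknown(), which may reach __kmpc_parallel_51
; with a work function the optimizer cannot see. The state machine above
; therefore keeps the indirect worker_state_machine.parallel_region.fallback
; call after the two known-region checks instead of folding the final
; branch to `br i1 true`.
;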
; NVPTX: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__6
; NVPTX-SAME: (i32* noalias nocapture nofree noundef nonnull readonly align 4 dereferenceable(4) [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree nonnull readnone align 4 dereferenceable(4) [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX-NEXT: entry:
; NVPTX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
; NVPTX-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x i8*], align 8
; NVPTX-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
; NVPTX-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTGLOBAL_TID_]], align 4
; NVPTX-NEXT: [[TMP1:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
; NVPTX-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* noundef @[[GLOB1]], i32 [[TMP0]], i32 noundef 1, i32 noundef -1, i32 noundef -1, i8* noundef bitcast (void (i32*, i32*)* @__omp_outlined__7 to i8*), i8* noundef @__omp_outlined__7_wrapper.ID, i8** noundef [[TMP1]], i64 noundef 0)
; NVPTX-NEXT: [[CALL:%.*]] = call i32 @unknown() #[[ATTR10]]
; NVPTX-NEXT: [[TMP2:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS1]] to i8**
; NVPTX-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* noundef @[[GLOB1]], i32 [[TMP0]], i32 noundef 1, i32 noundef -1, i32 noundef -1, i8* noundef bitcast (void (i32*, i32*)* @__omp_outlined__8 to i8*), i8* noundef @__omp_outlined__8_wrapper.ID, i8** noundef [[TMP2]], i64 noundef 0)
; NVPTX-NEXT: ret void
;
;
; NVPTX: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__7
; NVPTX-SAME: (i32* noalias nocapture nofree readnone [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree readnone [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX-NEXT: entry:
; NVPTX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-NEXT: call void @p0() #[[ATTR10]]
; NVPTX-NEXT: ret void
;
;
; NVPTX: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__7_wrapper
; NVPTX-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
; NVPTX-NEXT: entry:
; NVPTX-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
; NVPTX-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
; NVPTX-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; NVPTX-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
; NVPTX-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
; NVPTX-NEXT: call void @__omp_outlined__7(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX-NEXT: ret void
;
;
; NVPTX: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__8
; NVPTX-SAME: (i32* noalias nocapture nofree readnone [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree readnone [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX-NEXT: entry:
; NVPTX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-NEXT: call void @p1() #[[ATTR10]]
; NVPTX-NEXT: ret void
;
;
; NVPTX: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__8_wrapper
; NVPTX-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
; NVPTX-NEXT: entry:
; NVPTX-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
; NVPTX-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
; NVPTX-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; NVPTX-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
; NVPTX-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
; NVPTX-NEXT: call void @__omp_outlined__8(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX-NEXT: ret void
;
;
; NVPTX: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66
; NVPTX-SAME: () #[[ATTR0]] {
; NVPTX-NEXT: entry:
; NVPTX-NEXT: [[WORKER_WORK_FN_ADDR:%.*]] = alloca i8*, align 8
; NVPTX-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; NVPTX-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
; NVPTX-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 false, i1 true)
; NVPTX-NEXT: [[BLOCK_HW_SIZE:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
; NVPTX-NEXT: [[WARP_SIZE:%.*]] = call i32 @__kmpc_get_warp_size()
; NVPTX-NEXT: [[BLOCK_SIZE:%.*]] = sub i32 [[BLOCK_HW_SIZE]], [[WARP_SIZE]]
; NVPTX-NEXT: [[THREAD_IS_MAIN_OR_WORKER:%.*]] = icmp slt i32 [[TMP0]], [[BLOCK_SIZE]]
; NVPTX-NEXT: br i1 [[THREAD_IS_MAIN_OR_WORKER]], label [[IS_WORKER_CHECK:%.*]], label [[WORKER_STATE_MACHINE_FINISHED:%.*]]
; NVPTX: is_worker_check:
; NVPTX-NEXT: [[THREAD_IS_WORKER:%.*]] = icmp ne i32 [[TMP0]], -1
; NVPTX-NEXT: br i1 [[THREAD_IS_WORKER]], label [[WORKER_STATE_MACHINE_BEGIN:%.*]], label [[THREAD_USER_CODE_CHECK:%.*]]
; NVPTX: worker_state_machine.begin:
; NVPTX-NEXT: call void @__kmpc_barrier_simple_generic(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
; NVPTX-NEXT: [[WORKER_IS_ACTIVE:%.*]] = call i1 @__kmpc_kernel_parallel(i8** [[WORKER_WORK_FN_ADDR]])
; NVPTX-NEXT: [[WORKER_WORK_FN:%.*]] = load i8*, i8** [[WORKER_WORK_FN_ADDR]], align 8
; NVPTX-NEXT: [[WORKER_WORK_FN_ADDR_CAST:%.*]] = bitcast i8* [[WORKER_WORK_FN]] to void (i16, i32)*
; NVPTX-NEXT: [[WORKER_IS_DONE:%.*]] = icmp eq i8* [[WORKER_WORK_FN]], null
; NVPTX-NEXT: br i1 [[WORKER_IS_DONE]], label [[WORKER_STATE_MACHINE_FINISHED]], label [[WORKER_STATE_MACHINE_IS_ACTIVE_CHECK:%.*]]
; NVPTX: worker_state_machine.finished:
; NVPTX-NEXT: ret void
; NVPTX: worker_state_machine.is_active.check:
; NVPTX-NEXT: br i1 [[WORKER_IS_ACTIVE]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK:%.*]], label [[WORKER_STATE_MACHINE_DONE_BARRIER:%.*]]
; NVPTX: worker_state_machine.parallel_region.check:
; NVPTX-NEXT: [[WORKER_CHECK_PARALLEL_REGION:%.*]] = icmp eq void (i16, i32)* [[WORKER_WORK_FN_ADDR_CAST]], bitcast (i8* @__omp_outlined__10_wrapper.ID to void (i16, i32)*)
; NVPTX-NEXT: br i1 [[WORKER_CHECK_PARALLEL_REGION]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK1:%.*]]
; NVPTX: worker_state_machine.parallel_region.execute:
; NVPTX-NEXT: call void @__omp_outlined__10_wrapper(i16 0, i32 [[TMP0]])
; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END:%.*]]
; NVPTX: worker_state_machine.parallel_region.check1:
; NVPTX-NEXT: br i1 true, label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE2:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK3:%.*]]
; NVPTX: worker_state_machine.parallel_region.execute2:
; NVPTX-NEXT: call void @__omp_outlined__11_wrapper(i16 0, i32 [[TMP0]])
; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]]
; NVPTX: worker_state_machine.parallel_region.check3:
; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]]
; NVPTX: worker_state_machine.parallel_region.end:
; NVPTX-NEXT: call void @__kmpc_kernel_end_parallel()
; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_DONE_BARRIER]]
; NVPTX: worker_state_machine.done.barrier:
; NVPTX-NEXT: call void @__kmpc_barrier_simple_generic(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_BEGIN]]
; NVPTX: thread.user_code.check:
; NVPTX-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
; NVPTX-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; NVPTX: user_code.entry:
; NVPTX-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) #[[ATTR3]]
; NVPTX-NEXT: store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
; NVPTX-NEXT: call void @__omp_outlined__9(i32* noalias nocapture noundef nonnull readonly align 4 dereferenceable(4) [[DOTTHREADID_TEMP_]], i32* noalias nocapture noundef nonnull readnone align 4 dereferenceable(4) [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
; NVPTX-NEXT: ret void
; NVPTX: worker.exit:
; NVPTX-NEXT: ret void
;
;
; NVPTX: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__9
; NVPTX-SAME: (i32* noalias nocapture nofree noundef nonnull readonly align 4 dereferenceable(4) [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree nonnull readnone align 4 dereferenceable(4) [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX-NEXT: entry:
; NVPTX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
; NVPTX-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x i8*], align 8
; NVPTX-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
; NVPTX-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTGLOBAL_TID_]], align 4
; NVPTX-NEXT: [[TMP1:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
; NVPTX-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* noundef @[[GLOB1]], i32 [[TMP0]], i32 noundef 1, i32 noundef -1, i32 noundef -1, i8* noundef bitcast (void (i32*, i32*)* @__omp_outlined__10 to i8*), i8* noundef @__omp_outlined__10_wrapper.ID, i8** noundef [[TMP1]], i64 noundef 0)
; NVPTX-NEXT: call void @unknown_no_openmp() #[[ATTR9]]
; NVPTX-NEXT: [[TMP2:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS1]] to i8**
; NVPTX-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* noundef @[[GLOB1]], i32 [[TMP0]], i32 noundef 1, i32 noundef -1, i32 noundef -1, i8* noundef bitcast (void (i32*, i32*)* @__omp_outlined__11 to i8*), i8* noundef @__omp_outlined__11_wrapper.ID, i8** noundef [[TMP2]], i64 noundef 0)
; NVPTX-NEXT: ret void
;
;
; NVPTX: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__10
; NVPTX-SAME: (i32* noalias nocapture nofree readnone [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree readnone [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX-NEXT: entry:
; NVPTX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-NEXT: call void @p0() #[[ATTR10]]
; NVPTX-NEXT: ret void
;
;
; NVPTX: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__10_wrapper
; NVPTX-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
; NVPTX-NEXT: entry:
; NVPTX-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
; NVPTX-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
; NVPTX-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; NVPTX-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
; NVPTX-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
; NVPTX-NEXT: call void @__omp_outlined__10(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX-NEXT: ret void
;
;
; NVPTX: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__11
; NVPTX-SAME: (i32* noalias nocapture nofree readnone [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree readnone [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX-NEXT: entry:
; NVPTX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-NEXT: call void @p1() #[[ATTR10]]
; NVPTX-NEXT: ret void
;
;
; NVPTX: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__11_wrapper
; NVPTX-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
; NVPTX-NEXT: entry:
; NVPTX-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
; NVPTX-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
; NVPTX-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; NVPTX-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
; NVPTX-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
; NVPTX-NEXT: call void @__omp_outlined__11(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX-NEXT: ret void
;
;
; NVPTX: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_pure_l77
; NVPTX-SAME: () #[[ATTR0]] {
; NVPTX-NEXT: entry:
; NVPTX-NEXT: [[WORKER_WORK_FN_ADDR:%.*]] = alloca i8*, align 8
; NVPTX-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; NVPTX-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
; NVPTX-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 false, i1 true)
; NVPTX-NEXT: [[BLOCK_HW_SIZE:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
; NVPTX-NEXT: [[WARP_SIZE:%.*]] = call i32 @__kmpc_get_warp_size()
; NVPTX-NEXT: [[BLOCK_SIZE:%.*]] = sub i32 [[BLOCK_HW_SIZE]], [[WARP_SIZE]]
; NVPTX-NEXT: [[THREAD_IS_MAIN_OR_WORKER:%.*]] = icmp slt i32 [[TMP0]], [[BLOCK_SIZE]]
; NVPTX-NEXT: br i1 [[THREAD_IS_MAIN_OR_WORKER]], label [[IS_WORKER_CHECK:%.*]], label [[WORKER_STATE_MACHINE_FINISHED:%.*]]
; NVPTX: is_worker_check:
; NVPTX-NEXT: [[THREAD_IS_WORKER:%.*]] = icmp ne i32 [[TMP0]], -1
; NVPTX-NEXT: br i1 [[THREAD_IS_WORKER]], label [[WORKER_STATE_MACHINE_BEGIN:%.*]], label [[THREAD_USER_CODE_CHECK:%.*]]
; NVPTX: worker_state_machine.begin:
; NVPTX-NEXT: call void @__kmpc_barrier_simple_generic(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
; NVPTX-NEXT: [[WORKER_IS_ACTIVE:%.*]] = call i1 @__kmpc_kernel_parallel(i8** [[WORKER_WORK_FN_ADDR]])
; NVPTX-NEXT: [[WORKER_WORK_FN:%.*]] = load i8*, i8** [[WORKER_WORK_FN_ADDR]], align 8
; NVPTX-NEXT: [[WORKER_WORK_FN_ADDR_CAST:%.*]] = bitcast i8* [[WORKER_WORK_FN]] to void (i16, i32)*
; NVPTX-NEXT: [[WORKER_IS_DONE:%.*]] = icmp eq i8* [[WORKER_WORK_FN]], null
; NVPTX-NEXT: br i1 [[WORKER_IS_DONE]], label [[WORKER_STATE_MACHINE_FINISHED]], label [[WORKER_STATE_MACHINE_IS_ACTIVE_CHECK:%.*]]
; NVPTX: worker_state_machine.finished:
; NVPTX-NEXT: ret void
; NVPTX: worker_state_machine.is_active.check:
; NVPTX-NEXT: br i1 [[WORKER_IS_ACTIVE]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK:%.*]], label [[WORKER_STATE_MACHINE_DONE_BARRIER:%.*]]
; NVPTX: worker_state_machine.parallel_region.check:
; NVPTX-NEXT: [[WORKER_CHECK_PARALLEL_REGION:%.*]] = icmp eq void (i16, i32)* [[WORKER_WORK_FN_ADDR_CAST]], bitcast (i8* @__omp_outlined__13_wrapper.ID to void (i16, i32)*)
; NVPTX-NEXT: br i1 [[WORKER_CHECK_PARALLEL_REGION]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK1:%.*]]
; NVPTX: worker_state_machine.parallel_region.execute:
; NVPTX-NEXT: call void @__omp_outlined__13_wrapper(i16 0, i32 [[TMP0]])
; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END:%.*]]
; NVPTX: worker_state_machine.parallel_region.check1:
; NVPTX-NEXT: br i1 true, label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE2:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK3:%.*]]
; NVPTX: worker_state_machine.parallel_region.execute2:
; NVPTX-NEXT: call void @__omp_outlined__14_wrapper(i16 0, i32 [[TMP0]])
; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]]
; NVPTX: worker_state_machine.parallel_region.check3:
; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]]
; NVPTX: worker_state_machine.parallel_region.end:
; NVPTX-NEXT: call void @__kmpc_kernel_end_parallel()
; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_DONE_BARRIER]]
; NVPTX: worker_state_machine.done.barrier:
; NVPTX-NEXT: call void @__kmpc_barrier_simple_generic(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_BEGIN]]
; NVPTX: thread.user_code.check:
; NVPTX-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
; NVPTX-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; NVPTX: user_code.entry:
; NVPTX-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) #[[ATTR3]]
; NVPTX-NEXT: store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
; NVPTX-NEXT: call void @__omp_outlined__12(i32* noalias nocapture noundef nonnull readonly align 4 dereferenceable(4) [[DOTTHREADID_TEMP_]], i32* noalias nocapture noundef nonnull readnone align 4 dereferenceable(4) [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
; NVPTX-NEXT: ret void
; NVPTX: worker.exit:
; NVPTX-NEXT: ret void
;
;
; NVPTX: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__12
; NVPTX-SAME: (i32* noalias nocapture nofree noundef nonnull readonly align 4 dereferenceable(4) [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree nonnull readnone align 4 dereferenceable(4) [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX-NEXT: entry:
; NVPTX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
; NVPTX-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x i8*], align 8
; NVPTX-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
; NVPTX-NEXT: call void @unknown_no_openmp() #[[ATTR9]]
; NVPTX-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTGLOBAL_TID_]], align 4
; NVPTX-NEXT: [[TMP1:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
; NVPTX-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* noundef @[[GLOB1]], i32 [[TMP0]], i32 noundef 1, i32 noundef -1, i32 noundef -1, i8* noundef bitcast (void (i32*, i32*)* @__omp_outlined__13 to i8*), i8* noundef @__omp_outlined__13_wrapper.ID, i8** noundef [[TMP1]], i64 noundef 0)
; NVPTX-NEXT: [[TMP2:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS1]] to i8**
; NVPTX-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* noundef @[[GLOB1]], i32 [[TMP0]], i32 noundef 1, i32 noundef -1, i32 noundef -1, i8* noundef bitcast (void (i32*, i32*)* @__omp_outlined__14 to i8*), i8* noundef @__omp_outlined__14_wrapper.ID, i8** noundef [[TMP2]], i64 noundef 0)
; NVPTX-NEXT: ret void
;
;
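; NOTE: Relative to the C source, the `unknown_pure()` call between the two
; parallel regions does not appear in the checks for @__omp_outlined__12
; above; as a `pure` call with no used result it can be removed, which
; leaves only known parallel regions and lets the kernel's final region
; check fold to `br i1 true` (no fallback branch).
;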
; NVPTX: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__13
; NVPTX-SAME: (i32* noalias nocapture nofree readnone [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree readnone [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX-NEXT: entry:
; NVPTX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-NEXT: call void @p0() #[[ATTR10]]
; NVPTX-NEXT: ret void
;
;
; NVPTX: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__13_wrapper
; NVPTX-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
; NVPTX-NEXT: entry:
; NVPTX-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
; NVPTX-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
; NVPTX-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; NVPTX-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
; NVPTX-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
; NVPTX-NEXT: call void @__omp_outlined__13(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX-NEXT: ret void
;
;
; NVPTX: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__14
; NVPTX-SAME: (i32* noalias nocapture nofree readnone [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree readnone [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX-NEXT: entry:
; NVPTX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-NEXT: call void @p1() #[[ATTR10]]
; NVPTX-NEXT: ret void
;
;
; NVPTX: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__14_wrapper
; NVPTX-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
; NVPTX-NEXT: entry:
; NVPTX-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
; NVPTX-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
; NVPTX-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; NVPTX-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
; NVPTX-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
; NVPTX-NEXT: call void @__omp_outlined__14(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX-NEXT: ret void
;
;
; NVPTX: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92
; NVPTX-SAME: () #[[ATTR0]] {
; NVPTX-NEXT: entry:
; NVPTX-NEXT: [[WORKER_WORK_FN_ADDR:%.*]] = alloca i8*, align 8
; NVPTX-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; NVPTX-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
; NVPTX-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 false, i1 true)
; NVPTX-NEXT: [[BLOCK_HW_SIZE:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
; NVPTX-NEXT: [[WARP_SIZE:%.*]] = call i32 @__kmpc_get_warp_size()
; NVPTX-NEXT: [[BLOCK_SIZE:%.*]] = sub i32 [[BLOCK_HW_SIZE]], [[WARP_SIZE]]
; NVPTX-NEXT: [[THREAD_IS_MAIN_OR_WORKER:%.*]] = icmp slt i32 [[TMP0]], [[BLOCK_SIZE]]
; NVPTX-NEXT: br i1 [[THREAD_IS_MAIN_OR_WORKER]], label [[IS_WORKER_CHECK:%.*]], label [[WORKER_STATE_MACHINE_FINISHED:%.*]]
; NVPTX: is_worker_check:
; NVPTX-NEXT: [[THREAD_IS_WORKER:%.*]] = icmp ne i32 [[TMP0]], -1
; NVPTX-NEXT: br i1 [[THREAD_IS_WORKER]], label [[WORKER_STATE_MACHINE_BEGIN:%.*]], label [[THREAD_USER_CODE_CHECK:%.*]]
; NVPTX: worker_state_machine.begin:
; NVPTX-NEXT: call void @__kmpc_barrier_simple_generic(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
; NVPTX-NEXT: [[WORKER_IS_ACTIVE:%.*]] = call i1 @__kmpc_kernel_parallel(i8** [[WORKER_WORK_FN_ADDR]])
; NVPTX-NEXT: [[WORKER_WORK_FN:%.*]] = load i8*, i8** [[WORKER_WORK_FN_ADDR]], align 8
; NVPTX-NEXT: [[WORKER_WORK_FN_ADDR_CAST:%.*]] = bitcast i8* [[WORKER_WORK_FN]] to void (i16, i32)*
; NVPTX-NEXT: [[WORKER_IS_DONE:%.*]] = icmp eq i8* [[WORKER_WORK_FN]], null
; NVPTX-NEXT: br i1 [[WORKER_IS_DONE]], label [[WORKER_STATE_MACHINE_FINISHED]], label [[WORKER_STATE_MACHINE_IS_ACTIVE_CHECK:%.*]]
; NVPTX: worker_state_machine.finished:
; NVPTX-NEXT: ret void
; NVPTX: worker_state_machine.is_active.check:
; NVPTX-NEXT: br i1 [[WORKER_IS_ACTIVE]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_CHECK:%.*]], label [[WORKER_STATE_MACHINE_DONE_BARRIER:%.*]]
; NVPTX: worker_state_machine.parallel_region.check:
; NVPTX-NEXT: [[WORKER_CHECK_PARALLEL_REGION:%.*]] = icmp eq void (i16, i32)* [[WORKER_WORK_FN_ADDR_CAST]], @__omp_outlined__19_wrapper
; NVPTX-NEXT: br i1 [[WORKER_CHECK_PARALLEL_REGION]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_EXECUTE:%.*]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_FALLBACK_EXECUTE:%.*]]
; NVPTX: worker_state_machine.parallel_region.execute:
; NVPTX-NEXT: call void @__omp_outlined__19_wrapper(i16 0, i32 [[TMP0]])
; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END:%.*]]
; NVPTX: worker_state_machine.parallel_region.fallback.execute:
; NVPTX-NEXT: call void [[WORKER_WORK_FN_ADDR_CAST]](i16 0, i32 [[TMP0]])
; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END]]
; NVPTX: worker_state_machine.parallel_region.end:
; NVPTX-NEXT: call void @__kmpc_kernel_end_parallel()
; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_DONE_BARRIER]]
; NVPTX: worker_state_machine.done.barrier:
; NVPTX-NEXT: call void @__kmpc_barrier_simple_generic(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_BEGIN]]
; NVPTX: thread.user_code.check:
; NVPTX-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
; NVPTX-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; NVPTX: user_code.entry:
; NVPTX-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) #[[ATTR3]]
; NVPTX-NEXT: call void @__omp_outlined__15(i32* noalias nocapture noundef nonnull readnone align 4 dereferenceable(4) [[DOTTHREADID_TEMP_]], i32* noalias nocapture noundef nonnull readnone align 4 dereferenceable(4) [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
; NVPTX-NEXT: ret void
; NVPTX: worker.exit:
; NVPTX-NEXT: ret void
;
;
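; NOTE: Only one parallel region is proven here: @__omp_outlined__19_wrapper,
; compared as a plain function pointer (its address appears to escape through
; the non-internalized recursive callees, so no `.ID` proxy is used). Any
; other region the recursion might reach goes through the indirect fallback
; call above.
;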
; NVPTX: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__15
; NVPTX-SAME: (i32* noalias nocapture nofree nonnull readnone align 4 dereferenceable(4) [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree nonnull readnone align 4 dereferenceable(4) [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX-NEXT: entry:
; NVPTX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-NEXT: [[CALL:%.*]] = call i32 bitcast (i32 (...)* @omp_get_thread_num to i32 ()*)() #[[ATTR10]]
; NVPTX-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after.internalized(i32 [[CALL]]) #[[ATTR8]]
; NVPTX-NEXT: ret void
;
;
; NVPTX: Function Attrs: convergent noinline nounwind
; NVPTX-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after.internalized
; NVPTX-SAME: (i32 [[A:%.*]]) #[[ATTR1]] {
; NVPTX-NEXT: entry:
; NVPTX-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
; NVPTX-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
; NVPTX-NEXT: [[CMP:%.*]] = icmp eq i32 [[A]], 0
; NVPTX-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
; NVPTX: if.then:
; NVPTX-NEXT: br label [[RETURN:%.*]]
; NVPTX: if.end:
; NVPTX-NEXT: [[SUB:%.*]] = sub nsw i32 [[A]], 1
; NVPTX-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after.internalized(i32 [[SUB]]) #[[ATTR8]]
; NVPTX-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after_after.internalized() #[[ATTR8]]
; NVPTX-NEXT: br label [[RETURN]]
; NVPTX: return:
; NVPTX-NEXT: ret void
;
;
; NVPTX: Function Attrs: convergent noinline nounwind
; NVPTX-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after
; NVPTX-SAME: (i32 [[A:%.*]]) #[[ATTR1]] {
; NVPTX-NEXT: entry:
; NVPTX-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
; NVPTX-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
; NVPTX-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
; NVPTX-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP0]], 0
; NVPTX-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
; NVPTX: if.then:
; NVPTX-NEXT: br label [[RETURN:%.*]]
; NVPTX: if.end:
; NVPTX-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
; NVPTX-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP1]], 1
; NVPTX-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after(i32 [[SUB]]) #[[ATTR10]]
; NVPTX-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after_after() #[[ATTR10]]
; NVPTX-NEXT: br label [[RETURN]]
; NVPTX: return:
; NVPTX-NEXT: ret void
;
;
; NVPTX: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112
; NVPTX-SAME: () #[[ATTR0]] {
; NVPTX-NEXT: entry:
; NVPTX-NEXT: [[WORKER_WORK_FN_ADDR:%.*]] = alloca i8*, align 8
; NVPTX-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; NVPTX-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
; NVPTX-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 false, i1 true)
; NVPTX-NEXT: [[BLOCK_HW_SIZE:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
; NVPTX-NEXT: [[WARP_SIZE:%.*]] = call i32 @__kmpc_get_warp_size()
; NVPTX-NEXT: [[BLOCK_SIZE:%.*]] = sub i32 [[BLOCK_HW_SIZE]], [[WARP_SIZE]]
; NVPTX-NEXT: [[THREAD_IS_MAIN_OR_WORKER:%.*]] = icmp slt i32 [[TMP0]], [[BLOCK_SIZE]]
; NVPTX-NEXT: br i1 [[THREAD_IS_MAIN_OR_WORKER]], label [[IS_WORKER_CHECK:%.*]], label [[WORKER_STATE_MACHINE_FINISHED:%.*]]
; NVPTX: is_worker_check:
; NVPTX-NEXT: [[THREAD_IS_WORKER:%.*]] = icmp ne i32 [[TMP0]], -1
; NVPTX-NEXT: br i1 [[THREAD_IS_WORKER]], label [[WORKER_STATE_MACHINE_BEGIN:%.*]], label [[THREAD_USER_CODE_CHECK:%.*]]
; NVPTX: worker_state_machine.begin:
; NVPTX-NEXT: call void @__kmpc_barrier_simple_generic(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
; NVPTX-NEXT: [[WORKER_IS_ACTIVE:%.*]] = call i1 @__kmpc_kernel_parallel(i8** [[WORKER_WORK_FN_ADDR]])
; NVPTX-NEXT: [[WORKER_WORK_FN:%.*]] = load i8*, i8** [[WORKER_WORK_FN_ADDR]], align 8
; NVPTX-NEXT: [[WORKER_WORK_FN_ADDR_CAST:%.*]] = bitcast i8* [[WORKER_WORK_FN]] to void (i16, i32)*
; NVPTX-NEXT: [[WORKER_IS_DONE:%.*]] = icmp eq i8* [[WORKER_WORK_FN]], null
; NVPTX-NEXT: br i1 [[WORKER_IS_DONE]], label [[WORKER_STATE_MACHINE_FINISHED]], label [[WORKER_STATE_MACHINE_IS_ACTIVE_CHECK:%.*]]
; NVPTX: worker_state_machine.finished:
; NVPTX-NEXT: ret void
; NVPTX: worker_state_machine.is_active.check:
; NVPTX-NEXT: br i1 [[WORKER_IS_ACTIVE]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_FALLBACK_EXECUTE:%.*]], label [[WORKER_STATE_MACHINE_DONE_BARRIER:%.*]]
; NVPTX: worker_state_machine.parallel_region.fallback.execute:
; NVPTX-NEXT: call void [[WORKER_WORK_FN_ADDR_CAST]](i16 0, i32 [[TMP0]])
; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END:%.*]]
; NVPTX: worker_state_machine.parallel_region.end:
; NVPTX-NEXT: call void @__kmpc_kernel_end_parallel()
; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_DONE_BARRIER]]
; NVPTX: worker_state_machine.done.barrier:
; NVPTX-NEXT: call void @__kmpc_barrier_simple_generic(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
; NVPTX-NEXT: br label [[WORKER_STATE_MACHINE_BEGIN]]
; NVPTX: thread.user_code.check:
; NVPTX-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
; NVPTX-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; NVPTX: user_code.entry:
; NVPTX-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) #[[ATTR3]]
; NVPTX-NEXT: call void @__omp_outlined__16(i32* noalias nocapture noundef nonnull readnone align 4 dereferenceable(4) [[DOTTHREADID_TEMP_]], i32* noalias nocapture noundef nonnull readnone align 4 dereferenceable(4) [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
; NVPTX-NEXT: ret void
; NVPTX: worker.exit:
; NVPTX-NEXT: ret void
;
;
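; NOTE: @weak_callee_empty is weak and may be replaced at link time, so no
; parallel region can be ruled in or out. The state machine above has no
; known-region checks at all: any work function announced by
; __kmpc_kernel_parallel is invoked through the indirect fallback call.
;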
; NVPTX: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__16
; NVPTX-SAME: (i32* noalias nocapture nofree nonnull readnone align 4 dereferenceable(4) [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree nonnull readnone align 4 dereferenceable(4) [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX-NEXT: entry:
; NVPTX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-NEXT: call void @weak_callee_empty() #[[ATTR8]]
; NVPTX-NEXT: ret void
;
;
; NVPTX: Function Attrs: convergent noinline nounwind
; NVPTX-LABEL: define {{[^@]+}}@weak_callee_empty
; NVPTX-SAME: () #[[ATTR1]] {
; NVPTX-NEXT: entry:
; NVPTX-NEXT: ret void
;
;
; NVPTX: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__17
; NVPTX-SAME: (i32* noalias nocapture nofree readnone [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree readnone [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX-NEXT: entry:
; NVPTX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-NEXT: call void @p0() #[[ATTR10]]
; NVPTX-NEXT: ret void
;
;
; NVPTX: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__17_wrapper
; NVPTX-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
; NVPTX-NEXT: entry:
; NVPTX-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
; NVPTX-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
; NVPTX-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; NVPTX-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
; NVPTX-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
; NVPTX-NEXT: call void @__omp_outlined__17(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX-NEXT: ret void
;
;
; NVPTX: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__18
; NVPTX-SAME: (i32* noalias nocapture nofree readnone [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree readnone [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX-NEXT: entry:
; NVPTX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-NEXT: call void @p0() #[[ATTR10]]
; NVPTX-NEXT: ret void
;
;
; NVPTX: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__18_wrapper
; NVPTX-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
; NVPTX-NEXT: entry:
; NVPTX-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
; NVPTX-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
; NVPTX-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; NVPTX-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
; NVPTX-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
; NVPTX-NEXT: call void @__omp_outlined__18(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX-NEXT: ret void
;
;
; NVPTX: Function Attrs: convergent noinline nounwind
; NVPTX-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after_after.internalized
; NVPTX-SAME: () #[[ATTR1]] {
; NVPTX-NEXT: entry:
; NVPTX-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
; NVPTX-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* noundef @[[GLOB2]]) #[[ATTR3]]
; NVPTX-NEXT: [[TMP1:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
; NVPTX-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* noundef @[[GLOB2]], i32 [[TMP0]], i32 noundef 1, i32 noundef -1, i32 noundef -1, i8* noundef bitcast (void (i32*, i32*)* @__omp_outlined__19 to i8*), i8* noundef bitcast (void (i16, i32)* @__omp_outlined__19_wrapper to i8*), i8** noundef [[TMP1]], i64 noundef 0)
; NVPTX-NEXT: ret void
;
;
; NVPTX: Function Attrs: convergent noinline nounwind
; NVPTX-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after_after
; NVPTX-SAME: () #[[ATTR1]] {
; NVPTX-NEXT: entry:
; NVPTX-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
; NVPTX-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
; NVPTX-NEXT: [[TMP1:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
; NVPTX-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__19 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__19_wrapper to i8*), i8** [[TMP1]], i64 0)
; NVPTX-NEXT: ret void
;
;
; NVPTX: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__19
; NVPTX-SAME: (i32* noalias nocapture nofree readnone [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree readnone [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX-NEXT: entry:
; NVPTX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-NEXT: call void @p0() #[[ATTR10]]
; NVPTX-NEXT: ret void
;
;
; NVPTX: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__19_wrapper
; NVPTX-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
; NVPTX-NEXT: entry:
; NVPTX-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
; NVPTX-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
; NVPTX-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; NVPTX-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
; NVPTX-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
; NVPTX-NEXT: call void @__omp_outlined__19(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX-NEXT: ret void
;
;
; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_no_state_machine_needed_l14
; AMDGPU-DISABLED-SAME: () #[[ATTR0:[0-9]+]] {
; AMDGPU-DISABLED-NEXT: entry:
; AMDGPU-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; AMDGPU-DISABLED-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
; AMDGPU-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1:[0-9]+]], i8 1, i1 true, i1 true)
; AMDGPU-DISABLED-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
; AMDGPU-DISABLED-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; AMDGPU-DISABLED: user_code.entry:
; AMDGPU-DISABLED-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) #[[ATTR3:[0-9]+]]
; AMDGPU-DISABLED-NEXT: call void @__omp_outlined__(i32* noalias nocapture noundef nonnull readnone align 4 dereferenceable(4) [[DOTTHREADID_TEMP_]], i32* noalias nocapture noundef nonnull readnone align 4 dereferenceable(4) [[DOTZERO_ADDR]]) #[[ATTR3]]
; AMDGPU-DISABLED-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
; AMDGPU-DISABLED-NEXT: ret void
; AMDGPU-DISABLED: worker.exit:
; AMDGPU-DISABLED-NEXT: ret void
;
;
2807 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
2808 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__
2809 ; AMDGPU-DISABLED-SAME: (i32* noalias nocapture nofree nonnull readnone align 4 dereferenceable(4) [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree nonnull readnone align 4 dereferenceable(4) [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2810 ; AMDGPU-DISABLED-NEXT: entry:
2811 ; AMDGPU-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2812 ; AMDGPU-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2813 ; AMDGPU-DISABLED-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR8:[0-9]+]]
2814 ; AMDGPU-DISABLED-NEXT: call void @unknown_no_openmp() #[[ATTR9:[0-9]+]]
2815 ; AMDGPU-DISABLED-NEXT: ret void
2818 ; AMDGPU-DISABLED: Function Attrs: convergent noinline nounwind
2819 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@no_parallel_region_in_here.internalized
2820 ; AMDGPU-DISABLED-SAME: () #[[ATTR1:[0-9]+]] {
2821 ; AMDGPU-DISABLED-NEXT: entry:
2822 ; AMDGPU-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* noundef @[[GLOB2:[0-9]+]]) #[[ATTR3]]
2823 ; AMDGPU-DISABLED-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_single(%struct.ident_t* noundef @[[GLOB2]], i32 [[TMP0]]) #[[ATTR3]]
2824 ; AMDGPU-DISABLED-NEXT: [[TMP2:%.*]] = icmp ne i32 [[TMP1]], 0
2825 ; AMDGPU-DISABLED-NEXT: br i1 [[TMP2]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
2826 ; AMDGPU-DISABLED: omp_if.then:
2827 ; AMDGPU-DISABLED-NEXT: store i32 0, i32* @G, align 4
2828 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_end_single(%struct.ident_t* noundef @[[GLOB2]], i32 [[TMP0]]) #[[ATTR3]]
2829 ; AMDGPU-DISABLED-NEXT: br label [[OMP_IF_END]]
2830 ; AMDGPU-DISABLED: omp_if.end:
2831 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_barrier(%struct.ident_t* noundef @[[GLOB3:[0-9]+]], i32 [[TMP0]]) #[[ATTR3]]
2832 ; AMDGPU-DISABLED-NEXT: ret void
2835 ; AMDGPU-DISABLED: Function Attrs: convergent noinline nounwind
2836 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@no_parallel_region_in_here
2837 ; AMDGPU-DISABLED-SAME: () #[[ATTR1]] {
2838 ; AMDGPU-DISABLED-NEXT: entry:
2839 ; AMDGPU-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
2840 ; AMDGPU-DISABLED-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_single(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]])
2841 ; AMDGPU-DISABLED-NEXT: [[TMP2:%.*]] = icmp ne i32 [[TMP1]], 0
2842 ; AMDGPU-DISABLED-NEXT: br i1 [[TMP2]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
2843 ; AMDGPU-DISABLED: omp_if.then:
2844 ; AMDGPU-DISABLED-NEXT: store i32 0, i32* @G, align 4
2845 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_end_single(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]])
2846 ; AMDGPU-DISABLED-NEXT: br label [[OMP_IF_END]]
2847 ; AMDGPU-DISABLED: omp_if.end:
2848 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3]], i32 [[TMP0]])
2849 ; AMDGPU-DISABLED-NEXT: ret void
2852 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
2853 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_l22
2854 ; AMDGPU-DISABLED-SAME: () #[[ATTR0]] {
2855 ; AMDGPU-DISABLED-NEXT: entry:
2856 ; AMDGPU-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2857 ; AMDGPU-DISABLED-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
2858 ; AMDGPU-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 true, i1 true)
2859 ; AMDGPU-DISABLED-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
2860 ; AMDGPU-DISABLED-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
2861 ; AMDGPU-DISABLED: user_code.entry:
2862 ; AMDGPU-DISABLED-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) #[[ATTR3]]
2863 ; AMDGPU-DISABLED-NEXT: store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
2864 ; AMDGPU-DISABLED-NEXT: call void @__omp_outlined__1(i32* noalias nocapture noundef nonnull readonly align 4 dereferenceable(4) [[DOTTHREADID_TEMP_]], i32* noalias nocapture noundef nonnull readnone align 4 dereferenceable(4) [[DOTZERO_ADDR]]) #[[ATTR3]]
2865 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
2866 ; AMDGPU-DISABLED-NEXT: ret void
2867 ; AMDGPU-DISABLED: worker.exit:
2868 ; AMDGPU-DISABLED-NEXT: ret void
2871 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
2872 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__1
2873 ; AMDGPU-DISABLED-SAME: (i32* noalias nocapture nofree noundef nonnull readonly align 4 dereferenceable(4) [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree nonnull readnone align 4 dereferenceable(4) [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2874 ; AMDGPU-DISABLED-NEXT: entry:
2875 ; AMDGPU-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2876 ; AMDGPU-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2877 ; AMDGPU-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
2878 ; AMDGPU-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x i8*], align 8
2879 ; AMDGPU-DISABLED-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2880 ; AMDGPU-DISABLED-NEXT: call void @unknown_no_openmp() #[[ATTR9]]
2881 ; AMDGPU-DISABLED-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTGLOBAL_TID_]], align 4
2882 ; AMDGPU-DISABLED-NEXT: [[TMP1:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
2883 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* noundef @[[GLOB1]], i32 [[TMP0]], i32 noundef 1, i32 noundef -1, i32 noundef -1, i8* noundef bitcast (void (i32*, i32*)* @__omp_outlined__2 to i8*), i8* noundef bitcast (void (i16, i32)* @__omp_outlined__2_wrapper to i8*), i8** noundef [[TMP1]], i64 noundef 0)
2884 ; AMDGPU-DISABLED-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR8]]
2885 ; AMDGPU-DISABLED-NEXT: [[TMP2:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS1]] to i8**
2886 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* noundef @[[GLOB1]], i32 [[TMP0]], i32 noundef 1, i32 noundef -1, i32 noundef -1, i8* noundef bitcast (void (i32*, i32*)* @__omp_outlined__3 to i8*), i8* noundef bitcast (void (i16, i32)* @__omp_outlined__3_wrapper to i8*), i8** noundef [[TMP2]], i64 noundef 0)
2887 ; AMDGPU-DISABLED-NEXT: ret void
2890 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
2891 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__2
2892 ; AMDGPU-DISABLED-SAME: (i32* noalias nocapture nofree readnone [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree readnone [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2893 ; AMDGPU-DISABLED-NEXT: entry:
2894 ; AMDGPU-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2895 ; AMDGPU-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2896 ; AMDGPU-DISABLED-NEXT: call void @p0() #[[ATTR10:[0-9]+]]
2897 ; AMDGPU-DISABLED-NEXT: ret void
2900 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
2901 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__2_wrapper
2902 ; AMDGPU-DISABLED-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
2903 ; AMDGPU-DISABLED-NEXT: entry:
2904 ; AMDGPU-DISABLED-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
2905 ; AMDGPU-DISABLED-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
2906 ; AMDGPU-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2907 ; AMDGPU-DISABLED-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
2908 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
2909 ; AMDGPU-DISABLED-NEXT: call void @__omp_outlined__2(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR3]]
2910 ; AMDGPU-DISABLED-NEXT: ret void
2913 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
2914 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__3
2915 ; AMDGPU-DISABLED-SAME: (i32* noalias nocapture nofree readnone [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree readnone [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2916 ; AMDGPU-DISABLED-NEXT: entry:
2917 ; AMDGPU-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2918 ; AMDGPU-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2919 ; AMDGPU-DISABLED-NEXT: call void @p1() #[[ATTR10]]
2920 ; AMDGPU-DISABLED-NEXT: ret void
2923 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
2924 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__3_wrapper
2925 ; AMDGPU-DISABLED-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
2926 ; AMDGPU-DISABLED-NEXT: entry:
2927 ; AMDGPU-DISABLED-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
2928 ; AMDGPU-DISABLED-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
2929 ; AMDGPU-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2930 ; AMDGPU-DISABLED-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
2931 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
2932 ; AMDGPU-DISABLED-NEXT: call void @__omp_outlined__3(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR3]]
2933 ; AMDGPU-DISABLED-NEXT: ret void
2936 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
2937 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39
2938 ; AMDGPU-DISABLED-SAME: () #[[ATTR0]] {
2939 ; AMDGPU-DISABLED-NEXT: entry:
2940 ; AMDGPU-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
2941 ; AMDGPU-DISABLED-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
2942 ; AMDGPU-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 true, i1 true)
2943 ; AMDGPU-DISABLED-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
2944 ; AMDGPU-DISABLED-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
2945 ; AMDGPU-DISABLED: user_code.entry:
2946 ; AMDGPU-DISABLED-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) #[[ATTR3]]
2947 ; AMDGPU-DISABLED-NEXT: store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
2948 ; AMDGPU-DISABLED-NEXT: call void @__omp_outlined__4(i32* noalias nocapture noundef nonnull readonly align 4 dereferenceable(4) [[DOTTHREADID_TEMP_]], i32* noalias nocapture noundef nonnull readnone align 4 dereferenceable(4) [[DOTZERO_ADDR]]) #[[ATTR3]]
2949 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
2950 ; AMDGPU-DISABLED-NEXT: ret void
2951 ; AMDGPU-DISABLED: worker.exit:
2952 ; AMDGPU-DISABLED-NEXT: ret void
2955 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
2956 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__4
2957 ; AMDGPU-DISABLED-SAME: (i32* noalias nocapture nofree noundef nonnull readonly align 4 dereferenceable(4) [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree nonnull readnone align 4 dereferenceable(4) [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2958 ; AMDGPU-DISABLED-NEXT: entry:
2959 ; AMDGPU-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2960 ; AMDGPU-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2961 ; AMDGPU-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
2962 ; AMDGPU-DISABLED-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2963 ; AMDGPU-DISABLED-NEXT: call void @unknown_no_openmp() #[[ATTR9]]
2964 ; AMDGPU-DISABLED-NEXT: call void @simple_state_machine_interprocedural_before.internalized() #[[ATTR8]]
2965 ; AMDGPU-DISABLED-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR8]]
2966 ; AMDGPU-DISABLED-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTGLOBAL_TID_]], align 4
2967 ; AMDGPU-DISABLED-NEXT: [[TMP1:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
2968 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* noundef @[[GLOB1]], i32 [[TMP0]], i32 noundef 1, i32 noundef -1, i32 noundef -1, i8* noundef bitcast (void (i32*, i32*)* @__omp_outlined__5 to i8*), i8* noundef bitcast (void (i16, i32)* @__omp_outlined__5_wrapper to i8*), i8** noundef [[TMP1]], i64 noundef 0)
2969 ; AMDGPU-DISABLED-NEXT: call void @simple_state_machine_interprocedural_after.internalized() #[[ATTR8]]
2970 ; AMDGPU-DISABLED-NEXT: ret void
2973 ; AMDGPU-DISABLED: Function Attrs: convergent noinline nounwind
2974 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_before.internalized
2975 ; AMDGPU-DISABLED-SAME: () #[[ATTR1]] {
2976 ; AMDGPU-DISABLED-NEXT: entry:
2977 ; AMDGPU-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
2978 ; AMDGPU-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* noundef @[[GLOB2]]) #[[ATTR3]]
2979 ; AMDGPU-DISABLED-NEXT: [[TMP1:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
2980 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* noundef @[[GLOB2]], i32 [[TMP0]], i32 noundef 1, i32 noundef -1, i32 noundef -1, i8* noundef bitcast (void (i32*, i32*)* @__omp_outlined__17 to i8*), i8* noundef bitcast (void (i16, i32)* @__omp_outlined__17_wrapper to i8*), i8** noundef [[TMP1]], i64 noundef 0)
2981 ; AMDGPU-DISABLED-NEXT: ret void
2984 ; AMDGPU-DISABLED: Function Attrs: convergent noinline nounwind
2985 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_before
2986 ; AMDGPU-DISABLED-SAME: () #[[ATTR1]] {
2987 ; AMDGPU-DISABLED-NEXT: entry:
2988 ; AMDGPU-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
2989 ; AMDGPU-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
2990 ; AMDGPU-DISABLED-NEXT: [[TMP1:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
2991 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__17 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__17_wrapper to i8*), i8** [[TMP1]], i64 0)
2992 ; AMDGPU-DISABLED-NEXT: ret void
2995 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
2996 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__5
2997 ; AMDGPU-DISABLED-SAME: (i32* noalias nocapture nofree readnone [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree readnone [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2998 ; AMDGPU-DISABLED-NEXT: entry:
2999 ; AMDGPU-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
3000 ; AMDGPU-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
3001 ; AMDGPU-DISABLED-NEXT: call void @p1() #[[ATTR10]]
3002 ; AMDGPU-DISABLED-NEXT: ret void
3005 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3006 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__5_wrapper
3007 ; AMDGPU-DISABLED-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
3008 ; AMDGPU-DISABLED-NEXT: entry:
3009 ; AMDGPU-DISABLED-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
3010 ; AMDGPU-DISABLED-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
3011 ; AMDGPU-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3012 ; AMDGPU-DISABLED-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
3013 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
3014 ; AMDGPU-DISABLED-NEXT: call void @__omp_outlined__5(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR3]]
3015 ; AMDGPU-DISABLED-NEXT: ret void
3018 ; AMDGPU-DISABLED: Function Attrs: convergent noinline nounwind
3019 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_after.internalized
3020 ; AMDGPU-DISABLED-SAME: () #[[ATTR1]] {
3021 ; AMDGPU-DISABLED-NEXT: entry:
3022 ; AMDGPU-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
3023 ; AMDGPU-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* noundef @[[GLOB2]]) #[[ATTR3]]
3024 ; AMDGPU-DISABLED-NEXT: [[TMP1:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
3025 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* noundef @[[GLOB2]], i32 [[TMP0]], i32 noundef 1, i32 noundef -1, i32 noundef -1, i8* noundef bitcast (void (i32*, i32*)* @__omp_outlined__18 to i8*), i8* noundef bitcast (void (i16, i32)* @__omp_outlined__18_wrapper to i8*), i8** noundef [[TMP1]], i64 noundef 0)
3026 ; AMDGPU-DISABLED-NEXT: ret void
3029 ; AMDGPU-DISABLED: Function Attrs: convergent noinline nounwind
3030 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_after
3031 ; AMDGPU-DISABLED-SAME: () #[[ATTR1]] {
3032 ; AMDGPU-DISABLED-NEXT: entry:
3033 ; AMDGPU-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
3034 ; AMDGPU-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
3035 ; AMDGPU-DISABLED-NEXT: [[TMP1:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
3036 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__18 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__18_wrapper to i8*), i8** [[TMP1]], i64 0)
3037 ; AMDGPU-DISABLED-NEXT: ret void
3040 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3041 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55
3042 ; AMDGPU-DISABLED-SAME: () #[[ATTR0]] {
3043 ; AMDGPU-DISABLED-NEXT: entry:
3044 ; AMDGPU-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3045 ; AMDGPU-DISABLED-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
3046 ; AMDGPU-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 true, i1 true)
3047 ; AMDGPU-DISABLED-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
3048 ; AMDGPU-DISABLED-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
3049 ; AMDGPU-DISABLED: user_code.entry:
3050 ; AMDGPU-DISABLED-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) #[[ATTR3]]
3051 ; AMDGPU-DISABLED-NEXT: store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
3052 ; AMDGPU-DISABLED-NEXT: call void @__omp_outlined__6(i32* noalias nocapture noundef nonnull readonly align 4 dereferenceable(4) [[DOTTHREADID_TEMP_]], i32* noalias nocapture noundef nonnull readnone align 4 dereferenceable(4) [[DOTZERO_ADDR]]) #[[ATTR3]]
3053 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
3054 ; AMDGPU-DISABLED-NEXT: ret void
3055 ; AMDGPU-DISABLED: worker.exit:
3056 ; AMDGPU-DISABLED-NEXT: ret void
3059 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3060 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__6
3061 ; AMDGPU-DISABLED-SAME: (i32* noalias nocapture nofree noundef nonnull readonly align 4 dereferenceable(4) [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree nonnull readnone align 4 dereferenceable(4) [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3062 ; AMDGPU-DISABLED-NEXT: entry:
3063 ; AMDGPU-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
3064 ; AMDGPU-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
3065 ; AMDGPU-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
3066 ; AMDGPU-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x i8*], align 8
3067 ; AMDGPU-DISABLED-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
3068 ; AMDGPU-DISABLED-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTGLOBAL_TID_]], align 4
3069 ; AMDGPU-DISABLED-NEXT: [[TMP1:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
3070 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* noundef @[[GLOB1]], i32 [[TMP0]], i32 noundef 1, i32 noundef -1, i32 noundef -1, i8* noundef bitcast (void (i32*, i32*)* @__omp_outlined__7 to i8*), i8* noundef bitcast (void (i16, i32)* @__omp_outlined__7_wrapper to i8*), i8** noundef [[TMP1]], i64 noundef 0)
3071 ; AMDGPU-DISABLED-NEXT: [[CALL:%.*]] = call i32 @unknown() #[[ATTR10]]
3072 ; AMDGPU-DISABLED-NEXT: [[TMP2:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS1]] to i8**
3073 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* noundef @[[GLOB1]], i32 [[TMP0]], i32 noundef 1, i32 noundef -1, i32 noundef -1, i8* noundef bitcast (void (i32*, i32*)* @__omp_outlined__8 to i8*), i8* noundef bitcast (void (i16, i32)* @__omp_outlined__8_wrapper to i8*), i8** noundef [[TMP2]], i64 noundef 0)
3074 ; AMDGPU-DISABLED-NEXT: ret void
3077 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3078 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__7
3079 ; AMDGPU-DISABLED-SAME: (i32* noalias nocapture nofree readnone [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree readnone [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3080 ; AMDGPU-DISABLED-NEXT: entry:
3081 ; AMDGPU-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
3082 ; AMDGPU-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
3083 ; AMDGPU-DISABLED-NEXT: call void @p0() #[[ATTR10]]
3084 ; AMDGPU-DISABLED-NEXT: ret void
3087 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3088 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__7_wrapper
3089 ; AMDGPU-DISABLED-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
3090 ; AMDGPU-DISABLED-NEXT: entry:
3091 ; AMDGPU-DISABLED-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
3092 ; AMDGPU-DISABLED-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
3093 ; AMDGPU-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3094 ; AMDGPU-DISABLED-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
3095 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
3096 ; AMDGPU-DISABLED-NEXT: call void @__omp_outlined__7(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR3]]
3097 ; AMDGPU-DISABLED-NEXT: ret void
3100 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3101 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__8
3102 ; AMDGPU-DISABLED-SAME: (i32* noalias nocapture nofree readnone [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree readnone [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3103 ; AMDGPU-DISABLED-NEXT: entry:
3104 ; AMDGPU-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
3105 ; AMDGPU-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
3106 ; AMDGPU-DISABLED-NEXT: call void @p1() #[[ATTR10]]
3107 ; AMDGPU-DISABLED-NEXT: ret void
3110 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3111 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__8_wrapper
3112 ; AMDGPU-DISABLED-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
3113 ; AMDGPU-DISABLED-NEXT: entry:
3114 ; AMDGPU-DISABLED-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
3115 ; AMDGPU-DISABLED-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
3116 ; AMDGPU-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3117 ; AMDGPU-DISABLED-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
3118 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
3119 ; AMDGPU-DISABLED-NEXT: call void @__omp_outlined__8(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR3]]
3120 ; AMDGPU-DISABLED-NEXT: ret void
3123 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3124 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66
3125 ; AMDGPU-DISABLED-SAME: () #[[ATTR0]] {
3126 ; AMDGPU-DISABLED-NEXT: entry:
3127 ; AMDGPU-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3128 ; AMDGPU-DISABLED-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
3129 ; AMDGPU-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 true, i1 true)
3130 ; AMDGPU-DISABLED-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
3131 ; AMDGPU-DISABLED-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
3132 ; AMDGPU-DISABLED: user_code.entry:
3133 ; AMDGPU-DISABLED-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) #[[ATTR3]]
3134 ; AMDGPU-DISABLED-NEXT: store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
3135 ; AMDGPU-DISABLED-NEXT: call void @__omp_outlined__9(i32* noalias nocapture noundef nonnull readonly align 4 dereferenceable(4) [[DOTTHREADID_TEMP_]], i32* noalias nocapture noundef nonnull readnone align 4 dereferenceable(4) [[DOTZERO_ADDR]]) #[[ATTR3]]
3136 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
3137 ; AMDGPU-DISABLED-NEXT: ret void
3138 ; AMDGPU-DISABLED: worker.exit:
3139 ; AMDGPU-DISABLED-NEXT: ret void
3142 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3143 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__9
3144 ; AMDGPU-DISABLED-SAME: (i32* noalias nocapture nofree noundef nonnull readonly align 4 dereferenceable(4) [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree nonnull readnone align 4 dereferenceable(4) [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3145 ; AMDGPU-DISABLED-NEXT: entry:
3146 ; AMDGPU-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
3147 ; AMDGPU-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
3148 ; AMDGPU-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
3149 ; AMDGPU-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x i8*], align 8
3150 ; AMDGPU-DISABLED-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
3151 ; AMDGPU-DISABLED-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTGLOBAL_TID_]], align 4
3152 ; AMDGPU-DISABLED-NEXT: [[TMP1:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
3153 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* noundef @[[GLOB1]], i32 [[TMP0]], i32 noundef 1, i32 noundef -1, i32 noundef -1, i8* noundef bitcast (void (i32*, i32*)* @__omp_outlined__10 to i8*), i8* noundef bitcast (void (i16, i32)* @__omp_outlined__10_wrapper to i8*), i8** noundef [[TMP1]], i64 noundef 0)
3154 ; AMDGPU-DISABLED-NEXT: call void @unknown_no_openmp() #[[ATTR9]]
3155 ; AMDGPU-DISABLED-NEXT: [[TMP2:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS1]] to i8**
3156 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* noundef @[[GLOB1]], i32 [[TMP0]], i32 noundef 1, i32 noundef -1, i32 noundef -1, i8* noundef bitcast (void (i32*, i32*)* @__omp_outlined__11 to i8*), i8* noundef bitcast (void (i16, i32)* @__omp_outlined__11_wrapper to i8*), i8** noundef [[TMP2]], i64 noundef 0)
3157 ; AMDGPU-DISABLED-NEXT: ret void
3160 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3161 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__10
3162 ; AMDGPU-DISABLED-SAME: (i32* noalias nocapture nofree readnone [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree readnone [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3163 ; AMDGPU-DISABLED-NEXT: entry:
3164 ; AMDGPU-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
3165 ; AMDGPU-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
3166 ; AMDGPU-DISABLED-NEXT: call void @p0() #[[ATTR10]]
3167 ; AMDGPU-DISABLED-NEXT: ret void
3170 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3171 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__10_wrapper
3172 ; AMDGPU-DISABLED-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
3173 ; AMDGPU-DISABLED-NEXT: entry:
3174 ; AMDGPU-DISABLED-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
3175 ; AMDGPU-DISABLED-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
3176 ; AMDGPU-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3177 ; AMDGPU-DISABLED-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
3178 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
3179 ; AMDGPU-DISABLED-NEXT: call void @__omp_outlined__10(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR3]]
3180 ; AMDGPU-DISABLED-NEXT: ret void
3183 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3184 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__11
3185 ; AMDGPU-DISABLED-SAME: (i32* noalias nocapture nofree readnone [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree readnone [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3186 ; AMDGPU-DISABLED-NEXT: entry:
3187 ; AMDGPU-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
3188 ; AMDGPU-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
3189 ; AMDGPU-DISABLED-NEXT: call void @p1() #[[ATTR10]]
3190 ; AMDGPU-DISABLED-NEXT: ret void
3193 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3194 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__11_wrapper
3195 ; AMDGPU-DISABLED-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
3196 ; AMDGPU-DISABLED-NEXT: entry:
3197 ; AMDGPU-DISABLED-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
3198 ; AMDGPU-DISABLED-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
3199 ; AMDGPU-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3200 ; AMDGPU-DISABLED-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
3201 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
3202 ; AMDGPU-DISABLED-NEXT: call void @__omp_outlined__11(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR3]]
3203 ; AMDGPU-DISABLED-NEXT: ret void
3206 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3207 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_pure_l77
3208 ; AMDGPU-DISABLED-SAME: () #[[ATTR0]] {
3209 ; AMDGPU-DISABLED-NEXT: entry:
3210 ; AMDGPU-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3211 ; AMDGPU-DISABLED-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
3212 ; AMDGPU-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 true, i1 true)
3213 ; AMDGPU-DISABLED-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
3214 ; AMDGPU-DISABLED-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
3215 ; AMDGPU-DISABLED: user_code.entry:
3216 ; AMDGPU-DISABLED-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) #[[ATTR3]]
3217 ; AMDGPU-DISABLED-NEXT: store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
3218 ; AMDGPU-DISABLED-NEXT: call void @__omp_outlined__12(i32* noalias nocapture noundef nonnull readonly align 4 dereferenceable(4) [[DOTTHREADID_TEMP_]], i32* noalias nocapture noundef nonnull readnone align 4 dereferenceable(4) [[DOTZERO_ADDR]]) #[[ATTR3]]
3219 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
3220 ; AMDGPU-DISABLED-NEXT: ret void
3221 ; AMDGPU-DISABLED: worker.exit:
3222 ; AMDGPU-DISABLED-NEXT: ret void
3225 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3226 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__12
3227 ; AMDGPU-DISABLED-SAME: (i32* noalias nocapture nofree noundef nonnull readonly align 4 dereferenceable(4) [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree nonnull readnone align 4 dereferenceable(4) [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3228 ; AMDGPU-DISABLED-NEXT: entry:
3229 ; AMDGPU-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
3230 ; AMDGPU-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
3231 ; AMDGPU-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
3232 ; AMDGPU-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x i8*], align 8
3233 ; AMDGPU-DISABLED-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
3234 ; AMDGPU-DISABLED-NEXT: call void @unknown_no_openmp() #[[ATTR9]]
3235 ; AMDGPU-DISABLED-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTGLOBAL_TID_]], align 4
3236 ; AMDGPU-DISABLED-NEXT: [[TMP1:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
3237 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* noundef @[[GLOB1]], i32 [[TMP0]], i32 noundef 1, i32 noundef -1, i32 noundef -1, i8* noundef bitcast (void (i32*, i32*)* @__omp_outlined__13 to i8*), i8* noundef bitcast (void (i16, i32)* @__omp_outlined__13_wrapper to i8*), i8** noundef [[TMP1]], i64 noundef 0)
3238 ; AMDGPU-DISABLED-NEXT: [[TMP2:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS1]] to i8**
3239 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* noundef @[[GLOB1]], i32 [[TMP0]], i32 noundef 1, i32 noundef -1, i32 noundef -1, i8* noundef bitcast (void (i32*, i32*)* @__omp_outlined__14 to i8*), i8* noundef bitcast (void (i16, i32)* @__omp_outlined__14_wrapper to i8*), i8** noundef [[TMP2]], i64 noundef 0)
3240 ; AMDGPU-DISABLED-NEXT: ret void
3243 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3244 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__13
3245 ; AMDGPU-DISABLED-SAME: (i32* noalias nocapture nofree readnone [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree readnone [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3246 ; AMDGPU-DISABLED-NEXT: entry:
3247 ; AMDGPU-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
3248 ; AMDGPU-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
3249 ; AMDGPU-DISABLED-NEXT: call void @p0() #[[ATTR10]]
3250 ; AMDGPU-DISABLED-NEXT: ret void
3253 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3254 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__13_wrapper
3255 ; AMDGPU-DISABLED-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
3256 ; AMDGPU-DISABLED-NEXT: entry:
3257 ; AMDGPU-DISABLED-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
3258 ; AMDGPU-DISABLED-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
3259 ; AMDGPU-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3260 ; AMDGPU-DISABLED-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
3261 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
3262 ; AMDGPU-DISABLED-NEXT: call void @__omp_outlined__13(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR3]]
3263 ; AMDGPU-DISABLED-NEXT: ret void
3266 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3267 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__14
3268 ; AMDGPU-DISABLED-SAME: (i32* noalias nocapture nofree readnone [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree readnone [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3269 ; AMDGPU-DISABLED-NEXT: entry:
3270 ; AMDGPU-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
3271 ; AMDGPU-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
3272 ; AMDGPU-DISABLED-NEXT: call void @p1() #[[ATTR10]]
3273 ; AMDGPU-DISABLED-NEXT: ret void
3276 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3277 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__14_wrapper
3278 ; AMDGPU-DISABLED-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
3279 ; AMDGPU-DISABLED-NEXT: entry:
3280 ; AMDGPU-DISABLED-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
3281 ; AMDGPU-DISABLED-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
3282 ; AMDGPU-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3283 ; AMDGPU-DISABLED-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
3284 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
3285 ; AMDGPU-DISABLED-NEXT: call void @__omp_outlined__14(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR3]]
3286 ; AMDGPU-DISABLED-NEXT: ret void
3289 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3290 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92
3291 ; AMDGPU-DISABLED-SAME: () #[[ATTR0]] {
3292 ; AMDGPU-DISABLED-NEXT: entry:
3293 ; AMDGPU-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3294 ; AMDGPU-DISABLED-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
3295 ; AMDGPU-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 true, i1 true)
3296 ; AMDGPU-DISABLED-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
3297 ; AMDGPU-DISABLED-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
3298 ; AMDGPU-DISABLED: user_code.entry:
3299 ; AMDGPU-DISABLED-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) #[[ATTR3]]
3300 ; AMDGPU-DISABLED-NEXT: call void @__omp_outlined__15(i32* noalias nocapture noundef nonnull readnone align 4 dereferenceable(4) [[DOTTHREADID_TEMP_]], i32* noalias nocapture noundef nonnull readnone align 4 dereferenceable(4) [[DOTZERO_ADDR]]) #[[ATTR3]]
3301 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
3302 ; AMDGPU-DISABLED-NEXT: ret void
3303 ; AMDGPU-DISABLED: worker.exit:
3304 ; AMDGPU-DISABLED-NEXT: ret void
3307 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3308 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__15
3309 ; AMDGPU-DISABLED-SAME: (i32* noalias nocapture nofree nonnull readnone align 4 dereferenceable(4) [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree nonnull readnone align 4 dereferenceable(4) [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3310 ; AMDGPU-DISABLED-NEXT: entry:
3311 ; AMDGPU-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
3312 ; AMDGPU-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
3313 ; AMDGPU-DISABLED-NEXT: [[CALL:%.*]] = call i32 bitcast (i32 (...)* @omp_get_thread_num to i32 ()*)() #[[ATTR10]]
3314 ; AMDGPU-DISABLED-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after.internalized(i32 [[CALL]]) #[[ATTR8]]
3315 ; AMDGPU-DISABLED-NEXT: ret void
3318 ; AMDGPU-DISABLED: Function Attrs: convergent noinline nounwind
3319 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after.internalized
3320 ; AMDGPU-DISABLED-SAME: (i32 [[A:%.*]]) #[[ATTR1]] {
3321 ; AMDGPU-DISABLED-NEXT: entry:
3322 ; AMDGPU-DISABLED-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
3323 ; AMDGPU-DISABLED-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
3324 ; AMDGPU-DISABLED-NEXT: [[CMP:%.*]] = icmp eq i32 [[A]], 0
3325 ; AMDGPU-DISABLED-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
3326 ; AMDGPU-DISABLED: if.then:
3327 ; AMDGPU-DISABLED-NEXT: br label [[RETURN:%.*]]
3328 ; AMDGPU-DISABLED: if.end:
3329 ; AMDGPU-DISABLED-NEXT: [[SUB:%.*]] = sub nsw i32 [[A]], 1
3330 ; AMDGPU-DISABLED-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after.internalized(i32 [[SUB]]) #[[ATTR8]]
3331 ; AMDGPU-DISABLED-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after_after.internalized() #[[ATTR8]]
3332 ; AMDGPU-DISABLED-NEXT: br label [[RETURN]]
3333 ; AMDGPU-DISABLED: return:
3334 ; AMDGPU-DISABLED-NEXT: ret void
3337 ; AMDGPU-DISABLED: Function Attrs: convergent noinline nounwind
3338 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after
3339 ; AMDGPU-DISABLED-SAME: (i32 [[A:%.*]]) #[[ATTR1]] {
3340 ; AMDGPU-DISABLED-NEXT: entry:
3341 ; AMDGPU-DISABLED-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
3342 ; AMDGPU-DISABLED-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
3343 ; AMDGPU-DISABLED-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
3344 ; AMDGPU-DISABLED-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP0]], 0
3345 ; AMDGPU-DISABLED-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
3346 ; AMDGPU-DISABLED: if.then:
3347 ; AMDGPU-DISABLED-NEXT: br label [[RETURN:%.*]]
3348 ; AMDGPU-DISABLED: if.end:
3349 ; AMDGPU-DISABLED-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
3350 ; AMDGPU-DISABLED-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP1]], 1
3351 ; AMDGPU-DISABLED-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after(i32 [[SUB]]) #[[ATTR10]]
3352 ; AMDGPU-DISABLED-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after_after() #[[ATTR10]]
3353 ; AMDGPU-DISABLED-NEXT: br label [[RETURN]]
3354 ; AMDGPU-DISABLED: return:
3355 ; AMDGPU-DISABLED-NEXT: ret void
3358 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3359 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112
3360 ; AMDGPU-DISABLED-SAME: () #[[ATTR0]] {
3361 ; AMDGPU-DISABLED-NEXT: entry:
3362 ; AMDGPU-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3363 ; AMDGPU-DISABLED-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
3364 ; AMDGPU-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 true, i1 true)
3365 ; AMDGPU-DISABLED-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
3366 ; AMDGPU-DISABLED-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
3367 ; AMDGPU-DISABLED: user_code.entry:
3368 ; AMDGPU-DISABLED-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) #[[ATTR3]]
3369 ; AMDGPU-DISABLED-NEXT: call void @__omp_outlined__16(i32* noalias nocapture noundef nonnull readnone align 4 dereferenceable(4) [[DOTTHREADID_TEMP_]], i32* noalias nocapture noundef nonnull readnone align 4 dereferenceable(4) [[DOTZERO_ADDR]]) #[[ATTR3]]
3370 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
3371 ; AMDGPU-DISABLED-NEXT: ret void
3372 ; AMDGPU-DISABLED: worker.exit:
3373 ; AMDGPU-DISABLED-NEXT: ret void
3376 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3377 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__16
3378 ; AMDGPU-DISABLED-SAME: (i32* noalias nocapture nofree nonnull readnone align 4 dereferenceable(4) [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree nonnull readnone align 4 dereferenceable(4) [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3379 ; AMDGPU-DISABLED-NEXT: entry:
3380 ; AMDGPU-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
3381 ; AMDGPU-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
3382 ; AMDGPU-DISABLED-NEXT: call void @weak_callee_empty() #[[ATTR8]]
3383 ; AMDGPU-DISABLED-NEXT: ret void
3386 ; AMDGPU-DISABLED: Function Attrs: convergent noinline nounwind
3387 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@weak_callee_empty
3388 ; AMDGPU-DISABLED-SAME: () #[[ATTR1]] {
3389 ; AMDGPU-DISABLED-NEXT: entry:
3390 ; AMDGPU-DISABLED-NEXT: ret void
3393 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3394 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__17
3395 ; AMDGPU-DISABLED-SAME: (i32* noalias nocapture nofree readnone [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree readnone [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3396 ; AMDGPU-DISABLED-NEXT: entry:
3397 ; AMDGPU-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
3398 ; AMDGPU-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
3399 ; AMDGPU-DISABLED-NEXT: call void @p0() #[[ATTR10]]
3400 ; AMDGPU-DISABLED-NEXT: ret void
3403 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3404 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__17_wrapper
3405 ; AMDGPU-DISABLED-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
3406 ; AMDGPU-DISABLED-NEXT: entry:
3407 ; AMDGPU-DISABLED-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
3408 ; AMDGPU-DISABLED-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
3409 ; AMDGPU-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3410 ; AMDGPU-DISABLED-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
3411 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
3412 ; AMDGPU-DISABLED-NEXT: call void @__omp_outlined__17(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR3]]
3413 ; AMDGPU-DISABLED-NEXT: ret void
3416 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3417 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__18
3418 ; AMDGPU-DISABLED-SAME: (i32* noalias nocapture nofree readnone [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree readnone [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3419 ; AMDGPU-DISABLED-NEXT: entry:
3420 ; AMDGPU-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
3421 ; AMDGPU-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
3422 ; AMDGPU-DISABLED-NEXT: call void @p0() #[[ATTR10]]
3423 ; AMDGPU-DISABLED-NEXT: ret void
3426 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3427 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__18_wrapper
3428 ; AMDGPU-DISABLED-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
3429 ; AMDGPU-DISABLED-NEXT: entry:
3430 ; AMDGPU-DISABLED-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
3431 ; AMDGPU-DISABLED-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
3432 ; AMDGPU-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3433 ; AMDGPU-DISABLED-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
3434 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
3435 ; AMDGPU-DISABLED-NEXT: call void @__omp_outlined__18(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR3]]
3436 ; AMDGPU-DISABLED-NEXT: ret void
3439 ; AMDGPU-DISABLED: Function Attrs: convergent noinline nounwind
3440 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after_after.internalized
3441 ; AMDGPU-DISABLED-SAME: () #[[ATTR1]] {
3442 ; AMDGPU-DISABLED-NEXT: entry:
3443 ; AMDGPU-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
3444 ; AMDGPU-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* noundef @[[GLOB2]]) #[[ATTR3]]
3445 ; AMDGPU-DISABLED-NEXT: [[TMP1:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
3446 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* noundef @[[GLOB2]], i32 [[TMP0]], i32 noundef 1, i32 noundef -1, i32 noundef -1, i8* noundef bitcast (void (i32*, i32*)* @__omp_outlined__19 to i8*), i8* noundef bitcast (void (i16, i32)* @__omp_outlined__19_wrapper to i8*), i8** noundef [[TMP1]], i64 noundef 0)
3447 ; AMDGPU-DISABLED-NEXT: ret void
3450 ; AMDGPU-DISABLED: Function Attrs: convergent noinline nounwind
3451 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after_after
3452 ; AMDGPU-DISABLED-SAME: () #[[ATTR1]] {
3453 ; AMDGPU-DISABLED-NEXT: entry:
3454 ; AMDGPU-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
3455 ; AMDGPU-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
3456 ; AMDGPU-DISABLED-NEXT: [[TMP1:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
3457 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__19 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__19_wrapper to i8*), i8** [[TMP1]], i64 0)
3458 ; AMDGPU-DISABLED-NEXT: ret void
3461 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3462 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__19
3463 ; AMDGPU-DISABLED-SAME: (i32* noalias nocapture nofree readnone [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree readnone [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3464 ; AMDGPU-DISABLED-NEXT: entry:
3465 ; AMDGPU-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
3466 ; AMDGPU-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
3467 ; AMDGPU-DISABLED-NEXT: call void @p0() #[[ATTR10]]
3468 ; AMDGPU-DISABLED-NEXT: ret void
3471 ; AMDGPU-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3472 ; AMDGPU-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__19_wrapper
3473 ; AMDGPU-DISABLED-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
3474 ; AMDGPU-DISABLED-NEXT: entry:
3475 ; AMDGPU-DISABLED-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
3476 ; AMDGPU-DISABLED-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
3477 ; AMDGPU-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3478 ; AMDGPU-DISABLED-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
3479 ; AMDGPU-DISABLED-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
3480 ; AMDGPU-DISABLED-NEXT: call void @__omp_outlined__19(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR3]]
3481 ; AMDGPU-DISABLED-NEXT: ret void
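;; The NVPTX-DISABLED check lines below mirror the AMDGPU-DISABLED ones for
;; the nvptx64 RUN line with the state-machine rewrite disabled.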
3484 ; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3485 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_no_state_machine_needed_l14
3486 ; NVPTX-DISABLED-SAME: () #[[ATTR0:[0-9]+]] {
3487 ; NVPTX-DISABLED-NEXT: entry:
3488 ; NVPTX-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3489 ; NVPTX-DISABLED-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
3490 ; NVPTX-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1:[0-9]+]], i8 1, i1 true, i1 true)
3491 ; NVPTX-DISABLED-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
3492 ; NVPTX-DISABLED-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
3493 ; NVPTX-DISABLED: user_code.entry:
3494 ; NVPTX-DISABLED-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) #[[ATTR3:[0-9]+]]
3495 ; NVPTX-DISABLED-NEXT: call void @__omp_outlined__(i32* noalias nocapture noundef nonnull readnone align 4 dereferenceable(4) [[DOTTHREADID_TEMP_]], i32* noalias nocapture noundef nonnull readnone align 4 dereferenceable(4) [[DOTZERO_ADDR]]) #[[ATTR3]]
3496 ; NVPTX-DISABLED-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
3497 ; NVPTX-DISABLED-NEXT: ret void
3498 ; NVPTX-DISABLED: worker.exit:
3499 ; NVPTX-DISABLED-NEXT: ret void
3502 ; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3503 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__
3504 ; NVPTX-DISABLED-SAME: (i32* noalias nocapture nofree nonnull readnone align 4 dereferenceable(4) [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree nonnull readnone align 4 dereferenceable(4) [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
3505 ; NVPTX-DISABLED-NEXT: entry:
3506 ; NVPTX-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
3507 ; NVPTX-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
3508 ; NVPTX-DISABLED-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR8:[0-9]+]]
3509 ; NVPTX-DISABLED-NEXT: call void @unknown_no_openmp() #[[ATTR9:[0-9]+]]
3510 ; NVPTX-DISABLED-NEXT: ret void
3513 ; NVPTX-DISABLED: Function Attrs: convergent noinline nounwind
3514 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@no_parallel_region_in_here.internalized
3515 ; NVPTX-DISABLED-SAME: () #[[ATTR1:[0-9]+]] {
3516 ; NVPTX-DISABLED-NEXT: entry:
3517 ; NVPTX-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* noundef @[[GLOB2:[0-9]+]]) #[[ATTR3]]
3518 ; NVPTX-DISABLED-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_single(%struct.ident_t* noundef @[[GLOB2]], i32 [[TMP0]]) #[[ATTR3]]
3519 ; NVPTX-DISABLED-NEXT: [[TMP2:%.*]] = icmp ne i32 [[TMP1]], 0
3520 ; NVPTX-DISABLED-NEXT: br i1 [[TMP2]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
3521 ; NVPTX-DISABLED: omp_if.then:
3522 ; NVPTX-DISABLED-NEXT: store i32 0, i32* @G, align 4
3523 ; NVPTX-DISABLED-NEXT: call void @__kmpc_end_single(%struct.ident_t* noundef @[[GLOB2]], i32 [[TMP0]]) #[[ATTR3]]
3524 ; NVPTX-DISABLED-NEXT: br label [[OMP_IF_END]]
3525 ; NVPTX-DISABLED: omp_if.end:
3526 ; NVPTX-DISABLED-NEXT: call void @__kmpc_barrier(%struct.ident_t* noundef @[[GLOB3:[0-9]+]], i32 [[TMP0]]) #[[ATTR3]]
3527 ; NVPTX-DISABLED-NEXT: ret void
3530 ; NVPTX-DISABLED: Function Attrs: convergent noinline nounwind
3531 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@no_parallel_region_in_here
3532 ; NVPTX-DISABLED-SAME: () #[[ATTR1]] {
3533 ; NVPTX-DISABLED-NEXT: entry:
3534 ; NVPTX-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
3535 ; NVPTX-DISABLED-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_single(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]])
3536 ; NVPTX-DISABLED-NEXT: [[TMP2:%.*]] = icmp ne i32 [[TMP1]], 0
3537 ; NVPTX-DISABLED-NEXT: br i1 [[TMP2]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
3538 ; NVPTX-DISABLED: omp_if.then:
3539 ; NVPTX-DISABLED-NEXT: store i32 0, i32* @G, align 4
3540 ; NVPTX-DISABLED-NEXT: call void @__kmpc_end_single(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]])
3541 ; NVPTX-DISABLED-NEXT: br label [[OMP_IF_END]]
3542 ; NVPTX-DISABLED: omp_if.end:
3543 ; NVPTX-DISABLED-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3]], i32 [[TMP0]])
3544 ; NVPTX-DISABLED-NEXT: ret void
3547 ; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
3548 ; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_l22
3549 ; NVPTX-DISABLED-SAME: () #[[ATTR0]] {
3550 ; NVPTX-DISABLED-NEXT: entry:
3551 ; NVPTX-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
3552 ; NVPTX-DISABLED-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
3553 ; NVPTX-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 true, i1 true)
3554 ; NVPTX-DISABLED-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
3555 ; NVPTX-DISABLED-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
3556 ; NVPTX-DISABLED: user_code.entry:
3557 ; NVPTX-DISABLED-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) #[[ATTR3]]
3558 ; NVPTX-DISABLED-NEXT: store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
3559 ; NVPTX-DISABLED-NEXT: call void @__omp_outlined__1(i32* noalias nocapture noundef nonnull readonly align 4 dereferenceable(4) [[DOTTHREADID_TEMP_]], i32* noalias nocapture noundef nonnull readnone align 4 dereferenceable(4) [[DOTZERO_ADDR]]) #[[ATTR3]]
3560 ; NVPTX-DISABLED-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
3561 ; NVPTX-DISABLED-NEXT: ret void
3562 ; NVPTX-DISABLED: worker.exit:
3563 ; NVPTX-DISABLED-NEXT: ret void
; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__1
; NVPTX-DISABLED-SAME: (i32* noalias nocapture nofree noundef nonnull readonly align 4 dereferenceable(4) [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree nonnull readnone align 4 dereferenceable(4) [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX-DISABLED-NEXT: entry:
; NVPTX-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
; NVPTX-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x i8*], align 8
; NVPTX-DISABLED-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
; NVPTX-DISABLED-NEXT: call void @unknown_no_openmp() #[[ATTR9]]
; NVPTX-DISABLED-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTGLOBAL_TID_]], align 4
; NVPTX-DISABLED-NEXT: [[TMP1:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
; NVPTX-DISABLED-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* noundef @[[GLOB1]], i32 [[TMP0]], i32 noundef 1, i32 noundef -1, i32 noundef -1, i8* noundef bitcast (void (i32*, i32*)* @__omp_outlined__2 to i8*), i8* noundef bitcast (void (i16, i32)* @__omp_outlined__2_wrapper to i8*), i8** noundef [[TMP1]], i64 noundef 0)
; NVPTX-DISABLED-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR8]]
; NVPTX-DISABLED-NEXT: [[TMP2:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS1]] to i8**
; NVPTX-DISABLED-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* noundef @[[GLOB1]], i32 [[TMP0]], i32 noundef 1, i32 noundef -1, i32 noundef -1, i8* noundef bitcast (void (i32*, i32*)* @__omp_outlined__3 to i8*), i8* noundef bitcast (void (i16, i32)* @__omp_outlined__3_wrapper to i8*), i8** noundef [[TMP2]], i64 noundef 0)
; NVPTX-DISABLED-NEXT: ret void
;
;
; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__2
; NVPTX-DISABLED-SAME: (i32* noalias nocapture nofree readnone [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree readnone [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX-DISABLED-NEXT: entry:
; NVPTX-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-DISABLED-NEXT: call void @p0() #[[ATTR10:[0-9]+]]
; NVPTX-DISABLED-NEXT: ret void
;
;
; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__2_wrapper
; NVPTX-DISABLED-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
; NVPTX-DISABLED-NEXT: entry:
; NVPTX-DISABLED-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
; NVPTX-DISABLED-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
; NVPTX-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; NVPTX-DISABLED-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
; NVPTX-DISABLED-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
; NVPTX-DISABLED-NEXT: call void @__omp_outlined__2(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX-DISABLED-NEXT: ret void
;
;
; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__3
; NVPTX-DISABLED-SAME: (i32* noalias nocapture nofree readnone [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree readnone [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX-DISABLED-NEXT: entry:
; NVPTX-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-DISABLED-NEXT: call void @p1() #[[ATTR10]]
; NVPTX-DISABLED-NEXT: ret void
;
;
; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__3_wrapper
; NVPTX-DISABLED-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
; NVPTX-DISABLED-NEXT: entry:
; NVPTX-DISABLED-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
; NVPTX-DISABLED-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
; NVPTX-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; NVPTX-DISABLED-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
; NVPTX-DISABLED-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
; NVPTX-DISABLED-NEXT: call void @__omp_outlined__3(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX-DISABLED-NEXT: ret void
;
;
; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_interprocedural_l39
; NVPTX-DISABLED-SAME: () #[[ATTR0]] {
; NVPTX-DISABLED-NEXT: entry:
; NVPTX-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; NVPTX-DISABLED-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
; NVPTX-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 true, i1 true)
; NVPTX-DISABLED-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
; NVPTX-DISABLED-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; NVPTX-DISABLED: user_code.entry:
; NVPTX-DISABLED-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) #[[ATTR3]]
; NVPTX-DISABLED-NEXT: store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
; NVPTX-DISABLED-NEXT: call void @__omp_outlined__4(i32* noalias nocapture noundef nonnull readonly align 4 dereferenceable(4) [[DOTTHREADID_TEMP_]], i32* noalias nocapture noundef nonnull readnone align 4 dereferenceable(4) [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX-DISABLED-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
; NVPTX-DISABLED-NEXT: ret void
; NVPTX-DISABLED: worker.exit:
; NVPTX-DISABLED-NEXT: ret void
;
;
; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__4
; NVPTX-DISABLED-SAME: (i32* noalias nocapture nofree noundef nonnull readonly align 4 dereferenceable(4) [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree nonnull readnone align 4 dereferenceable(4) [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX-DISABLED-NEXT: entry:
; NVPTX-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
; NVPTX-DISABLED-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
; NVPTX-DISABLED-NEXT: call void @unknown_no_openmp() #[[ATTR9]]
; NVPTX-DISABLED-NEXT: call void @simple_state_machine_interprocedural_before.internalized() #[[ATTR8]]
; NVPTX-DISABLED-NEXT: call void @no_parallel_region_in_here.internalized() #[[ATTR8]]
; NVPTX-DISABLED-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTGLOBAL_TID_]], align 4
; NVPTX-DISABLED-NEXT: [[TMP1:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
; NVPTX-DISABLED-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* noundef @[[GLOB1]], i32 [[TMP0]], i32 noundef 1, i32 noundef -1, i32 noundef -1, i8* noundef bitcast (void (i32*, i32*)* @__omp_outlined__5 to i8*), i8* noundef bitcast (void (i16, i32)* @__omp_outlined__5_wrapper to i8*), i8** noundef [[TMP1]], i64 noundef 0)
; NVPTX-DISABLED-NEXT: call void @simple_state_machine_interprocedural_after.internalized() #[[ATTR8]]
; NVPTX-DISABLED-NEXT: ret void
;
;
; NVPTX-DISABLED: Function Attrs: convergent noinline nounwind
; NVPTX-DISABLED-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_before.internalized
; NVPTX-DISABLED-SAME: () #[[ATTR1]] {
; NVPTX-DISABLED-NEXT: entry:
; NVPTX-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
; NVPTX-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* noundef @[[GLOB2]]) #[[ATTR3]]
; NVPTX-DISABLED-NEXT: [[TMP1:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
; NVPTX-DISABLED-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* noundef @[[GLOB2]], i32 [[TMP0]], i32 noundef 1, i32 noundef -1, i32 noundef -1, i8* noundef bitcast (void (i32*, i32*)* @__omp_outlined__17 to i8*), i8* noundef bitcast (void (i16, i32)* @__omp_outlined__17_wrapper to i8*), i8** noundef [[TMP1]], i64 noundef 0)
; NVPTX-DISABLED-NEXT: ret void
;
;
; NVPTX-DISABLED: Function Attrs: convergent noinline nounwind
; NVPTX-DISABLED-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_before
; NVPTX-DISABLED-SAME: () #[[ATTR1]] {
; NVPTX-DISABLED-NEXT: entry:
; NVPTX-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
; NVPTX-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
; NVPTX-DISABLED-NEXT: [[TMP1:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
; NVPTX-DISABLED-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__17 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__17_wrapper to i8*), i8** [[TMP1]], i64 0)
; NVPTX-DISABLED-NEXT: ret void
;
;
; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__5
; NVPTX-DISABLED-SAME: (i32* noalias nocapture nofree readnone [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree readnone [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX-DISABLED-NEXT: entry:
; NVPTX-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-DISABLED-NEXT: call void @p1() #[[ATTR10]]
; NVPTX-DISABLED-NEXT: ret void
;
;
; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__5_wrapper
; NVPTX-DISABLED-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
; NVPTX-DISABLED-NEXT: entry:
; NVPTX-DISABLED-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
; NVPTX-DISABLED-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
; NVPTX-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; NVPTX-DISABLED-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
; NVPTX-DISABLED-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
; NVPTX-DISABLED-NEXT: call void @__omp_outlined__5(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX-DISABLED-NEXT: ret void
;
;
; NVPTX-DISABLED: Function Attrs: convergent noinline nounwind
; NVPTX-DISABLED-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_after.internalized
; NVPTX-DISABLED-SAME: () #[[ATTR1]] {
; NVPTX-DISABLED-NEXT: entry:
; NVPTX-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
; NVPTX-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* noundef @[[GLOB2]]) #[[ATTR3]]
; NVPTX-DISABLED-NEXT: [[TMP1:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
; NVPTX-DISABLED-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* noundef @[[GLOB2]], i32 [[TMP0]], i32 noundef 1, i32 noundef -1, i32 noundef -1, i8* noundef bitcast (void (i32*, i32*)* @__omp_outlined__18 to i8*), i8* noundef bitcast (void (i16, i32)* @__omp_outlined__18_wrapper to i8*), i8** noundef [[TMP1]], i64 noundef 0)
; NVPTX-DISABLED-NEXT: ret void
;
;
; NVPTX-DISABLED: Function Attrs: convergent noinline nounwind
; NVPTX-DISABLED-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_after
; NVPTX-DISABLED-SAME: () #[[ATTR1]] {
; NVPTX-DISABLED-NEXT: entry:
; NVPTX-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
; NVPTX-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
; NVPTX-DISABLED-NEXT: [[TMP1:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
; NVPTX-DISABLED-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__18 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__18_wrapper to i8*), i8** [[TMP1]], i64 0)
; NVPTX-DISABLED-NEXT: ret void
;
;
; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_with_fallback_l55
; NVPTX-DISABLED-SAME: () #[[ATTR0]] {
; NVPTX-DISABLED-NEXT: entry:
; NVPTX-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; NVPTX-DISABLED-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
; NVPTX-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 true, i1 true)
; NVPTX-DISABLED-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
; NVPTX-DISABLED-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; NVPTX-DISABLED: user_code.entry:
; NVPTX-DISABLED-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) #[[ATTR3]]
; NVPTX-DISABLED-NEXT: store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
; NVPTX-DISABLED-NEXT: call void @__omp_outlined__6(i32* noalias nocapture noundef nonnull readonly align 4 dereferenceable(4) [[DOTTHREADID_TEMP_]], i32* noalias nocapture noundef nonnull readnone align 4 dereferenceable(4) [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX-DISABLED-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
; NVPTX-DISABLED-NEXT: ret void
; NVPTX-DISABLED: worker.exit:
; NVPTX-DISABLED-NEXT: ret void
;
;
; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__6
; NVPTX-DISABLED-SAME: (i32* noalias nocapture nofree noundef nonnull readonly align 4 dereferenceable(4) [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree nonnull readnone align 4 dereferenceable(4) [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX-DISABLED-NEXT: entry:
; NVPTX-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
; NVPTX-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x i8*], align 8
; NVPTX-DISABLED-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
; NVPTX-DISABLED-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTGLOBAL_TID_]], align 4
; NVPTX-DISABLED-NEXT: [[TMP1:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
; NVPTX-DISABLED-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* noundef @[[GLOB1]], i32 [[TMP0]], i32 noundef 1, i32 noundef -1, i32 noundef -1, i8* noundef bitcast (void (i32*, i32*)* @__omp_outlined__7 to i8*), i8* noundef bitcast (void (i16, i32)* @__omp_outlined__7_wrapper to i8*), i8** noundef [[TMP1]], i64 noundef 0)
; NVPTX-DISABLED-NEXT: [[CALL:%.*]] = call i32 @unknown() #[[ATTR10]]
; NVPTX-DISABLED-NEXT: [[TMP2:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS1]] to i8**
; NVPTX-DISABLED-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* noundef @[[GLOB1]], i32 [[TMP0]], i32 noundef 1, i32 noundef -1, i32 noundef -1, i8* noundef bitcast (void (i32*, i32*)* @__omp_outlined__8 to i8*), i8* noundef bitcast (void (i16, i32)* @__omp_outlined__8_wrapper to i8*), i8** noundef [[TMP2]], i64 noundef 0)
; NVPTX-DISABLED-NEXT: ret void
;
;
; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__7
; NVPTX-DISABLED-SAME: (i32* noalias nocapture nofree readnone [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree readnone [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX-DISABLED-NEXT: entry:
; NVPTX-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-DISABLED-NEXT: call void @p0() #[[ATTR10]]
; NVPTX-DISABLED-NEXT: ret void
;
;
; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__7_wrapper
; NVPTX-DISABLED-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
; NVPTX-DISABLED-NEXT: entry:
; NVPTX-DISABLED-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
; NVPTX-DISABLED-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
; NVPTX-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; NVPTX-DISABLED-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
; NVPTX-DISABLED-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
; NVPTX-DISABLED-NEXT: call void @__omp_outlined__7(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX-DISABLED-NEXT: ret void
;
;
; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__8
; NVPTX-DISABLED-SAME: (i32* noalias nocapture nofree readnone [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree readnone [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX-DISABLED-NEXT: entry:
; NVPTX-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-DISABLED-NEXT: call void @p1() #[[ATTR10]]
; NVPTX-DISABLED-NEXT: ret void
;
;
; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__8_wrapper
; NVPTX-DISABLED-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
; NVPTX-DISABLED-NEXT: entry:
; NVPTX-DISABLED-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
; NVPTX-DISABLED-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
; NVPTX-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; NVPTX-DISABLED-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
; NVPTX-DISABLED-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
; NVPTX-DISABLED-NEXT: call void @__omp_outlined__8(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX-DISABLED-NEXT: ret void
;
;
; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_no_openmp_attr_l66
; NVPTX-DISABLED-SAME: () #[[ATTR0]] {
; NVPTX-DISABLED-NEXT: entry:
; NVPTX-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; NVPTX-DISABLED-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
; NVPTX-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 true, i1 true)
; NVPTX-DISABLED-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
; NVPTX-DISABLED-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; NVPTX-DISABLED: user_code.entry:
; NVPTX-DISABLED-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) #[[ATTR3]]
; NVPTX-DISABLED-NEXT: store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
; NVPTX-DISABLED-NEXT: call void @__omp_outlined__9(i32* noalias nocapture noundef nonnull readonly align 4 dereferenceable(4) [[DOTTHREADID_TEMP_]], i32* noalias nocapture noundef nonnull readnone align 4 dereferenceable(4) [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX-DISABLED-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
; NVPTX-DISABLED-NEXT: ret void
; NVPTX-DISABLED: worker.exit:
; NVPTX-DISABLED-NEXT: ret void
;
;
; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__9
; NVPTX-DISABLED-SAME: (i32* noalias nocapture nofree noundef nonnull readonly align 4 dereferenceable(4) [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree nonnull readnone align 4 dereferenceable(4) [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX-DISABLED-NEXT: entry:
; NVPTX-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
; NVPTX-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x i8*], align 8
; NVPTX-DISABLED-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
; NVPTX-DISABLED-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTGLOBAL_TID_]], align 4
; NVPTX-DISABLED-NEXT: [[TMP1:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
; NVPTX-DISABLED-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* noundef @[[GLOB1]], i32 [[TMP0]], i32 noundef 1, i32 noundef -1, i32 noundef -1, i8* noundef bitcast (void (i32*, i32*)* @__omp_outlined__10 to i8*), i8* noundef bitcast (void (i16, i32)* @__omp_outlined__10_wrapper to i8*), i8** noundef [[TMP1]], i64 noundef 0)
; NVPTX-DISABLED-NEXT: call void @unknown_no_openmp() #[[ATTR9]]
; NVPTX-DISABLED-NEXT: [[TMP2:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS1]] to i8**
; NVPTX-DISABLED-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* noundef @[[GLOB1]], i32 [[TMP0]], i32 noundef 1, i32 noundef -1, i32 noundef -1, i8* noundef bitcast (void (i32*, i32*)* @__omp_outlined__11 to i8*), i8* noundef bitcast (void (i16, i32)* @__omp_outlined__11_wrapper to i8*), i8** noundef [[TMP2]], i64 noundef 0)
; NVPTX-DISABLED-NEXT: ret void
;
;
; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__10
; NVPTX-DISABLED-SAME: (i32* noalias nocapture nofree readnone [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree readnone [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX-DISABLED-NEXT: entry:
; NVPTX-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-DISABLED-NEXT: call void @p0() #[[ATTR10]]
; NVPTX-DISABLED-NEXT: ret void
;
;
; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__10_wrapper
; NVPTX-DISABLED-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
; NVPTX-DISABLED-NEXT: entry:
; NVPTX-DISABLED-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
; NVPTX-DISABLED-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
; NVPTX-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; NVPTX-DISABLED-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
; NVPTX-DISABLED-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
; NVPTX-DISABLED-NEXT: call void @__omp_outlined__10(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX-DISABLED-NEXT: ret void
;
;
; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__11
; NVPTX-DISABLED-SAME: (i32* noalias nocapture nofree readnone [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree readnone [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX-DISABLED-NEXT: entry:
; NVPTX-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-DISABLED-NEXT: call void @p1() #[[ATTR10]]
; NVPTX-DISABLED-NEXT: ret void
;
;
; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__11_wrapper
; NVPTX-DISABLED-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
; NVPTX-DISABLED-NEXT: entry:
; NVPTX-DISABLED-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
; NVPTX-DISABLED-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
; NVPTX-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; NVPTX-DISABLED-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
; NVPTX-DISABLED-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
; NVPTX-DISABLED-NEXT: call void @__omp_outlined__11(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX-DISABLED-NEXT: ret void
;
;
; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_pure_l77
; NVPTX-DISABLED-SAME: () #[[ATTR0]] {
; NVPTX-DISABLED-NEXT: entry:
; NVPTX-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; NVPTX-DISABLED-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
; NVPTX-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 true, i1 true)
; NVPTX-DISABLED-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
; NVPTX-DISABLED-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; NVPTX-DISABLED: user_code.entry:
; NVPTX-DISABLED-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) #[[ATTR3]]
; NVPTX-DISABLED-NEXT: store i32 [[TMP1]], i32* [[DOTTHREADID_TEMP_]], align 4
; NVPTX-DISABLED-NEXT: call void @__omp_outlined__12(i32* noalias nocapture noundef nonnull readonly align 4 dereferenceable(4) [[DOTTHREADID_TEMP_]], i32* noalias nocapture noundef nonnull readnone align 4 dereferenceable(4) [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX-DISABLED-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
; NVPTX-DISABLED-NEXT: ret void
; NVPTX-DISABLED: worker.exit:
; NVPTX-DISABLED-NEXT: ret void
;
;
; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__12
; NVPTX-DISABLED-SAME: (i32* noalias nocapture nofree noundef nonnull readonly align 4 dereferenceable(4) [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree nonnull readnone align 4 dereferenceable(4) [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX-DISABLED-NEXT: entry:
; NVPTX-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
; NVPTX-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x i8*], align 8
; NVPTX-DISABLED-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
; NVPTX-DISABLED-NEXT: call void @unknown_no_openmp() #[[ATTR9]]
; NVPTX-DISABLED-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTGLOBAL_TID_]], align 4
; NVPTX-DISABLED-NEXT: [[TMP1:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
; NVPTX-DISABLED-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* noundef @[[GLOB1]], i32 [[TMP0]], i32 noundef 1, i32 noundef -1, i32 noundef -1, i8* noundef bitcast (void (i32*, i32*)* @__omp_outlined__13 to i8*), i8* noundef bitcast (void (i16, i32)* @__omp_outlined__13_wrapper to i8*), i8** noundef [[TMP1]], i64 noundef 0)
; NVPTX-DISABLED-NEXT: [[TMP2:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS1]] to i8**
; NVPTX-DISABLED-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* noundef @[[GLOB1]], i32 [[TMP0]], i32 noundef 1, i32 noundef -1, i32 noundef -1, i8* noundef bitcast (void (i32*, i32*)* @__omp_outlined__14 to i8*), i8* noundef bitcast (void (i16, i32)* @__omp_outlined__14_wrapper to i8*), i8** noundef [[TMP2]], i64 noundef 0)
; NVPTX-DISABLED-NEXT: ret void
;
;
; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__13
; NVPTX-DISABLED-SAME: (i32* noalias nocapture nofree readnone [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree readnone [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX-DISABLED-NEXT: entry:
; NVPTX-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-DISABLED-NEXT: call void @p0() #[[ATTR10]]
; NVPTX-DISABLED-NEXT: ret void
;
;
; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__13_wrapper
; NVPTX-DISABLED-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
; NVPTX-DISABLED-NEXT: entry:
; NVPTX-DISABLED-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
; NVPTX-DISABLED-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
; NVPTX-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; NVPTX-DISABLED-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
; NVPTX-DISABLED-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
; NVPTX-DISABLED-NEXT: call void @__omp_outlined__13(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX-DISABLED-NEXT: ret void
;
;
; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__14
; NVPTX-DISABLED-SAME: (i32* noalias nocapture nofree readnone [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree readnone [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX-DISABLED-NEXT: entry:
; NVPTX-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-DISABLED-NEXT: call void @p1() #[[ATTR10]]
; NVPTX-DISABLED-NEXT: ret void
;
;
; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__14_wrapper
; NVPTX-DISABLED-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
; NVPTX-DISABLED-NEXT: entry:
; NVPTX-DISABLED-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
; NVPTX-DISABLED-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
; NVPTX-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; NVPTX-DISABLED-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
; NVPTX-DISABLED-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
; NVPTX-DISABLED-NEXT: call void @__omp_outlined__14(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX-DISABLED-NEXT: ret void
;
;
; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_simple_state_machine_interprocedural_nested_recursive_l92
; NVPTX-DISABLED-SAME: () #[[ATTR0]] {
; NVPTX-DISABLED-NEXT: entry:
; NVPTX-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; NVPTX-DISABLED-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
; NVPTX-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 true, i1 true)
; NVPTX-DISABLED-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
; NVPTX-DISABLED-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; NVPTX-DISABLED: user_code.entry:
; NVPTX-DISABLED-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) #[[ATTR3]]
; NVPTX-DISABLED-NEXT: call void @__omp_outlined__15(i32* noalias nocapture noundef nonnull readnone align 4 dereferenceable(4) [[DOTTHREADID_TEMP_]], i32* noalias nocapture noundef nonnull readnone align 4 dereferenceable(4) [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX-DISABLED-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
; NVPTX-DISABLED-NEXT: ret void
; NVPTX-DISABLED: worker.exit:
; NVPTX-DISABLED-NEXT: ret void
;
;
; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__15
; NVPTX-DISABLED-SAME: (i32* noalias nocapture nofree nonnull readnone align 4 dereferenceable(4) [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree nonnull readnone align 4 dereferenceable(4) [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX-DISABLED-NEXT: entry:
; NVPTX-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-DISABLED-NEXT: [[CALL:%.*]] = call i32 bitcast (i32 (...)* @omp_get_thread_num to i32 ()*)() #[[ATTR10]]
; NVPTX-DISABLED-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after.internalized(i32 [[CALL]]) #[[ATTR8]]
; NVPTX-DISABLED-NEXT: ret void
;
;
; NVPTX-DISABLED: Function Attrs: convergent noinline nounwind
; NVPTX-DISABLED-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after.internalized
; NVPTX-DISABLED-SAME: (i32 [[A:%.*]]) #[[ATTR1]] {
; NVPTX-DISABLED-NEXT: entry:
; NVPTX-DISABLED-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
; NVPTX-DISABLED-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
; NVPTX-DISABLED-NEXT: [[CMP:%.*]] = icmp eq i32 [[A]], 0
; NVPTX-DISABLED-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
; NVPTX-DISABLED: if.then:
; NVPTX-DISABLED-NEXT: br label [[RETURN:%.*]]
; NVPTX-DISABLED: if.end:
; NVPTX-DISABLED-NEXT: [[SUB:%.*]] = sub nsw i32 [[A]], 1
; NVPTX-DISABLED-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after.internalized(i32 [[SUB]]) #[[ATTR8]]
; NVPTX-DISABLED-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after_after.internalized() #[[ATTR8]]
; NVPTX-DISABLED-NEXT: br label [[RETURN]]
; NVPTX-DISABLED: return:
; NVPTX-DISABLED-NEXT: ret void
;
;
; NVPTX-DISABLED: Function Attrs: convergent noinline nounwind
; NVPTX-DISABLED-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after
; NVPTX-DISABLED-SAME: (i32 [[A:%.*]]) #[[ATTR1]] {
; NVPTX-DISABLED-NEXT: entry:
; NVPTX-DISABLED-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
; NVPTX-DISABLED-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
; NVPTX-DISABLED-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
; NVPTX-DISABLED-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP0]], 0
; NVPTX-DISABLED-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
; NVPTX-DISABLED: if.then:
; NVPTX-DISABLED-NEXT: br label [[RETURN:%.*]]
; NVPTX-DISABLED: if.end:
; NVPTX-DISABLED-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
; NVPTX-DISABLED-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP1]], 1
; NVPTX-DISABLED-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after(i32 [[SUB]]) #[[ATTR10]]
; NVPTX-DISABLED-NEXT: call void @simple_state_machine_interprocedural_nested_recursive_after_after() #[[ATTR10]]
; NVPTX-DISABLED-NEXT: br label [[RETURN]]
; NVPTX-DISABLED: return:
; NVPTX-DISABLED-NEXT: ret void
;
;
; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_offloading_14_a36502b_no_state_machine_weak_callee_l112
; NVPTX-DISABLED-SAME: () #[[ATTR0]] {
; NVPTX-DISABLED-NEXT: entry:
; NVPTX-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; NVPTX-DISABLED-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
; NVPTX-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i8 1, i1 true, i1 true)
; NVPTX-DISABLED-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
; NVPTX-DISABLED-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
; NVPTX-DISABLED: user_code.entry:
; NVPTX-DISABLED-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) #[[ATTR3]]
; NVPTX-DISABLED-NEXT: call void @__omp_outlined__16(i32* noalias nocapture noundef nonnull readnone align 4 dereferenceable(4) [[DOTTHREADID_TEMP_]], i32* noalias nocapture noundef nonnull readnone align 4 dereferenceable(4) [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX-DISABLED-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
; NVPTX-DISABLED-NEXT: ret void
; NVPTX-DISABLED: worker.exit:
; NVPTX-DISABLED-NEXT: ret void
;
;
; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__16
; NVPTX-DISABLED-SAME: (i32* noalias nocapture nofree nonnull readnone align 4 dereferenceable(4) [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree nonnull readnone align 4 dereferenceable(4) [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX-DISABLED-NEXT: entry:
; NVPTX-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-DISABLED-NEXT: call void @weak_callee_empty() #[[ATTR8]]
; NVPTX-DISABLED-NEXT: ret void
;
;
; NVPTX-DISABLED: Function Attrs: convergent noinline nounwind
; NVPTX-DISABLED-LABEL: define {{[^@]+}}@weak_callee_empty
; NVPTX-DISABLED-SAME: () #[[ATTR1]] {
; NVPTX-DISABLED-NEXT: entry:
; NVPTX-DISABLED-NEXT: ret void
;
;
; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__17
; NVPTX-DISABLED-SAME: (i32* noalias nocapture nofree readnone [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree readnone [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX-DISABLED-NEXT: entry:
; NVPTX-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-DISABLED-NEXT: call void @p0() #[[ATTR10]]
; NVPTX-DISABLED-NEXT: ret void
;
;
; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__17_wrapper
; NVPTX-DISABLED-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
; NVPTX-DISABLED-NEXT: entry:
; NVPTX-DISABLED-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
; NVPTX-DISABLED-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
; NVPTX-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; NVPTX-DISABLED-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
; NVPTX-DISABLED-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
; NVPTX-DISABLED-NEXT: call void @__omp_outlined__17(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX-DISABLED-NEXT: ret void
;
;
; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__18
; NVPTX-DISABLED-SAME: (i32* noalias nocapture nofree readnone [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree readnone [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX-DISABLED-NEXT: entry:
; NVPTX-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-DISABLED-NEXT: call void @p0() #[[ATTR10]]
; NVPTX-DISABLED-NEXT: ret void
;
;
; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__18_wrapper
; NVPTX-DISABLED-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
; NVPTX-DISABLED-NEXT: entry:
; NVPTX-DISABLED-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
; NVPTX-DISABLED-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
; NVPTX-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; NVPTX-DISABLED-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
; NVPTX-DISABLED-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
; NVPTX-DISABLED-NEXT: call void @__omp_outlined__18(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX-DISABLED-NEXT: ret void
;
;
; NVPTX-DISABLED: Function Attrs: convergent noinline nounwind
; NVPTX-DISABLED-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after_after.internalized
; NVPTX-DISABLED-SAME: () #[[ATTR1]] {
; NVPTX-DISABLED-NEXT: entry:
; NVPTX-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
; NVPTX-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* noundef @[[GLOB2]]) #[[ATTR3]]
; NVPTX-DISABLED-NEXT: [[TMP1:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
; NVPTX-DISABLED-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* noundef @[[GLOB2]], i32 [[TMP0]], i32 noundef 1, i32 noundef -1, i32 noundef -1, i8* noundef bitcast (void (i32*, i32*)* @__omp_outlined__19 to i8*), i8* noundef bitcast (void (i16, i32)* @__omp_outlined__19_wrapper to i8*), i8** noundef [[TMP1]], i64 noundef 0)
; NVPTX-DISABLED-NEXT: ret void
;
;
; NVPTX-DISABLED: Function Attrs: convergent noinline nounwind
; NVPTX-DISABLED-LABEL: define {{[^@]+}}@simple_state_machine_interprocedural_nested_recursive_after_after
; NVPTX-DISABLED-SAME: () #[[ATTR1]] {
; NVPTX-DISABLED-NEXT: entry:
; NVPTX-DISABLED-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
; NVPTX-DISABLED-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
; NVPTX-DISABLED-NEXT: [[TMP1:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
; NVPTX-DISABLED-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__19 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__19_wrapper to i8*), i8** [[TMP1]], i64 0)
; NVPTX-DISABLED-NEXT: ret void
;
;
; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__19
; NVPTX-DISABLED-SAME: (i32* noalias nocapture nofree readnone [[DOTGLOBAL_TID_:%.*]], i32* noalias nocapture nofree readnone [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
; NVPTX-DISABLED-NEXT: entry:
; NVPTX-DISABLED-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-DISABLED-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
; NVPTX-DISABLED-NEXT: call void @p0() #[[ATTR10]]
; NVPTX-DISABLED-NEXT: ret void
;
;
; NVPTX-DISABLED: Function Attrs: convergent noinline norecurse nounwind
; NVPTX-DISABLED-LABEL: define {{[^@]+}}@__omp_outlined__19_wrapper
; NVPTX-DISABLED-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
; NVPTX-DISABLED-NEXT: entry:
; NVPTX-DISABLED-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
; NVPTX-DISABLED-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
; NVPTX-DISABLED-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; NVPTX-DISABLED-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
; NVPTX-DISABLED-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
; NVPTX-DISABLED-NEXT: call void @__omp_outlined__19(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR3]]
; NVPTX-DISABLED-NEXT: ret void