// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// RUN: %clang_cc1 -verify -Wno-vla -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix CHECK-64
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify -Wno-vla %s -emit-llvm -o - | FileCheck %s --check-prefix CHECK-64
// RUN: %clang_cc1 -verify -Wno-vla -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix CHECK-32
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify -Wno-vla %s -emit-llvm -o - | FileCheck %s --check-prefix CHECK-32
// RUN: %clang_cc1 -verify -Wno-vla -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify -Wno-vla %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -verify -Wno-vla -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify -Wno-vla %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// Test target codegen - host bc file has to be created first.
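// Sketch of the two-step flow the RUN lines below exercise: the host compile
// emits LLVM bitcode (-emit-llvm-bc), and the device compile is then invoked
// with -fopenmp-is-target-device plus -fopenmp-host-ir-file-path pointing at
// that bitcode, so device codegen knows which target regions to emit.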
// RUN: %clang_cc1 -verify -Wno-vla -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -verify -Wno-vla -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefix TCHECK
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify -Wno-vla %s -emit-llvm -o - | FileCheck %s --check-prefix TCHECK
// RUN: %clang_cc1 -verify -Wno-vla -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc
// RUN: %clang_cc1 -verify -Wno-vla -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix TCHECK
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify -Wno-vla %s -emit-llvm -o - | FileCheck %s --check-prefix TCHECK
// RUN: %clang_cc1 -verify -Wno-vla -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -verify -Wno-vla -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck --check-prefix SIMD-ONLY1 %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify -Wno-vla %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY1 %s
// RUN: %clang_cc1 -verify -Wno-vla -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc
// RUN: %clang_cc1 -verify -Wno-vla -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck --check-prefix SIMD-ONLY1 %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify -Wno-vla %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY1 %s
// expected-no-diagnostics
// Check that the target registration code is emitted as a global Ctor.
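// Illustrative shape of such a registration constructor (a sketch only; the
// exact symbol name varies across compiler versions):
//   @llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }]
//     [{ i32, void ()*, i8* } { i32 0, void ()* @.omp_offloading.requires_reg, i8* null }]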
template<typename tx, typename ty>
TT<long long, char> d;
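// Three `target teams loop` constructs follow. Each carries depend clauses
// and no `nowait`, so (as the checks below show) each is expected to lower to
// a task that first waits on its dependences and then executes inline. The
// second construct is additionally guarded by if(target:a), and the third by
// if(0), which disables offload for that region entirely.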
#pragma omp target teams loop device(global + a) depend(in: global) depend(out: a, b, cn[4])
for (int i = 0; i < 10; ++i) {
#pragma omp target teams loop device(global + a) depend(inout: global, a, bn) if(target:a)
for (int i = 0; i < *plocal; ++i) {
#pragma omp target teams loop if(0) firstprivate(global) depend(out:global)
for (int i = 0; i < global; ++i) {
// Check that the offloading functions are emitted and that the arguments are
// correct and loaded correctly for the target regions in foo().
// Create stack storage and store the argument there.
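// For an i32 parameter the expected pattern is roughly:
//   %n.addr = alloca i32, align 4
//   store i32 %n, i32* %n.addr, align 4
// which is exactly what the [[N_ADDR]] lines below verify.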
// CHECK-64-LABEL: define {{[^@]+}}@_Z3fooi
// CHECK-64-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-64-NEXT: entry:
// CHECK-64-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK-64-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK-64-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK-64-NEXT: [[B:%.*]] = alloca [10 x float], align 4
// CHECK-64-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK-64-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK-64-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8
// CHECK-64-NEXT: [[__VLA_EXPR1:%.*]] = alloca i64, align 8
// CHECK-64-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8
// CHECK-64-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK-64-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 4
// CHECK-64-NEXT: [[DOTDEP_ARR_ADDR:%.*]] = alloca [4 x %struct.kmp_depend_info], align 8
// CHECK-64-NEXT: [[DEP_COUNTER_ADDR:%.*]] = alloca i64, align 8
// CHECK-64-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
// CHECK-64-NEXT: [[GLOBAL_CASTED:%.*]] = alloca i64, align 8
// CHECK-64-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x i8*], align 8
// CHECK-64-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x i8*], align 8
// CHECK-64-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x i8*], align 8
// CHECK-64-NEXT: [[AGG_CAPTURED4:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 8
// CHECK-64-NEXT: [[DOTDEP_ARR_ADDR5:%.*]] = alloca [3 x %struct.kmp_depend_info], align 8
// CHECK-64-NEXT: [[DEP_COUNTER_ADDR6:%.*]] = alloca i64, align 8
// CHECK-64-NEXT: [[AGG_CAPTURED7:%.*]] = alloca [[STRUCT_ANON_0]], align 8
// CHECK-64-NEXT: [[DOTDEP_ARR_ADDR8:%.*]] = alloca [3 x %struct.kmp_depend_info], align 8
// CHECK-64-NEXT: [[DEP_COUNTER_ADDR9:%.*]] = alloca i64, align 8
// CHECK-64-NEXT: [[GLOBAL_CASTED10:%.*]] = alloca i64, align 8
// CHECK-64-NEXT: [[AGG_CAPTURED12:%.*]] = alloca [[STRUCT_ANON_4:%.*]], align 4
// CHECK-64-NEXT: [[DOTDEP_ARR_ADDR13:%.*]] = alloca [1 x %struct.kmp_depend_info], align 8
// CHECK-64-NEXT: [[DEP_COUNTER_ADDR14:%.*]] = alloca i64, align 8
// CHECK-64-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3:[0-9]+]])
// CHECK-64-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK-64-NEXT: store i32 0, i32* [[A]], align 4
// CHECK-64-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK-64-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK-64-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK-64-NEXT: [[TMP3:%.*]] = call i8* @llvm.stacksave()
// CHECK-64-NEXT: store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
// CHECK-64-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP2]], align 4
// CHECK-64-NEXT: store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
// CHECK-64-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK-64-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
// CHECK-64-NEXT: [[TMP6:%.*]] = mul nuw i64 5, [[TMP5]]
// CHECK-64-NEXT: [[VLA1:%.*]] = alloca double, i64 [[TMP6]], align 8
// CHECK-64-NEXT: store i64 [[TMP5]], i64* [[__VLA_EXPR1]], align 8
// CHECK-64-NEXT: [[TMP7:%.*]] = load i32, i32* @global, align 4
// CHECK-64-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK-64-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP7]], [[TMP8]]
// CHECK-64-NEXT: store i32 [[ADD]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK-64-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 0
// CHECK-64-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK-64-NEXT: store i32 [[TMP10]], i32* [[TMP9]], align 4
// CHECK-64-NEXT: [[TMP11:%.*]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @[[GLOB3]], i32 [[TMP0]], i32 1, i64 40, i64 4, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*))
// CHECK-64-NEXT: [[TMP12:%.*]] = bitcast i8* [[TMP11]] to %struct.kmp_task_t_with_privates*
// CHECK-64-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP12]], i32 0, i32 0
// CHECK-64-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP13]], i32 0, i32 0
// CHECK-64-NEXT: [[TMP15:%.*]] = load i8*, i8** [[TMP14]], align 8
// CHECK-64-NEXT: [[TMP16:%.*]] = bitcast %struct.anon* [[AGG_CAPTURED]] to i8*
// CHECK-64-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP15]], i8* align 4 [[TMP16]], i64 4, i1 false)
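// Each depend clause operand is materialized as one %struct.kmp_depend_info
// record in a stack array; as the stores below show, the record holds the
// base address, the length in bytes, and a flags byte (1 for `in`, 3 for
// `out`/`inout`).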
// CHECK-64-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x %struct.kmp_depend_info], [4 x %struct.kmp_depend_info]* [[DOTDEP_ARR_ADDR]], i64 0, i64 0
// CHECK-64-NEXT: [[TMP18:%.*]] = getelementptr [[STRUCT_KMP_DEPEND_INFO:%.*]], %struct.kmp_depend_info* [[TMP17]], i64 0
// CHECK-64-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP18]], i32 0, i32 0
// CHECK-64-NEXT: store i64 ptrtoint (i32* @global to i64), i64* [[TMP19]], align 8
// CHECK-64-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP18]], i32 0, i32 1
// CHECK-64-NEXT: store i64 4, i64* [[TMP20]], align 8
// CHECK-64-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP18]], i32 0, i32 2
// CHECK-64-NEXT: store i8 1, i8* [[TMP21]], align 8
// CHECK-64-NEXT: [[TMP22:%.*]] = ptrtoint i32* [[A]] to i64
// CHECK-64-NEXT: [[TMP23:%.*]] = getelementptr [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP17]], i64 1
// CHECK-64-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP23]], i32 0, i32 0
// CHECK-64-NEXT: store i64 [[TMP22]], i64* [[TMP24]], align 8
// CHECK-64-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP23]], i32 0, i32 1
// CHECK-64-NEXT: store i64 4, i64* [[TMP25]], align 8
// CHECK-64-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP23]], i32 0, i32 2
// CHECK-64-NEXT: store i8 3, i8* [[TMP26]], align 8
// CHECK-64-NEXT: [[TMP27:%.*]] = ptrtoint [10 x float]* [[B]] to i64
// CHECK-64-NEXT: [[TMP28:%.*]] = getelementptr [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP17]], i64 2
// CHECK-64-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP28]], i32 0, i32 0
// CHECK-64-NEXT: store i64 [[TMP27]], i64* [[TMP29]], align 8
// CHECK-64-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP28]], i32 0, i32 1
// CHECK-64-NEXT: store i64 40, i64* [[TMP30]], align 8
// CHECK-64-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP28]], i32 0, i32 2
// CHECK-64-NEXT: store i8 3, i8* [[TMP31]], align 8
// CHECK-64-NEXT: [[TMP32:%.*]] = mul nsw i64 4, [[TMP5]]
// CHECK-64-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[VLA1]], i64 [[TMP32]]
// CHECK-64-NEXT: [[TMP33:%.*]] = mul nuw i64 [[TMP5]], 8
// CHECK-64-NEXT: [[TMP34:%.*]] = ptrtoint double* [[ARRAYIDX]] to i64
// CHECK-64-NEXT: [[TMP35:%.*]] = getelementptr [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP17]], i64 3
// CHECK-64-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP35]], i32 0, i32 0
// CHECK-64-NEXT: store i64 [[TMP34]], i64* [[TMP36]], align 8
// CHECK-64-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP35]], i32 0, i32 1
// CHECK-64-NEXT: store i64 [[TMP33]], i64* [[TMP37]], align 8
// CHECK-64-NEXT: [[TMP38:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP35]], i32 0, i32 2
// CHECK-64-NEXT: store i8 3, i8* [[TMP38]], align 8
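// Because the construct has no `nowait`, the generated code records the
// dependence count, waits on the dependences up front via
// __kmpc_omp_taskwait_deps_51, and then runs the task entry inline between
// the if0 begin/complete calls.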
// CHECK-64-NEXT: store i64 4, i64* [[DEP_COUNTER_ADDR]], align 8
// CHECK-64-NEXT: [[TMP39:%.*]] = bitcast %struct.kmp_depend_info* [[TMP17]] to i8*
// CHECK-64-NEXT: call void @__kmpc_omp_taskwait_deps_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP0]], i32 4, i8* [[TMP39]], i32 0, i8* null, i32 0)
// CHECK-64-NEXT: call void @__kmpc_omp_task_begin_if0(%struct.ident_t* @[[GLOB3]], i32 [[TMP0]], i8* [[TMP11]])
// CHECK-64-NEXT: [[TMP40:%.*]] = call i32 @.omp_task_entry.(i32 [[TMP0]], %struct.kmp_task_t_with_privates* [[TMP12]]) #[[ATTR3:[0-9]+]]
// CHECK-64-NEXT: call void @__kmpc_omp_task_complete_if0(%struct.ident_t* @[[GLOB3]], i32 [[TMP0]], i8* [[TMP11]])
// CHECK-64-NEXT: [[TMP41:%.*]] = load i32, i32* @global, align 4
// CHECK-64-NEXT: [[TMP42:%.*]] = load i32, i32* [[A]], align 4
// CHECK-64-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP41]], [[TMP42]]
// CHECK-64-NEXT: store i32 [[ADD3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK-64-NEXT: [[TMP43:%.*]] = load i64*, i64** @_ZZ3fooiE6plocal, align 8
// CHECK-64-NEXT: [[TMP44:%.*]] = load i32, i32* @global, align 4
// CHECK-64-NEXT: [[CONV:%.*]] = bitcast i64* [[GLOBAL_CASTED]] to i32*
// CHECK-64-NEXT: store i32 [[TMP44]], i32* [[CONV]], align 4
// CHECK-64-NEXT: [[TMP45:%.*]] = load i64, i64* [[GLOBAL_CASTED]], align 8
// CHECK-64-NEXT: [[TMP46:%.*]] = load i32, i32* [[A]], align 4
// CHECK-64-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP46]], 0
// CHECK-64-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
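// The if(target:a) clause splits codegen on a runtime test of `a`:
// omp_if.then sets up the offload base-pointer/pointer/mapper arrays and a
// task whose privates carry them for a possible kernel launch, while
// omp_if.else builds an equivalent task without the offload arrays so the
// region runs on the host.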
// CHECK-64: omp_if.then:
// CHECK-64-NEXT: [[TMP47:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK-64-NEXT: [[TMP48:%.*]] = bitcast i8** [[TMP47]] to i64**
// CHECK-64-NEXT: store i64* [[TMP43]], i64** [[TMP48]], align 8
// CHECK-64-NEXT: [[TMP49:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK-64-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i64**
// CHECK-64-NEXT: store i64* [[TMP43]], i64** [[TMP50]], align 8
// CHECK-64-NEXT: [[TMP51:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK-64-NEXT: store i8* null, i8** [[TMP51]], align 8
// CHECK-64-NEXT: [[TMP52:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK-64-NEXT: [[TMP53:%.*]] = bitcast i8** [[TMP52]] to i64*
// CHECK-64-NEXT: store i64 [[TMP45]], i64* [[TMP53]], align 8
// CHECK-64-NEXT: [[TMP54:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK-64-NEXT: [[TMP55:%.*]] = bitcast i8** [[TMP54]] to i64*
// CHECK-64-NEXT: store i64 [[TMP45]], i64* [[TMP55]], align 8
// CHECK-64-NEXT: [[TMP56:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK-64-NEXT: store i8* null, i8** [[TMP56]], align 8
// CHECK-64-NEXT: [[TMP57:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK-64-NEXT: [[TMP58:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK-64-NEXT: [[TMP59:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[AGG_CAPTURED4]], i32 0, i32 0
// CHECK-64-NEXT: [[TMP60:%.*]] = load i64*, i64** @_ZZ3fooiE6plocal, align 8
// CHECK-64-NEXT: store i64* [[TMP60]], i64** [[TMP59]], align 8
// CHECK-64-NEXT: [[TMP61:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[AGG_CAPTURED4]], i32 0, i32 1
// CHECK-64-NEXT: [[TMP62:%.*]] = load i32, i32* @global, align 4
// CHECK-64-NEXT: store i32 [[TMP62]], i32* [[TMP61]], align 8
// CHECK-64-NEXT: [[TMP63:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[AGG_CAPTURED4]], i32 0, i32 2
// CHECK-64-NEXT: [[TMP64:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK-64-NEXT: store i32 [[TMP64]], i32* [[TMP63]], align 4
// CHECK-64-NEXT: [[TMP65:%.*]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @[[GLOB3]], i32 [[TMP0]], i32 1, i64 104, i64 16, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates.1*)* @.omp_task_entry..6 to i32 (i32, i8*)*))
// CHECK-64-NEXT: [[TMP66:%.*]] = bitcast i8* [[TMP65]] to %struct.kmp_task_t_with_privates.1*
// CHECK-64-NEXT: [[TMP67:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_1:%.*]], %struct.kmp_task_t_with_privates.1* [[TMP66]], i32 0, i32 0
// CHECK-64-NEXT: [[TMP68:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP67]], i32 0, i32 0
// CHECK-64-NEXT: [[TMP69:%.*]] = load i8*, i8** [[TMP68]], align 8
// CHECK-64-NEXT: [[TMP70:%.*]] = bitcast %struct.anon.0* [[AGG_CAPTURED4]] to i8*
// CHECK-64-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP69]], i8* align 8 [[TMP70]], i64 16, i1 false)
// CHECK-64-NEXT: [[TMP71:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_1]], %struct.kmp_task_t_with_privates.1* [[TMP66]], i32 0, i32 1
// CHECK-64-NEXT: [[TMP72:%.*]] = bitcast i8* [[TMP69]] to %struct.anon.0*
// CHECK-64-NEXT: [[TMP73:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP71]], i32 0, i32 0
// CHECK-64-NEXT: [[TMP74:%.*]] = load i64*, i64** @_ZZ3fooiE6plocal, align 8
// CHECK-64-NEXT: store i64* [[TMP74]], i64** [[TMP73]], align 8
// CHECK-64-NEXT: [[TMP75:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP71]], i32 0, i32 1
// CHECK-64-NEXT: [[TMP76:%.*]] = bitcast [2 x i8*]* [[TMP75]] to i8*
// CHECK-64-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP57]] to i8*
// CHECK-64-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP76]], i8* align 8 [[TMP77]], i64 16, i1 false)
// CHECK-64-NEXT: [[TMP78:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP71]], i32 0, i32 2
// CHECK-64-NEXT: [[TMP79:%.*]] = bitcast [2 x i8*]* [[TMP78]] to i8*
// CHECK-64-NEXT: [[TMP80:%.*]] = bitcast i8** [[TMP58]] to i8*
// CHECK-64-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP79]], i8* align 8 [[TMP80]], i64 16, i1 false)
// CHECK-64-NEXT: [[TMP81:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP71]], i32 0, i32 3
// CHECK-64-NEXT: [[TMP82:%.*]] = bitcast [2 x i64]* [[TMP81]] to i8*
// CHECK-64-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP82]], i8* align 8 bitcast ([2 x i64]* @.offload_sizes to i8*), i64 16, i1 false)
// CHECK-64-NEXT: [[TMP83:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP71]], i32 0, i32 4
// CHECK-64-NEXT: [[TMP84:%.*]] = load i32, i32* @global, align 4
// CHECK-64-NEXT: store i32 [[TMP84]], i32* [[TMP83]], align 8
// CHECK-64-NEXT: [[TMP85:%.*]] = getelementptr inbounds [3 x %struct.kmp_depend_info], [3 x %struct.kmp_depend_info]* [[DOTDEP_ARR_ADDR5]], i64 0, i64 0
// CHECK-64-NEXT: [[TMP86:%.*]] = getelementptr [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP85]], i64 0
// CHECK-64-NEXT: [[TMP87:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP86]], i32 0, i32 0
// CHECK-64-NEXT: store i64 ptrtoint (i32* @global to i64), i64* [[TMP87]], align 8
// CHECK-64-NEXT: [[TMP88:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP86]], i32 0, i32 1
// CHECK-64-NEXT: store i64 4, i64* [[TMP88]], align 8
// CHECK-64-NEXT: [[TMP89:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP86]], i32 0, i32 2
// CHECK-64-NEXT: store i8 3, i8* [[TMP89]], align 8
// CHECK-64-NEXT: [[TMP90:%.*]] = ptrtoint i32* [[A]] to i64
// CHECK-64-NEXT: [[TMP91:%.*]] = getelementptr [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP85]], i64 1
// CHECK-64-NEXT: [[TMP92:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP91]], i32 0, i32 0
// CHECK-64-NEXT: store i64 [[TMP90]], i64* [[TMP92]], align 8
// CHECK-64-NEXT: [[TMP93:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP91]], i32 0, i32 1
// CHECK-64-NEXT: store i64 4, i64* [[TMP93]], align 8
// CHECK-64-NEXT: [[TMP94:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP91]], i32 0, i32 2
// CHECK-64-NEXT: store i8 3, i8* [[TMP94]], align 8
// CHECK-64-NEXT: [[TMP95:%.*]] = mul nuw i64 [[TMP2]], 4
// CHECK-64-NEXT: [[TMP96:%.*]] = ptrtoint float* [[VLA]] to i64
// CHECK-64-NEXT: [[TMP97:%.*]] = getelementptr [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP85]], i64 2
// CHECK-64-NEXT: [[TMP98:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP97]], i32 0, i32 0
// CHECK-64-NEXT: store i64 [[TMP96]], i64* [[TMP98]], align 8
// CHECK-64-NEXT: [[TMP99:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP97]], i32 0, i32 1
// CHECK-64-NEXT: store i64 [[TMP95]], i64* [[TMP99]], align 8
// CHECK-64-NEXT: [[TMP100:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP97]], i32 0, i32 2
// CHECK-64-NEXT: store i8 3, i8* [[TMP100]], align 8
// CHECK-64-NEXT: store i64 3, i64* [[DEP_COUNTER_ADDR6]], align 8
// CHECK-64-NEXT: [[TMP101:%.*]] = bitcast %struct.kmp_depend_info* [[TMP85]] to i8*
// CHECK-64-NEXT: call void @__kmpc_omp_taskwait_deps_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP0]], i32 3, i8* [[TMP101]], i32 0, i8* null, i32 0)
// CHECK-64-NEXT: call void @__kmpc_omp_task_begin_if0(%struct.ident_t* @[[GLOB3]], i32 [[TMP0]], i8* [[TMP65]])
// CHECK-64-NEXT: [[TMP102:%.*]] = call i32 @.omp_task_entry..6(i32 [[TMP0]], %struct.kmp_task_t_with_privates.1* [[TMP66]]) #[[ATTR3]]
// CHECK-64-NEXT: call void @__kmpc_omp_task_complete_if0(%struct.ident_t* @[[GLOB3]], i32 [[TMP0]], i8* [[TMP65]])
// CHECK-64-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK-64: omp_if.else:
// CHECK-64-NEXT: [[TMP103:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[AGG_CAPTURED7]], i32 0, i32 0
// CHECK-64-NEXT: [[TMP104:%.*]] = load i64*, i64** @_ZZ3fooiE6plocal, align 8
// CHECK-64-NEXT: store i64* [[TMP104]], i64** [[TMP103]], align 8
// CHECK-64-NEXT: [[TMP105:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[AGG_CAPTURED7]], i32 0, i32 1
// CHECK-64-NEXT: [[TMP106:%.*]] = load i32, i32* @global, align 4
// CHECK-64-NEXT: store i32 [[TMP106]], i32* [[TMP105]], align 8
// CHECK-64-NEXT: [[TMP107:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[AGG_CAPTURED7]], i32 0, i32 2
// CHECK-64-NEXT: [[TMP108:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK-64-NEXT: store i32 [[TMP108]], i32* [[TMP107]], align 4
// CHECK-64-NEXT: [[TMP109:%.*]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @[[GLOB3]], i32 [[TMP0]], i32 1, i64 56, i64 16, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates.2*)* @.omp_task_entry..9 to i32 (i32, i8*)*))
// CHECK-64-NEXT: [[TMP110:%.*]] = bitcast i8* [[TMP109]] to %struct.kmp_task_t_with_privates.2*
// CHECK-64-NEXT: [[TMP111:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_2:%.*]], %struct.kmp_task_t_with_privates.2* [[TMP110]], i32 0, i32 0
// CHECK-64-NEXT: [[TMP112:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP111]], i32 0, i32 0
// CHECK-64-NEXT: [[TMP113:%.*]] = load i8*, i8** [[TMP112]], align 8
// CHECK-64-NEXT: [[TMP114:%.*]] = bitcast %struct.anon.0* [[AGG_CAPTURED7]] to i8*
// CHECK-64-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP113]], i8* align 8 [[TMP114]], i64 16, i1 false)
// CHECK-64-NEXT: [[TMP115:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_2]], %struct.kmp_task_t_with_privates.2* [[TMP110]], i32 0, i32 1
// CHECK-64-NEXT: [[TMP116:%.*]] = bitcast i8* [[TMP113]] to %struct.anon.0*
// CHECK-64-NEXT: [[TMP117:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_3:%.*]], %struct..kmp_privates.t.3* [[TMP115]], i32 0, i32 0
// CHECK-64-NEXT: [[TMP118:%.*]] = load i64*, i64** @_ZZ3fooiE6plocal, align 8
// CHECK-64-NEXT: store i64* [[TMP118]], i64** [[TMP117]], align 8
// CHECK-64-NEXT: [[TMP119:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_3]], %struct..kmp_privates.t.3* [[TMP115]], i32 0, i32 1
// CHECK-64-NEXT: [[TMP120:%.*]] = load i32, i32* @global, align 4
// CHECK-64-NEXT: store i32 [[TMP120]], i32* [[TMP119]], align 8
// CHECK-64-NEXT: [[TMP121:%.*]] = getelementptr inbounds [3 x %struct.kmp_depend_info], [3 x %struct.kmp_depend_info]* [[DOTDEP_ARR_ADDR8]], i64 0, i64 0
// CHECK-64-NEXT: [[TMP122:%.*]] = getelementptr [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP121]], i64 0
// CHECK-64-NEXT: [[TMP123:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP122]], i32 0, i32 0
// CHECK-64-NEXT: store i64 ptrtoint (i32* @global to i64), i64* [[TMP123]], align 8
// CHECK-64-NEXT: [[TMP124:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP122]], i32 0, i32 1
// CHECK-64-NEXT: store i64 4, i64* [[TMP124]], align 8
// CHECK-64-NEXT: [[TMP125:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP122]], i32 0, i32 2
// CHECK-64-NEXT: store i8 3, i8* [[TMP125]], align 8
// CHECK-64-NEXT: [[TMP126:%.*]] = ptrtoint i32* [[A]] to i64
// CHECK-64-NEXT: [[TMP127:%.*]] = getelementptr [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP121]], i64 1
// CHECK-64-NEXT: [[TMP128:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP127]], i32 0, i32 0
// CHECK-64-NEXT: store i64 [[TMP126]], i64* [[TMP128]], align 8
// CHECK-64-NEXT: [[TMP129:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP127]], i32 0, i32 1
// CHECK-64-NEXT: store i64 4, i64* [[TMP129]], align 8
// CHECK-64-NEXT: [[TMP130:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP127]], i32 0, i32 2
// CHECK-64-NEXT: store i8 3, i8* [[TMP130]], align 8
// CHECK-64-NEXT: [[TMP131:%.*]] = mul nuw i64 [[TMP2]], 4
// CHECK-64-NEXT: [[TMP132:%.*]] = ptrtoint float* [[VLA]] to i64
// CHECK-64-NEXT: [[TMP133:%.*]] = getelementptr [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP121]], i64 2
// CHECK-64-NEXT: [[TMP134:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP133]], i32 0, i32 0
// CHECK-64-NEXT: store i64 [[TMP132]], i64* [[TMP134]], align 8
// CHECK-64-NEXT: [[TMP135:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP133]], i32 0, i32 1
// CHECK-64-NEXT: store i64 [[TMP131]], i64* [[TMP135]], align 8
// CHECK-64-NEXT: [[TMP136:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP133]], i32 0, i32 2
// CHECK-64-NEXT: store i8 3, i8* [[TMP136]], align 8
// CHECK-64-NEXT: store i64 3, i64* [[DEP_COUNTER_ADDR9]], align 8
// CHECK-64-NEXT: [[TMP137:%.*]] = bitcast %struct.kmp_depend_info* [[TMP121]] to i8*
// CHECK-64-NEXT: call void @__kmpc_omp_taskwait_deps_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP0]], i32 3, i8* [[TMP137]], i32 0, i8* null, i32 0)
// CHECK-64-NEXT: call void @__kmpc_omp_task_begin_if0(%struct.ident_t* @[[GLOB3]], i32 [[TMP0]], i8* [[TMP109]])
// CHECK-64-NEXT: [[TMP138:%.*]] = call i32 @.omp_task_entry..9(i32 [[TMP0]], %struct.kmp_task_t_with_privates.2* [[TMP110]]) #[[ATTR3]]
// CHECK-64-NEXT: call void @__kmpc_omp_task_complete_if0(%struct.ident_t* @[[GLOB3]], i32 [[TMP0]], i8* [[TMP109]])
// CHECK-64-NEXT: br label [[OMP_IF_END]]
// CHECK-64: omp_if.end:
// CHECK-64-NEXT: [[TMP139:%.*]] = load i32, i32* @global, align 4
// CHECK-64-NEXT: [[CONV11:%.*]] = bitcast i64* [[GLOBAL_CASTED10]] to i32*
// CHECK-64-NEXT: store i32 [[TMP139]], i32* [[CONV11]], align 4
// CHECK-64-NEXT: [[TMP140:%.*]] = load i64, i64* [[GLOBAL_CASTED10]], align 8
// CHECK-64-NEXT: [[TMP141:%.*]] = getelementptr inbounds [[STRUCT_ANON_4]], %struct.anon.4* [[AGG_CAPTURED12]], i32 0, i32 0
// CHECK-64-NEXT: [[TMP142:%.*]] = load i32, i32* @global, align 4
// CHECK-64-NEXT: store i32 [[TMP142]], i32* [[TMP141]], align 4
// CHECK-64-NEXT: [[TMP143:%.*]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @[[GLOB3]], i32 [[TMP0]], i32 1, i64 48, i64 4, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates.5*)* @.omp_task_entry..14 to i32 (i32, i8*)*))
// CHECK-64-NEXT: [[TMP144:%.*]] = bitcast i8* [[TMP143]] to %struct.kmp_task_t_with_privates.5*
// CHECK-64-NEXT: [[TMP145:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_5:%.*]], %struct.kmp_task_t_with_privates.5* [[TMP144]], i32 0, i32 0
// CHECK-64-NEXT: [[TMP146:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP145]], i32 0, i32 0
// CHECK-64-NEXT: [[TMP147:%.*]] = load i8*, i8** [[TMP146]], align 8
// CHECK-64-NEXT: [[TMP148:%.*]] = bitcast %struct.anon.4* [[AGG_CAPTURED12]] to i8*
// CHECK-64-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP147]], i8* align 4 [[TMP148]], i64 4, i1 false)
// CHECK-64-NEXT: [[TMP149:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_5]], %struct.kmp_task_t_with_privates.5* [[TMP144]], i32 0, i32 1
// CHECK-64-NEXT: [[TMP150:%.*]] = bitcast i8* [[TMP147]] to %struct.anon.4*
// CHECK-64-NEXT: [[TMP151:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_6:%.*]], %struct..kmp_privates.t.6* [[TMP149]], i32 0, i32 0
// CHECK-64-NEXT: [[TMP152:%.*]] = load i32, i32* @global, align 4
// CHECK-64-NEXT: store i32 [[TMP152]], i32* [[TMP151]], align 8
// CHECK-64-NEXT: [[TMP153:%.*]] = getelementptr inbounds [1 x %struct.kmp_depend_info], [1 x %struct.kmp_depend_info]* [[DOTDEP_ARR_ADDR13]], i64 0, i64 0
// CHECK-64-NEXT: [[TMP154:%.*]] = getelementptr [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP153]], i64 0
// CHECK-64-NEXT: [[TMP155:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP154]], i32 0, i32 0
// CHECK-64-NEXT: store i64 ptrtoint (i32* @global to i64), i64* [[TMP155]], align 8
// CHECK-64-NEXT: [[TMP156:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP154]], i32 0, i32 1
// CHECK-64-NEXT: store i64 4, i64* [[TMP156]], align 8
// CHECK-64-NEXT: [[TMP157:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP154]], i32 0, i32 2
// CHECK-64-NEXT: store i8 3, i8* [[TMP157]], align 8
// CHECK-64-NEXT: store i64 1, i64* [[DEP_COUNTER_ADDR14]], align 8
// CHECK-64-NEXT: [[TMP158:%.*]] = bitcast %struct.kmp_depend_info* [[TMP153]] to i8*
// CHECK-64-NEXT: call void @__kmpc_omp_taskwait_deps_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP0]], i32 1, i8* [[TMP158]], i32 0, i8* null, i32 0)
// CHECK-64-NEXT: call void @__kmpc_omp_task_begin_if0(%struct.ident_t* @[[GLOB3]], i32 [[TMP0]], i8* [[TMP143]])
// CHECK-64-NEXT: [[TMP159:%.*]] = call i32 @.omp_task_entry..14(i32 [[TMP0]], %struct.kmp_task_t_with_privates.5* [[TMP144]]) #[[ATTR3]]
// CHECK-64-NEXT: call void @__kmpc_omp_task_complete_if0(%struct.ident_t* @[[GLOB3]], i32 [[TMP0]], i8* [[TMP143]])
// CHECK-64-NEXT: [[TMP160:%.*]] = load i32, i32* [[A]], align 4
// CHECK-64-NEXT: [[TMP161:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK-64-NEXT: call void @llvm.stackrestore(i8* [[TMP161]])
// CHECK-64-NEXT: ret i32 [[TMP160]]
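// The outlined offload entry below corresponds to the construct from source
// line 66 (the _l66 suffix); it takes no arguments and simply forks the
// teams region.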
// CHECK-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l66
// CHECK-64-SAME: () #[[ATTR2:[0-9]+]] {
// CHECK-64-NEXT: entry:
// CHECK-64-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK-64-NEXT: ret void
// CHECK-64-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK-64-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
// CHECK-64-NEXT: entry:
// CHECK-64-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK-64-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK-64-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-64-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-64-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK-64-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK-64-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK-64-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK-64-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-64-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-64-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK-64-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK-64-NEXT: store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK-64-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK-64-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK-64-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-64-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK-64-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK-64-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK-64-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
// CHECK-64-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK-64: cond.true:
// CHECK-64-NEXT: br label [[COND_END:%.*]]
// CHECK-64: cond.false:
// CHECK-64-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK-64-NEXT: br label [[COND_END]]
// CHECK-64: cond.end:
// CHECK-64-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK-64-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK-64-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK-64-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK-64-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK-64: omp.inner.for.cond:
// CHECK-64-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK-64-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK-64-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK-64-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK-64: omp.inner.for.body:
// CHECK-64-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK-64-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
// CHECK-64-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK-64-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
// CHECK-64-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP8]], i64 [[TMP10]])
// CHECK-64-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK-64: omp.inner.for.inc:
// CHECK-64-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK-64-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK-64-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
// CHECK-64-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK-64-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK-64: omp.inner.for.end:
// CHECK-64-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK-64: omp.loop.exit:
// CHECK-64-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP1]])
// CHECK-64-NEXT: ret void
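// The teams-level function above handles the distribute part of the loop;
// each chunk's bounds are zero-extended and handed to the parallel outlined
// function below via __kmpc_fork_call.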
// CHECK-64-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK-64-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] {
// CHECK-64-NEXT: entry:
// CHECK-64-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK-64-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK-64-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK-64-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK-64-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-64-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-64-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK-64-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK-64-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK-64-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK-64-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-64-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-64-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK-64-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK-64-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK-64-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK-64-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4
// CHECK-64-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK-64-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
// CHECK-64-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK-64-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK-64-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
// CHECK-64-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
// CHECK-64-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK-64-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK-64-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-64-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK-64-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK-64-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK-64-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 9
// CHECK-64-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK-64: cond.true:
// CHECK-64-NEXT: br label [[COND_END:%.*]]
// CHECK-64: cond.false:
// CHECK-64-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK-64-NEXT: br label [[COND_END]]
// CHECK-64: cond.end:
// CHECK-64-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// CHECK-64-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK-64-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK-64-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4
// CHECK-64-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK-64: omp.inner.for.cond:
// CHECK-64-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK-64-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK-64-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
// CHECK-64-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK-64: omp.inner.for.body:
// CHECK-64-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK-64-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
// CHECK-64-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-64-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK-64-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK-64: omp.body.continue:
// CHECK-64-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK-64: omp.inner.for.inc:
// CHECK-64-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK-64-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK-64-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4
// CHECK-64-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK-64: omp.inner.for.end:
// CHECK-64-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK-64: omp.loop.exit:
// CHECK-64-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]])
// CHECK-64-NEXT: ret void
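// The task entry below does the actual launch for the first construct: it
// fills in a __tgt_kernel_arguments structure, calls __tgt_target_kernel,
// and falls back to calling the host version of the region if the launch
// reports failure.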
// CHECK-64-LABEL: define {{[^@]+}}@.omp_task_entry.
// CHECK-64-SAME: (i32 noundef signext [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias noundef [[TMP1:%.*]]) #[[ATTR4:[0-9]+]] {
// CHECK-64-NEXT: entry:
// CHECK-64-NEXT: [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
// CHECK-64-NEXT: [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 8
// CHECK-64-NEXT: [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK-64-NEXT: [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 8
// CHECK-64-NEXT: [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK-64-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 8
// CHECK-64-NEXT: [[KERNEL_ARGS_I:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK-64-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4
// CHECK-64-NEXT: [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 8
// CHECK-64-NEXT: store i32 [[TMP0]], i32* [[DOTADDR]], align 4
// CHECK-64-NEXT: store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
// CHECK-64-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
// CHECK-64-NEXT: [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
// CHECK-64-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0
// CHECK-64-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
// CHECK-64-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
// CHECK-64-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK-64-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
// CHECK-64-NEXT: [[TMP9:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
// CHECK-64-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META7:![0-9]+]])
// CHECK-64-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]])
// CHECK-64-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META12:![0-9]+]])
// CHECK-64-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META14:![0-9]+]])
// CHECK-64-NEXT: store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !16
// CHECK-64-NEXT: store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 8, !noalias !16
// CHECK-64-NEXT: store i8* null, i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !16
// CHECK-64-NEXT: store void (i8*, ...)* null, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !16
// CHECK-64-NEXT: store i8* [[TMP9]], i8** [[DOTTASK_T__ADDR_I]], align 8, !noalias !16
// CHECK-64-NEXT: store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !16
// CHECK-64-NEXT: [[TMP10:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !16
// CHECK-64-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP10]], i32 0, i32 0
// CHECK-64-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
// CHECK-64-NEXT: [[TMP13:%.*]] = sext i32 [[TMP12]] to i64
// CHECK-64-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 0
// CHECK-64-NEXT: store i32 2, i32* [[TMP14]], align 4, !noalias !16
// CHECK-64-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 1
// CHECK-64-NEXT: store i32 0, i32* [[TMP15]], align 4, !noalias !16
// CHECK-64-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 2
// CHECK-64-NEXT: store i8** null, i8*** [[TMP16]], align 8, !noalias !16
// CHECK-64-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 3
// CHECK-64-NEXT: store i8** null, i8*** [[TMP17]], align 8, !noalias !16
// CHECK-64-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 4
// CHECK-64-NEXT: store i64* null, i64** [[TMP18]], align 8, !noalias !16
// CHECK-64-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 5
// CHECK-64-NEXT: store i64* null, i64** [[TMP19]], align 8, !noalias !16
// CHECK-64-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 6
// CHECK-64-NEXT: store i8** null, i8*** [[TMP20]], align 8, !noalias !16
// CHECK-64-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 7
// CHECK-64-NEXT: store i8** null, i8*** [[TMP21]], align 8, !noalias !16
// CHECK-64-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 8
// CHECK-64-NEXT: store i64 10, i64* [[TMP22]], align 8, !noalias !16
// CHECK-64-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 9
// CHECK-64-NEXT: store i64 0, i64* [[TMP23]], align 8, !noalias !16
// CHECK-64-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 10
// CHECK-64-NEXT: store [3 x i32] zeroinitializer, [3 x i32]* [[TMP24]], align 4, !noalias !16
// CHECK-64-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 11
// CHECK-64-NEXT: store [3 x i32] zeroinitializer, [3 x i32]* [[TMP25]], align 4, !noalias !16
// CHECK-64-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 12
// CHECK-64-NEXT: store i32 0, i32* [[TMP26]], align 4, !noalias !16
// CHECK-64-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB3]], i64 [[TMP13]], i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l66.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]])
// CHECK-64-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
// CHECK-64-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__2_EXIT:%.*]]
// CHECK-64: omp_offload.failed.i:
// CHECK-64-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l66() #[[ATTR3]]
// CHECK-64-NEXT: br label [[DOTOMP_OUTLINED__2_EXIT]]
// CHECK-64: .omp_outlined..2.exit:
// CHECK-64-NEXT: ret i32 0
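// The offload entry for the construct from source line 76 (_l76) receives
// the captured `plocal` pointer and `global` value and forwards them to the
// teams region.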
// CHECK-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l76
// CHECK-64-SAME: (i64* noundef [[PLOCAL:%.*]], i64 noundef [[GLOBAL:%.*]]) #[[ATTR2]] {
// CHECK-64-NEXT: entry:
// CHECK-64-NEXT: [[PLOCAL_ADDR:%.*]] = alloca i64*, align 8
// CHECK-64-NEXT: [[GLOBAL_ADDR:%.*]] = alloca i64, align 8
// CHECK-64-NEXT: [[GLOBAL_CASTED:%.*]] = alloca i64, align 8
// CHECK-64-NEXT: store i64* [[PLOCAL]], i64** [[PLOCAL_ADDR]], align 8
// CHECK-64-NEXT: store i64 [[GLOBAL]], i64* [[GLOBAL_ADDR]], align 8
// CHECK-64-NEXT: [[CONV:%.*]] = bitcast i64* [[GLOBAL_ADDR]] to i32*
// CHECK-64-NEXT: [[TMP0:%.*]] = load i64*, i64** [[PLOCAL_ADDR]], align 8
// CHECK-64-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
// CHECK-64-NEXT: [[CONV1:%.*]] = bitcast i64* [[GLOBAL_CASTED]] to i32*
// CHECK-64-NEXT: store i32 [[TMP1]], i32* [[CONV1]], align 4
// CHECK-64-NEXT: [[TMP2:%.*]] = load i64, i64* [[GLOBAL_CASTED]], align 8
// CHECK-64-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64*, i64)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64* [[TMP0]], i64 [[TMP2]])
// CHECK-64-NEXT: ret void
// CHECK-64-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK-64-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64* noundef [[PLOCAL:%.*]], i64 noundef [[GLOBAL:%.*]]) #[[ATTR2]] {
// CHECK-64-NEXT: entry:
// CHECK-64-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK-64-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK-64-NEXT: [[PLOCAL_ADDR:%.*]] = alloca i64*, align 8
// CHECK-64-NEXT: [[GLOBAL_ADDR:%.*]] = alloca i64, align 8
// CHECK-64-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-64-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-64-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i64, align 8
// CHECK-64-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK-64-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-64-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK-64-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK-64-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK-64-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK-64-NEXT: [[I4:%.*]] = alloca i32, align 4
// CHECK-64-NEXT: [[GLOBAL_CASTED:%.*]] = alloca i64, align 8
// CHECK-64-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-64-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK-64-NEXT: store i64* [[PLOCAL]], i64** [[PLOCAL_ADDR]], align 8
// CHECK-64-NEXT: store i64 [[GLOBAL]], i64* [[GLOBAL_ADDR]], align 8
// CHECK-64-NEXT: [[CONV:%.*]] = bitcast i64* [[GLOBAL_ADDR]] to i32*
// CHECK-64-NEXT: [[TMP0:%.*]] = load i64*, i64** [[PLOCAL_ADDR]], align 8
// CHECK-64-NEXT: [[TMP1:%.*]] = load i64, i64* [[TMP0]], align 8
// CHECK-64-NEXT: store i64 [[TMP1]], i64* [[DOTCAPTURE_EXPR_]], align 8
// CHECK-64-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_]], align 8
// CHECK-64-NEXT: [[SUB:%.*]] = sub nsw i64 [[TMP2]], 0
// CHECK-64-NEXT: [[DIV:%.*]] = sdiv i64 [[SUB]], 1
// CHECK-64-NEXT: [[CONV2:%.*]] = trunc i64 [[DIV]] to i32
// CHECK-64-NEXT: [[SUB3:%.*]] = sub nsw i32 [[CONV2]], 1
// CHECK-64-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK-64-NEXT: store i32 0, i32* [[I]], align 4
// CHECK-64-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_]], align 8
// CHECK-64-NEXT: [[CMP:%.*]] = icmp slt i64 0, [[TMP3]]
// CHECK-64-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK-64: omp.precond.then:
// CHECK-64-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK-64-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK-64-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK-64-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK-64-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK-64-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-64-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK-64-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK-64-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK-64-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK-64-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
// CHECK-64-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK-64: cond.true:
// CHECK-64-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK-64-NEXT: br label [[COND_END:%.*]]
// CHECK-64: cond.false:
// CHECK-64-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK-64-NEXT: br label [[COND_END]]
// CHECK-64: cond.end:
// CHECK-64-NEXT: [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
// CHECK-64-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK-64-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK-64-NEXT: store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
// CHECK-64-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK-64: omp.inner.for.cond:
// CHECK-64-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK-64-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK-64-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
// CHECK-64-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK-64: omp.inner.for.body:
// CHECK-64-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK-64-NEXT: [[TMP15:%.*]] = zext i32 [[TMP14]] to i64
// CHECK-64-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK-64-NEXT: [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
// CHECK-64-NEXT: [[TMP18:%.*]] = load i64*, i64** [[PLOCAL_ADDR]], align 8
// CHECK-64-NEXT: [[TMP19:%.*]] = load i32, i32* [[CONV]], align 4
// CHECK-64-NEXT: [[CONV7:%.*]] = bitcast i64* [[GLOBAL_CASTED]] to i32*
// CHECK-64-NEXT: store i32 [[TMP19]], i32* [[CONV7]], align 4
// CHECK-64-NEXT: [[TMP20:%.*]] = load i64, i64* [[GLOBAL_CASTED]], align 8
// CHECK-64-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64*, i64)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP15]], i64 [[TMP17]], i64* [[TMP18]], i64 [[TMP20]])
// CHECK-64-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK-64: omp.inner.for.inc:
// CHECK-64-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK-64-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK-64-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
// CHECK-64-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK-64-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK-64: omp.inner.for.end:
// CHECK-64-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK-64: omp.loop.exit:
// CHECK-64-NEXT: [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-64-NEXT: [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
// CHECK-64-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP24]])
// CHECK-64-NEXT: br label [[OMP_PRECOND_END]]
// CHECK-64: omp.precond.end:
// CHECK-64-NEXT: ret void
709 // CHECK-64-LABEL: define {{[^@]+}}@.omp_outlined..4
710 // CHECK-64-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i64* noundef [[PLOCAL:%.*]], i64 noundef [[GLOBAL:%.*]]) #[[ATTR2]] {
711 // CHECK-64-NEXT: entry:
712 // CHECK-64-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
713 // CHECK-64-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
714 // CHECK-64-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
715 // CHECK-64-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
716 // CHECK-64-NEXT: [[PLOCAL_ADDR:%.*]] = alloca i64*, align 8
717 // CHECK-64-NEXT: [[GLOBAL_ADDR:%.*]] = alloca i64, align 8
718 // CHECK-64-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
719 // CHECK-64-NEXT: [[TMP:%.*]] = alloca i32, align 4
720 // CHECK-64-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i64, align 8
721 // CHECK-64-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
722 // CHECK-64-NEXT: [[I:%.*]] = alloca i32, align 4
723 // CHECK-64-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
724 // CHECK-64-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
725 // CHECK-64-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
726 // CHECK-64-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
727 // CHECK-64-NEXT: [[I6:%.*]] = alloca i32, align 4
728 // CHECK-64-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
729 // CHECK-64-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
730 // CHECK-64-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
731 // CHECK-64-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
732 // CHECK-64-NEXT: store i64* [[PLOCAL]], i64** [[PLOCAL_ADDR]], align 8
733 // CHECK-64-NEXT: store i64 [[GLOBAL]], i64* [[GLOBAL_ADDR]], align 8
734 // CHECK-64-NEXT: [[CONV:%.*]] = bitcast i64* [[GLOBAL_ADDR]] to i32*
735 // CHECK-64-NEXT: [[TMP0:%.*]] = load i64*, i64** [[PLOCAL_ADDR]], align 8
736 // CHECK-64-NEXT: [[TMP1:%.*]] = load i64, i64* [[TMP0]], align 8
737 // CHECK-64-NEXT: store i64 [[TMP1]], i64* [[DOTCAPTURE_EXPR_]], align 8
738 // CHECK-64-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_]], align 8
739 // CHECK-64-NEXT: [[SUB:%.*]] = sub nsw i64 [[TMP2]], 0
740 // CHECK-64-NEXT: [[DIV:%.*]] = sdiv i64 [[SUB]], 1
741 // CHECK-64-NEXT: [[CONV2:%.*]] = trunc i64 [[DIV]] to i32
742 // CHECK-64-NEXT: [[SUB3:%.*]] = sub nsw i32 [[CONV2]], 1
743 // CHECK-64-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_1]], align 4
744 // CHECK-64-NEXT: store i32 0, i32* [[I]], align 4
745 // CHECK-64-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_]], align 8
746 // CHECK-64-NEXT: [[CMP:%.*]] = icmp slt i64 0, [[TMP3]]
747 // CHECK-64-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
748 // CHECK-64: omp.precond.then:
749 // CHECK-64-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
750 // CHECK-64-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
751 // CHECK-64-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
752 // CHECK-64-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
753 // CHECK-64-NEXT: [[CONV4:%.*]] = trunc i64 [[TMP5]] to i32
754 // CHECK-64-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
755 // CHECK-64-NEXT: [[CONV5:%.*]] = trunc i64 [[TMP6]] to i32
756 // CHECK-64-NEXT: store i32 [[CONV4]], i32* [[DOTOMP_LB]], align 4
757 // CHECK-64-NEXT: store i32 [[CONV5]], i32* [[DOTOMP_UB]], align 4
758 // CHECK-64-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
759 // CHECK-64-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
760 // CHECK-64-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
761 // CHECK-64-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
762 // CHECK-64-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
763 // CHECK-64-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
764 // CHECK-64-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
765 // CHECK-64-NEXT: [[CMP7:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
766 // CHECK-64-NEXT: br i1 [[CMP7]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
767 // CHECK-64: cond.true:
768 // CHECK-64-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
769 // CHECK-64-NEXT: br label [[COND_END:%.*]]
770 // CHECK-64: cond.false:
771 // CHECK-64-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
772 // CHECK-64-NEXT: br label [[COND_END]]
773 // CHECK-64: cond.end:
774 // CHECK-64-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
775 // CHECK-64-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
776 // CHECK-64-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
777 // CHECK-64-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
778 // CHECK-64-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
779 // CHECK-64: omp.inner.for.cond:
780 // CHECK-64-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
781 // CHECK-64-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
782 // CHECK-64-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
783 // CHECK-64-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
784 // CHECK-64: omp.inner.for.body:
785 // CHECK-64-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
786 // CHECK-64-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP16]], 1
787 // CHECK-64-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
788 // CHECK-64-NEXT: store i32 [[ADD]], i32* [[I6]], align 4
789 // CHECK-64-NEXT: [[TMP17:%.*]] = load i32, i32* [[CONV]], align 4
790 // CHECK-64-NEXT: [[CONV9:%.*]] = sext i32 [[TMP17]] to i64
791 // CHECK-64-NEXT: [[TMP18:%.*]] = load i64*, i64** [[PLOCAL_ADDR]], align 8
792 // CHECK-64-NEXT: store i64 [[CONV9]], i64* [[TMP18]], align 8
793 // CHECK-64-NEXT: [[TMP19:%.*]] = load i32, i32* [[CONV]], align 4
794 // CHECK-64-NEXT: store i32 [[TMP19]], i32* @_ZZ3fooiE6local1, align 4
795 // CHECK-64-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
796 // CHECK-64: omp.body.continue:
797 // CHECK-64-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
798 // CHECK-64: omp.inner.for.inc:
799 // CHECK-64-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
800 // CHECK-64-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP20]], 1
801 // CHECK-64-NEXT: store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4
802 // CHECK-64-NEXT: br label [[OMP_INNER_FOR_COND]]
803 // CHECK-64: omp.inner.for.end:
804 // CHECK-64-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
805 // CHECK-64: omp.loop.exit:
806 // CHECK-64-NEXT: [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
807 // CHECK-64-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
808 // CHECK-64-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP22]])
809 // CHECK-64-NEXT: br label [[OMP_PRECOND_END]]
810 // CHECK-64: omp.precond.end:
811 // CHECK-64-NEXT: ret void
812 // CHECK-64-LABEL: define {{[^@]+}}@.omp_task_privates_map.
813 // CHECK-64-SAME: (%struct..kmp_privates.t* noalias noundef [[TMP0:%.*]], i64*** noalias noundef [[TMP1:%.*]], i32** noalias noundef [[TMP2:%.*]], [2 x i8*]** noalias noundef [[TMP3:%.*]], [2 x i8*]** noalias noundef [[TMP4:%.*]], [2 x i64]** noalias noundef [[TMP5:%.*]]) #[[ATTR7:[0-9]+]] {
814 // CHECK-64-NEXT: entry:
815 // CHECK-64-NEXT: [[DOTADDR:%.*]] = alloca %struct..kmp_privates.t*, align 8
816 // CHECK-64-NEXT: [[DOTADDR1:%.*]] = alloca i64***, align 8
817 // CHECK-64-NEXT: [[DOTADDR2:%.*]] = alloca i32**, align 8
818 // CHECK-64-NEXT: [[DOTADDR3:%.*]] = alloca [2 x i8*]**, align 8
819 // CHECK-64-NEXT: [[DOTADDR4:%.*]] = alloca [2 x i8*]**, align 8
820 // CHECK-64-NEXT: [[DOTADDR5:%.*]] = alloca [2 x i64]**, align 8
821 // CHECK-64-NEXT: store %struct..kmp_privates.t* [[TMP0]], %struct..kmp_privates.t** [[DOTADDR]], align 8
822 // CHECK-64-NEXT: store i64*** [[TMP1]], i64**** [[DOTADDR1]], align 8
823 // CHECK-64-NEXT: store i32** [[TMP2]], i32*** [[DOTADDR2]], align 8
824 // CHECK-64-NEXT: store [2 x i8*]** [[TMP3]], [2 x i8*]*** [[DOTADDR3]], align 8
825 // CHECK-64-NEXT: store [2 x i8*]** [[TMP4]], [2 x i8*]*** [[DOTADDR4]], align 8
826 // CHECK-64-NEXT: store [2 x i64]** [[TMP5]], [2 x i64]*** [[DOTADDR5]], align 8
827 // CHECK-64-NEXT: [[TMP6:%.*]] = load %struct..kmp_privates.t*, %struct..kmp_privates.t** [[DOTADDR]], align 8
828 // CHECK-64-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP6]], i32 0, i32 0
829 // CHECK-64-NEXT: [[TMP8:%.*]] = load i64***, i64**** [[DOTADDR1]], align 8
830 // CHECK-64-NEXT: store i64** [[TMP7]], i64*** [[TMP8]], align 8
831 // CHECK-64-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP6]], i32 0, i32 1
832 // CHECK-64-NEXT: [[TMP10:%.*]] = load [2 x i8*]**, [2 x i8*]*** [[DOTADDR3]], align 8
833 // CHECK-64-NEXT: store [2 x i8*]* [[TMP9]], [2 x i8*]** [[TMP10]], align 8
834 // CHECK-64-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP6]], i32 0, i32 2
835 // CHECK-64-NEXT: [[TMP12:%.*]] = load [2 x i8*]**, [2 x i8*]*** [[DOTADDR4]], align 8
836 // CHECK-64-NEXT: store [2 x i8*]* [[TMP11]], [2 x i8*]** [[TMP12]], align 8
837 // CHECK-64-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP6]], i32 0, i32 3
838 // CHECK-64-NEXT: [[TMP14:%.*]] = load [2 x i64]**, [2 x i64]*** [[DOTADDR5]], align 8
839 // CHECK-64-NEXT: store [2 x i64]* [[TMP13]], [2 x i64]** [[TMP14]], align 8
840 // CHECK-64-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP6]], i32 0, i32 4
841 // CHECK-64-NEXT: [[TMP16:%.*]] = load i32**, i32*** [[DOTADDR2]], align 8
842 // CHECK-64-NEXT: store i32* [[TMP15]], i32** [[TMP16]], align 8
843 // CHECK-64-NEXT: ret void
844 // CHECK-64-LABEL: define {{[^@]+}}@.omp_task_entry..6
845 // CHECK-64-SAME: (i32 noundef signext [[TMP0:%.*]], %struct.kmp_task_t_with_privates.1* noalias noundef [[TMP1:%.*]]) #[[ATTR4]] {
846 // CHECK-64-NEXT: entry:
847 // CHECK-64-NEXT: [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
848 // CHECK-64-NEXT: [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 8
849 // CHECK-64-NEXT: [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 8
850 // CHECK-64-NEXT: [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 8
851 // CHECK-64-NEXT: [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 8
852 // CHECK-64-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon.0*, align 8
853 // CHECK-64-NEXT: [[DOTFIRSTPRIV_PTR_ADDR_I:%.*]] = alloca i64**, align 8
854 // CHECK-64-NEXT: [[DOTFIRSTPRIV_PTR_ADDR1_I:%.*]] = alloca i32*, align 8
855 // CHECK-64-NEXT: [[DOTFIRSTPRIV_PTR_ADDR2_I:%.*]] = alloca [2 x i8*]*, align 8
856 // CHECK-64-NEXT: [[DOTFIRSTPRIV_PTR_ADDR3_I:%.*]] = alloca [2 x i8*]*, align 8
857 // CHECK-64-NEXT: [[DOTFIRSTPRIV_PTR_ADDR4_I:%.*]] = alloca [2 x i64]*, align 8
858 // CHECK-64-NEXT: [[DOTCAPTURE_EXPR__I:%.*]] = alloca i64, align 8
859 // CHECK-64-NEXT: [[DOTCAPTURE_EXPR_5_I:%.*]] = alloca i32, align 4
860 // CHECK-64-NEXT: [[GLOBAL_CASTED_I:%.*]] = alloca i64, align 8
861 // CHECK-64-NEXT: [[KERNEL_ARGS_I:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
862 // CHECK-64-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4
863 // CHECK-64-NEXT: [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates.1*, align 8
864 // CHECK-64-NEXT: store i32 [[TMP0]], i32* [[DOTADDR]], align 4
865 // CHECK-64-NEXT: store %struct.kmp_task_t_with_privates.1* [[TMP1]], %struct.kmp_task_t_with_privates.1** [[DOTADDR1]], align 8
866 // CHECK-64-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
867 // CHECK-64-NEXT: [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates.1*, %struct.kmp_task_t_with_privates.1** [[DOTADDR1]], align 8
868 // CHECK-64-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_1:%.*]], %struct.kmp_task_t_with_privates.1* [[TMP3]], i32 0, i32 0
869 // CHECK-64-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
870 // CHECK-64-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
871 // CHECK-64-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
872 // CHECK-64-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon.0*
873 // CHECK-64-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_1]], %struct.kmp_task_t_with_privates.1* [[TMP3]], i32 0, i32 1
874 // CHECK-64-NEXT: [[TMP10:%.*]] = bitcast %struct..kmp_privates.t* [[TMP9]] to i8*
875 // CHECK-64-NEXT: [[TMP11:%.*]] = bitcast %struct.kmp_task_t_with_privates.1* [[TMP3]] to i8*
876 // CHECK-64-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META17:![0-9]+]])
877 // CHECK-64-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META20:![0-9]+]])
878 // CHECK-64-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META22:![0-9]+]])
879 // CHECK-64-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META24:![0-9]+]])
880 // CHECK-64-NEXT: store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !26
881 // CHECK-64-NEXT: store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 8, !noalias !26
882 // CHECK-64-NEXT: store i8* [[TMP10]], i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !26
883 // CHECK-64-NEXT: store void (i8*, ...)* bitcast (void (%struct..kmp_privates.t*, i64***, i32**, [2 x i8*]**, [2 x i8*]**, [2 x i64]**)* @.omp_task_privates_map. to void (i8*, ...)*), void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !26
884 // CHECK-64-NEXT: store i8* [[TMP11]], i8** [[DOTTASK_T__ADDR_I]], align 8, !noalias !26
885 // CHECK-64-NEXT: store %struct.anon.0* [[TMP8]], %struct.anon.0** [[__CONTEXT_ADDR_I]], align 8, !noalias !26
886 // CHECK-64-NEXT: [[TMP12:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR_I]], align 8, !noalias !26
887 // CHECK-64-NEXT: [[TMP13:%.*]] = load void (i8*, ...)*, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !26
888 // CHECK-64-NEXT: [[TMP14:%.*]] = load i8*, i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !26
889 // CHECK-64-NEXT: [[TMP15:%.*]] = bitcast void (i8*, ...)* [[TMP13]] to void (i8*, i64***, i32**, [2 x i8*]**, [2 x i8*]**, [2 x i64]**)*
890 // CHECK-64-NEXT: call void [[TMP15]](i8* [[TMP14]], i64*** [[DOTFIRSTPRIV_PTR_ADDR_I]], i32** [[DOTFIRSTPRIV_PTR_ADDR1_I]], [2 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR2_I]], [2 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR3_I]], [2 x i64]** [[DOTFIRSTPRIV_PTR_ADDR4_I]]) #[[ATTR3]]
891 // CHECK-64-NEXT: [[TMP16:%.*]] = load i64**, i64*** [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !26
892 // CHECK-64-NEXT: [[TMP17:%.*]] = load i32*, i32** [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 8, !noalias !26
893 // CHECK-64-NEXT: [[TMP18:%.*]] = load [2 x i8*]*, [2 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR2_I]], align 8, !noalias !26
894 // CHECK-64-NEXT: [[TMP19:%.*]] = load [2 x i8*]*, [2 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR3_I]], align 8, !noalias !26
895 // CHECK-64-NEXT: [[TMP20:%.*]] = load [2 x i64]*, [2 x i64]** [[DOTFIRSTPRIV_PTR_ADDR4_I]], align 8, !noalias !26
896 // CHECK-64-NEXT: [[TMP21:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP18]], i64 0, i64 0
897 // CHECK-64-NEXT: [[TMP22:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP19]], i64 0, i64 0
898 // CHECK-64-NEXT: [[TMP23:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[TMP20]], i64 0, i64 0
899 // CHECK-64-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_ANON_0:%.*]], %struct.anon.0* [[TMP12]], i32 0, i32 2
900 // CHECK-64-NEXT: [[TMP25:%.*]] = load i32, i32* [[TMP24]], align 4
901 // CHECK-64-NEXT: [[TMP26:%.*]] = sext i32 [[TMP25]] to i64
902 // CHECK-64-NEXT: [[TMP27:%.*]] = load i64*, i64** [[TMP16]], align 8
903 // CHECK-64-NEXT: [[TMP28:%.*]] = load i64, i64* [[TMP27]], align 8
904 // CHECK-64-NEXT: store i64 [[TMP28]], i64* [[DOTCAPTURE_EXPR__I]], align 8, !noalias !26
905 // CHECK-64-NEXT: [[TMP29:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__I]], align 8, !noalias !26
906 // CHECK-64-NEXT: [[CONV_I:%.*]] = trunc i64 [[TMP29]] to i32
907 // CHECK-64-NEXT: [[SUB6_I:%.*]] = sub nsw i32 [[CONV_I]], 1
908 // CHECK-64-NEXT: store i32 [[SUB6_I]], i32* [[DOTCAPTURE_EXPR_5_I]], align 4, !noalias !26
909 // CHECK-64-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_5_I]], align 4, !noalias !26
910 // CHECK-64-NEXT: [[ADD_I:%.*]] = add nsw i32 [[TMP30]], 1
911 // CHECK-64-NEXT: [[TMP31:%.*]] = zext i32 [[ADD_I]] to i64
912 // CHECK-64-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 0
913 // CHECK-64-NEXT: store i32 2, i32* [[TMP32]], align 4, !noalias !26
914 // CHECK-64-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 1
915 // CHECK-64-NEXT: store i32 2, i32* [[TMP33]], align 4, !noalias !26
916 // CHECK-64-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 2
917 // CHECK-64-NEXT: store i8** [[TMP21]], i8*** [[TMP34]], align 8, !noalias !26
918 // CHECK-64-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 3
919 // CHECK-64-NEXT: store i8** [[TMP22]], i8*** [[TMP35]], align 8, !noalias !26
920 // CHECK-64-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 4
921 // CHECK-64-NEXT: store i64* [[TMP23]], i64** [[TMP36]], align 8, !noalias !26
922 // CHECK-64-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 5
923 // CHECK-64-NEXT: store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes, i32 0, i32 0), i64** [[TMP37]], align 8, !noalias !26
924 // CHECK-64-NEXT: [[TMP38:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 6
925 // CHECK-64-NEXT: store i8** null, i8*** [[TMP38]], align 8, !noalias !26
926 // CHECK-64-NEXT: [[TMP39:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 7
927 // CHECK-64-NEXT: store i8** null, i8*** [[TMP39]], align 8, !noalias !26
928 // CHECK-64-NEXT: [[TMP40:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 8
929 // CHECK-64-NEXT: store i64 [[TMP31]], i64* [[TMP40]], align 8, !noalias !26
930 // CHECK-64-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 9
931 // CHECK-64-NEXT: store i64 0, i64* [[TMP41]], align 8, !noalias !26
932 // CHECK-64-NEXT: [[TMP42:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 10
933 // CHECK-64-NEXT: store [3 x i32] zeroinitializer, [3 x i32]* [[TMP42]], align 4, !noalias !26
934 // CHECK-64-NEXT: [[TMP43:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 11
935 // CHECK-64-NEXT: store [3 x i32] zeroinitializer, [3 x i32]* [[TMP43]], align 4, !noalias !26
936 // CHECK-64-NEXT: [[TMP44:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 12
937 // CHECK-64-NEXT: store i32 0, i32* [[TMP44]], align 4, !noalias !26
938 // CHECK-64-NEXT: [[TMP45:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB3]], i64 [[TMP26]], i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l76.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]])
939 // CHECK-64-NEXT: [[TMP46:%.*]] = icmp ne i32 [[TMP45]], 0
940 // CHECK-64-NEXT: br i1 [[TMP46]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__5_EXIT:%.*]]
941 // CHECK-64: omp_offload.failed.i:
942 // CHECK-64-NEXT: [[TMP47:%.*]] = load i64*, i64** [[TMP16]], align 8
943 // CHECK-64-NEXT: [[TMP48:%.*]] = load i32, i32* @global, align 4, !noalias !26
944 // CHECK-64-NEXT: [[CONV7_I:%.*]] = bitcast i64* [[GLOBAL_CASTED_I]] to i32*
945 // CHECK-64-NEXT: store i32 [[TMP48]], i32* [[CONV7_I]], align 4, !noalias !26
946 // CHECK-64-NEXT: [[TMP49:%.*]] = load i64, i64* [[GLOBAL_CASTED_I]], align 8, !noalias !26
947 // CHECK-64-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l76(i64* [[TMP47]], i64 [[TMP49]]) #[[ATTR3]]
948 // CHECK-64-NEXT: br label [[DOTOMP_OUTLINED__5_EXIT]]
949 // CHECK-64: .omp_outlined..5.exit:
950 // CHECK-64-NEXT: ret i32 0
951 // CHECK-64-LABEL: define {{[^@]+}}@.omp_task_privates_map..8
952 // CHECK-64-SAME: (%struct..kmp_privates.t.3* noalias noundef [[TMP0:%.*]], i64*** noalias noundef [[TMP1:%.*]], i32** noalias noundef [[TMP2:%.*]]) #[[ATTR7]] {
953 // CHECK-64-NEXT: entry:
954 // CHECK-64-NEXT: [[DOTADDR:%.*]] = alloca %struct..kmp_privates.t.3*, align 8
955 // CHECK-64-NEXT: [[DOTADDR1:%.*]] = alloca i64***, align 8
956 // CHECK-64-NEXT: [[DOTADDR2:%.*]] = alloca i32**, align 8
957 // CHECK-64-NEXT: store %struct..kmp_privates.t.3* [[TMP0]], %struct..kmp_privates.t.3** [[DOTADDR]], align 8
958 // CHECK-64-NEXT: store i64*** [[TMP1]], i64**** [[DOTADDR1]], align 8
959 // CHECK-64-NEXT: store i32** [[TMP2]], i32*** [[DOTADDR2]], align 8
960 // CHECK-64-NEXT: [[TMP3:%.*]] = load %struct..kmp_privates.t.3*, %struct..kmp_privates.t.3** [[DOTADDR]], align 8
961 // CHECK-64-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_3:%.*]], %struct..kmp_privates.t.3* [[TMP3]], i32 0, i32 0
962 // CHECK-64-NEXT: [[TMP5:%.*]] = load i64***, i64**** [[DOTADDR1]], align 8
963 // CHECK-64-NEXT: store i64** [[TMP4]], i64*** [[TMP5]], align 8
964 // CHECK-64-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_3]], %struct..kmp_privates.t.3* [[TMP3]], i32 0, i32 1
965 // CHECK-64-NEXT: [[TMP7:%.*]] = load i32**, i32*** [[DOTADDR2]], align 8
966 // CHECK-64-NEXT: store i32* [[TMP6]], i32** [[TMP7]], align 8
967 // CHECK-64-NEXT: ret void
968 // CHECK-64-LABEL: define {{[^@]+}}@.omp_task_entry..9
969 // CHECK-64-SAME: (i32 noundef signext [[TMP0:%.*]], %struct.kmp_task_t_with_privates.2* noalias noundef [[TMP1:%.*]]) #[[ATTR4]] {
970 // CHECK-64-NEXT: entry:
971 // CHECK-64-NEXT: [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
972 // CHECK-64-NEXT: [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 8
973 // CHECK-64-NEXT: [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 8
974 // CHECK-64-NEXT: [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 8
975 // CHECK-64-NEXT: [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 8
976 // CHECK-64-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon.0*, align 8
977 // CHECK-64-NEXT: [[DOTFIRSTPRIV_PTR_ADDR_I:%.*]] = alloca i64**, align 8
978 // CHECK-64-NEXT: [[DOTFIRSTPRIV_PTR_ADDR1_I:%.*]] = alloca i32*, align 8
979 // CHECK-64-NEXT: [[GLOBAL_CASTED_I:%.*]] = alloca i64, align 8
980 // CHECK-64-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4
981 // CHECK-64-NEXT: [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates.2*, align 8
982 // CHECK-64-NEXT: store i32 [[TMP0]], i32* [[DOTADDR]], align 4
983 // CHECK-64-NEXT: store %struct.kmp_task_t_with_privates.2* [[TMP1]], %struct.kmp_task_t_with_privates.2** [[DOTADDR1]], align 8
984 // CHECK-64-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
985 // CHECK-64-NEXT: [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates.2*, %struct.kmp_task_t_with_privates.2** [[DOTADDR1]], align 8
986 // CHECK-64-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_2:%.*]], %struct.kmp_task_t_with_privates.2* [[TMP3]], i32 0, i32 0
987 // CHECK-64-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
988 // CHECK-64-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
989 // CHECK-64-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
990 // CHECK-64-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon.0*
991 // CHECK-64-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_2]], %struct.kmp_task_t_with_privates.2* [[TMP3]], i32 0, i32 1
992 // CHECK-64-NEXT: [[TMP10:%.*]] = bitcast %struct..kmp_privates.t.3* [[TMP9]] to i8*
993 // CHECK-64-NEXT: [[TMP11:%.*]] = bitcast %struct.kmp_task_t_with_privates.2* [[TMP3]] to i8*
994 // CHECK-64-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META27:![0-9]+]])
995 // CHECK-64-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META30:![0-9]+]])
996 // CHECK-64-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META32:![0-9]+]])
997 // CHECK-64-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META34:![0-9]+]])
998 // CHECK-64-NEXT: store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !36
999 // CHECK-64-NEXT: store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 8, !noalias !36
1000 // CHECK-64-NEXT: store i8* [[TMP10]], i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !36
1001 // CHECK-64-NEXT: store void (i8*, ...)* bitcast (void (%struct..kmp_privates.t.3*, i64***, i32**)* @.omp_task_privates_map..8 to void (i8*, ...)*), void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !36
1002 // CHECK-64-NEXT: store i8* [[TMP11]], i8** [[DOTTASK_T__ADDR_I]], align 8, !noalias !36
1003 // CHECK-64-NEXT: store %struct.anon.0* [[TMP8]], %struct.anon.0** [[__CONTEXT_ADDR_I]], align 8, !noalias !36
1004 // CHECK-64-NEXT: [[TMP12:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR_I]], align 8, !noalias !36
1005 // CHECK-64-NEXT: [[TMP13:%.*]] = load void (i8*, ...)*, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !36
1006 // CHECK-64-NEXT: [[TMP14:%.*]] = load i8*, i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !36
1007 // CHECK-64-NEXT: [[TMP15:%.*]] = bitcast void (i8*, ...)* [[TMP13]] to void (i8*, i64***, i32**)*
1008 // CHECK-64-NEXT: call void [[TMP15]](i8* [[TMP14]], i64*** [[DOTFIRSTPRIV_PTR_ADDR_I]], i32** [[DOTFIRSTPRIV_PTR_ADDR1_I]]) #[[ATTR3]]
1009 // CHECK-64-NEXT: [[TMP16:%.*]] = load i64**, i64*** [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !36
1010 // CHECK-64-NEXT: [[TMP17:%.*]] = load i32*, i32** [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 8, !noalias !36
1011 // CHECK-64-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_ANON_0:%.*]], %struct.anon.0* [[TMP12]], i32 0, i32 2
1012 // CHECK-64-NEXT: [[TMP19:%.*]] = load i64*, i64** [[TMP16]], align 8
1013 // CHECK-64-NEXT: [[TMP20:%.*]] = load i32, i32* @global, align 4, !noalias !36
1014 // CHECK-64-NEXT: [[CONV_I:%.*]] = bitcast i64* [[GLOBAL_CASTED_I]] to i32*
1015 // CHECK-64-NEXT: store i32 [[TMP20]], i32* [[CONV_I]], align 4, !noalias !36
1016 // CHECK-64-NEXT: [[TMP21:%.*]] = load i64, i64* [[GLOBAL_CASTED_I]], align 8, !noalias !36
1017 // CHECK-64-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l76(i64* [[TMP19]], i64 [[TMP21]]) #[[ATTR3]]
1018 // CHECK-64-NEXT: ret i32 0
1019 // CHECK-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l83
1020 // CHECK-64-SAME: (i64 noundef [[GLOBAL:%.*]]) #[[ATTR2]] {
1021 // CHECK-64-NEXT: entry:
1022 // CHECK-64-NEXT: [[GLOBAL_ADDR:%.*]] = alloca i64, align 8
1023 // CHECK-64-NEXT: [[GLOBAL_CASTED:%.*]] = alloca i64, align 8
1024 // CHECK-64-NEXT: store i64 [[GLOBAL]], i64* [[GLOBAL_ADDR]], align 8
1025 // CHECK-64-NEXT: [[CONV:%.*]] = bitcast i64* [[GLOBAL_ADDR]] to i32*
1026 // CHECK-64-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 4
1027 // CHECK-64-NEXT: [[CONV1:%.*]] = bitcast i64* [[GLOBAL_CASTED]] to i32*
1028 // CHECK-64-NEXT: store i32 [[TMP0]], i32* [[CONV1]], align 4
1029 // CHECK-64-NEXT: [[TMP1:%.*]] = load i64, i64* [[GLOBAL_CASTED]], align 8
1030 // CHECK-64-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i64 [[TMP1]])
1031 // CHECK-64-NEXT: ret void
1032 // CHECK-64-LABEL: define {{[^@]+}}@.omp_outlined..10
1033 // CHECK-64-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[GLOBAL:%.*]]) #[[ATTR2]] {
1034 // CHECK-64-NEXT: entry:
1035 // CHECK-64-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1036 // CHECK-64-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1037 // CHECK-64-NEXT: [[GLOBAL_ADDR:%.*]] = alloca i64, align 8
1038 // CHECK-64-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
1039 // CHECK-64-NEXT: [[TMP:%.*]] = alloca i32, align 4
1040 // CHECK-64-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
1041 // CHECK-64-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
1042 // CHECK-64-NEXT: [[I:%.*]] = alloca i32, align 4
1043 // CHECK-64-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
1044 // CHECK-64-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
1045 // CHECK-64-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1046 // CHECK-64-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1047 // CHECK-64-NEXT: [[I3:%.*]] = alloca i32, align 4
1048 // CHECK-64-NEXT: [[GLOBAL_CASTED:%.*]] = alloca i64, align 8
1049 // CHECK-64-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4
1050 // CHECK-64-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1051 // CHECK-64-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1052 // CHECK-64-NEXT: store i64 [[GLOBAL]], i64* [[GLOBAL_ADDR]], align 8
1053 // CHECK-64-NEXT: [[CONV:%.*]] = bitcast i64* [[GLOBAL_ADDR]] to i32*
1054 // CHECK-64-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 4
1055 // CHECK-64-NEXT: store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4
1056 // CHECK-64-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1057 // CHECK-64-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP1]], 0
1058 // CHECK-64-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
1059 // CHECK-64-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
1060 // CHECK-64-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
1061 // CHECK-64-NEXT: store i32 0, i32* [[I]], align 4
1062 // CHECK-64-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1063 // CHECK-64-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP2]]
1064 // CHECK-64-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
1065 // CHECK-64: omp.precond.then:
1066 // CHECK-64-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
1067 // CHECK-64-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1068 // CHECK-64-NEXT: store i32 [[TMP3]], i32* [[DOTOMP_COMB_UB]], align 4
1069 // CHECK-64-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1070 // CHECK-64-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1071 // CHECK-64-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1072 // CHECK-64-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
1073 // CHECK-64-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1074 // CHECK-64-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1075 // CHECK-64-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1076 // CHECK-64-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP6]], [[TMP7]]
1077 // CHECK-64-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1078 // CHECK-64: cond.true:
1079 // CHECK-64-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1080 // CHECK-64-NEXT: br label [[COND_END:%.*]]
1081 // CHECK-64: cond.false:
1082 // CHECK-64-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1083 // CHECK-64-NEXT: br label [[COND_END]]
1084 // CHECK-64: cond.end:
1085 // CHECK-64-NEXT: [[COND:%.*]] = phi i32 [ [[TMP8]], [[COND_TRUE]] ], [ [[TMP9]], [[COND_FALSE]] ]
1086 // CHECK-64-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
1087 // CHECK-64-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
1088 // CHECK-64-NEXT: store i32 [[TMP10]], i32* [[DOTOMP_IV]], align 4
1089 // CHECK-64-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
1090 // CHECK-64: omp.inner.for.cond:
1091 // CHECK-64-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1092 // CHECK-64-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1093 // CHECK-64-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]]
1094 // CHECK-64-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1095 // CHECK-64: omp.inner.for.body:
1096 // CHECK-64-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
1097 // CHECK-64-NEXT: [[TMP14:%.*]] = zext i32 [[TMP13]] to i64
1098 // CHECK-64-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
1099 // CHECK-64-NEXT: [[TMP16:%.*]] = zext i32 [[TMP15]] to i64
1100 // CHECK-64-NEXT: [[TMP17:%.*]] = load i32, i32* [[CONV]], align 4
1101 // CHECK-64-NEXT: [[CONV6:%.*]] = bitcast i64* [[GLOBAL_CASTED]] to i32*
1102 // CHECK-64-NEXT: store i32 [[TMP17]], i32* [[CONV6]], align 4
1103 // CHECK-64-NEXT: [[TMP18:%.*]] = load i64, i64* [[GLOBAL_CASTED]], align 8
1104 // CHECK-64-NEXT: [[TMP19:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1105 // CHECK-64-NEXT: [[TMP20:%.*]] = load i32, i32* [[TMP19]], align 4
1106 // CHECK-64-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP20]])
1107 // CHECK-64-NEXT: [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1108 // CHECK-64-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4
1109 // CHECK-64-NEXT: call void @.omp_outlined..11(i32* [[TMP21]], i32* [[DOTBOUND_ZERO_ADDR]], i64 [[TMP14]], i64 [[TMP16]], i64 [[TMP18]]) #[[ATTR3]]
1110 // CHECK-64-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP20]])
1111 // CHECK-64-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
1112 // CHECK-64: omp.inner.for.inc:
1113 // CHECK-64-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1114 // CHECK-64-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
1115 // CHECK-64-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
1116 // CHECK-64-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
1117 // CHECK-64-NEXT: br label [[OMP_INNER_FOR_COND]]
1118 // CHECK-64: omp.inner.for.end:
1119 // CHECK-64-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
1120 // CHECK-64: omp.loop.exit:
1121 // CHECK-64-NEXT: [[TMP24:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1122 // CHECK-64-NEXT: [[TMP25:%.*]] = load i32, i32* [[TMP24]], align 4
1123 // CHECK-64-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]])
1124 // CHECK-64-NEXT: br label [[OMP_PRECOND_END]]
1125 // CHECK-64: omp.precond.end:
1126 // CHECK-64-NEXT: ret void
1127 // CHECK-64-LABEL: define {{[^@]+}}@.omp_outlined..11
1128 // CHECK-64-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i64 noundef [[GLOBAL:%.*]]) #[[ATTR2]] {
1129 // CHECK-64-NEXT: entry:
1130 // CHECK-64-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1131 // CHECK-64-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1132 // CHECK-64-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
1133 // CHECK-64-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
1134 // CHECK-64-NEXT: [[GLOBAL_ADDR:%.*]] = alloca i64, align 8
1135 // CHECK-64-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
1136 // CHECK-64-NEXT: [[TMP:%.*]] = alloca i32, align 4
1137 // CHECK-64-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
1138 // CHECK-64-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
1139 // CHECK-64-NEXT: [[I:%.*]] = alloca i32, align 4
1140 // CHECK-64-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
1141 // CHECK-64-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
1142 // CHECK-64-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1143 // CHECK-64-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1144 // CHECK-64-NEXT: [[I5:%.*]] = alloca i32, align 4
1145 // CHECK-64-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1146 // CHECK-64-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1147 // CHECK-64-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
1148 // CHECK-64-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
1149 // CHECK-64-NEXT: store i64 [[GLOBAL]], i64* [[GLOBAL_ADDR]], align 8
1150 // CHECK-64-NEXT: [[CONV:%.*]] = bitcast i64* [[GLOBAL_ADDR]] to i32*
1151 // CHECK-64-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 4
1152 // CHECK-64-NEXT: store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4
1153 // CHECK-64-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1154 // CHECK-64-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP1]], 0
1155 // CHECK-64-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
1156 // CHECK-64-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
1157 // CHECK-64-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
1158 // CHECK-64-NEXT: store i32 0, i32* [[I]], align 4
1159 // CHECK-64-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
1160 // CHECK-64-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP2]]
1161 // CHECK-64-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
1162 // CHECK-64: omp.precond.then:
1163 // CHECK-64-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
1164 // CHECK-64-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1165 // CHECK-64-NEXT: store i32 [[TMP3]], i32* [[DOTOMP_UB]], align 4
1166 // CHECK-64-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
1167 // CHECK-64-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP4]] to i32
1168 // CHECK-64-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
1169 // CHECK-64-NEXT: [[CONV4:%.*]] = trunc i64 [[TMP5]] to i32
1170 // CHECK-64-NEXT: store i32 [[CONV3]], i32* [[DOTOMP_LB]], align 4
1171 // CHECK-64-NEXT: store i32 [[CONV4]], i32* [[DOTOMP_UB]], align 4
1172 // CHECK-64-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
1173 // CHECK-64-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
1174 // CHECK-64-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1175 // CHECK-64-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
1176 // CHECK-64-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
1177 // CHECK-64-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1178 // CHECK-64-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1179 // CHECK-64-NEXT: [[CMP6:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
1180 // CHECK-64-NEXT: br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1181 // CHECK-64: cond.true:
1182 // CHECK-64-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
1183 // CHECK-64-NEXT: br label [[COND_END:%.*]]
1184 // CHECK-64: cond.false:
1185 // CHECK-64-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1186 // CHECK-64-NEXT: br label [[COND_END]]
1187 // CHECK-64: cond.end:
1188 // CHECK-64-NEXT: [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
1189 // CHECK-64-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
1190 // CHECK-64-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
1191 // CHECK-64-NEXT: store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
1192 // CHECK-64-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
1193 // CHECK-64: omp.inner.for.cond:
1194 // CHECK-64-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1195 // CHECK-64-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
1196 // CHECK-64-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
1197 // CHECK-64-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1198 // CHECK-64: omp.inner.for.body:
1199 // CHECK-64-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1200 // CHECK-64-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1
1201 // CHECK-64-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1202 // CHECK-64-NEXT: store i32 [[ADD]], i32* [[I5]], align 4
1203 // CHECK-64-NEXT: [[TMP16:%.*]] = load i32, i32* [[CONV]], align 4
1204 // CHECK-64-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP16]], 1
1205 // CHECK-64-NEXT: store i32 [[ADD8]], i32* [[CONV]], align 4
1206 // CHECK-64-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
1207 // CHECK-64: omp.body.continue:
1208 // CHECK-64-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
1209 // CHECK-64: omp.inner.for.inc:
1210 // CHECK-64-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
1211 // CHECK-64-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP17]], 1
1212 // CHECK-64-NEXT: store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4
1213 // CHECK-64-NEXT: br label [[OMP_INNER_FOR_COND]]
1214 // CHECK-64: omp.inner.for.end:
1215 // CHECK-64-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
1216 // CHECK-64: omp.loop.exit:
1217 // CHECK-64-NEXT: [[TMP18:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1218 // CHECK-64-NEXT: [[TMP19:%.*]] = load i32, i32* [[TMP18]], align 4
1219 // CHECK-64-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP19]])
1220 // CHECK-64-NEXT: br label [[OMP_PRECOND_END]]
1221 // CHECK-64: omp.precond.end:
1222 // CHECK-64-NEXT: ret void
1223 // CHECK-64-LABEL: define {{[^@]+}}@.omp_task_privates_map..13
1224 // CHECK-64-SAME: (%struct..kmp_privates.t.6* noalias noundef [[TMP0:%.*]], i32** noalias noundef [[TMP1:%.*]]) #[[ATTR7]] {
1225 // CHECK-64-NEXT: entry:
1226 // CHECK-64-NEXT: [[DOTADDR:%.*]] = alloca %struct..kmp_privates.t.6*, align 8
1227 // CHECK-64-NEXT: [[DOTADDR1:%.*]] = alloca i32**, align 8
1228 // CHECK-64-NEXT: store %struct..kmp_privates.t.6* [[TMP0]], %struct..kmp_privates.t.6** [[DOTADDR]], align 8
1229 // CHECK-64-NEXT: store i32** [[TMP1]], i32*** [[DOTADDR1]], align 8
1230 // CHECK-64-NEXT: [[TMP2:%.*]] = load %struct..kmp_privates.t.6*, %struct..kmp_privates.t.6** [[DOTADDR]], align 8
1231 // CHECK-64-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_6:%.*]], %struct..kmp_privates.t.6* [[TMP2]], i32 0, i32 0
1232 // CHECK-64-NEXT: [[TMP4:%.*]] = load i32**, i32*** [[DOTADDR1]], align 8
1233 // CHECK-64-NEXT: store i32* [[TMP3]], i32** [[TMP4]], align 8
1234 // CHECK-64-NEXT: ret void
1235 // CHECK-64-LABEL: define {{[^@]+}}@.omp_task_entry..14
1236 // CHECK-64-SAME: (i32 noundef signext [[TMP0:%.*]], %struct.kmp_task_t_with_privates.5* noalias noundef [[TMP1:%.*]]) #[[ATTR4]] {
1237 // CHECK-64-NEXT: entry:
1238 // CHECK-64-NEXT: [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
1239 // CHECK-64-NEXT: [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 8
1240 // CHECK-64-NEXT: [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 8
1241 // CHECK-64-NEXT: [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 8
1242 // CHECK-64-NEXT: [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 8
1243 // CHECK-64-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon.4*, align 8
1244 // CHECK-64-NEXT: [[DOTFIRSTPRIV_PTR_ADDR_I:%.*]] = alloca i32*, align 8
1245 // CHECK-64-NEXT: [[GLOBAL_CASTED_I:%.*]] = alloca i64, align 8
1246 // CHECK-64-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4
1247 // CHECK-64-NEXT: [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates.5*, align 8
1248 // CHECK-64-NEXT: store i32 [[TMP0]], i32* [[DOTADDR]], align 4
1249 // CHECK-64-NEXT: store %struct.kmp_task_t_with_privates.5* [[TMP1]], %struct.kmp_task_t_with_privates.5** [[DOTADDR1]], align 8
1250 // CHECK-64-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
1251 // CHECK-64-NEXT: [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates.5*, %struct.kmp_task_t_with_privates.5** [[DOTADDR1]], align 8
1252 // CHECK-64-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_5:%.*]], %struct.kmp_task_t_with_privates.5* [[TMP3]], i32 0, i32 0
1253 // CHECK-64-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
1254 // CHECK-64-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
1255 // CHECK-64-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
1256 // CHECK-64-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon.4*
1257 // CHECK-64-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_5]], %struct.kmp_task_t_with_privates.5* [[TMP3]], i32 0, i32 1
1258 // CHECK-64-NEXT: [[TMP10:%.*]] = bitcast %struct..kmp_privates.t.6* [[TMP9]] to i8*
1259 // CHECK-64-NEXT: [[TMP11:%.*]] = bitcast %struct.kmp_task_t_with_privates.5* [[TMP3]] to i8*
1260 // CHECK-64-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META37:![0-9]+]])
1261 // CHECK-64-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META40:![0-9]+]])
1262 // CHECK-64-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META42:![0-9]+]])
1263 // CHECK-64-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META44:![0-9]+]])
1264 // CHECK-64-NEXT: store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !46
1265 // CHECK-64-NEXT: store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 8, !noalias !46
1266 // CHECK-64-NEXT: store i8* [[TMP10]], i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !46
1267 // CHECK-64-NEXT: store void (i8*, ...)* bitcast (void (%struct..kmp_privates.t.6*, i32**)* @.omp_task_privates_map..13 to void (i8*, ...)*), void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !46
1268 // CHECK-64-NEXT: store i8* [[TMP11]], i8** [[DOTTASK_T__ADDR_I]], align 8, !noalias !46
1269 // CHECK-64-NEXT: store %struct.anon.4* [[TMP8]], %struct.anon.4** [[__CONTEXT_ADDR_I]], align 8, !noalias !46
1270 // CHECK-64-NEXT: [[TMP12:%.*]] = load %struct.anon.4*, %struct.anon.4** [[__CONTEXT_ADDR_I]], align 8, !noalias !46
1271 // CHECK-64-NEXT: [[TMP13:%.*]] = load void (i8*, ...)*, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !46
1272 // CHECK-64-NEXT: [[TMP14:%.*]] = load i8*, i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !46
1273 // CHECK-64-NEXT: [[TMP15:%.*]] = bitcast void (i8*, ...)* [[TMP13]] to void (i8*, i32**)*
1274 // CHECK-64-NEXT: call void [[TMP15]](i8* [[TMP14]], i32** [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR3]]
1275 // CHECK-64-NEXT: [[TMP16:%.*]] = load i32*, i32** [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !46
1276 // CHECK-64-NEXT: [[TMP17:%.*]] = load i32, i32* [[TMP16]], align 4
1277 // CHECK-64-NEXT: [[CONV_I:%.*]] = bitcast i64* [[GLOBAL_CASTED_I]] to i32*
1278 // CHECK-64-NEXT: store i32 [[TMP17]], i32* [[CONV_I]], align 4, !noalias !46
1279 // CHECK-64-NEXT: [[TMP18:%.*]] = load i64, i64* [[GLOBAL_CASTED_I]], align 8, !noalias !46
1280 // CHECK-64-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l83(i64 [[TMP18]]) #[[ATTR3]]
1281 // CHECK-64-NEXT: ret i32 0
1282 // CHECK-64-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
1283 // CHECK-64-SAME: () #[[ATTR7]] {
1284 // CHECK-64-NEXT: entry:
1285 // CHECK-64-NEXT: call void @__tgt_register_requires(i64 1)
1286 // CHECK-64-NEXT: ret void
1287 // CHECK-32-LABEL: define {{[^@]+}}@_Z3fooi
1288 // CHECK-32-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0:[0-9]+]] {
1289 // CHECK-32-NEXT: entry:
1290 // CHECK-32-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
1291 // CHECK-32-NEXT: [[A:%.*]] = alloca i32, align 4
1292 // CHECK-32-NEXT: [[AA:%.*]] = alloca i16, align 2
1293 // CHECK-32-NEXT: [[B:%.*]] = alloca [10 x float], align 4
1294 // CHECK-32-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4
1295 // CHECK-32-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4
1296 // CHECK-32-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8
1297 // CHECK-32-NEXT: [[__VLA_EXPR1:%.*]] = alloca i32, align 4
1298 // CHECK-32-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4
1299 // CHECK-32-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
1300 // CHECK-32-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 4
1301 // CHECK-32-NEXT: [[DOTDEP_ARR_ADDR:%.*]] = alloca [4 x %struct.kmp_depend_info], align 4
1302 // CHECK-32-NEXT: [[DEP_COUNTER_ADDR:%.*]] = alloca i32, align 4
1303 // CHECK-32-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
1304 // CHECK-32-NEXT: [[GLOBAL_CASTED:%.*]] = alloca i32, align 4
1305 // CHECK-32-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x i8*], align 4
1306 // CHECK-32-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x i8*], align 4
1307 // CHECK-32-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x i8*], align 4
1308 // CHECK-32-NEXT: [[AGG_CAPTURED4:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 4
1309 // CHECK-32-NEXT: [[DOTDEP_ARR_ADDR5:%.*]] = alloca [3 x %struct.kmp_depend_info], align 4
1310 // CHECK-32-NEXT: [[DEP_COUNTER_ADDR6:%.*]] = alloca i32, align 4
1311 // CHECK-32-NEXT: [[AGG_CAPTURED7:%.*]] = alloca [[STRUCT_ANON_0]], align 4
1312 // CHECK-32-NEXT: [[DOTDEP_ARR_ADDR8:%.*]] = alloca [3 x %struct.kmp_depend_info], align 4
1313 // CHECK-32-NEXT: [[DEP_COUNTER_ADDR9:%.*]] = alloca i32, align 4
1314 // CHECK-32-NEXT: [[GLOBAL_CASTED10:%.*]] = alloca i32, align 4
1315 // CHECK-32-NEXT: [[AGG_CAPTURED11:%.*]] = alloca [[STRUCT_ANON_4:%.*]], align 4
1316 // CHECK-32-NEXT: [[DOTDEP_ARR_ADDR12:%.*]] = alloca [1 x %struct.kmp_depend_info], align 4
1317 // CHECK-32-NEXT: [[DEP_COUNTER_ADDR13:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3:[0-9]+]])
// CHECK-32-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK-32-NEXT: store i32 0, i32* [[A]], align 4
// CHECK-32-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK-32-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK-32-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK-32-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
// CHECK-32-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP1]], align 4
// CHECK-32-NEXT: store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
// CHECK-32-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK-32-NEXT: [[TMP4:%.*]] = mul nuw i32 5, [[TMP3]]
// CHECK-32-NEXT: [[VLA1:%.*]] = alloca double, i32 [[TMP4]], align 8
// CHECK-32-NEXT: store i32 [[TMP3]], i32* [[__VLA_EXPR1]], align 4
// CHECK-32-NEXT: [[TMP5:%.*]] = load i32, i32* @global, align 4
// CHECK-32-NEXT: [[TMP6:%.*]] = load i32, i32* [[A]], align 4
// CHECK-32-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP5]], [[TMP6]]
// CHECK-32-NEXT: store i32 [[ADD]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK-32-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 0
// CHECK-32-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK-32-NEXT: store i32 [[TMP8]], i32* [[TMP7]], align 4
// CHECK-32-NEXT: [[TMP9:%.*]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @[[GLOB3]], i32 [[TMP0]], i32 1, i32 20, i32 4, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*))
// CHECK-32-NEXT: [[TMP10:%.*]] = bitcast i8* [[TMP9]] to %struct.kmp_task_t_with_privates*
// CHECK-32-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP10]], i32 0, i32 0
// CHECK-32-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP11]], i32 0, i32 0
// CHECK-32-NEXT: [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 4
// CHECK-32-NEXT: [[TMP14:%.*]] = bitcast %struct.anon* [[AGG_CAPTURED]] to i8*
// CHECK-32-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP13]], i8* align 4 [[TMP14]], i32 4, i1 false)
// CHECK-32-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x %struct.kmp_depend_info], [4 x %struct.kmp_depend_info]* [[DOTDEP_ARR_ADDR]], i32 0, i32 0
// CHECK-32-NEXT: [[TMP16:%.*]] = getelementptr [[STRUCT_KMP_DEPEND_INFO:%.*]], %struct.kmp_depend_info* [[TMP15]], i32 0
// CHECK-32-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP16]], i32 0, i32 0
// CHECK-32-NEXT: store i32 ptrtoint (i32* @global to i32), i32* [[TMP17]], align 4
// CHECK-32-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP16]], i32 0, i32 1
// CHECK-32-NEXT: store i32 4, i32* [[TMP18]], align 4
// CHECK-32-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP16]], i32 0, i32 2
// CHECK-32-NEXT: store i8 1, i8* [[TMP19]], align 4
// CHECK-32-NEXT: [[TMP20:%.*]] = ptrtoint i32* [[A]] to i32
// CHECK-32-NEXT: [[TMP21:%.*]] = getelementptr [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP15]], i32 1
// CHECK-32-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP21]], i32 0, i32 0
// CHECK-32-NEXT: store i32 [[TMP20]], i32* [[TMP22]], align 4
// CHECK-32-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP21]], i32 0, i32 1
// CHECK-32-NEXT: store i32 4, i32* [[TMP23]], align 4
// CHECK-32-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP21]], i32 0, i32 2
// CHECK-32-NEXT: store i8 3, i8* [[TMP24]], align 4
// CHECK-32-NEXT: [[TMP25:%.*]] = ptrtoint [10 x float]* [[B]] to i32
// CHECK-32-NEXT: [[TMP26:%.*]] = getelementptr [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP15]], i32 2
// CHECK-32-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP26]], i32 0, i32 0
// CHECK-32-NEXT: store i32 [[TMP25]], i32* [[TMP27]], align 4
// CHECK-32-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP26]], i32 0, i32 1
// CHECK-32-NEXT: store i32 40, i32* [[TMP28]], align 4
// CHECK-32-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP26]], i32 0, i32 2
// CHECK-32-NEXT: store i8 3, i8* [[TMP29]], align 4
// CHECK-32-NEXT: [[TMP30:%.*]] = mul nsw i32 4, [[TMP3]]
// CHECK-32-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[VLA1]], i32 [[TMP30]]
// CHECK-32-NEXT: [[TMP31:%.*]] = mul nuw i32 [[TMP3]], 8
// CHECK-32-NEXT: [[TMP32:%.*]] = ptrtoint double* [[ARRAYIDX]] to i32
// CHECK-32-NEXT: [[TMP33:%.*]] = getelementptr [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP15]], i32 3
// CHECK-32-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP33]], i32 0, i32 0
// CHECK-32-NEXT: store i32 [[TMP32]], i32* [[TMP34]], align 4
// CHECK-32-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP33]], i32 0, i32 1
// CHECK-32-NEXT: store i32 [[TMP31]], i32* [[TMP35]], align 4
// CHECK-32-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP33]], i32 0, i32 2
// CHECK-32-NEXT: store i8 3, i8* [[TMP36]], align 4
// CHECK-32-NEXT: store i32 4, i32* [[DEP_COUNTER_ADDR]], align 4
// CHECK-32-NEXT: [[TMP37:%.*]] = bitcast %struct.kmp_depend_info* [[TMP15]] to i8*
// CHECK-32-NEXT: call void @__kmpc_omp_taskwait_deps_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP0]], i32 4, i8* [[TMP37]], i32 0, i8* null, i32 0)
// CHECK-32-NEXT: call void @__kmpc_omp_task_begin_if0(%struct.ident_t* @[[GLOB3]], i32 [[TMP0]], i8* [[TMP9]])
// CHECK-32-NEXT: [[TMP38:%.*]] = call i32 @.omp_task_entry.(i32 [[TMP0]], %struct.kmp_task_t_with_privates* [[TMP10]]) #[[ATTR3:[0-9]+]]
// CHECK-32-NEXT: call void @__kmpc_omp_task_complete_if0(%struct.ident_t* @[[GLOB3]], i32 [[TMP0]], i8* [[TMP9]])
// CHECK-32-NEXT: [[TMP39:%.*]] = load i32, i32* @global, align 4
// CHECK-32-NEXT: [[TMP40:%.*]] = load i32, i32* [[A]], align 4
// CHECK-32-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP39]], [[TMP40]]
// CHECK-32-NEXT: store i32 [[ADD3]], i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK-32-NEXT: [[TMP41:%.*]] = load i32*, i32** @_ZZ3fooiE6plocal, align 4
// CHECK-32-NEXT: [[TMP42:%.*]] = load i32, i32* @global, align 4
// CHECK-32-NEXT: store i32 [[TMP42]], i32* [[GLOBAL_CASTED]], align 4
// CHECK-32-NEXT: [[TMP43:%.*]] = load i32, i32* [[GLOBAL_CASTED]], align 4
// CHECK-32-NEXT: [[TMP44:%.*]] = load i32, i32* [[A]], align 4
// CHECK-32-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP44]], 0
// CHECK-32-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK-32: omp_if.then:
// CHECK-32-NEXT: [[TMP45:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK-32-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32**
// CHECK-32-NEXT: store i32* [[TMP41]], i32** [[TMP46]], align 4
// CHECK-32-NEXT: [[TMP47:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK-32-NEXT: [[TMP48:%.*]] = bitcast i8** [[TMP47]] to i32**
// CHECK-32-NEXT: store i32* [[TMP41]], i32** [[TMP48]], align 4
// CHECK-32-NEXT: [[TMP49:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK-32-NEXT: store i8* null, i8** [[TMP49]], align 4
// CHECK-32-NEXT: [[TMP50:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK-32-NEXT: [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32*
// CHECK-32-NEXT: store i32 [[TMP43]], i32* [[TMP51]], align 4
// CHECK-32-NEXT: [[TMP52:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK-32-NEXT: [[TMP53:%.*]] = bitcast i8** [[TMP52]] to i32*
// CHECK-32-NEXT: store i32 [[TMP43]], i32* [[TMP53]], align 4
// CHECK-32-NEXT: [[TMP54:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK-32-NEXT: store i8* null, i8** [[TMP54]], align 4
// CHECK-32-NEXT: [[TMP55:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK-32-NEXT: [[TMP56:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK-32-NEXT: [[TMP57:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[AGG_CAPTURED4]], i32 0, i32 0
// CHECK-32-NEXT: [[TMP58:%.*]] = load i32*, i32** @_ZZ3fooiE6plocal, align 4
// CHECK-32-NEXT: store i32* [[TMP58]], i32** [[TMP57]], align 4
// CHECK-32-NEXT: [[TMP59:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[AGG_CAPTURED4]], i32 0, i32 1
// CHECK-32-NEXT: [[TMP60:%.*]] = load i32, i32* @global, align 4
// CHECK-32-NEXT: store i32 [[TMP60]], i32* [[TMP59]], align 4
// CHECK-32-NEXT: [[TMP61:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[AGG_CAPTURED4]], i32 0, i32 2
// CHECK-32-NEXT: [[TMP62:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK-32-NEXT: store i32 [[TMP62]], i32* [[TMP61]], align 4
// CHECK-32-NEXT: [[TMP63:%.*]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @[[GLOB3]], i32 [[TMP0]], i32 1, i32 60, i32 12, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates.1*)* @.omp_task_entry..6 to i32 (i32, i8*)*))
// CHECK-32-NEXT: [[TMP64:%.*]] = bitcast i8* [[TMP63]] to %struct.kmp_task_t_with_privates.1*
// CHECK-32-NEXT: [[TMP65:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_1:%.*]], %struct.kmp_task_t_with_privates.1* [[TMP64]], i32 0, i32 0
// CHECK-32-NEXT: [[TMP66:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP65]], i32 0, i32 0
// CHECK-32-NEXT: [[TMP67:%.*]] = load i8*, i8** [[TMP66]], align 4
// CHECK-32-NEXT: [[TMP68:%.*]] = bitcast %struct.anon.0* [[AGG_CAPTURED4]] to i8*
// CHECK-32-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP67]], i8* align 4 [[TMP68]], i32 12, i1 false)
// CHECK-32-NEXT: [[TMP69:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_1]], %struct.kmp_task_t_with_privates.1* [[TMP64]], i32 0, i32 1
// CHECK-32-NEXT: [[TMP70:%.*]] = bitcast i8* [[TMP67]] to %struct.anon.0*
// CHECK-32-NEXT: [[TMP71:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP69]], i32 0, i32 0
// CHECK-32-NEXT: [[TMP72:%.*]] = bitcast [2 x i64]* [[TMP71]] to i8*
// CHECK-32-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP72]], i8* align 4 bitcast ([2 x i64]* @.offload_sizes to i8*), i32 16, i1 false)
// CHECK-32-NEXT: [[TMP73:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP69]], i32 0, i32 1
// CHECK-32-NEXT: [[TMP74:%.*]] = load i32*, i32** @_ZZ3fooiE6plocal, align 4
// CHECK-32-NEXT: store i32* [[TMP74]], i32** [[TMP73]], align 4
// CHECK-32-NEXT: [[TMP75:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP69]], i32 0, i32 2
// CHECK-32-NEXT: [[TMP76:%.*]] = load i32, i32* @global, align 4
// CHECK-32-NEXT: store i32 [[TMP76]], i32* [[TMP75]], align 4
// CHECK-32-NEXT: [[TMP77:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP69]], i32 0, i32 3
// CHECK-32-NEXT: [[TMP78:%.*]] = bitcast [2 x i8*]* [[TMP77]] to i8*
// CHECK-32-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP55]] to i8*
// CHECK-32-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP78]], i8* align 4 [[TMP79]], i32 8, i1 false)
// CHECK-32-NEXT: [[TMP80:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP69]], i32 0, i32 4
// CHECK-32-NEXT: [[TMP81:%.*]] = bitcast [2 x i8*]* [[TMP80]] to i8*
// CHECK-32-NEXT: [[TMP82:%.*]] = bitcast i8** [[TMP56]] to i8*
// CHECK-32-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP81]], i8* align 4 [[TMP82]], i32 8, i1 false)
// CHECK-32-NEXT: [[TMP83:%.*]] = getelementptr inbounds [3 x %struct.kmp_depend_info], [3 x %struct.kmp_depend_info]* [[DOTDEP_ARR_ADDR5]], i32 0, i32 0
// CHECK-32-NEXT: [[TMP84:%.*]] = getelementptr [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP83]], i32 0
// CHECK-32-NEXT: [[TMP85:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP84]], i32 0, i32 0
// CHECK-32-NEXT: store i32 ptrtoint (i32* @global to i32), i32* [[TMP85]], align 4
// CHECK-32-NEXT: [[TMP86:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP84]], i32 0, i32 1
// CHECK-32-NEXT: store i32 4, i32* [[TMP86]], align 4
// CHECK-32-NEXT: [[TMP87:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP84]], i32 0, i32 2
// CHECK-32-NEXT: store i8 3, i8* [[TMP87]], align 4
// CHECK-32-NEXT: [[TMP88:%.*]] = ptrtoint i32* [[A]] to i32
// CHECK-32-NEXT: [[TMP89:%.*]] = getelementptr [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP83]], i32 1
// CHECK-32-NEXT: [[TMP90:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP89]], i32 0, i32 0
// CHECK-32-NEXT: store i32 [[TMP88]], i32* [[TMP90]], align 4
// CHECK-32-NEXT: [[TMP91:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP89]], i32 0, i32 1
// CHECK-32-NEXT: store i32 4, i32* [[TMP91]], align 4
// CHECK-32-NEXT: [[TMP92:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP89]], i32 0, i32 2
// CHECK-32-NEXT: store i8 3, i8* [[TMP92]], align 4
// CHECK-32-NEXT: [[TMP93:%.*]] = mul nuw i32 [[TMP1]], 4
// CHECK-32-NEXT: [[TMP94:%.*]] = ptrtoint float* [[VLA]] to i32
// CHECK-32-NEXT: [[TMP95:%.*]] = getelementptr [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP83]], i32 2
// CHECK-32-NEXT: [[TMP96:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP95]], i32 0, i32 0
// CHECK-32-NEXT: store i32 [[TMP94]], i32* [[TMP96]], align 4
// CHECK-32-NEXT: [[TMP97:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP95]], i32 0, i32 1
// CHECK-32-NEXT: store i32 [[TMP93]], i32* [[TMP97]], align 4
// CHECK-32-NEXT: [[TMP98:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP95]], i32 0, i32 2
// CHECK-32-NEXT: store i8 3, i8* [[TMP98]], align 4
// CHECK-32-NEXT: store i32 3, i32* [[DEP_COUNTER_ADDR6]], align 4
// CHECK-32-NEXT: [[TMP99:%.*]] = bitcast %struct.kmp_depend_info* [[TMP83]] to i8*
// CHECK-32-NEXT: call void @__kmpc_omp_taskwait_deps_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP0]], i32 3, i8* [[TMP99]], i32 0, i8* null, i32 0)
// CHECK-32-NEXT: call void @__kmpc_omp_task_begin_if0(%struct.ident_t* @[[GLOB3]], i32 [[TMP0]], i8* [[TMP63]])
// CHECK-32-NEXT: [[TMP100:%.*]] = call i32 @.omp_task_entry..6(i32 [[TMP0]], %struct.kmp_task_t_with_privates.1* [[TMP64]]) #[[ATTR3]]
// CHECK-32-NEXT: call void @__kmpc_omp_task_complete_if0(%struct.ident_t* @[[GLOB3]], i32 [[TMP0]], i8* [[TMP63]])
// CHECK-32-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK-32: omp_if.else:
// CHECK-32-NEXT: [[TMP101:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[AGG_CAPTURED7]], i32 0, i32 0
// CHECK-32-NEXT: [[TMP102:%.*]] = load i32*, i32** @_ZZ3fooiE6plocal, align 4
// CHECK-32-NEXT: store i32* [[TMP102]], i32** [[TMP101]], align 4
// CHECK-32-NEXT: [[TMP103:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[AGG_CAPTURED7]], i32 0, i32 1
// CHECK-32-NEXT: [[TMP104:%.*]] = load i32, i32* @global, align 4
// CHECK-32-NEXT: store i32 [[TMP104]], i32* [[TMP103]], align 4
// CHECK-32-NEXT: [[TMP105:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[AGG_CAPTURED7]], i32 0, i32 2
// CHECK-32-NEXT: [[TMP106:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4
// CHECK-32-NEXT: store i32 [[TMP106]], i32* [[TMP105]], align 4
// CHECK-32-NEXT: [[TMP107:%.*]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @[[GLOB3]], i32 [[TMP0]], i32 1, i32 28, i32 12, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates.2*)* @.omp_task_entry..9 to i32 (i32, i8*)*))
// CHECK-32-NEXT: [[TMP108:%.*]] = bitcast i8* [[TMP107]] to %struct.kmp_task_t_with_privates.2*
// CHECK-32-NEXT: [[TMP109:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_2:%.*]], %struct.kmp_task_t_with_privates.2* [[TMP108]], i32 0, i32 0
// CHECK-32-NEXT: [[TMP110:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP109]], i32 0, i32 0
// CHECK-32-NEXT: [[TMP111:%.*]] = load i8*, i8** [[TMP110]], align 4
// CHECK-32-NEXT: [[TMP112:%.*]] = bitcast %struct.anon.0* [[AGG_CAPTURED7]] to i8*
// CHECK-32-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP111]], i8* align 4 [[TMP112]], i32 12, i1 false)
// CHECK-32-NEXT: [[TMP113:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_2]], %struct.kmp_task_t_with_privates.2* [[TMP108]], i32 0, i32 1
// CHECK-32-NEXT: [[TMP114:%.*]] = bitcast i8* [[TMP111]] to %struct.anon.0*
// CHECK-32-NEXT: [[TMP115:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_3:%.*]], %struct..kmp_privates.t.3* [[TMP113]], i32 0, i32 0
// CHECK-32-NEXT: [[TMP116:%.*]] = load i32*, i32** @_ZZ3fooiE6plocal, align 4
// CHECK-32-NEXT: store i32* [[TMP116]], i32** [[TMP115]], align 4
// CHECK-32-NEXT: [[TMP117:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_3]], %struct..kmp_privates.t.3* [[TMP113]], i32 0, i32 1
// CHECK-32-NEXT: [[TMP118:%.*]] = load i32, i32* @global, align 4
// CHECK-32-NEXT: store i32 [[TMP118]], i32* [[TMP117]], align 4
// CHECK-32-NEXT: [[TMP119:%.*]] = getelementptr inbounds [3 x %struct.kmp_depend_info], [3 x %struct.kmp_depend_info]* [[DOTDEP_ARR_ADDR8]], i32 0, i32 0
// CHECK-32-NEXT: [[TMP120:%.*]] = getelementptr [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP119]], i32 0
// CHECK-32-NEXT: [[TMP121:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP120]], i32 0, i32 0
// CHECK-32-NEXT: store i32 ptrtoint (i32* @global to i32), i32* [[TMP121]], align 4
// CHECK-32-NEXT: [[TMP122:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP120]], i32 0, i32 1
// CHECK-32-NEXT: store i32 4, i32* [[TMP122]], align 4
// CHECK-32-NEXT: [[TMP123:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP120]], i32 0, i32 2
// CHECK-32-NEXT: store i8 3, i8* [[TMP123]], align 4
// CHECK-32-NEXT: [[TMP124:%.*]] = ptrtoint i32* [[A]] to i32
// CHECK-32-NEXT: [[TMP125:%.*]] = getelementptr [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP119]], i32 1
// CHECK-32-NEXT: [[TMP126:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP125]], i32 0, i32 0
// CHECK-32-NEXT: store i32 [[TMP124]], i32* [[TMP126]], align 4
// CHECK-32-NEXT: [[TMP127:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP125]], i32 0, i32 1
// CHECK-32-NEXT: store i32 4, i32* [[TMP127]], align 4
// CHECK-32-NEXT: [[TMP128:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP125]], i32 0, i32 2
// CHECK-32-NEXT: store i8 3, i8* [[TMP128]], align 4
// CHECK-32-NEXT: [[TMP129:%.*]] = mul nuw i32 [[TMP1]], 4
// CHECK-32-NEXT: [[TMP130:%.*]] = ptrtoint float* [[VLA]] to i32
// CHECK-32-NEXT: [[TMP131:%.*]] = getelementptr [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP119]], i32 2
// CHECK-32-NEXT: [[TMP132:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP131]], i32 0, i32 0
// CHECK-32-NEXT: store i32 [[TMP130]], i32* [[TMP132]], align 4
// CHECK-32-NEXT: [[TMP133:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP131]], i32 0, i32 1
// CHECK-32-NEXT: store i32 [[TMP129]], i32* [[TMP133]], align 4
// CHECK-32-NEXT: [[TMP134:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP131]], i32 0, i32 2
// CHECK-32-NEXT: store i8 3, i8* [[TMP134]], align 4
// CHECK-32-NEXT: store i32 3, i32* [[DEP_COUNTER_ADDR9]], align 4
// CHECK-32-NEXT: [[TMP135:%.*]] = bitcast %struct.kmp_depend_info* [[TMP119]] to i8*
// CHECK-32-NEXT: call void @__kmpc_omp_taskwait_deps_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP0]], i32 3, i8* [[TMP135]], i32 0, i8* null, i32 0)
// CHECK-32-NEXT: call void @__kmpc_omp_task_begin_if0(%struct.ident_t* @[[GLOB3]], i32 [[TMP0]], i8* [[TMP107]])
// CHECK-32-NEXT: [[TMP136:%.*]] = call i32 @.omp_task_entry..9(i32 [[TMP0]], %struct.kmp_task_t_with_privates.2* [[TMP108]]) #[[ATTR3]]
// CHECK-32-NEXT: call void @__kmpc_omp_task_complete_if0(%struct.ident_t* @[[GLOB3]], i32 [[TMP0]], i8* [[TMP107]])
// CHECK-32-NEXT: br label [[OMP_IF_END]]
// CHECK-32: omp_if.end:
// CHECK-32-NEXT: [[TMP137:%.*]] = load i32, i32* @global, align 4
// CHECK-32-NEXT: store i32 [[TMP137]], i32* [[GLOBAL_CASTED10]], align 4
// CHECK-32-NEXT: [[TMP138:%.*]] = load i32, i32* [[GLOBAL_CASTED10]], align 4
// CHECK-32-NEXT: [[TMP139:%.*]] = getelementptr inbounds [[STRUCT_ANON_4]], %struct.anon.4* [[AGG_CAPTURED11]], i32 0, i32 0
// CHECK-32-NEXT: [[TMP140:%.*]] = load i32, i32* @global, align 4
// CHECK-32-NEXT: store i32 [[TMP140]], i32* [[TMP139]], align 4
// CHECK-32-NEXT: [[TMP141:%.*]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @[[GLOB3]], i32 [[TMP0]], i32 1, i32 24, i32 4, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates.5*)* @.omp_task_entry..14 to i32 (i32, i8*)*))
// CHECK-32-NEXT: [[TMP142:%.*]] = bitcast i8* [[TMP141]] to %struct.kmp_task_t_with_privates.5*
// CHECK-32-NEXT: [[TMP143:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_5:%.*]], %struct.kmp_task_t_with_privates.5* [[TMP142]], i32 0, i32 0
// CHECK-32-NEXT: [[TMP144:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP143]], i32 0, i32 0
// CHECK-32-NEXT: [[TMP145:%.*]] = load i8*, i8** [[TMP144]], align 4
// CHECK-32-NEXT: [[TMP146:%.*]] = bitcast %struct.anon.4* [[AGG_CAPTURED11]] to i8*
// CHECK-32-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP145]], i8* align 4 [[TMP146]], i32 4, i1 false)
// CHECK-32-NEXT: [[TMP147:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_5]], %struct.kmp_task_t_with_privates.5* [[TMP142]], i32 0, i32 1
// CHECK-32-NEXT: [[TMP148:%.*]] = bitcast i8* [[TMP145]] to %struct.anon.4*
// CHECK-32-NEXT: [[TMP149:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_6:%.*]], %struct..kmp_privates.t.6* [[TMP147]], i32 0, i32 0
// CHECK-32-NEXT: [[TMP150:%.*]] = load i32, i32* @global, align 4
// CHECK-32-NEXT: store i32 [[TMP150]], i32* [[TMP149]], align 4
// CHECK-32-NEXT: [[TMP151:%.*]] = getelementptr inbounds [1 x %struct.kmp_depend_info], [1 x %struct.kmp_depend_info]* [[DOTDEP_ARR_ADDR12]], i32 0, i32 0
// CHECK-32-NEXT: [[TMP152:%.*]] = getelementptr [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP151]], i32 0
// CHECK-32-NEXT: [[TMP153:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP152]], i32 0, i32 0
// CHECK-32-NEXT: store i32 ptrtoint (i32* @global to i32), i32* [[TMP153]], align 4
// CHECK-32-NEXT: [[TMP154:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP152]], i32 0, i32 1
// CHECK-32-NEXT: store i32 4, i32* [[TMP154]], align 4
// CHECK-32-NEXT: [[TMP155:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], %struct.kmp_depend_info* [[TMP152]], i32 0, i32 2
// CHECK-32-NEXT: store i8 3, i8* [[TMP155]], align 4
// CHECK-32-NEXT: store i32 1, i32* [[DEP_COUNTER_ADDR13]], align 4
// CHECK-32-NEXT: [[TMP156:%.*]] = bitcast %struct.kmp_depend_info* [[TMP151]] to i8*
// CHECK-32-NEXT: call void @__kmpc_omp_taskwait_deps_51(%struct.ident_t* @[[GLOB3]], i32 [[TMP0]], i32 1, i8* [[TMP156]], i32 0, i8* null, i32 0)
// CHECK-32-NEXT: call void @__kmpc_omp_task_begin_if0(%struct.ident_t* @[[GLOB3]], i32 [[TMP0]], i8* [[TMP141]])
// CHECK-32-NEXT: [[TMP157:%.*]] = call i32 @.omp_task_entry..14(i32 [[TMP0]], %struct.kmp_task_t_with_privates.5* [[TMP142]]) #[[ATTR3]]
// CHECK-32-NEXT: call void @__kmpc_omp_task_complete_if0(%struct.ident_t* @[[GLOB3]], i32 [[TMP0]], i8* [[TMP141]])
// CHECK-32-NEXT: [[TMP158:%.*]] = load i32, i32* [[A]], align 4
// CHECK-32-NEXT: [[TMP159:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK-32-NEXT: call void @llvm.stackrestore(i8* [[TMP159]])
// CHECK-32-NEXT: ret i32 [[TMP158]]
// CHECK-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l66
// CHECK-32-SAME: () #[[ATTR2:[0-9]+]] {
// CHECK-32-NEXT: entry:
// CHECK-32-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK-32-NEXT: ret void
// CHECK-32-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK-32-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
// CHECK-32-NEXT: entry:
// CHECK-32-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK-32-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK-32-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK-32-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK-32-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK-32-NEXT: store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK-32-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK-32-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK-32-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK-32-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK-32-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK-32-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK-32-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
// CHECK-32-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK-32: cond.true:
// CHECK-32-NEXT: br label [[COND_END:%.*]]
// CHECK-32: cond.false:
// CHECK-32-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK-32-NEXT: br label [[COND_END]]
// CHECK-32: cond.end:
// CHECK-32-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK-32-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK-32-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK-32-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK-32-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK-32: omp.inner.for.cond:
// CHECK-32-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK-32-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK-32-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK-32-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK-32: omp.inner.for.body:
// CHECK-32-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK-32-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK-32-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP7]], i32 [[TMP8]])
// CHECK-32-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK-32: omp.inner.for.inc:
// CHECK-32-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK-32-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK-32-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP9]], [[TMP10]]
// CHECK-32-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK-32-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK-32: omp.inner.for.end:
// CHECK-32-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK-32: omp.loop.exit:
// CHECK-32-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP1]])
// CHECK-32-NEXT: ret void
// CHECK-32-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK-32-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] {
// CHECK-32-NEXT: entry:
// CHECK-32-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK-32-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK-32-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK-32-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK-32-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK-32-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK-32-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK-32-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4
// CHECK-32-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK-32-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK-32-NEXT: store i32 [[TMP0]], i32* [[DOTOMP_LB]], align 4
// CHECK-32-NEXT: store i32 [[TMP1]], i32* [[DOTOMP_UB]], align 4
// CHECK-32-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK-32-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK-32-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK-32-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK-32-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK-32-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK-32-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 9
// CHECK-32-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK-32: cond.true:
// CHECK-32-NEXT: br label [[COND_END:%.*]]
// CHECK-32: cond.false:
// CHECK-32-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK-32-NEXT: br label [[COND_END]]
// CHECK-32: cond.end:
// CHECK-32-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// CHECK-32-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK-32-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK-32-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4
// CHECK-32-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK-32: omp.inner.for.cond:
// CHECK-32-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK-32-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK-32-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
// CHECK-32-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK-32: omp.inner.for.body:
// CHECK-32-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK-32-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
// CHECK-32-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-32-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK-32-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK-32: omp.body.continue:
// CHECK-32-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK-32: omp.inner.for.inc:
// CHECK-32-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK-32-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK-32-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4
// CHECK-32-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK-32: omp.inner.for.end:
// CHECK-32-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK-32: omp.loop.exit:
// CHECK-32-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]])
// CHECK-32-NEXT: ret void
// CHECK-32-LABEL: define {{[^@]+}}@.omp_task_entry.
// CHECK-32-SAME: (i32 noundef [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias noundef [[TMP1:%.*]]) #[[ATTR4:[0-9]+]] {
// CHECK-32-NEXT: entry:
// CHECK-32-NEXT: [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 4
// CHECK-32-NEXT: [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 4
// CHECK-32-NEXT: [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 4
// CHECK-32-NEXT: [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 4
// CHECK-32-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 4
// CHECK-32-NEXT: [[KERNEL_ARGS_I:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK-32-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 4
// CHECK-32-NEXT: store i32 [[TMP0]], i32* [[DOTADDR]], align 4
// CHECK-32-NEXT: store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 4
// CHECK-32-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
// CHECK-32-NEXT: [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 4
// CHECK-32-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0
// CHECK-32-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
// CHECK-32-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
// CHECK-32-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
// CHECK-32-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
// CHECK-32-NEXT: [[TMP9:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
// CHECK-32-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]])
// CHECK-32-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META11:![0-9]+]])
// CHECK-32-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META13:![0-9]+]])
// CHECK-32-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META15:![0-9]+]])
// CHECK-32-NEXT: store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !17
// CHECK-32-NEXT: store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 4, !noalias !17
// CHECK-32-NEXT: store i8* null, i8** [[DOTPRIVATES__ADDR_I]], align 4, !noalias !17
// CHECK-32-NEXT: store void (i8*, ...)* null, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !17
// CHECK-32-NEXT: store i8* [[TMP9]], i8** [[DOTTASK_T__ADDR_I]], align 4, !noalias !17
// CHECK-32-NEXT: store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 4, !noalias !17
// CHECK-32-NEXT: [[TMP10:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 4, !noalias !17
// CHECK-32-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP10]], i32 0, i32 0
// CHECK-32-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
// CHECK-32-NEXT: [[TMP13:%.*]] = sext i32 [[TMP12]] to i64
// CHECK-32-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 0
// CHECK-32-NEXT: store i32 2, i32* [[TMP14]], align 4, !noalias !17
// CHECK-32-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 1
// CHECK-32-NEXT: store i32 0, i32* [[TMP15]], align 4, !noalias !17
// CHECK-32-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 2
// CHECK-32-NEXT: store i8** null, i8*** [[TMP16]], align 4, !noalias !17
// CHECK-32-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 3
// CHECK-32-NEXT: store i8** null, i8*** [[TMP17]], align 4, !noalias !17
// CHECK-32-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 4
// CHECK-32-NEXT: store i64* null, i64** [[TMP18]], align 4, !noalias !17
// CHECK-32-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 5
// CHECK-32-NEXT: store i64* null, i64** [[TMP19]], align 4, !noalias !17
// CHECK-32-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 6
// CHECK-32-NEXT: store i8** null, i8*** [[TMP20]], align 4, !noalias !17
// CHECK-32-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 7
// CHECK-32-NEXT: store i8** null, i8*** [[TMP21]], align 4, !noalias !17
// CHECK-32-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 8
// CHECK-32-NEXT: store i64 10, i64* [[TMP22]], align 8, !noalias !17
// CHECK-32-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 9
// CHECK-32-NEXT: store i64 0, i64* [[TMP23]], align 8, !noalias !17
// CHECK-32-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 10
// CHECK-32-NEXT: store [3 x i32] zeroinitializer, [3 x i32]* [[TMP24]], align 4, !noalias !17
// CHECK-32-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 11
// CHECK-32-NEXT: store [3 x i32] zeroinitializer, [3 x i32]* [[TMP25]], align 4, !noalias !17
// CHECK-32-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 12
// CHECK-32-NEXT: store i32 0, i32* [[TMP26]], align 4, !noalias !17
// CHECK-32-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB3]], i64 [[TMP13]], i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l66.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]])
// CHECK-32-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0
// CHECK-32-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__2_EXIT:%.*]]
// CHECK-32: omp_offload.failed.i:
// CHECK-32-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l66() #[[ATTR3]]
// CHECK-32-NEXT: br label [[DOTOMP_OUTLINED__2_EXIT]]
// CHECK-32: .omp_outlined..2.exit:
// CHECK-32-NEXT: ret i32 0
// CHECK-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l76
// CHECK-32-SAME: (i32* noundef [[PLOCAL:%.*]], i32 noundef [[GLOBAL:%.*]]) #[[ATTR2]] {
// CHECK-32-NEXT: entry:
// CHECK-32-NEXT: [[PLOCAL_ADDR:%.*]] = alloca i32*, align 4
// CHECK-32-NEXT: [[GLOBAL_ADDR:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[GLOBAL_CASTED:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: store i32* [[PLOCAL]], i32** [[PLOCAL_ADDR]], align 4
// CHECK-32-NEXT: store i32 [[GLOBAL]], i32* [[GLOBAL_ADDR]], align 4
// CHECK-32-NEXT: [[TMP0:%.*]] = load i32*, i32** [[PLOCAL_ADDR]], align 4
// CHECK-32-NEXT: [[TMP1:%.*]] = load i32, i32* [[GLOBAL_ADDR]], align 4
// CHECK-32-NEXT: store i32 [[TMP1]], i32* [[GLOBAL_CASTED]], align 4
// CHECK-32-NEXT: [[TMP2:%.*]] = load i32, i32* [[GLOBAL_CASTED]], align 4
// CHECK-32-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32* [[TMP0]], i32 [[TMP2]])
// CHECK-32-NEXT: ret void
// CHECK-32-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK-32-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef [[PLOCAL:%.*]], i32 noundef [[GLOBAL:%.*]]) #[[ATTR2]] {
// CHECK-32-NEXT: entry:
// CHECK-32-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK-32-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK-32-NEXT: [[PLOCAL_ADDR:%.*]] = alloca i32*, align 4
// CHECK-32-NEXT: [[GLOBAL_ADDR:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[GLOBAL_CASTED:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK-32-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK-32-NEXT: store i32* [[PLOCAL]], i32** [[PLOCAL_ADDR]], align 4
// CHECK-32-NEXT: store i32 [[GLOBAL]], i32* [[GLOBAL_ADDR]], align 4
// CHECK-32-NEXT: [[TMP0:%.*]] = load i32*, i32** [[PLOCAL_ADDR]], align 4
// CHECK-32-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK-32-NEXT: store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK-32-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK-32-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
// CHECK-32-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK-32-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK-32-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK-32-NEXT: store i32 0, i32* [[I]], align 4
// CHECK-32-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK-32-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
// CHECK-32-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK-32: omp.precond.then:
// CHECK-32-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK-32-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK-32-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK-32-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK-32-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK-32-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK-32-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK-32-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK-32-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK-32-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK-32-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
// CHECK-32-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK-32: cond.true:
// CHECK-32-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK-32-NEXT: br label [[COND_END:%.*]]
// CHECK-32: cond.false:
// CHECK-32-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK-32-NEXT: br label [[COND_END]]
// CHECK-32: cond.end:
// CHECK-32-NEXT: [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
// CHECK-32-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK-32-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK-32-NEXT: store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
// CHECK-32-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK-32: omp.inner.for.cond:
// CHECK-32-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK-32-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK-32-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
// CHECK-32-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK-32: omp.inner.for.body:
// CHECK-32-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK-32-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK-32-NEXT: [[TMP16:%.*]] = load i32*, i32** [[PLOCAL_ADDR]], align 4
// CHECK-32-NEXT: [[TMP17:%.*]] = load i32, i32* [[GLOBAL_ADDR]], align 4
// CHECK-32-NEXT: store i32 [[TMP17]], i32* [[GLOBAL_CASTED]], align 4
// CHECK-32-NEXT: [[TMP18:%.*]] = load i32, i32* [[GLOBAL_CASTED]], align 4
// CHECK-32-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32 [[TMP14]], i32 [[TMP15]], i32* [[TMP16]], i32 [[TMP18]])
// CHECK-32-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK-32: omp.inner.for.inc:
// CHECK-32-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK-32-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK-32-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
// CHECK-32-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK-32-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK-32: omp.inner.for.end:
// CHECK-32-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK-32: omp.loop.exit:
// CHECK-32-NEXT: [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK-32-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
// CHECK-32-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP22]])
// CHECK-32-NEXT: br label [[OMP_PRECOND_END]]
// CHECK-32: omp.precond.end:
// CHECK-32-NEXT: ret void
// CHECK-32-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK-32-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef [[PLOCAL:%.*]], i32 noundef [[GLOBAL:%.*]]) #[[ATTR2]] {
// CHECK-32-NEXT: entry:
// CHECK-32-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK-32-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK-32-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[PLOCAL_ADDR:%.*]] = alloca i32*, align 4
// CHECK-32-NEXT: [[GLOBAL_ADDR:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK-32-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK-32-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK-32-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK-32-NEXT: store i32* [[PLOCAL]], i32** [[PLOCAL_ADDR]], align 4
// CHECK-32-NEXT: store i32 [[GLOBAL]], i32* [[GLOBAL_ADDR]], align 4
// CHECK-32-NEXT: [[TMP0:%.*]] = load i32*, i32** [[PLOCAL_ADDR]], align 4
// CHECK-32-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK-32-NEXT: store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK-32-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK-32-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
// CHECK-32-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK-32-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK-32-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK-32-NEXT: store i32 0, i32* [[I]], align 4
// CHECK-32-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK-32-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
// CHECK-32-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK-32: omp.precond.then:
// CHECK-32-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK-32-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK-32-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
// CHECK-32-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK-32-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK-32-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_LB]], align 4
// CHECK-32-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4
// CHECK-32-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK-32-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK-32-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK-32-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// CHECK-32-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK-32-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK-32-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK-32-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
// CHECK-32-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK-32: cond.true:
// CHECK-32-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK-32-NEXT: br label [[COND_END:%.*]]
// CHECK-32: cond.false:
// CHECK-32-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK-32-NEXT: br label [[COND_END]]
// CHECK-32: cond.end:
// CHECK-32-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK-32-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK-32-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK-32-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// CHECK-32-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK-32: omp.inner.for.cond:
// CHECK-32-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK-32-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK-32-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
// CHECK-32-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK-32: omp.inner.for.body:
// CHECK-32-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK-32-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP16]], 1
// CHECK-32-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-32-NEXT: store i32 [[ADD]], i32* [[I3]], align 4
// CHECK-32-NEXT: [[TMP17:%.*]] = load i32, i32* [[GLOBAL_ADDR]], align 4
// CHECK-32-NEXT: [[TMP18:%.*]] = load i32*, i32** [[PLOCAL_ADDR]], align 4
// CHECK-32-NEXT: store i32 [[TMP17]], i32* [[TMP18]], align 4
// CHECK-32-NEXT: [[TMP19:%.*]] = load i32, i32* [[GLOBAL_ADDR]], align 4
// CHECK-32-NEXT: store i32 [[TMP19]], i32* @_ZZ3fooiE6local1, align 4
// CHECK-32-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK-32: omp.body.continue:
// CHECK-32-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK-32: omp.inner.for.inc:
// CHECK-32-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK-32-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP20]], 1
// CHECK-32-NEXT: store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4
// CHECK-32-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK-32: omp.inner.for.end:
// CHECK-32-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK-32: omp.loop.exit:
// CHECK-32-NEXT: [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK-32-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
// CHECK-32-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP22]])
// CHECK-32-NEXT: br label [[OMP_PRECOND_END]]
// CHECK-32: omp.precond.end:
// CHECK-32-NEXT: ret void
// CHECK-32-LABEL: define {{[^@]+}}@.omp_task_privates_map.
// CHECK-32-SAME: (%struct..kmp_privates.t* noalias noundef [[TMP0:%.*]], i32*** noalias noundef [[TMP1:%.*]], i32** noalias noundef [[TMP2:%.*]], [2 x i8*]** noalias noundef [[TMP3:%.*]], [2 x i8*]** noalias noundef [[TMP4:%.*]], [2 x i64]** noalias noundef [[TMP5:%.*]]) #[[ATTR7:[0-9]+]] {
// CHECK-32-NEXT: entry:
// CHECK-32-NEXT: [[DOTADDR:%.*]] = alloca %struct..kmp_privates.t*, align 4
// CHECK-32-NEXT: [[DOTADDR1:%.*]] = alloca i32***, align 4
// CHECK-32-NEXT: [[DOTADDR2:%.*]] = alloca i32**, align 4
// CHECK-32-NEXT: [[DOTADDR3:%.*]] = alloca [2 x i8*]**, align 4
// CHECK-32-NEXT: [[DOTADDR4:%.*]] = alloca [2 x i8*]**, align 4
// CHECK-32-NEXT: [[DOTADDR5:%.*]] = alloca [2 x i64]**, align 4
// CHECK-32-NEXT: store %struct..kmp_privates.t* [[TMP0]], %struct..kmp_privates.t** [[DOTADDR]], align 4
// CHECK-32-NEXT: store i32*** [[TMP1]], i32**** [[DOTADDR1]], align 4
// CHECK-32-NEXT: store i32** [[TMP2]], i32*** [[DOTADDR2]], align 4
// CHECK-32-NEXT: store [2 x i8*]** [[TMP3]], [2 x i8*]*** [[DOTADDR3]], align 4
// CHECK-32-NEXT: store [2 x i8*]** [[TMP4]], [2 x i8*]*** [[DOTADDR4]], align 4
// CHECK-32-NEXT: store [2 x i64]** [[TMP5]], [2 x i64]*** [[DOTADDR5]], align 4
// CHECK-32-NEXT: [[TMP6:%.*]] = load %struct..kmp_privates.t*, %struct..kmp_privates.t** [[DOTADDR]], align 4
// CHECK-32-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP6]], i32 0, i32 0
// CHECK-32-NEXT: [[TMP8:%.*]] = load [2 x i64]**, [2 x i64]*** [[DOTADDR5]], align 4
// CHECK-32-NEXT: store [2 x i64]* [[TMP7]], [2 x i64]** [[TMP8]], align 4
// CHECK-32-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP6]], i32 0, i32 1
// CHECK-32-NEXT: [[TMP10:%.*]] = load i32***, i32**** [[DOTADDR1]], align 4
// CHECK-32-NEXT: store i32** [[TMP9]], i32*** [[TMP10]], align 4
// CHECK-32-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP6]], i32 0, i32 2
// CHECK-32-NEXT: [[TMP12:%.*]] = load i32**, i32*** [[DOTADDR2]], align 4
// CHECK-32-NEXT: store i32* [[TMP11]], i32** [[TMP12]], align 4
// CHECK-32-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP6]], i32 0, i32 3
// CHECK-32-NEXT: [[TMP14:%.*]] = load [2 x i8*]**, [2 x i8*]*** [[DOTADDR3]], align 4
// CHECK-32-NEXT: store [2 x i8*]* [[TMP13]], [2 x i8*]** [[TMP14]], align 4
// CHECK-32-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP6]], i32 0, i32 4
// CHECK-32-NEXT: [[TMP16:%.*]] = load [2 x i8*]**, [2 x i8*]*** [[DOTADDR4]], align 4
// CHECK-32-NEXT: store [2 x i8*]* [[TMP15]], [2 x i8*]** [[TMP16]], align 4
// CHECK-32-NEXT: ret void
// CHECK-32-LABEL: define {{[^@]+}}@.omp_task_entry..6
// CHECK-32-SAME: (i32 noundef [[TMP0:%.*]], %struct.kmp_task_t_with_privates.1* noalias noundef [[TMP1:%.*]]) #[[ATTR4]] {
// CHECK-32-NEXT: entry:
// CHECK-32-NEXT: [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 4
// CHECK-32-NEXT: [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 4
// CHECK-32-NEXT: [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 4
// CHECK-32-NEXT: [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 4
// CHECK-32-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon.0*, align 4
// CHECK-32-NEXT: [[DOTFIRSTPRIV_PTR_ADDR_I:%.*]] = alloca i32**, align 4
// CHECK-32-NEXT: [[DOTFIRSTPRIV_PTR_ADDR1_I:%.*]] = alloca i32*, align 4
// CHECK-32-NEXT: [[DOTFIRSTPRIV_PTR_ADDR2_I:%.*]] = alloca [2 x i8*]*, align 4
// CHECK-32-NEXT: [[DOTFIRSTPRIV_PTR_ADDR3_I:%.*]] = alloca [2 x i8*]*, align 4
// CHECK-32-NEXT: [[DOTFIRSTPRIV_PTR_ADDR4_I:%.*]] = alloca [2 x i64]*, align 4
// CHECK-32-NEXT: [[DOTCAPTURE_EXPR__I:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[DOTCAPTURE_EXPR_5_I:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[GLOBAL_CASTED_I:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[KERNEL_ARGS_I:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK-32-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates.1*, align 4
// CHECK-32-NEXT: store i32 [[TMP0]], i32* [[DOTADDR]], align 4
// CHECK-32-NEXT: store %struct.kmp_task_t_with_privates.1* [[TMP1]], %struct.kmp_task_t_with_privates.1** [[DOTADDR1]], align 4
// CHECK-32-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
// CHECK-32-NEXT: [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates.1*, %struct.kmp_task_t_with_privates.1** [[DOTADDR1]], align 4
// CHECK-32-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_1:%.*]], %struct.kmp_task_t_with_privates.1* [[TMP3]], i32 0, i32 0
// CHECK-32-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
// CHECK-32-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
// CHECK-32-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
// CHECK-32-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon.0*
// CHECK-32-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_1]], %struct.kmp_task_t_with_privates.1* [[TMP3]], i32 0, i32 1
// CHECK-32-NEXT: [[TMP10:%.*]] = bitcast %struct..kmp_privates.t* [[TMP9]] to i8*
// CHECK-32-NEXT: [[TMP11:%.*]] = bitcast %struct.kmp_task_t_with_privates.1* [[TMP3]] to i8*
// CHECK-32-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META18:![0-9]+]])
// CHECK-32-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META21:![0-9]+]])
// CHECK-32-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META23:![0-9]+]])
// CHECK-32-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META25:![0-9]+]])
// CHECK-32-NEXT: store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !27
// CHECK-32-NEXT: store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 4, !noalias !27
// CHECK-32-NEXT: store i8* [[TMP10]], i8** [[DOTPRIVATES__ADDR_I]], align 4, !noalias !27
// CHECK-32-NEXT: store void (i8*, ...)* bitcast (void (%struct..kmp_privates.t*, i32***, i32**, [2 x i8*]**, [2 x i8*]**, [2 x i64]**)* @.omp_task_privates_map. to void (i8*, ...)*), void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !27
// CHECK-32-NEXT: store i8* [[TMP11]], i8** [[DOTTASK_T__ADDR_I]], align 4, !noalias !27
// CHECK-32-NEXT: store %struct.anon.0* [[TMP8]], %struct.anon.0** [[__CONTEXT_ADDR_I]], align 4, !noalias !27
// CHECK-32-NEXT: [[TMP12:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR_I]], align 4, !noalias !27
2049 // CHECK-32-NEXT: [[TMP13:%.*]] = load void (i8*, ...)*, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !27
2050 // CHECK-32-NEXT: [[TMP14:%.*]] = load i8*, i8** [[DOTPRIVATES__ADDR_I]], align 4, !noalias !27
2051 // CHECK-32-NEXT: [[TMP15:%.*]] = bitcast void (i8*, ...)* [[TMP13]] to void (i8*, i32***, i32**, [2 x i8*]**, [2 x i8*]**, [2 x i64]**)*
2052 // CHECK-32-NEXT: call void [[TMP15]](i8* [[TMP14]], i32*** [[DOTFIRSTPRIV_PTR_ADDR_I]], i32** [[DOTFIRSTPRIV_PTR_ADDR1_I]], [2 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR2_I]], [2 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR3_I]], [2 x i64]** [[DOTFIRSTPRIV_PTR_ADDR4_I]]) #[[ATTR3]]
2053 // CHECK-32-NEXT: [[TMP16:%.*]] = load i32**, i32*** [[DOTFIRSTPRIV_PTR_ADDR_I]], align 4, !noalias !27
2054 // CHECK-32-NEXT: [[TMP17:%.*]] = load i32*, i32** [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 4, !noalias !27
2055 // CHECK-32-NEXT: [[TMP18:%.*]] = load [2 x i8*]*, [2 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR2_I]], align 4, !noalias !27
2056 // CHECK-32-NEXT: [[TMP19:%.*]] = load [2 x i8*]*, [2 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR3_I]], align 4, !noalias !27
2057 // CHECK-32-NEXT: [[TMP20:%.*]] = load [2 x i64]*, [2 x i64]** [[DOTFIRSTPRIV_PTR_ADDR4_I]], align 4, !noalias !27
2058 // CHECK-32-NEXT: [[TMP21:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP18]], i32 0, i32 0
2059 // CHECK-32-NEXT: [[TMP22:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP19]], i32 0, i32 0
2060 // CHECK-32-NEXT: [[TMP23:%.*]] = getelementptr inbounds [2 x i64], [2 x i64]* [[TMP20]], i32 0, i32 0
2061 // CHECK-32-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_ANON_0:%.*]], %struct.anon.0* [[TMP12]], i32 0, i32 2
2062 // CHECK-32-NEXT: [[TMP25:%.*]] = load i32, i32* [[TMP24]], align 4
2063 // CHECK-32-NEXT: [[TMP26:%.*]] = sext i32 [[TMP25]] to i64
2064 // CHECK-32-NEXT: [[TMP27:%.*]] = load i32*, i32** [[TMP16]], align 4
2065 // CHECK-32-NEXT: [[TMP28:%.*]] = load i32, i32* [[TMP27]], align 4
2066 // CHECK-32-NEXT: store i32 [[TMP28]], i32* [[DOTCAPTURE_EXPR__I]], align 4, !noalias !27
2067 // CHECK-32-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__I]], align 4, !noalias !27
2068 // CHECK-32-NEXT: [[SUB6_I:%.*]] = sub nsw i32 [[TMP29]], 1
2069 // CHECK-32-NEXT: store i32 [[SUB6_I]], i32* [[DOTCAPTURE_EXPR_5_I]], align 4, !noalias !27
2070 // CHECK-32-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_5_I]], align 4, !noalias !27
2071 // CHECK-32-NEXT: [[ADD_I:%.*]] = add nsw i32 [[TMP30]], 1
2072 // CHECK-32-NEXT: [[TMP31:%.*]] = zext i32 [[ADD_I]] to i64
2073 // CHECK-32-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 0
2074 // CHECK-32-NEXT: store i32 2, i32* [[TMP32]], align 4, !noalias !27
2075 // CHECK-32-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 1
2076 // CHECK-32-NEXT: store i32 2, i32* [[TMP33]], align 4, !noalias !27
2077 // CHECK-32-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 2
2078 // CHECK-32-NEXT: store i8** [[TMP21]], i8*** [[TMP34]], align 4, !noalias !27
2079 // CHECK-32-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 3
2080 // CHECK-32-NEXT: store i8** [[TMP22]], i8*** [[TMP35]], align 4, !noalias !27
2081 // CHECK-32-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 4
2082 // CHECK-32-NEXT: store i64* [[TMP23]], i64** [[TMP36]], align 4, !noalias !27
2083 // CHECK-32-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 5
2084 // CHECK-32-NEXT: store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes, i32 0, i32 0), i64** [[TMP37]], align 4, !noalias !27
2085 // CHECK-32-NEXT: [[TMP38:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 6
2086 // CHECK-32-NEXT: store i8** null, i8*** [[TMP38]], align 4, !noalias !27
2087 // CHECK-32-NEXT: [[TMP39:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 7
2088 // CHECK-32-NEXT: store i8** null, i8*** [[TMP39]], align 4, !noalias !27
2089 // CHECK-32-NEXT: [[TMP40:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 8
2090 // CHECK-32-NEXT: store i64 [[TMP31]], i64* [[TMP40]], align 8, !noalias !27
2091 // CHECK-32-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 9
2092 // CHECK-32-NEXT: store i64 0, i64* [[TMP41]], align 8, !noalias !27
2093 // CHECK-32-NEXT: [[TMP42:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 10
2094 // CHECK-32-NEXT: store [3 x i32] zeroinitializer, [3 x i32]* [[TMP42]], align 4, !noalias !27
2095 // CHECK-32-NEXT: [[TMP43:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 11
2096 // CHECK-32-NEXT: store [3 x i32] zeroinitializer, [3 x i32]* [[TMP43]], align 4, !noalias !27
2097 // CHECK-32-NEXT: [[TMP44:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 12
2098 // CHECK-32-NEXT: store i32 0, i32* [[TMP44]], align 4, !noalias !27
2099 // CHECK-32-NEXT: [[TMP45:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB3]], i64 [[TMP26]], i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l76.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]])
2100 // CHECK-32-NEXT: [[TMP46:%.*]] = icmp ne i32 [[TMP45]], 0
2101 // CHECK-32-NEXT: br i1 [[TMP46]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__5_EXIT:%.*]]
2102 // CHECK-32: omp_offload.failed.i:
2103 // CHECK-32-NEXT: [[TMP47:%.*]] = load i32*, i32** [[TMP16]], align 4
2104 // CHECK-32-NEXT: [[TMP48:%.*]] = load i32, i32* @global, align 4, !noalias !27
2105 // CHECK-32-NEXT: store i32 [[TMP48]], i32* [[GLOBAL_CASTED_I]], align 4, !noalias !27
2106 // CHECK-32-NEXT: [[TMP49:%.*]] = load i32, i32* [[GLOBAL_CASTED_I]], align 4, !noalias !27
2107 // CHECK-32-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l76(i32* [[TMP47]], i32 [[TMP49]]) #[[ATTR3]]
2108 // CHECK-32-NEXT: br label [[DOTOMP_OUTLINED__5_EXIT]]
2109 // CHECK-32: .omp_outlined..5.exit:
2110 // CHECK-32-NEXT: ret i32 0
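// Second task emitted for the __Z3fooi_l76 region: .omp_task_privates_map..8
// hands back the two captured pointers and .omp_task_entry..9 invokes the
// host version of the region directly, with no kernel launch.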
2111 // CHECK-32-LABEL: define {{[^@]+}}@.omp_task_privates_map..8
2112 // CHECK-32-SAME: (%struct..kmp_privates.t.3* noalias noundef [[TMP0:%.*]], i32*** noalias noundef [[TMP1:%.*]], i32** noalias noundef [[TMP2:%.*]]) #[[ATTR7]] {
2113 // CHECK-32-NEXT: entry:
2114 // CHECK-32-NEXT: [[DOTADDR:%.*]] = alloca %struct..kmp_privates.t.3*, align 4
2115 // CHECK-32-NEXT: [[DOTADDR1:%.*]] = alloca i32***, align 4
2116 // CHECK-32-NEXT: [[DOTADDR2:%.*]] = alloca i32**, align 4
2117 // CHECK-32-NEXT: store %struct..kmp_privates.t.3* [[TMP0]], %struct..kmp_privates.t.3** [[DOTADDR]], align 4
2118 // CHECK-32-NEXT: store i32*** [[TMP1]], i32**** [[DOTADDR1]], align 4
2119 // CHECK-32-NEXT: store i32** [[TMP2]], i32*** [[DOTADDR2]], align 4
2120 // CHECK-32-NEXT: [[TMP3:%.*]] = load %struct..kmp_privates.t.3*, %struct..kmp_privates.t.3** [[DOTADDR]], align 4
2121 // CHECK-32-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_3:%.*]], %struct..kmp_privates.t.3* [[TMP3]], i32 0, i32 0
2122 // CHECK-32-NEXT: [[TMP5:%.*]] = load i32***, i32**** [[DOTADDR1]], align 4
2123 // CHECK-32-NEXT: store i32** [[TMP4]], i32*** [[TMP5]], align 4
2124 // CHECK-32-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_3]], %struct..kmp_privates.t.3* [[TMP3]], i32 0, i32 1
2125 // CHECK-32-NEXT: [[TMP7:%.*]] = load i32**, i32*** [[DOTADDR2]], align 4
2126 // CHECK-32-NEXT: store i32* [[TMP6]], i32** [[TMP7]], align 4
2127 // CHECK-32-NEXT: ret void
2128 // CHECK-32-LABEL: define {{[^@]+}}@.omp_task_entry..9
2129 // CHECK-32-SAME: (i32 noundef [[TMP0:%.*]], %struct.kmp_task_t_with_privates.2* noalias noundef [[TMP1:%.*]]) #[[ATTR4]] {
2130 // CHECK-32-NEXT: entry:
2131 // CHECK-32-NEXT: [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
2132 // CHECK-32-NEXT: [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 4
2133 // CHECK-32-NEXT: [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 4
2134 // CHECK-32-NEXT: [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 4
2135 // CHECK-32-NEXT: [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 4
2136 // CHECK-32-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon.0*, align 4
2137 // CHECK-32-NEXT: [[DOTFIRSTPRIV_PTR_ADDR_I:%.*]] = alloca i32**, align 4
2138 // CHECK-32-NEXT: [[DOTFIRSTPRIV_PTR_ADDR1_I:%.*]] = alloca i32*, align 4
2139 // CHECK-32-NEXT: [[GLOBAL_CASTED_I:%.*]] = alloca i32, align 4
2140 // CHECK-32-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4
2141 // CHECK-32-NEXT: [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates.2*, align 4
2142 // CHECK-32-NEXT: store i32 [[TMP0]], i32* [[DOTADDR]], align 4
2143 // CHECK-32-NEXT: store %struct.kmp_task_t_with_privates.2* [[TMP1]], %struct.kmp_task_t_with_privates.2** [[DOTADDR1]], align 4
2144 // CHECK-32-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
2145 // CHECK-32-NEXT: [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates.2*, %struct.kmp_task_t_with_privates.2** [[DOTADDR1]], align 4
2146 // CHECK-32-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_2:%.*]], %struct.kmp_task_t_with_privates.2* [[TMP3]], i32 0, i32 0
2147 // CHECK-32-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
2148 // CHECK-32-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
2149 // CHECK-32-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
2150 // CHECK-32-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon.0*
2151 // CHECK-32-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_2]], %struct.kmp_task_t_with_privates.2* [[TMP3]], i32 0, i32 1
2152 // CHECK-32-NEXT: [[TMP10:%.*]] = bitcast %struct..kmp_privates.t.3* [[TMP9]] to i8*
2153 // CHECK-32-NEXT: [[TMP11:%.*]] = bitcast %struct.kmp_task_t_with_privates.2* [[TMP3]] to i8*
2154 // CHECK-32-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META28:![0-9]+]])
2155 // CHECK-32-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META31:![0-9]+]])
2156 // CHECK-32-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META33:![0-9]+]])
2157 // CHECK-32-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META35:![0-9]+]])
2158 // CHECK-32-NEXT: store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !37
2159 // CHECK-32-NEXT: store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 4, !noalias !37
2160 // CHECK-32-NEXT: store i8* [[TMP10]], i8** [[DOTPRIVATES__ADDR_I]], align 4, !noalias !37
2161 // CHECK-32-NEXT: store void (i8*, ...)* bitcast (void (%struct..kmp_privates.t.3*, i32***, i32**)* @.omp_task_privates_map..8 to void (i8*, ...)*), void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !37
2162 // CHECK-32-NEXT: store i8* [[TMP11]], i8** [[DOTTASK_T__ADDR_I]], align 4, !noalias !37
2163 // CHECK-32-NEXT: store %struct.anon.0* [[TMP8]], %struct.anon.0** [[__CONTEXT_ADDR_I]], align 4, !noalias !37
2164 // CHECK-32-NEXT: [[TMP12:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR_I]], align 4, !noalias !37
2165 // CHECK-32-NEXT: [[TMP13:%.*]] = load void (i8*, ...)*, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !37
2166 // CHECK-32-NEXT: [[TMP14:%.*]] = load i8*, i8** [[DOTPRIVATES__ADDR_I]], align 4, !noalias !37
2167 // CHECK-32-NEXT: [[TMP15:%.*]] = bitcast void (i8*, ...)* [[TMP13]] to void (i8*, i32***, i32**)*
2168 // CHECK-32-NEXT: call void [[TMP15]](i8* [[TMP14]], i32*** [[DOTFIRSTPRIV_PTR_ADDR_I]], i32** [[DOTFIRSTPRIV_PTR_ADDR1_I]]) #[[ATTR3]]
2169 // CHECK-32-NEXT: [[TMP16:%.*]] = load i32**, i32*** [[DOTFIRSTPRIV_PTR_ADDR_I]], align 4, !noalias !37
2170 // CHECK-32-NEXT: [[TMP17:%.*]] = load i32*, i32** [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 4, !noalias !37
2171 // CHECK-32-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_ANON_0:%.*]], %struct.anon.0* [[TMP12]], i32 0, i32 2
2172 // CHECK-32-NEXT: [[TMP19:%.*]] = load i32*, i32** [[TMP16]], align 4
2173 // CHECK-32-NEXT: [[TMP20:%.*]] = load i32, i32* @global, align 4, !noalias !37
2174 // CHECK-32-NEXT: store i32 [[TMP20]], i32* [[GLOBAL_CASTED_I]], align 4, !noalias !37
2175 // CHECK-32-NEXT: [[TMP21:%.*]] = load i32, i32* [[GLOBAL_CASTED_I]], align 4, !noalias !37
2176 // CHECK-32-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l76(i32* [[TMP19]], i32 [[TMP21]]) #[[ATTR3]]
2177 // CHECK-32-NEXT: ret i32 0
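// Host-side checks for the __Z3fooi_l83 region: the offload entry forks
// teams, .omp_outlined..10 drives the distribute loop and serializes the
// inner parallel, and .omp_outlined..11 runs the worksharing loop body that
// increments its copy of global.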
2178 // CHECK-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l83
2179 // CHECK-32-SAME: (i32 noundef [[GLOBAL:%.*]]) #[[ATTR2]] {
2180 // CHECK-32-NEXT: entry:
2181 // CHECK-32-NEXT: [[GLOBAL_ADDR:%.*]] = alloca i32, align 4
2182 // CHECK-32-NEXT: [[GLOBAL_CASTED:%.*]] = alloca i32, align 4
2183 // CHECK-32-NEXT: store i32 [[GLOBAL]], i32* [[GLOBAL_ADDR]], align 4
2184 // CHECK-32-NEXT: [[TMP0:%.*]] = load i32, i32* [[GLOBAL_ADDR]], align 4
2185 // CHECK-32-NEXT: store i32 [[TMP0]], i32* [[GLOBAL_CASTED]], align 4
2186 // CHECK-32-NEXT: [[TMP1:%.*]] = load i32, i32* [[GLOBAL_CASTED]], align 4
2187 // CHECK-32-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i32 [[TMP1]])
2188 // CHECK-32-NEXT: ret void
2189 // CHECK-32-LABEL: define {{[^@]+}}@.omp_outlined..10
2190 // CHECK-32-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[GLOBAL:%.*]]) #[[ATTR2]] {
2191 // CHECK-32-NEXT: entry:
2192 // CHECK-32-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
2193 // CHECK-32-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
2194 // CHECK-32-NEXT: [[GLOBAL_ADDR:%.*]] = alloca i32, align 4
2195 // CHECK-32-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
2196 // CHECK-32-NEXT: [[TMP:%.*]] = alloca i32, align 4
2197 // CHECK-32-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
2198 // CHECK-32-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
2199 // CHECK-32-NEXT: [[I:%.*]] = alloca i32, align 4
2200 // CHECK-32-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
2201 // CHECK-32-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
2202 // CHECK-32-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2203 // CHECK-32-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2204 // CHECK-32-NEXT: [[I3:%.*]] = alloca i32, align 4
2205 // CHECK-32-NEXT: [[GLOBAL_CASTED:%.*]] = alloca i32, align 4
2206 // CHECK-32-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4
2207 // CHECK-32-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
2208 // CHECK-32-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
2209 // CHECK-32-NEXT: store i32 [[GLOBAL]], i32* [[GLOBAL_ADDR]], align 4
2210 // CHECK-32-NEXT: [[TMP0:%.*]] = load i32, i32* [[GLOBAL_ADDR]], align 4
2211 // CHECK-32-NEXT: store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4
2212 // CHECK-32-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2213 // CHECK-32-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP1]], 0
2214 // CHECK-32-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
2215 // CHECK-32-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
2216 // CHECK-32-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
2217 // CHECK-32-NEXT: store i32 0, i32* [[I]], align 4
2218 // CHECK-32-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2219 // CHECK-32-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP2]]
2220 // CHECK-32-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
2221 // CHECK-32: omp.precond.then:
2222 // CHECK-32-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
2223 // CHECK-32-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2224 // CHECK-32-NEXT: store i32 [[TMP3]], i32* [[DOTOMP_COMB_UB]], align 4
2225 // CHECK-32-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2226 // CHECK-32-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2227 // CHECK-32-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
2228 // CHECK-32-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
2229 // CHECK-32-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
2230 // CHECK-32-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2231 // CHECK-32-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2232 // CHECK-32-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP6]], [[TMP7]]
2233 // CHECK-32-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2234 // CHECK-32: cond.true:
2235 // CHECK-32-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2236 // CHECK-32-NEXT: br label [[COND_END:%.*]]
2237 // CHECK-32: cond.false:
2238 // CHECK-32-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2239 // CHECK-32-NEXT: br label [[COND_END]]
2240 // CHECK-32: cond.end:
2241 // CHECK-32-NEXT: [[COND:%.*]] = phi i32 [ [[TMP8]], [[COND_TRUE]] ], [ [[TMP9]], [[COND_FALSE]] ]
2242 // CHECK-32-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
2243 // CHECK-32-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
2244 // CHECK-32-NEXT: store i32 [[TMP10]], i32* [[DOTOMP_IV]], align 4
2245 // CHECK-32-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
2246 // CHECK-32: omp.inner.for.cond:
2247 // CHECK-32-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2248 // CHECK-32-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2249 // CHECK-32-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]]
2250 // CHECK-32-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2251 // CHECK-32: omp.inner.for.body:
2252 // CHECK-32-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
2253 // CHECK-32-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2254 // CHECK-32-NEXT: [[TMP15:%.*]] = load i32, i32* [[GLOBAL_ADDR]], align 4
2255 // CHECK-32-NEXT: store i32 [[TMP15]], i32* [[GLOBAL_CASTED]], align 4
2256 // CHECK-32-NEXT: [[TMP16:%.*]] = load i32, i32* [[GLOBAL_CASTED]], align 4
2257 // CHECK-32-NEXT: [[TMP17:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
2258 // CHECK-32-NEXT: [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 4
2259 // CHECK-32-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP18]])
2260 // CHECK-32-NEXT: [[TMP19:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
2261 // CHECK-32-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4
2262 // CHECK-32-NEXT: call void @.omp_outlined..11(i32* [[TMP19]], i32* [[DOTBOUND_ZERO_ADDR]], i32 [[TMP13]], i32 [[TMP14]], i32 [[TMP16]]) #[[ATTR3]]
2263 // CHECK-32-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP18]])
2264 // CHECK-32-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
2265 // CHECK-32: omp.inner.for.inc:
2266 // CHECK-32-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2267 // CHECK-32-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
2268 // CHECK-32-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
2269 // CHECK-32-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
2270 // CHECK-32-NEXT: br label [[OMP_INNER_FOR_COND]]
2271 // CHECK-32: omp.inner.for.end:
2272 // CHECK-32-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
2273 // CHECK-32: omp.loop.exit:
2274 // CHECK-32-NEXT: [[TMP22:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
2275 // CHECK-32-NEXT: [[TMP23:%.*]] = load i32, i32* [[TMP22]], align 4
2276 // CHECK-32-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP23]])
2277 // CHECK-32-NEXT: br label [[OMP_PRECOND_END]]
2278 // CHECK-32: omp.precond.end:
2279 // CHECK-32-NEXT: ret void
2280 // CHECK-32-LABEL: define {{[^@]+}}@.omp_outlined..11
2281 // CHECK-32-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32 noundef [[GLOBAL:%.*]]) #[[ATTR2]] {
2282 // CHECK-32-NEXT: entry:
2283 // CHECK-32-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
2284 // CHECK-32-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
2285 // CHECK-32-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
2286 // CHECK-32-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
2287 // CHECK-32-NEXT: [[GLOBAL_ADDR:%.*]] = alloca i32, align 4
2288 // CHECK-32-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
2289 // CHECK-32-NEXT: [[TMP:%.*]] = alloca i32, align 4
2290 // CHECK-32-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
2291 // CHECK-32-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
2292 // CHECK-32-NEXT: [[I:%.*]] = alloca i32, align 4
2293 // CHECK-32-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
2294 // CHECK-32-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
2295 // CHECK-32-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2296 // CHECK-32-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2297 // CHECK-32-NEXT: [[I3:%.*]] = alloca i32, align 4
2298 // CHECK-32-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
2299 // CHECK-32-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
2300 // CHECK-32-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
2301 // CHECK-32-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
2302 // CHECK-32-NEXT: store i32 [[GLOBAL]], i32* [[GLOBAL_ADDR]], align 4
2303 // CHECK-32-NEXT: [[TMP0:%.*]] = load i32, i32* [[GLOBAL_ADDR]], align 4
2304 // CHECK-32-NEXT: store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4
2305 // CHECK-32-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2306 // CHECK-32-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP1]], 0
2307 // CHECK-32-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
2308 // CHECK-32-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
2309 // CHECK-32-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
2310 // CHECK-32-NEXT: store i32 0, i32* [[I]], align 4
2311 // CHECK-32-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
2312 // CHECK-32-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP2]]
2313 // CHECK-32-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
2314 // CHECK-32: omp.precond.then:
2315 // CHECK-32-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
2316 // CHECK-32-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2317 // CHECK-32-NEXT: store i32 [[TMP3]], i32* [[DOTOMP_UB]], align 4
2318 // CHECK-32-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
2319 // CHECK-32-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
2320 // CHECK-32-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_LB]], align 4
2321 // CHECK-32-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
2322 // CHECK-32-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2323 // CHECK-32-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2324 // CHECK-32-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
2325 // CHECK-32-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
2326 // CHECK-32-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
2327 // CHECK-32-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2328 // CHECK-32-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2329 // CHECK-32-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
2330 // CHECK-32-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2331 // CHECK-32: cond.true:
2332 // CHECK-32-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2333 // CHECK-32-NEXT: br label [[COND_END:%.*]]
2334 // CHECK-32: cond.false:
2335 // CHECK-32-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2336 // CHECK-32-NEXT: br label [[COND_END]]
2337 // CHECK-32: cond.end:
2338 // CHECK-32-NEXT: [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
2339 // CHECK-32-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
2340 // CHECK-32-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2341 // CHECK-32-NEXT: store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
2342 // CHECK-32-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
2343 // CHECK-32: omp.inner.for.cond:
2344 // CHECK-32-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2345 // CHECK-32-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2346 // CHECK-32-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
2347 // CHECK-32-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2348 // CHECK-32: omp.inner.for.body:
2349 // CHECK-32-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2350 // CHECK-32-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1
2351 // CHECK-32-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2352 // CHECK-32-NEXT: store i32 [[ADD]], i32* [[I3]], align 4
2353 // CHECK-32-NEXT: [[TMP16:%.*]] = load i32, i32* [[GLOBAL_ADDR]], align 4
2354 // CHECK-32-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP16]], 1
2355 // CHECK-32-NEXT: store i32 [[ADD6]], i32* [[GLOBAL_ADDR]], align 4
2356 // CHECK-32-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
2357 // CHECK-32: omp.body.continue:
2358 // CHECK-32-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
2359 // CHECK-32: omp.inner.for.inc:
2360 // CHECK-32-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2361 // CHECK-32-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP17]], 1
2362 // CHECK-32-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
2363 // CHECK-32-NEXT: br label [[OMP_INNER_FOR_COND]]
2364 // CHECK-32: omp.inner.for.end:
2365 // CHECK-32-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
2366 // CHECK-32: omp.loop.exit:
2367 // CHECK-32-NEXT: [[TMP18:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
2368 // CHECK-32-NEXT: [[TMP19:%.*]] = load i32, i32* [[TMP18]], align 4
2369 // CHECK-32-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP19]])
2370 // CHECK-32-NEXT: br label [[OMP_PRECOND_END]]
2371 // CHECK-32: omp.precond.end:
2372 // CHECK-32-NEXT: ret void
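// Task emitted for the __Z3fooi_l83 region: .omp_task_privates_map..13
// recovers the single firstprivate (global) and .omp_task_entry..14 passes
// it to the host version of the region directly.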
2373 // CHECK-32-LABEL: define {{[^@]+}}@.omp_task_privates_map..13
2374 // CHECK-32-SAME: (%struct..kmp_privates.t.6* noalias noundef [[TMP0:%.*]], i32** noalias noundef [[TMP1:%.*]]) #[[ATTR7]] {
2375 // CHECK-32-NEXT: entry:
2376 // CHECK-32-NEXT: [[DOTADDR:%.*]] = alloca %struct..kmp_privates.t.6*, align 4
2377 // CHECK-32-NEXT: [[DOTADDR1:%.*]] = alloca i32**, align 4
2378 // CHECK-32-NEXT: store %struct..kmp_privates.t.6* [[TMP0]], %struct..kmp_privates.t.6** [[DOTADDR]], align 4
2379 // CHECK-32-NEXT: store i32** [[TMP1]], i32*** [[DOTADDR1]], align 4
2380 // CHECK-32-NEXT: [[TMP2:%.*]] = load %struct..kmp_privates.t.6*, %struct..kmp_privates.t.6** [[DOTADDR]], align 4
2381 // CHECK-32-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_6:%.*]], %struct..kmp_privates.t.6* [[TMP2]], i32 0, i32 0
2382 // CHECK-32-NEXT: [[TMP4:%.*]] = load i32**, i32*** [[DOTADDR1]], align 4
2383 // CHECK-32-NEXT: store i32* [[TMP3]], i32** [[TMP4]], align 4
2384 // CHECK-32-NEXT: ret void
2385 // CHECK-32-LABEL: define {{[^@]+}}@.omp_task_entry..14
2386 // CHECK-32-SAME: (i32 noundef [[TMP0:%.*]], %struct.kmp_task_t_with_privates.5* noalias noundef [[TMP1:%.*]]) #[[ATTR4]] {
2387 // CHECK-32-NEXT: entry:
2388 // CHECK-32-NEXT: [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
2389 // CHECK-32-NEXT: [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 4
2390 // CHECK-32-NEXT: [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 4
2391 // CHECK-32-NEXT: [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 4
2392 // CHECK-32-NEXT: [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 4
2393 // CHECK-32-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon.4*, align 4
2394 // CHECK-32-NEXT: [[DOTFIRSTPRIV_PTR_ADDR_I:%.*]] = alloca i32*, align 4
2395 // CHECK-32-NEXT: [[GLOBAL_CASTED_I:%.*]] = alloca i32, align 4
2396 // CHECK-32-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4
2397 // CHECK-32-NEXT: [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates.5*, align 4
2398 // CHECK-32-NEXT: store i32 [[TMP0]], i32* [[DOTADDR]], align 4
2399 // CHECK-32-NEXT: store %struct.kmp_task_t_with_privates.5* [[TMP1]], %struct.kmp_task_t_with_privates.5** [[DOTADDR1]], align 4
2400 // CHECK-32-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
2401 // CHECK-32-NEXT: [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates.5*, %struct.kmp_task_t_with_privates.5** [[DOTADDR1]], align 4
2402 // CHECK-32-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_5:%.*]], %struct.kmp_task_t_with_privates.5* [[TMP3]], i32 0, i32 0
2403 // CHECK-32-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
2404 // CHECK-32-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
2405 // CHECK-32-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
2406 // CHECK-32-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon.4*
2407 // CHECK-32-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_5]], %struct.kmp_task_t_with_privates.5* [[TMP3]], i32 0, i32 1
2408 // CHECK-32-NEXT: [[TMP10:%.*]] = bitcast %struct..kmp_privates.t.6* [[TMP9]] to i8*
2409 // CHECK-32-NEXT: [[TMP11:%.*]] = bitcast %struct.kmp_task_t_with_privates.5* [[TMP3]] to i8*
2410 // CHECK-32-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META38:![0-9]+]])
2411 // CHECK-32-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META41:![0-9]+]])
2412 // CHECK-32-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META43:![0-9]+]])
2413 // CHECK-32-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META45:![0-9]+]])
2414 // CHECK-32-NEXT: store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !47
2415 // CHECK-32-NEXT: store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 4, !noalias !47
2416 // CHECK-32-NEXT: store i8* [[TMP10]], i8** [[DOTPRIVATES__ADDR_I]], align 4, !noalias !47
2417 // CHECK-32-NEXT: store void (i8*, ...)* bitcast (void (%struct..kmp_privates.t.6*, i32**)* @.omp_task_privates_map..13 to void (i8*, ...)*), void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !47
2418 // CHECK-32-NEXT: store i8* [[TMP11]], i8** [[DOTTASK_T__ADDR_I]], align 4, !noalias !47
2419 // CHECK-32-NEXT: store %struct.anon.4* [[TMP8]], %struct.anon.4** [[__CONTEXT_ADDR_I]], align 4, !noalias !47
2420 // CHECK-32-NEXT: [[TMP12:%.*]] = load %struct.anon.4*, %struct.anon.4** [[__CONTEXT_ADDR_I]], align 4, !noalias !47
2421 // CHECK-32-NEXT: [[TMP13:%.*]] = load void (i8*, ...)*, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !47
2422 // CHECK-32-NEXT: [[TMP14:%.*]] = load i8*, i8** [[DOTPRIVATES__ADDR_I]], align 4, !noalias !47
2423 // CHECK-32-NEXT: [[TMP15:%.*]] = bitcast void (i8*, ...)* [[TMP13]] to void (i8*, i32**)*
2424 // CHECK-32-NEXT: call void [[TMP15]](i8* [[TMP14]], i32** [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR3]]
2425 // CHECK-32-NEXT: [[TMP16:%.*]] = load i32*, i32** [[DOTFIRSTPRIV_PTR_ADDR_I]], align 4, !noalias !47
2426 // CHECK-32-NEXT: [[TMP17:%.*]] = load i32, i32* [[TMP16]], align 4
2427 // CHECK-32-NEXT: store i32 [[TMP17]], i32* [[GLOBAL_CASTED_I]], align 4, !noalias !47
2428 // CHECK-32-NEXT: [[TMP18:%.*]] = load i32, i32* [[GLOBAL_CASTED_I]], align 4, !noalias !47
2429 // CHECK-32-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l83(i32 [[TMP18]]) #[[ATTR3]]
2430 // CHECK-32-NEXT: ret i32 0
2431 // CHECK-32-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
2432 // CHECK-32-SAME: () #[[ATTR7]] {
2433 // CHECK-32-NEXT: entry:
2434 // CHECK-32-NEXT: call void @__tgt_register_requires(i64 1)
2435 // CHECK-32-NEXT: ret void
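// Device-side (TCHECK) assertions follow: the target versions of the
// offloaded regions, beginning with __Z3fooi_l66, which forks teams over a
// fixed ten-iteration distribute loop.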
2436 // TCHECK-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l66
2437 // TCHECK-64-SAME: () #[[ATTR0:[0-9]+]] {
2438 // TCHECK-64-NEXT: entry:
2439 // TCHECK-64-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
2440 // TCHECK-64-NEXT: ret void
2441 // TCHECK-64-LABEL: define {{[^@]+}}@.omp_outlined.
2442 // TCHECK-64-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
2443 // TCHECK-64-NEXT: entry:
2444 // TCHECK-64-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2445 // TCHECK-64-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2446 // TCHECK-64-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
2447 // TCHECK-64-NEXT: [[TMP:%.*]] = alloca i32, align 4
2448 // TCHECK-64-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
2449 // TCHECK-64-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
2450 // TCHECK-64-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2451 // TCHECK-64-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2452 // TCHECK-64-NEXT: [[I:%.*]] = alloca i32, align 4
2453 // TCHECK-64-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2454 // TCHECK-64-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2455 // TCHECK-64-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
2456 // TCHECK-64-NEXT: store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
2457 // TCHECK-64-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2458 // TCHECK-64-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2459 // TCHECK-64-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2460 // TCHECK-64-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
2461 // TCHECK-64-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
2462 // TCHECK-64-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2463 // TCHECK-64-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
2464 // TCHECK-64-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2465 // TCHECK-64: cond.true:
2466 // TCHECK-64-NEXT: br label [[COND_END:%.*]]
2467 // TCHECK-64: cond.false:
2468 // TCHECK-64-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2469 // TCHECK-64-NEXT: br label [[COND_END]]
2470 // TCHECK-64: cond.end:
2471 // TCHECK-64-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
2472 // TCHECK-64-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
2473 // TCHECK-64-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
2474 // TCHECK-64-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
2475 // TCHECK-64-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
2476 // TCHECK-64: omp.inner.for.cond:
2477 // TCHECK-64-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2478 // TCHECK-64-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2479 // TCHECK-64-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
2480 // TCHECK-64-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2481 // TCHECK-64: omp.inner.for.body:
2482 // TCHECK-64-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
2483 // TCHECK-64-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
2484 // TCHECK-64-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2485 // TCHECK-64-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
2486 // TCHECK-64-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP8]], i64 [[TMP10]])
2487 // TCHECK-64-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
2488 // TCHECK-64: omp.inner.for.inc:
2489 // TCHECK-64-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2490 // TCHECK-64-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
2491 // TCHECK-64-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
2492 // TCHECK-64-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
2493 // TCHECK-64-NEXT: br label [[OMP_INNER_FOR_COND]]
2494 // TCHECK-64: omp.inner.for.end:
2495 // TCHECK-64-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
2496 // TCHECK-64: omp.loop.exit:
2497 // TCHECK-64-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP1]])
2498 // TCHECK-64-NEXT: ret void
2499 // TCHECK-64-LABEL: define {{[^@]+}}@.omp_outlined..1
2500 // TCHECK-64-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR0]] {
2501 // TCHECK-64-NEXT: entry:
2502 // TCHECK-64-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2503 // TCHECK-64-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2504 // TCHECK-64-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
2505 // TCHECK-64-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
2506 // TCHECK-64-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
2507 // TCHECK-64-NEXT: [[TMP:%.*]] = alloca i32, align 4
2508 // TCHECK-64-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
2509 // TCHECK-64-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
2510 // TCHECK-64-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2511 // TCHECK-64-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2512 // TCHECK-64-NEXT: [[I:%.*]] = alloca i32, align 4
2513 // TCHECK-64-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2514 // TCHECK-64-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2515 // TCHECK-64-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
2516 // TCHECK-64-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
2517 // TCHECK-64-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
2518 // TCHECK-64-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4
2519 // TCHECK-64-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
2520 // TCHECK-64-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
2521 // TCHECK-64-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
2522 // TCHECK-64-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
2523 // TCHECK-64-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4
2524 // TCHECK-64-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4
2525 // TCHECK-64-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2526 // TCHECK-64-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2527 // TCHECK-64-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2528 // TCHECK-64-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
2529 // TCHECK-64-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
2530 // TCHECK-64-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2531 // TCHECK-64-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 9
2532 // TCHECK-64-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2533 // TCHECK-64: cond.true:
2534 // TCHECK-64-NEXT: br label [[COND_END:%.*]]
2535 // TCHECK-64: cond.false:
2536 // TCHECK-64-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2537 // TCHECK-64-NEXT: br label [[COND_END]]
2538 // TCHECK-64: cond.end:
2539 // TCHECK-64-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
2540 // TCHECK-64-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
2541 // TCHECK-64-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2542 // TCHECK-64-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4
2543 // TCHECK-64-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
2544 // TCHECK-64: omp.inner.for.cond:
2545 // TCHECK-64-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2546 // TCHECK-64-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2547 // TCHECK-64-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
2548 // TCHECK-64-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2549 // TCHECK-64: omp.inner.for.body:
2550 // TCHECK-64-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2551 // TCHECK-64-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
2552 // TCHECK-64-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2553 // TCHECK-64-NEXT: store i32 [[ADD]], i32* [[I]], align 4
2554 // TCHECK-64-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
2555 // TCHECK-64: omp.body.continue:
2556 // TCHECK-64-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
2557 // TCHECK-64: omp.inner.for.inc:
2558 // TCHECK-64-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2559 // TCHECK-64-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
2560 // TCHECK-64-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4
2561 // TCHECK-64-NEXT: br label [[OMP_INNER_FOR_COND]]
2562 // TCHECK-64: omp.inner.for.end:
2563 // TCHECK-64-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
2564 // TCHECK-64: omp.loop.exit:
2565 // TCHECK-64-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]])
2566 // TCHECK-64-NEXT: ret void
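// Device entry for the __Z3fooi_l76 region: unpacks plocal and the by-value
// global, then forks teams with both captures.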
2567 // TCHECK-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l76
2568 // TCHECK-64-SAME: (i64* noundef [[PLOCAL:%.*]], i64 noundef [[GLOBAL:%.*]]) #[[ATTR0]] {
2569 // TCHECK-64-NEXT: entry:
2570 // TCHECK-64-NEXT: [[PLOCAL_ADDR:%.*]] = alloca i64*, align 8
2571 // TCHECK-64-NEXT: [[GLOBAL_ADDR:%.*]] = alloca i64, align 8
2572 // TCHECK-64-NEXT: [[GLOBAL_CASTED:%.*]] = alloca i64, align 8
2573 // TCHECK-64-NEXT: store i64* [[PLOCAL]], i64** [[PLOCAL_ADDR]], align 8
2574 // TCHECK-64-NEXT: store i64 [[GLOBAL]], i64* [[GLOBAL_ADDR]], align 8
2575 // TCHECK-64-NEXT: [[CONV:%.*]] = bitcast i64* [[GLOBAL_ADDR]] to i32*
2576 // TCHECK-64-NEXT: [[TMP0:%.*]] = load i64*, i64** [[PLOCAL_ADDR]], align 8
2577 // TCHECK-64-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
2578 // TCHECK-64-NEXT: [[CONV1:%.*]] = bitcast i64* [[GLOBAL_CASTED]] to i32*
2579 // TCHECK-64-NEXT: store i32 [[TMP1]], i32* [[CONV1]], align 4
2580 // TCHECK-64-NEXT: [[TMP2:%.*]] = load i64, i64* [[GLOBAL_CASTED]], align 8
2581 // TCHECK-64-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64*, i64)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64* [[TMP0]], i64 [[TMP2]])
2582 // TCHECK-64-NEXT: ret void
2583 // TCHECK-64-LABEL: define {{[^@]+}}@.omp_outlined..2
2584 // TCHECK-64-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64* noundef [[PLOCAL:%.*]], i64 noundef [[GLOBAL:%.*]]) #[[ATTR0]] {
2585 // TCHECK-64-NEXT: entry:
2586 // TCHECK-64-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2587 // TCHECK-64-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2588 // TCHECK-64-NEXT: [[PLOCAL_ADDR:%.*]] = alloca i64*, align 8
2589 // TCHECK-64-NEXT: [[GLOBAL_ADDR:%.*]] = alloca i64, align 8
2590 // TCHECK-64-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
2591 // TCHECK-64-NEXT: [[TMP:%.*]] = alloca i32, align 4
2592 // TCHECK-64-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i64, align 8
2593 // TCHECK-64-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
2594 // TCHECK-64-NEXT: [[I:%.*]] = alloca i32, align 4
2595 // TCHECK-64-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
2596 // TCHECK-64-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
2597 // TCHECK-64-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2598 // TCHECK-64-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2599 // TCHECK-64-NEXT: [[I4:%.*]] = alloca i32, align 4
2600 // TCHECK-64-NEXT: [[GLOBAL_CASTED:%.*]] = alloca i64, align 8
2601 // TCHECK-64-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2602 // TCHECK-64-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2603 // TCHECK-64-NEXT: store i64* [[PLOCAL]], i64** [[PLOCAL_ADDR]], align 8
2604 // TCHECK-64-NEXT: store i64 [[GLOBAL]], i64* [[GLOBAL_ADDR]], align 8
2605 // TCHECK-64-NEXT: [[CONV:%.*]] = bitcast i64* [[GLOBAL_ADDR]] to i32*
2606 // TCHECK-64-NEXT: [[TMP0:%.*]] = load i64*, i64** [[PLOCAL_ADDR]], align 8
2607 // TCHECK-64-NEXT: [[TMP1:%.*]] = load i64, i64* [[TMP0]], align 8
2608 // TCHECK-64-NEXT: store i64 [[TMP1]], i64* [[DOTCAPTURE_EXPR_]], align 8
2609 // TCHECK-64-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_]], align 8
2610 // TCHECK-64-NEXT: [[SUB:%.*]] = sub nsw i64 [[TMP2]], 0
2611 // TCHECK-64-NEXT: [[DIV:%.*]] = sdiv i64 [[SUB]], 1
2612 // TCHECK-64-NEXT: [[CONV2:%.*]] = trunc i64 [[DIV]] to i32
2613 // TCHECK-64-NEXT: [[SUB3:%.*]] = sub nsw i32 [[CONV2]], 1
2614 // TCHECK-64-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_1]], align 4
2615 // TCHECK-64-NEXT: store i32 0, i32* [[I]], align 4
2616 // TCHECK-64-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_]], align 8
2617 // TCHECK-64-NEXT: [[CMP:%.*]] = icmp slt i64 0, [[TMP3]]
2618 // TCHECK-64-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
2619 // TCHECK-64: omp.precond.then:
2620 // TCHECK-64-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
2621 // TCHECK-64-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2622 // TCHECK-64-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_COMB_UB]], align 4
2623 // TCHECK-64-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2624 // TCHECK-64-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2625 // TCHECK-64-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2626 // TCHECK-64-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
2627 // TCHECK-64-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
2628 // TCHECK-64-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2629 // TCHECK-64-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2630 // TCHECK-64-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
2631 // TCHECK-64-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2632 // TCHECK-64: cond.true:
2633 // TCHECK-64-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2634 // TCHECK-64-NEXT: br label [[COND_END:%.*]]
2635 // TCHECK-64: cond.false:
2636 // TCHECK-64-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2637 // TCHECK-64-NEXT: br label [[COND_END]]
2638 // TCHECK-64: cond.end:
2639 // TCHECK-64-NEXT: [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
2640 // TCHECK-64-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
2641 // TCHECK-64-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
2642 // TCHECK-64-NEXT: store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
2643 // TCHECK-64-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
2644 // TCHECK-64: omp.inner.for.cond:
2645 // TCHECK-64-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2646 // TCHECK-64-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2647 // TCHECK-64-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
2648 // TCHECK-64-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2649 // TCHECK-64: omp.inner.for.body:
2650 // TCHECK-64-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
2651 // TCHECK-64-NEXT: [[TMP15:%.*]] = zext i32 [[TMP14]] to i64
2652 // TCHECK-64-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
2653 // TCHECK-64-NEXT: [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
2654 // TCHECK-64-NEXT: [[TMP18:%.*]] = load i64*, i64** [[PLOCAL_ADDR]], align 8
2655 // TCHECK-64-NEXT: [[TMP19:%.*]] = load i32, i32* [[CONV]], align 4
2656 // TCHECK-64-NEXT: [[CONV7:%.*]] = bitcast i64* [[GLOBAL_CASTED]] to i32*
2657 // TCHECK-64-NEXT: store i32 [[TMP19]], i32* [[CONV7]], align 4
2658 // TCHECK-64-NEXT: [[TMP20:%.*]] = load i64, i64* [[GLOBAL_CASTED]], align 8
2659 // TCHECK-64-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64*, i64)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP15]], i64 [[TMP17]], i64* [[TMP18]], i64 [[TMP20]])
2660 // TCHECK-64-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
2661 // TCHECK-64: omp.inner.for.inc:
2662 // TCHECK-64-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2663 // TCHECK-64-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
2664 // TCHECK-64-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
2665 // TCHECK-64-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
2666 // TCHECK-64-NEXT: br label [[OMP_INNER_FOR_COND]]
2667 // TCHECK-64: omp.inner.for.end:
2668 // TCHECK-64-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
2669 // TCHECK-64: omp.loop.exit:
2670 // TCHECK-64-NEXT: [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2671 // TCHECK-64-NEXT: [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
2672 // TCHECK-64-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP24]])
2673 // TCHECK-64-NEXT: br label [[OMP_PRECOND_END]]
2674 // TCHECK-64: omp.precond.end:
2675 // TCHECK-64-NEXT: ret void
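// Inner parallel region of the device __Z3fooi_l76 codegen: the previous
// distribute bounds arrive as i64 and are truncated into the 32-bit
// worksharing bounds.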
2676 // TCHECK-64-LABEL: define {{[^@]+}}@.omp_outlined..3
2677 // TCHECK-64-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i64* noundef [[PLOCAL:%.*]], i64 noundef [[GLOBAL:%.*]]) #[[ATTR0]] {
2678 // TCHECK-64-NEXT: entry:
2679 // TCHECK-64-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
2680 // TCHECK-64-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
2681 // TCHECK-64-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
2682 // TCHECK-64-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
2683 // TCHECK-64-NEXT: [[PLOCAL_ADDR:%.*]] = alloca i64*, align 8
2684 // TCHECK-64-NEXT: [[GLOBAL_ADDR:%.*]] = alloca i64, align 8
2685 // TCHECK-64-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
2686 // TCHECK-64-NEXT: [[TMP:%.*]] = alloca i32, align 4
2687 // TCHECK-64-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i64, align 8
2688 // TCHECK-64-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
2689 // TCHECK-64-NEXT: [[I:%.*]] = alloca i32, align 4
2690 // TCHECK-64-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
2691 // TCHECK-64-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
2692 // TCHECK-64-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2693 // TCHECK-64-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2694 // TCHECK-64-NEXT: [[I6:%.*]] = alloca i32, align 4
2695 // TCHECK-64-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
2696 // TCHECK-64-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
2697 // TCHECK-64-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8
2698 // TCHECK-64-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8
2699 // TCHECK-64-NEXT: store i64* [[PLOCAL]], i64** [[PLOCAL_ADDR]], align 8
2700 // TCHECK-64-NEXT: store i64 [[GLOBAL]], i64* [[GLOBAL_ADDR]], align 8
2701 // TCHECK-64-NEXT: [[CONV:%.*]] = bitcast i64* [[GLOBAL_ADDR]] to i32*
2702 // TCHECK-64-NEXT: [[TMP0:%.*]] = load i64*, i64** [[PLOCAL_ADDR]], align 8
2703 // TCHECK-64-NEXT: [[TMP1:%.*]] = load i64, i64* [[TMP0]], align 8
2704 // TCHECK-64-NEXT: store i64 [[TMP1]], i64* [[DOTCAPTURE_EXPR_]], align 8
2705 // TCHECK-64-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_]], align 8
2706 // TCHECK-64-NEXT: [[SUB:%.*]] = sub nsw i64 [[TMP2]], 0
2707 // TCHECK-64-NEXT: [[DIV:%.*]] = sdiv i64 [[SUB]], 1
2708 // TCHECK-64-NEXT: [[CONV2:%.*]] = trunc i64 [[DIV]] to i32
2709 // TCHECK-64-NEXT: [[SUB3:%.*]] = sub nsw i32 [[CONV2]], 1
2710 // TCHECK-64-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_1]], align 4
2711 // TCHECK-64-NEXT: store i32 0, i32* [[I]], align 4
2712 // TCHECK-64-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_]], align 8
2713 // TCHECK-64-NEXT: [[CMP:%.*]] = icmp slt i64 0, [[TMP3]]
2714 // TCHECK-64-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
2715 // TCHECK-64: omp.precond.then:
2716 // TCHECK-64-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
2717 // TCHECK-64-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2718 // TCHECK-64-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
2719 // TCHECK-64-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8
2720 // TCHECK-64-NEXT: [[CONV4:%.*]] = trunc i64 [[TMP5]] to i32
2721 // TCHECK-64-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8
2722 // TCHECK-64-NEXT: [[CONV5:%.*]] = trunc i64 [[TMP6]] to i32
2723 // TCHECK-64-NEXT: store i32 [[CONV4]], i32* [[DOTOMP_LB]], align 4
2724 // TCHECK-64-NEXT: store i32 [[CONV5]], i32* [[DOTOMP_UB]], align 4
2725 // TCHECK-64-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
2726 // TCHECK-64-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
2727 // TCHECK-64-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2728 // TCHECK-64-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
2729 // TCHECK-64-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
2730 // TCHECK-64-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2731 // TCHECK-64-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2732 // TCHECK-64-NEXT: [[CMP7:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
2733 // TCHECK-64-NEXT: br i1 [[CMP7]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2734 // TCHECK-64: cond.true:
2735 // TCHECK-64-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
2736 // TCHECK-64-NEXT: br label [[COND_END:%.*]]
2737 // TCHECK-64: cond.false:
2738 // TCHECK-64-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2739 // TCHECK-64-NEXT: br label [[COND_END]]
2740 // TCHECK-64: cond.end:
2741 // TCHECK-64-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
2742 // TCHECK-64-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
2743 // TCHECK-64-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
2744 // TCHECK-64-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
2745 // TCHECK-64-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
2746 // TCHECK-64: omp.inner.for.cond:
2747 // TCHECK-64-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2748 // TCHECK-64-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
2749 // TCHECK-64-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
2750 // TCHECK-64-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2751 // TCHECK-64: omp.inner.for.body:
2752 // TCHECK-64-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2753 // TCHECK-64-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP16]], 1
2754 // TCHECK-64-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2755 // TCHECK-64-NEXT: store i32 [[ADD]], i32* [[I6]], align 4
2756 // TCHECK-64-NEXT: [[TMP17:%.*]] = load i32, i32* [[CONV]], align 4
2757 // TCHECK-64-NEXT: [[CONV9:%.*]] = sext i32 [[TMP17]] to i64
2758 // TCHECK-64-NEXT: [[TMP18:%.*]] = load i64*, i64** [[PLOCAL_ADDR]], align 8
2759 // TCHECK-64-NEXT: store i64 [[CONV9]], i64* [[TMP18]], align 8
2760 // TCHECK-64-NEXT: [[TMP19:%.*]] = load i32, i32* [[CONV]], align 4
2761 // TCHECK-64-NEXT: store i32 [[TMP19]], i32* @_ZZ3fooiE6local1, align 4
2762 // TCHECK-64-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
2763 // TCHECK-64: omp.body.continue:
2764 // TCHECK-64-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
2765 // TCHECK-64: omp.inner.for.inc:
2766 // TCHECK-64-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
2767 // TCHECK-64-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP20]], 1
2768 // TCHECK-64-NEXT: store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4
2769 // TCHECK-64-NEXT: br label [[OMP_INNER_FOR_COND]]
2770 // TCHECK-64: omp.inner.for.end:
2771 // TCHECK-64-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
2772 // TCHECK-64: omp.loop.exit:
2773 // TCHECK-64-NEXT: [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
2774 // TCHECK-64-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
2775 // TCHECK-64-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP22]])
2776 // TCHECK-64-NEXT: br label [[OMP_PRECOND_END]]
2777 // TCHECK-64: omp.precond.end:
2778 // TCHECK-64-NEXT: ret void
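// A hedged reading of the device-side checks above (this comment is not part
// of the autogenerated assertions): the teams-level outlined function splits
// the iteration space with __kmpc_for_static_init_4 and forks the parallel
// worksharing loop @.omp_outlined..3, whose body writes `global` through
// `plocal` and into the static local @_ZZ3fooiE6local1. Assuming the names
// match the IR symbols, a source construct of roughly this shape lowers to
// that pattern. This is an illustrative sketch only, kept as a comment so it
// does not change what this test compiles; the real source appears earlier in
// this file:
//
//   int global;
//   int foo(int n) {
//     static int local1;
//     int local = 0;
//     int *plocal = &local;
//   #pragma omp target teams distribute parallel for
//     for (int i = 0; i < *plocal; ++i) {
//       *plocal = global;  // matches the store through [[PLOCAL_ADDR]]
//       local1 = global;   // matches the store to @_ZZ3fooiE6local1
//     }
//     return local1;
//   }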
// TCHECK-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l66
// TCHECK-32-SAME: () #[[ATTR0:[0-9]+]] {
// TCHECK-32-NEXT: entry:
// TCHECK-32-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// TCHECK-32-NEXT: ret void
// TCHECK-32-LABEL: define {{[^@]+}}@.omp_outlined.
// TCHECK-32-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
// TCHECK-32-NEXT: entry:
// TCHECK-32-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// TCHECK-32-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// TCHECK-32-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// TCHECK-32-NEXT: [[TMP:%.*]] = alloca i32, align 4
// TCHECK-32-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// TCHECK-32-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// TCHECK-32-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// TCHECK-32-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// TCHECK-32-NEXT: [[I:%.*]] = alloca i32, align 4
// TCHECK-32-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// TCHECK-32-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// TCHECK-32-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// TCHECK-32-NEXT: store i32 9, i32* [[DOTOMP_COMB_UB]], align 4
// TCHECK-32-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// TCHECK-32-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// TCHECK-32-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// TCHECK-32-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// TCHECK-32-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// TCHECK-32-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// TCHECK-32-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
// TCHECK-32-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// TCHECK-32: cond.true:
// TCHECK-32-NEXT: br label [[COND_END:%.*]]
// TCHECK-32: cond.false:
// TCHECK-32-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// TCHECK-32-NEXT: br label [[COND_END]]
// TCHECK-32: cond.end:
// TCHECK-32-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// TCHECK-32-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// TCHECK-32-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// TCHECK-32-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// TCHECK-32-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// TCHECK-32: omp.inner.for.cond:
// TCHECK-32-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// TCHECK-32-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// TCHECK-32-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// TCHECK-32-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// TCHECK-32: omp.inner.for.body:
// TCHECK-32-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// TCHECK-32-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// TCHECK-32-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP7]], i32 [[TMP8]])
// TCHECK-32-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// TCHECK-32: omp.inner.for.inc:
// TCHECK-32-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// TCHECK-32-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// TCHECK-32-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP9]], [[TMP10]]
// TCHECK-32-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// TCHECK-32-NEXT: br label [[OMP_INNER_FOR_COND]]
// TCHECK-32: omp.inner.for.end:
// TCHECK-32-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// TCHECK-32: omp.loop.exit:
// TCHECK-32-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP1]])
// TCHECK-32-NEXT: ret void
// TCHECK-32-LABEL: define {{[^@]+}}@.omp_outlined..1
// TCHECK-32-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR0]] {
// TCHECK-32-NEXT: entry:
// TCHECK-32-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// TCHECK-32-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// TCHECK-32-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// TCHECK-32-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// TCHECK-32-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// TCHECK-32-NEXT: [[TMP:%.*]] = alloca i32, align 4
// TCHECK-32-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// TCHECK-32-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// TCHECK-32-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// TCHECK-32-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// TCHECK-32-NEXT: [[I:%.*]] = alloca i32, align 4
// TCHECK-32-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// TCHECK-32-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// TCHECK-32-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// TCHECK-32-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// TCHECK-32-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// TCHECK-32-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4
// TCHECK-32-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// TCHECK-32-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// TCHECK-32-NEXT: store i32 [[TMP0]], i32* [[DOTOMP_LB]], align 4
// TCHECK-32-NEXT: store i32 [[TMP1]], i32* [[DOTOMP_UB]], align 4
// TCHECK-32-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// TCHECK-32-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// TCHECK-32-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// TCHECK-32-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// TCHECK-32-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// TCHECK-32-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// TCHECK-32-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 9
// TCHECK-32-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// TCHECK-32: cond.true:
// TCHECK-32-NEXT: br label [[COND_END:%.*]]
// TCHECK-32: cond.false:
// TCHECK-32-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// TCHECK-32-NEXT: br label [[COND_END]]
// TCHECK-32: cond.end:
// TCHECK-32-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// TCHECK-32-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// TCHECK-32-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// TCHECK-32-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4
// TCHECK-32-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// TCHECK-32: omp.inner.for.cond:
// TCHECK-32-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// TCHECK-32-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// TCHECK-32-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
// TCHECK-32-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// TCHECK-32: omp.inner.for.body:
// TCHECK-32-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// TCHECK-32-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
// TCHECK-32-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// TCHECK-32-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// TCHECK-32-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// TCHECK-32: omp.body.continue:
// TCHECK-32-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// TCHECK-32: omp.inner.for.inc:
// TCHECK-32-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// TCHECK-32-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP10]], 1
// TCHECK-32-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4
// TCHECK-32-NEXT: br label [[OMP_INNER_FOR_COND]]
// TCHECK-32: omp.inner.for.end:
// TCHECK-32-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// TCHECK-32: omp.loop.exit:
// TCHECK-32-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]])
// TCHECK-32-NEXT: ret void
// TCHECK-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l76
// TCHECK-32-SAME: (i32* noundef [[PLOCAL:%.*]], i32 noundef [[GLOBAL:%.*]]) #[[ATTR0]] {
// TCHECK-32-NEXT: entry:
// TCHECK-32-NEXT: [[PLOCAL_ADDR:%.*]] = alloca i32*, align 4
// TCHECK-32-NEXT: [[GLOBAL_ADDR:%.*]] = alloca i32, align 4
// TCHECK-32-NEXT: [[GLOBAL_CASTED:%.*]] = alloca i32, align 4
// TCHECK-32-NEXT: store i32* [[PLOCAL]], i32** [[PLOCAL_ADDR]], align 4
// TCHECK-32-NEXT: store i32 [[GLOBAL]], i32* [[GLOBAL_ADDR]], align 4
// TCHECK-32-NEXT: [[TMP0:%.*]] = load i32*, i32** [[PLOCAL_ADDR]], align 4
// TCHECK-32-NEXT: [[TMP1:%.*]] = load i32, i32* [[GLOBAL_ADDR]], align 4
// TCHECK-32-NEXT: store i32 [[TMP1]], i32* [[GLOBAL_CASTED]], align 4
// TCHECK-32-NEXT: [[TMP2:%.*]] = load i32, i32* [[GLOBAL_CASTED]], align 4
// TCHECK-32-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32* [[TMP0]], i32 [[TMP2]])
// TCHECK-32-NEXT: ret void
// TCHECK-32-LABEL: define {{[^@]+}}@.omp_outlined..2
// TCHECK-32-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef [[PLOCAL:%.*]], i32 noundef [[GLOBAL:%.*]]) #[[ATTR0]] {
// TCHECK-32-NEXT: entry:
// TCHECK-32-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// TCHECK-32-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// TCHECK-32-NEXT: [[PLOCAL_ADDR:%.*]] = alloca i32*, align 4
// TCHECK-32-NEXT: [[GLOBAL_ADDR:%.*]] = alloca i32, align 4
// TCHECK-32-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// TCHECK-32-NEXT: [[TMP:%.*]] = alloca i32, align 4
// TCHECK-32-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// TCHECK-32-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// TCHECK-32-NEXT: [[I:%.*]] = alloca i32, align 4
// TCHECK-32-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// TCHECK-32-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// TCHECK-32-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// TCHECK-32-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// TCHECK-32-NEXT: [[I3:%.*]] = alloca i32, align 4
// TCHECK-32-NEXT: [[GLOBAL_CASTED:%.*]] = alloca i32, align 4
// TCHECK-32-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// TCHECK-32-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// TCHECK-32-NEXT: store i32* [[PLOCAL]], i32** [[PLOCAL_ADDR]], align 4
// TCHECK-32-NEXT: store i32 [[GLOBAL]], i32* [[GLOBAL_ADDR]], align 4
// TCHECK-32-NEXT: [[TMP0:%.*]] = load i32*, i32** [[PLOCAL_ADDR]], align 4
// TCHECK-32-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// TCHECK-32-NEXT: store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
// TCHECK-32-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// TCHECK-32-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
// TCHECK-32-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// TCHECK-32-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// TCHECK-32-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// TCHECK-32-NEXT: store i32 0, i32* [[I]], align 4
// TCHECK-32-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// TCHECK-32-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
// TCHECK-32-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// TCHECK-32: omp.precond.then:
// TCHECK-32-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// TCHECK-32-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// TCHECK-32-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_COMB_UB]], align 4
// TCHECK-32-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// TCHECK-32-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// TCHECK-32-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// TCHECK-32-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// TCHECK-32-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// TCHECK-32-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// TCHECK-32-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// TCHECK-32-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
// TCHECK-32-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// TCHECK-32: cond.true:
// TCHECK-32-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// TCHECK-32-NEXT: br label [[COND_END:%.*]]
// TCHECK-32: cond.false:
// TCHECK-32-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// TCHECK-32-NEXT: br label [[COND_END]]
// TCHECK-32: cond.end:
// TCHECK-32-NEXT: [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
// TCHECK-32-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// TCHECK-32-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// TCHECK-32-NEXT: store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
// TCHECK-32-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// TCHECK-32: omp.inner.for.cond:
// TCHECK-32-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// TCHECK-32-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// TCHECK-32-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
// TCHECK-32-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// TCHECK-32: omp.inner.for.body:
// TCHECK-32-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// TCHECK-32-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// TCHECK-32-NEXT: [[TMP16:%.*]] = load i32*, i32** [[PLOCAL_ADDR]], align 4
// TCHECK-32-NEXT: [[TMP17:%.*]] = load i32, i32* [[GLOBAL_ADDR]], align 4
// TCHECK-32-NEXT: store i32 [[TMP17]], i32* [[GLOBAL_CASTED]], align 4
// TCHECK-32-NEXT: [[TMP18:%.*]] = load i32, i32* [[GLOBAL_CASTED]], align 4
// TCHECK-32-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP14]], i32 [[TMP15]], i32* [[TMP16]], i32 [[TMP18]])
// TCHECK-32-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// TCHECK-32: omp.inner.for.inc:
// TCHECK-32-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// TCHECK-32-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// TCHECK-32-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
// TCHECK-32-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// TCHECK-32-NEXT: br label [[OMP_INNER_FOR_COND]]
// TCHECK-32: omp.inner.for.end:
// TCHECK-32-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// TCHECK-32: omp.loop.exit:
// TCHECK-32-NEXT: [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// TCHECK-32-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
// TCHECK-32-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP22]])
// TCHECK-32-NEXT: br label [[OMP_PRECOND_END]]
// TCHECK-32: omp.precond.end:
// TCHECK-32-NEXT: ret void
// TCHECK-32-LABEL: define {{[^@]+}}@.omp_outlined..3
// TCHECK-32-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef [[PLOCAL:%.*]], i32 noundef [[GLOBAL:%.*]]) #[[ATTR0]] {
// TCHECK-32-NEXT: entry:
// TCHECK-32-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// TCHECK-32-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// TCHECK-32-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// TCHECK-32-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// TCHECK-32-NEXT: [[PLOCAL_ADDR:%.*]] = alloca i32*, align 4
// TCHECK-32-NEXT: [[GLOBAL_ADDR:%.*]] = alloca i32, align 4
// TCHECK-32-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// TCHECK-32-NEXT: [[TMP:%.*]] = alloca i32, align 4
// TCHECK-32-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// TCHECK-32-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// TCHECK-32-NEXT: [[I:%.*]] = alloca i32, align 4
// TCHECK-32-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// TCHECK-32-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// TCHECK-32-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// TCHECK-32-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// TCHECK-32-NEXT: [[I3:%.*]] = alloca i32, align 4
// TCHECK-32-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// TCHECK-32-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// TCHECK-32-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// TCHECK-32-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// TCHECK-32-NEXT: store i32* [[PLOCAL]], i32** [[PLOCAL_ADDR]], align 4
// TCHECK-32-NEXT: store i32 [[GLOBAL]], i32* [[GLOBAL_ADDR]], align 4
// TCHECK-32-NEXT: [[TMP0:%.*]] = load i32*, i32** [[PLOCAL_ADDR]], align 4
// TCHECK-32-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// TCHECK-32-NEXT: store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR_]], align 4
// TCHECK-32-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// TCHECK-32-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
// TCHECK-32-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// TCHECK-32-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// TCHECK-32-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// TCHECK-32-NEXT: store i32 0, i32* [[I]], align 4
// TCHECK-32-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// TCHECK-32-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
// TCHECK-32-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// TCHECK-32: omp.precond.then:
// TCHECK-32-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// TCHECK-32-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// TCHECK-32-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4
// TCHECK-32-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// TCHECK-32-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// TCHECK-32-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_LB]], align 4
// TCHECK-32-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4
// TCHECK-32-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// TCHECK-32-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// TCHECK-32-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// TCHECK-32-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// TCHECK-32-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// TCHECK-32-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// TCHECK-32-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// TCHECK-32-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
// TCHECK-32-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// TCHECK-32: cond.true:
// TCHECK-32-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// TCHECK-32-NEXT: br label [[COND_END:%.*]]
// TCHECK-32: cond.false:
// TCHECK-32-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// TCHECK-32-NEXT: br label [[COND_END]]
// TCHECK-32: cond.end:
// TCHECK-32-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// TCHECK-32-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// TCHECK-32-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// TCHECK-32-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// TCHECK-32-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// TCHECK-32: omp.inner.for.cond:
// TCHECK-32-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// TCHECK-32-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// TCHECK-32-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
// TCHECK-32-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// TCHECK-32: omp.inner.for.body:
// TCHECK-32-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// TCHECK-32-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP16]], 1
// TCHECK-32-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// TCHECK-32-NEXT: store i32 [[ADD]], i32* [[I3]], align 4
// TCHECK-32-NEXT: [[TMP17:%.*]] = load i32, i32* [[GLOBAL_ADDR]], align 4
// TCHECK-32-NEXT: [[TMP18:%.*]] = load i32*, i32** [[PLOCAL_ADDR]], align 4
// TCHECK-32-NEXT: store i32 [[TMP17]], i32* [[TMP18]], align 4
// TCHECK-32-NEXT: [[TMP19:%.*]] = load i32, i32* [[GLOBAL_ADDR]], align 4
// TCHECK-32-NEXT: store i32 [[TMP19]], i32* @_ZZ3fooiE6local1, align 4
// TCHECK-32-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// TCHECK-32: omp.body.continue:
// TCHECK-32-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// TCHECK-32: omp.inner.for.inc:
// TCHECK-32-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// TCHECK-32-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP20]], 1
// TCHECK-32-NEXT: store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4
// TCHECK-32-NEXT: br label [[OMP_INNER_FOR_COND]]
// TCHECK-32: omp.inner.for.end:
// TCHECK-32-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// TCHECK-32: omp.loop.exit:
// TCHECK-32-NEXT: [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// TCHECK-32-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
// TCHECK-32-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP22]])
// TCHECK-32-NEXT: br label [[OMP_PRECOND_END]]
// TCHECK-32: omp.precond.end:
// TCHECK-32-NEXT: ret void
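// Note (not autogenerated): the TCHECK-32 sequence above mirrors the 64-bit
// checks one-for-one; only the target word size differs, so pointers and the
// forwarded loop bounds are i32 with align 4 instead of i64 with align 8, and
// the trunc/sext conversions present in the 64-bit outlined functions do not
// appear.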
// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l66
// CHECK-SAME: () #[[ATTR2:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3:[0-9]+]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l66.omp_outlined)
// CHECK-NEXT: ret void
// CHECK-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK-SAME: () #[[ATTR7:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK-NEXT: ret void
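// Note (not autogenerated): @.omp_offloading.requires_reg is the helper clang
// emits to forward this translation unit's OpenMP `requires` clauses to the
// offload runtime at startup via @__tgt_register_requires. The literal i64 1
// is assumed here to correspond to OMP_REQ_NONE in the runtime's requires-flag
// enum, i.e. no `requires` clauses are active in this file.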
//// NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
// SIMD-ONLY0: {{.*}}
// SIMD-ONLY1: {{.*}}