1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
2 // expected-no-diagnostics
7 // RUN: %clang_cc1 -DCK1 -verify -Wno-vla -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1
8 // RUN: %clang_cc1 -DCK1 -verify -Wno-vla -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK2
9 // RUN: %clang_cc1 -DCK1 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
10 // RUN: %clang_cc1 -DCK1 -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify -Wno-vla %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK1
12 // RUN: %clang_cc1 -DCK1 -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
13 // RUN: %clang_cc1 -DCK1 -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify -Wno-vla %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK2
14 // RUN: %clang_cc1 -DCK1 -verify -Wno-vla -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK5
15 // RUN: %clang_cc1 -DCK1 -verify -Wno-vla -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK6
17 // RUN: %clang_cc1 -DCK1 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
18 // RUN: %clang_cc1 -DCK1 -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify -Wno-vla %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK5
19 // RUN: %clang_cc1 -DCK1 -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
20 // RUN: %clang_cc1 -DCK1 -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify -Wno-vla %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK6
22 // RUN: %clang_cc1 -DCK1 -verify -Wno-vla -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK9
23 // RUN: %clang_cc1 -DCK1 -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
24 // RUN: %clang_cc1 -DCK1 -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify -Wno-vla %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK9
25 // RUN: %clang_cc1 -DCK1 -verify -Wno-vla -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK11
26 // RUN: %clang_cc1 -DCK1 -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
27 // RUN: %clang_cc1 -DCK1 -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify -Wno-vla %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK11
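// The RUN lines above compile the same CK1 code several ways: -fopenmp at
// OpenMP 4.5 (-fopenmp-version=45) and at the default version, each both
// directly and through an -emit-pch / -include-pch round trip, for a 64-bit
// (powerpc64le) and a 32-bit (i386) host, plus -fopenmp-simd runs in which
// only the simd half of the combined directive is honored. Each configuration
// checks against its own FileCheck prefix.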
30 template <typename T, int X, long long Y>
37 #pragma omp teams distribute parallel for simd
38 for(int i
= 0; i
< X
; i
++) {
42 #pragma omp teams distribute parallel for simd schedule(static)
43 for(int i
= 0; i
< X
; i
++) {
47 #pragma omp teams distribute parallel for simd schedule(static, X/2)
48 for(int i
= 0; i
< X
; i
++) {
53 #pragma omp teams distribute parallel for simd schedule(dynamic)
54 for(int i
= 0; i
< X
; i
++) {
59 #pragma omp teams distribute parallel for simd schedule(dynamic, X/2)
60 for(int i
= 0; i
< X
; i
++) {
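// A minimal sketch (an assumption, not the verbatim source that was dropped
// here) of how the five loops above sit inside SS<T, X, Y>::foo(): the
// __tgt_target_kernel calls and the _l36/_l41/_l46/_l52/_l58 region names in
// the CHECK lines imply each loop is preceded by its own target construct, the
// "store i32 0" into the member array suggests a body along the lines of
// a[i] = (T)0, and the final load/ret in foo() corresponds to return a[0].
//
//   template <typename T, int X, long long Y>
//   struct SS {
//     T a[X];
//     int foo(void) {
//   #pragma omp target
//   #pragma omp teams distribute parallel for simd schedule(static, X/2)
//       for (int i = 0; i < X; i++) {
//         a[i] = (T)0;   // same body for each of the five schedule variants
//       }
//       return a[0];
//     }
//   };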
84 int teams_template_struct(void) {
92 // RUN: %clang_cc1 -DCK2 -verify -Wno-vla -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK13
94 // RUN: %clang_cc1 -DCK2 -verify -Wno-vla -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK14
96 // RUN: %clang_cc1 -DCK2 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
97 // RUN: %clang_cc1 -DCK2 -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify -Wno-vla %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK13
98 // RUN: %clang_cc1 -DCK2 -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
99 // RUN: %clang_cc1 -DCK2 -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify -Wno-vla %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK14
102 // RUN: %clang_cc1 -DCK2 -verify -Wno-vla -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK17
103 // RUN: %clang_cc1 -DCK2 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
104 // RUN: %clang_cc1 -DCK2 -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify -Wno-vla %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK17
106 // RUN: %clang_cc1 -DCK2 -verify -Wno-vla -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK19
107 // RUN: %clang_cc1 -DCK2 -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
108 // RUN: %clang_cc1 -DCK2 -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify -Wno-vla %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK19
110 // RUN: %clang_cc1 -DCK2 -verify -Wno-vla -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK21
111 // RUN: %clang_cc1 -DCK2 -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
112 // RUN: %clang_cc1 -DCK2 -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify -Wno-vla %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK21
113 // RUN: %clang_cc1 -DCK2 -verify -Wno-vla -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK23
114 // RUN: %clang_cc1 -DCK2 -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
115 // RUN: %clang_cc1 -DCK2 -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify -Wno-vla %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK23
118 template <typename T, int n>
123 #pragma omp teams distribute parallel for simd
124 for(int i
= 0; i
< n
; i
++) {
128 #pragma omp teams distribute parallel for simd schedule(static)
129 for(int i
= 0; i
< n
; i
++) {
133 #pragma omp teams distribute parallel for simd schedule(static, m)
134 for(int i
= 0; i
< n
; i
++) {
138 #pragma omp teams distribute parallel for simd schedule(dynamic)
139 for(int i
= 0; i
< n
; i
++) {
143 #pragma omp teams distribute parallel for simd schedule(dynamic, m)
144 for(int i
= 0; i
< n
; i
++) {
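// Unlike SS<T, X, Y>::foo() above, the tmain<T, n> loops take their schedule
// chunk from the variable m rather than from the compile-time constant X/2, so
// the schedule(static, m) and schedule(dynamic, m) variants exercise the case
// where the chunk size is a runtime value captured from the enclosing function.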
150 int main (int argc, char **argv) {
155 #pragma omp teams distribute parallel for simd
156 for(int i
= 0; i
< n
; i
++) {
160 #pragma omp teams distribute parallel for simd dist_schedule(static)
161 for(int i
= 0; i
< n
; i
++) {
165 #pragma omp teams distribute parallel for simd dist_schedule(static, m)
166 for(int i
= 0; i
< n
; i
++) {
170 #pragma omp teams distribute parallel for simd schedule(dynamic)
171 for(int i
= 0; i
< n
; i
++) {
175 #pragma omp teams distribute parallel for simd schedule(dynamic, m)
176 for(int i
= 0; i
< n
; i
++) {
179 return tmain<int, 10>(argc);
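// tmain<int, 10>(argc) instantiates the template above with T = int and n = 10,
// so the corresponding kernels are emitted for a ten-iteration int loop. A
// plausible enclosing signature (an assumption; the declaration itself was
// dropped in extraction) is:
//
//   template <typename T, int n>
//   T tmain(T argc);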
220 #endif // #ifndef HEADER
221 // CHECK1-LABEL: define {{[^@]+}}@_Z21teams_template_structv
222 // CHECK1-SAME: () #[[ATTR0:[0-9]+]] {
223 // CHECK1-NEXT: entry:
224 // CHECK1-NEXT: [[V:%.*]] = alloca [[STRUCT_SS:%.*]], align 4
225 // CHECK1-NEXT: [[CALL:%.*]] = call noundef signext i32 @_ZN2SSIiLi123ELx456EE3fooEv(ptr noundef nonnull align 4 dereferenceable(496) [[V]])
226 // CHECK1-NEXT: ret i32 [[CALL]]
229 // CHECK1-LABEL: define {{[^@]+}}@_ZN2SSIiLi123ELx456EE3fooEv
230 // CHECK1-SAME: (ptr noundef nonnull align 4 dereferenceable(496) [[THIS:%.*]]) #[[ATTR0]] comdat {
231 // CHECK1-NEXT: entry:
232 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
233 // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8
234 // CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8
235 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8
236 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
237 // CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
238 // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS3:%.*]] = alloca [1 x ptr], align 8
239 // CHECK1-NEXT: [[DOTOFFLOAD_PTRS4:%.*]] = alloca [1 x ptr], align 8
240 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS5:%.*]] = alloca [1 x ptr], align 8
241 // CHECK1-NEXT: [[_TMP6:%.*]] = alloca i32, align 4
242 // CHECK1-NEXT: [[KERNEL_ARGS7:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
243 // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS11:%.*]] = alloca [1 x ptr], align 8
244 // CHECK1-NEXT: [[DOTOFFLOAD_PTRS12:%.*]] = alloca [1 x ptr], align 8
245 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS13:%.*]] = alloca [1 x ptr], align 8
246 // CHECK1-NEXT: [[_TMP14:%.*]] = alloca i32, align 4
247 // CHECK1-NEXT: [[KERNEL_ARGS15:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
248 // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS19:%.*]] = alloca [1 x ptr], align 8
249 // CHECK1-NEXT: [[DOTOFFLOAD_PTRS20:%.*]] = alloca [1 x ptr], align 8
250 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS21:%.*]] = alloca [1 x ptr], align 8
251 // CHECK1-NEXT: [[_TMP22:%.*]] = alloca i32, align 4
252 // CHECK1-NEXT: [[KERNEL_ARGS23:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
253 // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS27:%.*]] = alloca [1 x ptr], align 8
254 // CHECK1-NEXT: [[DOTOFFLOAD_PTRS28:%.*]] = alloca [1 x ptr], align 8
255 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS29:%.*]] = alloca [1 x ptr], align 8
256 // CHECK1-NEXT: [[_TMP30:%.*]] = alloca i32, align 4
257 // CHECK1-NEXT: [[KERNEL_ARGS31:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
258 // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
259 // CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
260 // CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_SS:%.*]], ptr [[THIS1]], i32 0, i32 0
261 // CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
262 // CHECK1-NEXT: store ptr [[THIS1]], ptr [[TMP0]], align 8
263 // CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
264 // CHECK1-NEXT: store ptr [[A]], ptr [[TMP1]], align 8
265 // CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
266 // CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8
267 // CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
268 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
269 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
270 // CHECK1-NEXT: store i32 3, ptr [[TMP5]], align 4
271 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
272 // CHECK1-NEXT: store i32 1, ptr [[TMP6]], align 4
273 // CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
274 // CHECK1-NEXT: store ptr [[TMP3]], ptr [[TMP7]], align 8
275 // CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
276 // CHECK1-NEXT: store ptr [[TMP4]], ptr [[TMP8]], align 8
277 // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
278 // CHECK1-NEXT: store ptr @.offload_sizes, ptr [[TMP9]], align 8
279 // CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
280 // CHECK1-NEXT: store ptr @.offload_maptypes, ptr [[TMP10]], align 8
281 // CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
282 // CHECK1-NEXT: store ptr null, ptr [[TMP11]], align 8
283 // CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
284 // CHECK1-NEXT: store ptr null, ptr [[TMP12]], align 8
285 // CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
286 // CHECK1-NEXT: store i64 123, ptr [[TMP13]], align 8
287 // CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
288 // CHECK1-NEXT: store i64 0, ptr [[TMP14]], align 8
289 // CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
290 // CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP15]], align 4
291 // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
292 // CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP16]], align 4
293 // CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
294 // CHECK1-NEXT: store i32 0, ptr [[TMP17]], align 4
295 // CHECK1-NEXT: [[TMP18:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.region_id, ptr [[KERNEL_ARGS]])
296 // CHECK1-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
297 // CHECK1-NEXT: br i1 [[TMP19]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
298 // CHECK1: omp_offload.failed:
299 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36(ptr [[THIS1]]) #[[ATTR2:[0-9]+]]
300 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
301 // CHECK1: omp_offload.cont:
302 // CHECK1-NEXT: [[A2:%.*]] = getelementptr inbounds nuw [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
303 // CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
304 // CHECK1-NEXT: store ptr [[THIS1]], ptr [[TMP20]], align 8
305 // CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
306 // CHECK1-NEXT: store ptr [[A2]], ptr [[TMP21]], align 8
307 // CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS5]], i64 0, i64 0
308 // CHECK1-NEXT: store ptr null, ptr [[TMP22]], align 8
309 // CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
310 // CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
311 // CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 0
312 // CHECK1-NEXT: store i32 3, ptr [[TMP25]], align 4
313 // CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 1
314 // CHECK1-NEXT: store i32 1, ptr [[TMP26]], align 4
315 // CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 2
316 // CHECK1-NEXT: store ptr [[TMP23]], ptr [[TMP27]], align 8
317 // CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 3
318 // CHECK1-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 8
319 // CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 4
320 // CHECK1-NEXT: store ptr @.offload_sizes.1, ptr [[TMP29]], align 8
321 // CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 5
322 // CHECK1-NEXT: store ptr @.offload_maptypes.2, ptr [[TMP30]], align 8
323 // CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 6
324 // CHECK1-NEXT: store ptr null, ptr [[TMP31]], align 8
325 // CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 7
326 // CHECK1-NEXT: store ptr null, ptr [[TMP32]], align 8
327 // CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 8
328 // CHECK1-NEXT: store i64 123, ptr [[TMP33]], align 8
329 // CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 9
330 // CHECK1-NEXT: store i64 0, ptr [[TMP34]], align 8
331 // CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 10
332 // CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP35]], align 4
333 // CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 11
334 // CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP36]], align 4
335 // CHECK1-NEXT: [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 12
336 // CHECK1-NEXT: store i32 0, ptr [[TMP37]], align 4
337 // CHECK1-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41.region_id, ptr [[KERNEL_ARGS7]])
338 // CHECK1-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
339 // CHECK1-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED8:%.*]], label [[OMP_OFFLOAD_CONT9:%.*]]
340 // CHECK1: omp_offload.failed8:
341 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41(ptr [[THIS1]]) #[[ATTR2]]
342 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT9]]
343 // CHECK1: omp_offload.cont9:
344 // CHECK1-NEXT: [[A10:%.*]] = getelementptr inbounds nuw [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
345 // CHECK1-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS11]], i32 0, i32 0
346 // CHECK1-NEXT: store ptr [[THIS1]], ptr [[TMP40]], align 8
347 // CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS12]], i32 0, i32 0
348 // CHECK1-NEXT: store ptr [[A10]], ptr [[TMP41]], align 8
349 // CHECK1-NEXT: [[TMP42:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS13]], i64 0, i64 0
350 // CHECK1-NEXT: store ptr null, ptr [[TMP42]], align 8
351 // CHECK1-NEXT: [[TMP43:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS11]], i32 0, i32 0
352 // CHECK1-NEXT: [[TMP44:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS12]], i32 0, i32 0
353 // CHECK1-NEXT: [[TMP45:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 0
354 // CHECK1-NEXT: store i32 3, ptr [[TMP45]], align 4
355 // CHECK1-NEXT: [[TMP46:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 1
356 // CHECK1-NEXT: store i32 1, ptr [[TMP46]], align 4
357 // CHECK1-NEXT: [[TMP47:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 2
358 // CHECK1-NEXT: store ptr [[TMP43]], ptr [[TMP47]], align 8
359 // CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 3
360 // CHECK1-NEXT: store ptr [[TMP44]], ptr [[TMP48]], align 8
361 // CHECK1-NEXT: [[TMP49:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 4
362 // CHECK1-NEXT: store ptr @.offload_sizes.3, ptr [[TMP49]], align 8
363 // CHECK1-NEXT: [[TMP50:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 5
364 // CHECK1-NEXT: store ptr @.offload_maptypes.4, ptr [[TMP50]], align 8
365 // CHECK1-NEXT: [[TMP51:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 6
366 // CHECK1-NEXT: store ptr null, ptr [[TMP51]], align 8
367 // CHECK1-NEXT: [[TMP52:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 7
368 // CHECK1-NEXT: store ptr null, ptr [[TMP52]], align 8
369 // CHECK1-NEXT: [[TMP53:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 8
370 // CHECK1-NEXT: store i64 123, ptr [[TMP53]], align 8
371 // CHECK1-NEXT: [[TMP54:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 9
372 // CHECK1-NEXT: store i64 0, ptr [[TMP54]], align 8
373 // CHECK1-NEXT: [[TMP55:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 10
374 // CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP55]], align 4
375 // CHECK1-NEXT: [[TMP56:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 11
376 // CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP56]], align 4
377 // CHECK1-NEXT: [[TMP57:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 12
378 // CHECK1-NEXT: store i32 0, ptr [[TMP57]], align 4
379 // CHECK1-NEXT: [[TMP58:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46.region_id, ptr [[KERNEL_ARGS15]])
380 // CHECK1-NEXT: [[TMP59:%.*]] = icmp ne i32 [[TMP58]], 0
381 // CHECK1-NEXT: br i1 [[TMP59]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]]
382 // CHECK1: omp_offload.failed16:
383 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46(ptr [[THIS1]]) #[[ATTR2]]
384 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT17]]
385 // CHECK1: omp_offload.cont17:
386 // CHECK1-NEXT: [[A18:%.*]] = getelementptr inbounds nuw [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
387 // CHECK1-NEXT: [[TMP60:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0
388 // CHECK1-NEXT: store ptr [[THIS1]], ptr [[TMP60]], align 8
389 // CHECK1-NEXT: [[TMP61:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 0
390 // CHECK1-NEXT: store ptr [[A18]], ptr [[TMP61]], align 8
391 // CHECK1-NEXT: [[TMP62:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS21]], i64 0, i64 0
392 // CHECK1-NEXT: store ptr null, ptr [[TMP62]], align 8
393 // CHECK1-NEXT: [[TMP63:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0
394 // CHECK1-NEXT: [[TMP64:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 0
395 // CHECK1-NEXT: [[TMP65:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 0
396 // CHECK1-NEXT: store i32 3, ptr [[TMP65]], align 4
397 // CHECK1-NEXT: [[TMP66:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 1
398 // CHECK1-NEXT: store i32 1, ptr [[TMP66]], align 4
399 // CHECK1-NEXT: [[TMP67:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 2
400 // CHECK1-NEXT: store ptr [[TMP63]], ptr [[TMP67]], align 8
401 // CHECK1-NEXT: [[TMP68:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 3
402 // CHECK1-NEXT: store ptr [[TMP64]], ptr [[TMP68]], align 8
403 // CHECK1-NEXT: [[TMP69:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 4
404 // CHECK1-NEXT: store ptr @.offload_sizes.5, ptr [[TMP69]], align 8
405 // CHECK1-NEXT: [[TMP70:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 5
406 // CHECK1-NEXT: store ptr @.offload_maptypes.6, ptr [[TMP70]], align 8
407 // CHECK1-NEXT: [[TMP71:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 6
408 // CHECK1-NEXT: store ptr null, ptr [[TMP71]], align 8
409 // CHECK1-NEXT: [[TMP72:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 7
410 // CHECK1-NEXT: store ptr null, ptr [[TMP72]], align 8
411 // CHECK1-NEXT: [[TMP73:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 8
412 // CHECK1-NEXT: store i64 123, ptr [[TMP73]], align 8
413 // CHECK1-NEXT: [[TMP74:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 9
414 // CHECK1-NEXT: store i64 0, ptr [[TMP74]], align 8
415 // CHECK1-NEXT: [[TMP75:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 10
416 // CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP75]], align 4
417 // CHECK1-NEXT: [[TMP76:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 11
418 // CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP76]], align 4
419 // CHECK1-NEXT: [[TMP77:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 12
420 // CHECK1-NEXT: store i32 0, ptr [[TMP77]], align 4
421 // CHECK1-NEXT: [[TMP78:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52.region_id, ptr [[KERNEL_ARGS23]])
422 // CHECK1-NEXT: [[TMP79:%.*]] = icmp ne i32 [[TMP78]], 0
423 // CHECK1-NEXT: br i1 [[TMP79]], label [[OMP_OFFLOAD_FAILED24:%.*]], label [[OMP_OFFLOAD_CONT25:%.*]]
424 // CHECK1: omp_offload.failed24:
425 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52(ptr [[THIS1]]) #[[ATTR2]]
426 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT25]]
427 // CHECK1: omp_offload.cont25:
428 // CHECK1-NEXT: [[A26:%.*]] = getelementptr inbounds nuw [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
429 // CHECK1-NEXT: [[TMP80:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS27]], i32 0, i32 0
430 // CHECK1-NEXT: store ptr [[THIS1]], ptr [[TMP80]], align 8
431 // CHECK1-NEXT: [[TMP81:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS28]], i32 0, i32 0
432 // CHECK1-NEXT: store ptr [[A26]], ptr [[TMP81]], align 8
433 // CHECK1-NEXT: [[TMP82:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS29]], i64 0, i64 0
434 // CHECK1-NEXT: store ptr null, ptr [[TMP82]], align 8
435 // CHECK1-NEXT: [[TMP83:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS27]], i32 0, i32 0
436 // CHECK1-NEXT: [[TMP84:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS28]], i32 0, i32 0
437 // CHECK1-NEXT: [[TMP85:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 0
438 // CHECK1-NEXT: store i32 3, ptr [[TMP85]], align 4
439 // CHECK1-NEXT: [[TMP86:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 1
440 // CHECK1-NEXT: store i32 1, ptr [[TMP86]], align 4
441 // CHECK1-NEXT: [[TMP87:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 2
442 // CHECK1-NEXT: store ptr [[TMP83]], ptr [[TMP87]], align 8
443 // CHECK1-NEXT: [[TMP88:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 3
444 // CHECK1-NEXT: store ptr [[TMP84]], ptr [[TMP88]], align 8
445 // CHECK1-NEXT: [[TMP89:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 4
446 // CHECK1-NEXT: store ptr @.offload_sizes.7, ptr [[TMP89]], align 8
447 // CHECK1-NEXT: [[TMP90:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 5
448 // CHECK1-NEXT: store ptr @.offload_maptypes.8, ptr [[TMP90]], align 8
449 // CHECK1-NEXT: [[TMP91:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 6
450 // CHECK1-NEXT: store ptr null, ptr [[TMP91]], align 8
451 // CHECK1-NEXT: [[TMP92:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 7
452 // CHECK1-NEXT: store ptr null, ptr [[TMP92]], align 8
453 // CHECK1-NEXT: [[TMP93:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 8
454 // CHECK1-NEXT: store i64 123, ptr [[TMP93]], align 8
455 // CHECK1-NEXT: [[TMP94:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 9
456 // CHECK1-NEXT: store i64 0, ptr [[TMP94]], align 8
457 // CHECK1-NEXT: [[TMP95:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 10
458 // CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP95]], align 4
459 // CHECK1-NEXT: [[TMP96:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 11
460 // CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP96]], align 4
461 // CHECK1-NEXT: [[TMP97:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 12
462 // CHECK1-NEXT: store i32 0, ptr [[TMP97]], align 4
463 // CHECK1-NEXT: [[TMP98:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58.region_id, ptr [[KERNEL_ARGS31]])
464 // CHECK1-NEXT: [[TMP99:%.*]] = icmp ne i32 [[TMP98]], 0
465 // CHECK1-NEXT: br i1 [[TMP99]], label [[OMP_OFFLOAD_FAILED32:%.*]], label [[OMP_OFFLOAD_CONT33:%.*]]
466 // CHECK1: omp_offload.failed32:
467 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58(ptr [[THIS1]]) #[[ATTR2]]
468 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT33]]
469 // CHECK1: omp_offload.cont33:
470 // CHECK1-NEXT: [[A34:%.*]] = getelementptr inbounds nuw [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
471 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A34]], i64 0, i64 0
472 // CHECK1-NEXT: [[TMP100:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
473 // CHECK1-NEXT: ret i32 [[TMP100]]
476 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36
477 // CHECK1-SAME: (ptr noundef [[THIS:%.*]]) #[[ATTR1:[0-9]+]] {
478 // CHECK1-NEXT: entry:
479 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
480 // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
481 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
482 // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.omp_outlined, ptr [[TMP0]])
483 // CHECK1-NEXT: ret void
486 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.omp_outlined
487 // CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
488 // CHECK1-NEXT: entry:
489 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
490 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
491 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
492 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
493 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
494 // CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
495 // CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
496 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
497 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
498 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
499 // CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
500 // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
501 // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
502 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
503 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
504 // CHECK1-NEXT: store i32 122, ptr [[DOTOMP_COMB_UB]], align 4
505 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
506 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
507 // CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
508 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
509 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
510 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
511 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
512 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
513 // CHECK1: cond.true:
514 // CHECK1-NEXT: br label [[COND_END:%.*]]
515 // CHECK1: cond.false:
516 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
517 // CHECK1-NEXT: br label [[COND_END]]
519 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
520 // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
521 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
522 // CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
523 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
524 // CHECK1: omp.inner.for.cond:
525 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8:![0-9]+]]
526 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP8]]
527 // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
528 // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
529 // CHECK1: omp.inner.for.body:
530 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP8]]
531 // CHECK1-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
532 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP8]]
533 // CHECK1-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
534 // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP8]]
535 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
536 // CHECK1: omp.inner.for.inc:
537 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]]
538 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP8]]
539 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
540 // CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]]
541 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]]
542 // CHECK1: omp.inner.for.end:
543 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
544 // CHECK1: omp.loop.exit:
545 // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
546 // CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
547 // CHECK1-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
548 // CHECK1-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
549 // CHECK1: .omp.final.then:
550 // CHECK1-NEXT: store i32 123, ptr [[I]], align 4
551 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
552 // CHECK1: .omp.final.done:
553 // CHECK1-NEXT: ret void
556 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.omp_outlined.omp_outlined
557 // CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
558 // CHECK1-NEXT: entry:
559 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
560 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
561 // CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
562 // CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
563 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
564 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
565 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
566 // CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
567 // CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
568 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
569 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
570 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
571 // CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
572 // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
573 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
574 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
575 // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
576 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
577 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
578 // CHECK1-NEXT: store i32 122, ptr [[DOTOMP_UB]], align 4
579 // CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
580 // CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
581 // CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
582 // CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
583 // CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
584 // CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
585 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
586 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
587 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
588 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
589 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
590 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
591 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 122
592 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
593 // CHECK1: cond.true:
594 // CHECK1-NEXT: br label [[COND_END:%.*]]
595 // CHECK1: cond.false:
596 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
597 // CHECK1-NEXT: br label [[COND_END]]
599 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
600 // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
601 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
602 // CHECK1-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4
603 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
604 // CHECK1: omp.inner.for.cond:
605 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12:![0-9]+]]
606 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP12]]
607 // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
608 // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
609 // CHECK1: omp.inner.for.body:
610 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]]
611 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
612 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
613 // CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP12]]
614 // CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, i32 0
615 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP12]]
616 // CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
617 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A]], i64 0, i64 [[IDXPROM]]
618 // CHECK1-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP12]]
619 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
620 // CHECK1: omp.body.continue:
621 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
622 // CHECK1: omp.inner.for.inc:
623 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]]
624 // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
625 // CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]]
626 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]]
627 // CHECK1: omp.inner.for.end:
628 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
629 // CHECK1: omp.loop.exit:
630 // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP4]])
631 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
632 // CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
633 // CHECK1-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
634 // CHECK1: .omp.final.then:
635 // CHECK1-NEXT: store i32 123, ptr [[I]], align 4
636 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
637 // CHECK1: .omp.final.done:
638 // CHECK1-NEXT: ret void
641 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41
642 // CHECK1-SAME: (ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
643 // CHECK1-NEXT: entry:
644 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
645 // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
646 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
647 // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41.omp_outlined, ptr [[TMP0]])
648 // CHECK1-NEXT: ret void
651 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41.omp_outlined
652 // CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
653 // CHECK1-NEXT: entry:
654 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
655 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
656 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
657 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
658 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
659 // CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
660 // CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
661 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
662 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
663 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
664 // CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
665 // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
666 // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
667 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
668 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
669 // CHECK1-NEXT: store i32 122, ptr [[DOTOMP_COMB_UB]], align 4
670 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
671 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
672 // CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
673 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
674 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
675 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
676 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
677 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
678 // CHECK1: cond.true:
679 // CHECK1-NEXT: br label [[COND_END:%.*]]
680 // CHECK1: cond.false:
681 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
682 // CHECK1-NEXT: br label [[COND_END]]
684 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
685 // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
686 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
687 // CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
688 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
689 // CHECK1: omp.inner.for.cond:
690 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP17:![0-9]+]]
691 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP17]]
692 // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
693 // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
694 // CHECK1: omp.inner.for.body:
695 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP17]]
696 // CHECK1-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
697 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP17]]
698 // CHECK1-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
699 // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP17]]
700 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
701 // CHECK1: omp.inner.for.inc:
702 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP17]]
703 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP17]]
704 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
705 // CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP17]]
706 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP18:![0-9]+]]
707 // CHECK1: omp.inner.for.end:
708 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
709 // CHECK1: omp.loop.exit:
710 // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
711 // CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
712 // CHECK1-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
713 // CHECK1-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
714 // CHECK1: .omp.final.then:
715 // CHECK1-NEXT: store i32 123, ptr [[I]], align 4
716 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
717 // CHECK1: .omp.final.done:
718 // CHECK1-NEXT: ret void
721 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41.omp_outlined.omp_outlined
722 // CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
723 // CHECK1-NEXT: entry:
724 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
725 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
726 // CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
727 // CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
728 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
729 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
730 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
731 // CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
732 // CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
733 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
734 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
735 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
736 // CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
737 // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
738 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
739 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
740 // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
741 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
742 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
743 // CHECK1-NEXT: store i32 122, ptr [[DOTOMP_UB]], align 4
744 // CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
745 // CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
746 // CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
747 // CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
748 // CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
749 // CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
750 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
751 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
752 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
753 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
754 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
755 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
756 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 122
757 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
758 // CHECK1: cond.true:
759 // CHECK1-NEXT: br label [[COND_END:%.*]]
760 // CHECK1: cond.false:
761 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
762 // CHECK1-NEXT: br label [[COND_END]]
764 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
765 // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
766 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
767 // CHECK1-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4
768 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
769 // CHECK1: omp.inner.for.cond:
770 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20:![0-9]+]]
771 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP20]]
772 // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
773 // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
774 // CHECK1: omp.inner.for.body:
775 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]]
776 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
777 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
778 // CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP20]]
779 // CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, i32 0
780 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP20]]
781 // CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
782 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A]], i64 0, i64 [[IDXPROM]]
783 // CHECK1-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP20]]
784 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
785 // CHECK1: omp.body.continue:
786 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
787 // CHECK1: omp.inner.for.inc:
788 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]]
789 // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
790 // CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]]
791 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]]
792 // CHECK1: omp.inner.for.end:
793 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
794 // CHECK1: omp.loop.exit:
795 // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP4]])
796 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
797 // CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
798 // CHECK1-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
799 // CHECK1: .omp.final.then:
800 // CHECK1-NEXT: store i32 123, ptr [[I]], align 4
801 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
802 // CHECK1: .omp.final.done:
803 // CHECK1-NEXT: ret void
806 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46
807 // CHECK1-SAME: (ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
808 // CHECK1-NEXT: entry:
809 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
810 // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
811 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
812 // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46.omp_outlined, ptr [[TMP0]])
813 // CHECK1-NEXT: ret void
816 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46.omp_outlined
817 // CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
818 // CHECK1-NEXT: entry:
819 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
820 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
821 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
822 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
823 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
824 // CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
825 // CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
826 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
827 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
828 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
829 // CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
830 // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
831 // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
832 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
833 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
834 // CHECK1-NEXT: store i32 122, ptr [[DOTOMP_COMB_UB]], align 4
835 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
836 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
837 // CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
838 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
839 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
840 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
841 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
842 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
843 // CHECK1: cond.true:
844 // CHECK1-NEXT: br label [[COND_END:%.*]]
845 // CHECK1: cond.false:
846 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
847 // CHECK1-NEXT: br label [[COND_END]]
848 // CHECK1: cond.end:
849 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
850 // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
851 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
852 // CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
853 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
854 // CHECK1: omp.inner.for.cond:
855 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23:![0-9]+]]
856 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP23]]
857 // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
858 // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
859 // CHECK1: omp.inner.for.body:
860 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP23]]
861 // CHECK1-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
862 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP23]]
863 // CHECK1-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
864 // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP23]]
865 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
866 // CHECK1: omp.inner.for.inc:
867 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]]
868 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP23]]
869 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
870 // CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]]
871 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]]
872 // CHECK1: omp.inner.for.end:
873 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
874 // CHECK1: omp.loop.exit:
875 // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
876 // CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
877 // CHECK1-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
878 // CHECK1-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
879 // CHECK1: .omp.final.then:
880 // CHECK1-NEXT: store i32 123, ptr [[I]], align 4
881 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
882 // CHECK1: .omp.final.done:
883 // CHECK1-NEXT: ret void
886 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46.omp_outlined.omp_outlined
887 // CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
888 // CHECK1-NEXT: entry:
889 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
890 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
891 // CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
892 // CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
893 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
894 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
895 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
896 // CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
897 // CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
898 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
899 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
900 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
901 // CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
902 // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
903 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
904 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
905 // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
906 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
907 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
908 // CHECK1-NEXT: store i32 122, ptr [[DOTOMP_UB]], align 4
909 // CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
910 // CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
911 // CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
912 // CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
913 // CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
914 // CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
915 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
916 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
917 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
918 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
919 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP4]], i32 33, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 61)
920 // CHECK1-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
921 // CHECK1: omp.dispatch.cond:
922 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
923 // CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
924 // CHECK1-NEXT: [[CONV2:%.*]] = trunc i64 [[TMP6]] to i32
925 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], [[CONV2]]
926 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
927 // CHECK1: cond.true:
928 // CHECK1-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
929 // CHECK1-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP7]] to i32
930 // CHECK1-NEXT: br label [[COND_END:%.*]]
931 // CHECK1: cond.false:
932 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
933 // CHECK1-NEXT: br label [[COND_END]]
934 // CHECK1: cond.end:
935 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[CONV3]], [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ]
936 // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
937 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
938 // CHECK1-NEXT: store i32 [[TMP9]], ptr [[DOTOMP_IV]], align 4
939 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
940 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
941 // CHECK1-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
942 // CHECK1-NEXT: br i1 [[CMP4]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
943 // CHECK1: omp.dispatch.body:
944 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
945 // CHECK1: omp.inner.for.cond:
946 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26:![0-9]+]]
947 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP26]]
948 // CHECK1-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
949 // CHECK1-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
950 // CHECK1: omp.inner.for.body:
951 // CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]]
952 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP14]], 1
953 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
954 // CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP26]]
955 // CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, i32 0
956 // CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP26]]
957 // CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP15]] to i64
958 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A]], i64 0, i64 [[IDXPROM]]
959 // CHECK1-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP26]]
960 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
961 // CHECK1: omp.body.continue:
962 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
963 // CHECK1: omp.inner.for.inc:
964 // CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]]
965 // CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP16]], 1
966 // CHECK1-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]]
967 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]]
968 // CHECK1: omp.inner.for.end:
969 // CHECK1-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
970 // CHECK1: omp.dispatch.inc:
971 // CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
972 // CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
973 // CHECK1-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
974 // CHECK1-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_LB]], align 4
975 // CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
976 // CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
977 // CHECK1-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
978 // CHECK1-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_UB]], align 4
979 // CHECK1-NEXT: br label [[OMP_DISPATCH_COND]]
980 // CHECK1: omp.dispatch.end:
981 // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP4]])
982 // CHECK1-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
983 // CHECK1-NEXT: [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0
984 // CHECK1-NEXT: br i1 [[TMP22]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
985 // CHECK1: .omp.final.then:
986 // CHECK1-NEXT: store i32 123, ptr [[I]], align 4
987 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
988 // CHECK1: .omp.final.done:
989 // CHECK1-NEXT: ret void
992 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52
993 // CHECK1-SAME: (ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
994 // CHECK1-NEXT: entry:
995 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
996 // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
997 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
998 // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52.omp_outlined, ptr [[TMP0]])
999 // CHECK1-NEXT: ret void
1002 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52.omp_outlined
1003 // CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
1004 // CHECK1-NEXT: entry:
1005 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1006 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1007 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
1008 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
1009 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
1010 // CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
1011 // CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
1012 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1013 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1014 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
1015 // CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
1016 // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
1017 // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
1018 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
1019 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
1020 // CHECK1-NEXT: store i32 122, ptr [[DOTOMP_COMB_UB]], align 4
1021 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
1022 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
1023 // CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
1024 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
1025 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
1026 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
1027 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
1028 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1029 // CHECK1: cond.true:
1030 // CHECK1-NEXT: br label [[COND_END:%.*]]
1031 // CHECK1: cond.false:
1032 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
1033 // CHECK1-NEXT: br label [[COND_END]]
1034 // CHECK1: cond.end:
1035 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
1036 // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
1037 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
1038 // CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
1039 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
1040 // CHECK1: omp.inner.for.cond:
1041 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29:![0-9]+]]
1042 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP29]]
1043 // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
1044 // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1045 // CHECK1: omp.inner.for.body:
1046 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP29]]
1047 // CHECK1-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
1048 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP29]]
1049 // CHECK1-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
1050 // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP29]]
1051 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
1052 // CHECK1: omp.inner.for.inc:
1053 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]]
1054 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP29]]
1055 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
1056 // CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]]
1057 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]]
1058 // CHECK1: omp.inner.for.end:
1059 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
1060 // CHECK1: omp.loop.exit:
1061 // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
1062 // CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
1063 // CHECK1-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
1064 // CHECK1-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1065 // CHECK1: .omp.final.then:
1066 // CHECK1-NEXT: store i32 123, ptr [[I]], align 4
1067 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
1068 // CHECK1: .omp.final.done:
1069 // CHECK1-NEXT: ret void
1072 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52.omp_outlined.omp_outlined
1073 // CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
1074 // CHECK1-NEXT: entry:
1075 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1076 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1077 // CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
1078 // CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
1079 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
1080 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
1081 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
1082 // CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
1083 // CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
1084 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1085 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1086 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
1087 // CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
1088 // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
1089 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
1090 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
1091 // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
1092 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
1093 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
1094 // CHECK1-NEXT: store i32 122, ptr [[DOTOMP_UB]], align 4
1095 // CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
1096 // CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
1097 // CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
1098 // CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
1099 // CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
1100 // CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
1101 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
1102 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
1103 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
1104 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
1105 // CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
1106 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
1107 // CHECK1-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB3]], i32 [[TMP6]], i32 35, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 1)
1108 // CHECK1-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
1109 // CHECK1: omp.dispatch.cond:
1110 // CHECK1-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB3]], i32 [[TMP6]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
1111 // CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
1112 // CHECK1-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
1113 // CHECK1: omp.dispatch.body:
1114 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
1115 // CHECK1-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4
1116 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
1117 // CHECK1: omp.inner.for.cond:
1118 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32:![0-9]+]]
1119 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP32]]
1120 // CHECK1-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
1121 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1122 // CHECK1: omp.inner.for.body:
1123 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]]
1124 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
1125 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1126 // CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP32]]
1127 // CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, i32 0
1128 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP32]]
1129 // CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP12]] to i64
1130 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A]], i64 0, i64 [[IDXPROM]]
1131 // CHECK1-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP32]]
1132 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
1133 // CHECK1: omp.body.continue:
1134 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
1135 // CHECK1: omp.inner.for.inc:
1136 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]]
1137 // CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP13]], 1
1138 // CHECK1-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]]
1139 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]]
1140 // CHECK1: omp.inner.for.end:
1141 // CHECK1-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
1142 // CHECK1: omp.dispatch.inc:
1143 // CHECK1-NEXT: br label [[OMP_DISPATCH_COND]]
1144 // CHECK1: omp.dispatch.end:
1145 // CHECK1-NEXT: call void @__kmpc_dispatch_deinit(ptr @[[GLOB3]], i32 [[TMP6]])
1146 // CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
1147 // CHECK1-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
1148 // CHECK1-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1149 // CHECK1: .omp.final.then:
1150 // CHECK1-NEXT: store i32 123, ptr [[I]], align 4
1151 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
1152 // CHECK1: .omp.final.done:
1153 // CHECK1-NEXT: ret void
1156 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58
1157 // CHECK1-SAME: (ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
1158 // CHECK1-NEXT: entry:
1159 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
1160 // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
1161 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
1162 // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58.omp_outlined, ptr [[TMP0]])
1163 // CHECK1-NEXT: ret void
1166 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58.omp_outlined
1167 // CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
1168 // CHECK1-NEXT: entry:
1169 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1170 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1171 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
1172 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
1173 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
1174 // CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
1175 // CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
1176 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1177 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1178 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
1179 // CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
1180 // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
1181 // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
1182 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
1183 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
1184 // CHECK1-NEXT: store i32 122, ptr [[DOTOMP_COMB_UB]], align 4
1185 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
1186 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
1187 // CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
1188 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
1189 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
1190 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
1191 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
1192 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1193 // CHECK1: cond.true:
1194 // CHECK1-NEXT: br label [[COND_END:%.*]]
1195 // CHECK1: cond.false:
1196 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
1197 // CHECK1-NEXT: br label [[COND_END]]
1198 // CHECK1: cond.end:
1199 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
1200 // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
1201 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
1202 // CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
1203 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
1204 // CHECK1: omp.inner.for.cond:
1205 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35:![0-9]+]]
1206 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP35]]
1207 // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
1208 // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1209 // CHECK1: omp.inner.for.body:
1210 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP35]]
1211 // CHECK1-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
1212 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP35]]
1213 // CHECK1-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
1214 // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP35]]
1215 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
1216 // CHECK1: omp.inner.for.inc:
1217 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]]
1218 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP35]]
1219 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
1220 // CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]]
1221 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]]
1222 // CHECK1: omp.inner.for.end:
1223 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
1224 // CHECK1: omp.loop.exit:
1225 // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
1226 // CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
1227 // CHECK1-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
1228 // CHECK1-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1229 // CHECK1: .omp.final.then:
1230 // CHECK1-NEXT: store i32 123, ptr [[I]], align 4
1231 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
1232 // CHECK1: .omp.final.done:
1233 // CHECK1-NEXT: ret void
1236 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58.omp_outlined.omp_outlined
1237 // CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
1238 // CHECK1-NEXT: entry:
1239 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1240 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1241 // CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
1242 // CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
1243 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
1244 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
1245 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
1246 // CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
1247 // CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
1248 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1249 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1250 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
1251 // CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
1252 // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
1253 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
1254 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
1255 // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
1256 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
1257 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
1258 // CHECK1-NEXT: store i32 122, ptr [[DOTOMP_UB]], align 4
1259 // CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
1260 // CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
1261 // CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
1262 // CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
1263 // CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
1264 // CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
1265 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
1266 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
1267 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
1268 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
1269 // CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
1270 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
1271 // CHECK1-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB3]], i32 [[TMP6]], i32 35, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 61)
1272 // CHECK1-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
1273 // CHECK1: omp.dispatch.cond:
1274 // CHECK1-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB3]], i32 [[TMP6]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
1275 // CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
1276 // CHECK1-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
1277 // CHECK1: omp.dispatch.body:
1278 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
1279 // CHECK1-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4
1280 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
1281 // CHECK1: omp.inner.for.cond:
1282 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38:![0-9]+]]
1283 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP38]]
1284 // CHECK1-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
1285 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1286 // CHECK1: omp.inner.for.body:
1287 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38]]
1288 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
1289 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1290 // CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP38]]
1291 // CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, i32 0
1292 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP38]]
1293 // CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP12]] to i64
1294 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A]], i64 0, i64 [[IDXPROM]]
1295 // CHECK1-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP38]]
1296 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
1297 // CHECK1: omp.body.continue:
1298 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
1299 // CHECK1: omp.inner.for.inc:
1300 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38]]
1301 // CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP13]], 1
1302 // CHECK1-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38]]
1303 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP39:![0-9]+]]
1304 // CHECK1: omp.inner.for.end:
1305 // CHECK1-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
1306 // CHECK1: omp.dispatch.inc:
1307 // CHECK1-NEXT: br label [[OMP_DISPATCH_COND]]
1308 // CHECK1: omp.dispatch.end:
1309 // CHECK1-NEXT: call void @__kmpc_dispatch_deinit(ptr @[[GLOB3]], i32 [[TMP6]])
1310 // CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
1311 // CHECK1-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
1312 // CHECK1-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1313 // CHECK1: .omp.final.then:
1314 // CHECK1-NEXT: store i32 123, ptr [[I]], align 4
1315 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
1316 // CHECK1: .omp.final.done:
1317 // CHECK1-NEXT: ret void
1320 // CHECK2-LABEL: define {{[^@]+}}@_Z21teams_template_structv
1321 // CHECK2-SAME: () #[[ATTR0:[0-9]+]] {
1322 // CHECK2-NEXT: entry:
1323 // CHECK2-NEXT: [[V:%.*]] = alloca [[STRUCT_SS:%.*]], align 4
1324 // CHECK2-NEXT: [[CALL:%.*]] = call noundef signext i32 @_ZN2SSIiLi123ELx456EE3fooEv(ptr noundef nonnull align 4 dereferenceable(496) [[V]])
1325 // CHECK2-NEXT: ret i32 [[CALL]]
1328 // CHECK2-LABEL: define {{[^@]+}}@_ZN2SSIiLi123ELx456EE3fooEv
1329 // CHECK2-SAME: (ptr noundef nonnull align 4 dereferenceable(496) [[THIS:%.*]]) #[[ATTR0]] comdat {
1330 // CHECK2-NEXT: entry:
1331 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
1332 // CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8
1333 // CHECK2-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8
1334 // CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8
1335 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
1336 // CHECK2-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
1337 // CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS3:%.*]] = alloca [1 x ptr], align 8
1338 // CHECK2-NEXT: [[DOTOFFLOAD_PTRS4:%.*]] = alloca [1 x ptr], align 8
1339 // CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS5:%.*]] = alloca [1 x ptr], align 8
1340 // CHECK2-NEXT: [[_TMP6:%.*]] = alloca i32, align 4
1341 // CHECK2-NEXT: [[KERNEL_ARGS7:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
1342 // CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS11:%.*]] = alloca [1 x ptr], align 8
1343 // CHECK2-NEXT: [[DOTOFFLOAD_PTRS12:%.*]] = alloca [1 x ptr], align 8
1344 // CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS13:%.*]] = alloca [1 x ptr], align 8
1345 // CHECK2-NEXT: [[_TMP14:%.*]] = alloca i32, align 4
1346 // CHECK2-NEXT: [[KERNEL_ARGS15:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
1347 // CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS19:%.*]] = alloca [1 x ptr], align 8
1348 // CHECK2-NEXT: [[DOTOFFLOAD_PTRS20:%.*]] = alloca [1 x ptr], align 8
1349 // CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS21:%.*]] = alloca [1 x ptr], align 8
1350 // CHECK2-NEXT: [[_TMP22:%.*]] = alloca i32, align 4
1351 // CHECK2-NEXT: [[KERNEL_ARGS23:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
1352 // CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS27:%.*]] = alloca [1 x ptr], align 8
1353 // CHECK2-NEXT: [[DOTOFFLOAD_PTRS28:%.*]] = alloca [1 x ptr], align 8
1354 // CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS29:%.*]] = alloca [1 x ptr], align 8
1355 // CHECK2-NEXT: [[_TMP30:%.*]] = alloca i32, align 4
1356 // CHECK2-NEXT: [[KERNEL_ARGS31:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
1357 // CHECK2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
1358 // CHECK2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
1359 // CHECK2-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_SS:%.*]], ptr [[THIS1]], i32 0, i32 0
1360 // CHECK2-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1361 // CHECK2-NEXT: store ptr [[THIS1]], ptr [[TMP0]], align 8
1362 // CHECK2-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1363 // CHECK2-NEXT: store ptr [[A]], ptr [[TMP1]], align 8
1364 // CHECK2-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
1365 // CHECK2-NEXT: store ptr null, ptr [[TMP2]], align 8
1366 // CHECK2-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1367 // CHECK2-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1368 // CHECK2-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
1369 // CHECK2-NEXT: store i32 3, ptr [[TMP5]], align 4
1370 // CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
1371 // CHECK2-NEXT: store i32 1, ptr [[TMP6]], align 4
1372 // CHECK2-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
1373 // CHECK2-NEXT: store ptr [[TMP3]], ptr [[TMP7]], align 8
1374 // CHECK2-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
1375 // CHECK2-NEXT: store ptr [[TMP4]], ptr [[TMP8]], align 8
1376 // CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
1377 // CHECK2-NEXT: store ptr @.offload_sizes, ptr [[TMP9]], align 8
1378 // CHECK2-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
1379 // CHECK2-NEXT: store ptr @.offload_maptypes, ptr [[TMP10]], align 8
1380 // CHECK2-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
1381 // CHECK2-NEXT: store ptr null, ptr [[TMP11]], align 8
1382 // CHECK2-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
1383 // CHECK2-NEXT: store ptr null, ptr [[TMP12]], align 8
1384 // CHECK2-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
1385 // CHECK2-NEXT: store i64 123, ptr [[TMP13]], align 8
1386 // CHECK2-NEXT: [[TMP14:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
1387 // CHECK2-NEXT: store i64 0, ptr [[TMP14]], align 8
1388 // CHECK2-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
1389 // CHECK2-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP15]], align 4
1390 // CHECK2-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
1391 // CHECK2-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP16]], align 4
1392 // CHECK2-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
1393 // CHECK2-NEXT: store i32 0, ptr [[TMP17]], align 4
1394 // CHECK2-NEXT: [[TMP18:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.region_id, ptr [[KERNEL_ARGS]])
1395 // CHECK2-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
1396 // CHECK2-NEXT: br i1 [[TMP19]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
1397 // CHECK2: omp_offload.failed:
1398 // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36(ptr [[THIS1]]) #[[ATTR2:[0-9]+]]
1399 // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT]]
1400 // CHECK2: omp_offload.cont:
1401 // CHECK2-NEXT: [[A2:%.*]] = getelementptr inbounds nuw [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
1402 // CHECK2-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
1403 // CHECK2-NEXT: store ptr [[THIS1]], ptr [[TMP20]], align 8
1404 // CHECK2-NEXT: [[TMP21:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
1405 // CHECK2-NEXT: store ptr [[A2]], ptr [[TMP21]], align 8
1406 // CHECK2-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS5]], i64 0, i64 0
1407 // CHECK2-NEXT: store ptr null, ptr [[TMP22]], align 8
1408 // CHECK2-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
1409 // CHECK2-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
1410 // CHECK2-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 0
1411 // CHECK2-NEXT: store i32 3, ptr [[TMP25]], align 4
1412 // CHECK2-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 1
1413 // CHECK2-NEXT: store i32 1, ptr [[TMP26]], align 4
1414 // CHECK2-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 2
1415 // CHECK2-NEXT: store ptr [[TMP23]], ptr [[TMP27]], align 8
1416 // CHECK2-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 3
1417 // CHECK2-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 8
1418 // CHECK2-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 4
1419 // CHECK2-NEXT: store ptr @.offload_sizes.1, ptr [[TMP29]], align 8
1420 // CHECK2-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 5
1421 // CHECK2-NEXT: store ptr @.offload_maptypes.2, ptr [[TMP30]], align 8
1422 // CHECK2-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 6
1423 // CHECK2-NEXT: store ptr null, ptr [[TMP31]], align 8
1424 // CHECK2-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 7
1425 // CHECK2-NEXT: store ptr null, ptr [[TMP32]], align 8
1426 // CHECK2-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 8
1427 // CHECK2-NEXT: store i64 123, ptr [[TMP33]], align 8
1428 // CHECK2-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 9
1429 // CHECK2-NEXT: store i64 0, ptr [[TMP34]], align 8
1430 // CHECK2-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 10
1431 // CHECK2-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP35]], align 4
1432 // CHECK2-NEXT: [[TMP36:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 11
1433 // CHECK2-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP36]], align 4
1434 // CHECK2-NEXT: [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 12
1435 // CHECK2-NEXT: store i32 0, ptr [[TMP37]], align 4
1436 // CHECK2-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41.region_id, ptr [[KERNEL_ARGS7]])
1437 // CHECK2-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
1438 // CHECK2-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED8:%.*]], label [[OMP_OFFLOAD_CONT9:%.*]]
1439 // CHECK2: omp_offload.failed8:
1440 // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41(ptr [[THIS1]]) #[[ATTR2]]
1441 // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT9]]
1442 // CHECK2: omp_offload.cont9:
1443 // CHECK2-NEXT: [[A10:%.*]] = getelementptr inbounds nuw [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
1444 // CHECK2-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS11]], i32 0, i32 0
1445 // CHECK2-NEXT: store ptr [[THIS1]], ptr [[TMP40]], align 8
1446 // CHECK2-NEXT: [[TMP41:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS12]], i32 0, i32 0
1447 // CHECK2-NEXT: store ptr [[A10]], ptr [[TMP41]], align 8
1448 // CHECK2-NEXT: [[TMP42:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS13]], i64 0, i64 0
1449 // CHECK2-NEXT: store ptr null, ptr [[TMP42]], align 8
1450 // CHECK2-NEXT: [[TMP43:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS11]], i32 0, i32 0
1451 // CHECK2-NEXT: [[TMP44:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS12]], i32 0, i32 0
1452 // CHECK2-NEXT: [[TMP45:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 0
1453 // CHECK2-NEXT: store i32 3, ptr [[TMP45]], align 4
1454 // CHECK2-NEXT: [[TMP46:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 1
1455 // CHECK2-NEXT: store i32 1, ptr [[TMP46]], align 4
1456 // CHECK2-NEXT: [[TMP47:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 2
1457 // CHECK2-NEXT: store ptr [[TMP43]], ptr [[TMP47]], align 8
1458 // CHECK2-NEXT: [[TMP48:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 3
1459 // CHECK2-NEXT: store ptr [[TMP44]], ptr [[TMP48]], align 8
1460 // CHECK2-NEXT: [[TMP49:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 4
1461 // CHECK2-NEXT: store ptr @.offload_sizes.3, ptr [[TMP49]], align 8
1462 // CHECK2-NEXT: [[TMP50:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 5
1463 // CHECK2-NEXT: store ptr @.offload_maptypes.4, ptr [[TMP50]], align 8
1464 // CHECK2-NEXT: [[TMP51:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 6
1465 // CHECK2-NEXT: store ptr null, ptr [[TMP51]], align 8
1466 // CHECK2-NEXT: [[TMP52:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 7
1467 // CHECK2-NEXT: store ptr null, ptr [[TMP52]], align 8
1468 // CHECK2-NEXT: [[TMP53:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 8
1469 // CHECK2-NEXT: store i64 123, ptr [[TMP53]], align 8
1470 // CHECK2-NEXT: [[TMP54:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 9
1471 // CHECK2-NEXT: store i64 0, ptr [[TMP54]], align 8
1472 // CHECK2-NEXT: [[TMP55:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 10
1473 // CHECK2-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP55]], align 4
1474 // CHECK2-NEXT: [[TMP56:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 11
1475 // CHECK2-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP56]], align 4
1476 // CHECK2-NEXT: [[TMP57:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 12
1477 // CHECK2-NEXT: store i32 0, ptr [[TMP57]], align 4
1478 // CHECK2-NEXT: [[TMP58:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46.region_id, ptr [[KERNEL_ARGS15]])
1479 // CHECK2-NEXT: [[TMP59:%.*]] = icmp ne i32 [[TMP58]], 0
1480 // CHECK2-NEXT: br i1 [[TMP59]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]]
1481 // CHECK2: omp_offload.failed16:
1482 // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46(ptr [[THIS1]]) #[[ATTR2]]
1483 // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT17]]
1484 // CHECK2: omp_offload.cont17:
1485 // CHECK2-NEXT: [[A18:%.*]] = getelementptr inbounds nuw [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
1486 // CHECK2-NEXT: [[TMP60:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0
1487 // CHECK2-NEXT: store ptr [[THIS1]], ptr [[TMP60]], align 8
1488 // CHECK2-NEXT: [[TMP61:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 0
1489 // CHECK2-NEXT: store ptr [[A18]], ptr [[TMP61]], align 8
1490 // CHECK2-NEXT: [[TMP62:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS21]], i64 0, i64 0
1491 // CHECK2-NEXT: store ptr null, ptr [[TMP62]], align 8
1492 // CHECK2-NEXT: [[TMP63:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0
1493 // CHECK2-NEXT: [[TMP64:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 0
1494 // CHECK2-NEXT: [[TMP65:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 0
1495 // CHECK2-NEXT: store i32 3, ptr [[TMP65]], align 4
1496 // CHECK2-NEXT: [[TMP66:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 1
1497 // CHECK2-NEXT: store i32 1, ptr [[TMP66]], align 4
1498 // CHECK2-NEXT: [[TMP67:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 2
1499 // CHECK2-NEXT: store ptr [[TMP63]], ptr [[TMP67]], align 8
1500 // CHECK2-NEXT: [[TMP68:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 3
1501 // CHECK2-NEXT: store ptr [[TMP64]], ptr [[TMP68]], align 8
1502 // CHECK2-NEXT: [[TMP69:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 4
1503 // CHECK2-NEXT: store ptr @.offload_sizes.5, ptr [[TMP69]], align 8
1504 // CHECK2-NEXT: [[TMP70:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 5
1505 // CHECK2-NEXT: store ptr @.offload_maptypes.6, ptr [[TMP70]], align 8
1506 // CHECK2-NEXT: [[TMP71:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 6
1507 // CHECK2-NEXT: store ptr null, ptr [[TMP71]], align 8
1508 // CHECK2-NEXT: [[TMP72:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 7
1509 // CHECK2-NEXT: store ptr null, ptr [[TMP72]], align 8
1510 // CHECK2-NEXT: [[TMP73:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 8
1511 // CHECK2-NEXT: store i64 123, ptr [[TMP73]], align 8
1512 // CHECK2-NEXT: [[TMP74:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 9
1513 // CHECK2-NEXT: store i64 0, ptr [[TMP74]], align 8
1514 // CHECK2-NEXT: [[TMP75:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 10
1515 // CHECK2-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP75]], align 4
1516 // CHECK2-NEXT: [[TMP76:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 11
1517 // CHECK2-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP76]], align 4
1518 // CHECK2-NEXT: [[TMP77:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 12
1519 // CHECK2-NEXT: store i32 0, ptr [[TMP77]], align 4
1520 // CHECK2-NEXT: [[TMP78:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52.region_id, ptr [[KERNEL_ARGS23]])
1521 // CHECK2-NEXT: [[TMP79:%.*]] = icmp ne i32 [[TMP78]], 0
1522 // CHECK2-NEXT: br i1 [[TMP79]], label [[OMP_OFFLOAD_FAILED24:%.*]], label [[OMP_OFFLOAD_CONT25:%.*]]
1523 // CHECK2: omp_offload.failed24:
1524 // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52(ptr [[THIS1]]) #[[ATTR2]]
1525 // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT25]]
1526 // CHECK2: omp_offload.cont25:
1527 // CHECK2-NEXT: [[A26:%.*]] = getelementptr inbounds nuw [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
1528 // CHECK2-NEXT: [[TMP80:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS27]], i32 0, i32 0
1529 // CHECK2-NEXT: store ptr [[THIS1]], ptr [[TMP80]], align 8
1530 // CHECK2-NEXT: [[TMP81:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS28]], i32 0, i32 0
1531 // CHECK2-NEXT: store ptr [[A26]], ptr [[TMP81]], align 8
1532 // CHECK2-NEXT: [[TMP82:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS29]], i64 0, i64 0
1533 // CHECK2-NEXT: store ptr null, ptr [[TMP82]], align 8
1534 // CHECK2-NEXT: [[TMP83:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS27]], i32 0, i32 0
1535 // CHECK2-NEXT: [[TMP84:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS28]], i32 0, i32 0
1536 // CHECK2-NEXT: [[TMP85:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 0
1537 // CHECK2-NEXT: store i32 3, ptr [[TMP85]], align 4
1538 // CHECK2-NEXT: [[TMP86:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 1
1539 // CHECK2-NEXT: store i32 1, ptr [[TMP86]], align 4
1540 // CHECK2-NEXT: [[TMP87:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 2
1541 // CHECK2-NEXT: store ptr [[TMP83]], ptr [[TMP87]], align 8
1542 // CHECK2-NEXT: [[TMP88:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 3
1543 // CHECK2-NEXT: store ptr [[TMP84]], ptr [[TMP88]], align 8
1544 // CHECK2-NEXT: [[TMP89:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 4
1545 // CHECK2-NEXT: store ptr @.offload_sizes.7, ptr [[TMP89]], align 8
1546 // CHECK2-NEXT: [[TMP90:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 5
1547 // CHECK2-NEXT: store ptr @.offload_maptypes.8, ptr [[TMP90]], align 8
1548 // CHECK2-NEXT: [[TMP91:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 6
1549 // CHECK2-NEXT: store ptr null, ptr [[TMP91]], align 8
1550 // CHECK2-NEXT: [[TMP92:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 7
1551 // CHECK2-NEXT: store ptr null, ptr [[TMP92]], align 8
1552 // CHECK2-NEXT: [[TMP93:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 8
1553 // CHECK2-NEXT: store i64 123, ptr [[TMP93]], align 8
1554 // CHECK2-NEXT: [[TMP94:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 9
1555 // CHECK2-NEXT: store i64 0, ptr [[TMP94]], align 8
1556 // CHECK2-NEXT: [[TMP95:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 10
1557 // CHECK2-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP95]], align 4
1558 // CHECK2-NEXT: [[TMP96:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 11
1559 // CHECK2-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP96]], align 4
1560 // CHECK2-NEXT: [[TMP97:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 12
1561 // CHECK2-NEXT: store i32 0, ptr [[TMP97]], align 4
1562 // CHECK2-NEXT: [[TMP98:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58.region_id, ptr [[KERNEL_ARGS31]])
1563 // CHECK2-NEXT: [[TMP99:%.*]] = icmp ne i32 [[TMP98]], 0
1564 // CHECK2-NEXT: br i1 [[TMP99]], label [[OMP_OFFLOAD_FAILED32:%.*]], label [[OMP_OFFLOAD_CONT33:%.*]]
1565 // CHECK2: omp_offload.failed32:
1566 // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58(ptr [[THIS1]]) #[[ATTR2]]
1567 // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT33]]
1568 // CHECK2: omp_offload.cont33:
1569 // CHECK2-NEXT: [[A34:%.*]] = getelementptr inbounds nuw [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
1570 // CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A34]], i64 0, i64 0
1571 // CHECK2-NEXT: [[TMP100:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
1572 // CHECK2-NEXT: ret i32 [[TMP100]]
1575 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36
1576 // CHECK2-SAME: (ptr noundef [[THIS:%.*]]) #[[ATTR1:[0-9]+]] {
1577 // CHECK2-NEXT: entry:
1578 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
1579 // CHECK2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
1580 // CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
1581 // CHECK2-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.omp_outlined, ptr [[TMP0]])
1582 // CHECK2-NEXT: ret void
1585 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.omp_outlined
1586 // CHECK2-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
1587 // CHECK2-NEXT: entry:
1588 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1589 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1590 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
1591 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
1592 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
1593 // CHECK2-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
1594 // CHECK2-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
1595 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1596 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1597 // CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
1598 // CHECK2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
1599 // CHECK2-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
1600 // CHECK2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
1601 // CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
1602 // CHECK2-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
1603 // CHECK2-NEXT: store i32 122, ptr [[DOTOMP_COMB_UB]], align 4
1604 // CHECK2-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
1605 // CHECK2-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
1606 // CHECK2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
1607 // CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
1608 // CHECK2-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
1609 // CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
1610 // CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
1611 // CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1612 // CHECK2: cond.true:
1613 // CHECK2-NEXT: br label [[COND_END:%.*]]
1614 // CHECK2: cond.false:
1615 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
1616 // CHECK2-NEXT: br label [[COND_END]]
1617 // CHECK2: cond.end:
1618 // CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
1619 // CHECK2-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
1620 // CHECK2-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
1621 // CHECK2-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
1622 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
1623 // CHECK2: omp.inner.for.cond:
1624 // CHECK2-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8:![0-9]+]]
1625 // CHECK2-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP8]]
1626 // CHECK2-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
1627 // CHECK2-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1628 // CHECK2: omp.inner.for.body:
1629 // CHECK2-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP8]]
1630 // CHECK2-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
1631 // CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP8]]
1632 // CHECK2-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
1633 // CHECK2-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP8]]
1634 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
1635 // CHECK2: omp.inner.for.inc:
1636 // CHECK2-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]]
1637 // CHECK2-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP8]]
1638 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
1639 // CHECK2-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]]
1640 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]]
1641 // CHECK2: omp.inner.for.end:
1642 // CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
1643 // CHECK2: omp.loop.exit:
1644 // CHECK2-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
1645 // CHECK2-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
1646 // CHECK2-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
1647 // CHECK2-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1648 // CHECK2: .omp.final.then:
1649 // CHECK2-NEXT: store i32 123, ptr [[I]], align 4
1650 // CHECK2-NEXT: br label [[DOTOMP_FINAL_DONE]]
1651 // CHECK2: .omp.final.done:
1652 // CHECK2-NEXT: ret void
1655 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.omp_outlined.omp_outlined
1656 // CHECK2-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
1657 // CHECK2-NEXT: entry:
1658 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1659 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1660 // CHECK2-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
1661 // CHECK2-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
1662 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
1663 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
1664 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
1665 // CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
1666 // CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
1667 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1668 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1669 // CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
1670 // CHECK2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
1671 // CHECK2-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
1672 // CHECK2-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
1673 // CHECK2-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
1674 // CHECK2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
1675 // CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
1676 // CHECK2-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
1677 // CHECK2-NEXT: store i32 122, ptr [[DOTOMP_UB]], align 4
1678 // CHECK2-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
1679 // CHECK2-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
1680 // CHECK2-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
1681 // CHECK2-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
1682 // CHECK2-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
1683 // CHECK2-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
1684 // CHECK2-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
1685 // CHECK2-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
1686 // CHECK2-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
1687 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
1688 // CHECK2-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
1689 // CHECK2-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
1690 // CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 122
1691 // CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1692 // CHECK2: cond.true:
1693 // CHECK2-NEXT: br label [[COND_END:%.*]]
1694 // CHECK2: cond.false:
1695 // CHECK2-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
1696 // CHECK2-NEXT: br label [[COND_END]]
1697 // CHECK2: cond.end:
1698 // CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
1699 // CHECK2-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
1700 // CHECK2-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
1701 // CHECK2-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4
1702 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
1703 // CHECK2: omp.inner.for.cond:
1704 // CHECK2-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12:![0-9]+]]
1705 // CHECK2-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP12]]
1706 // CHECK2-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
1707 // CHECK2-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1708 // CHECK2: omp.inner.for.body:
1709 // CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]]
1710 // CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
1711 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1712 // CHECK2-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP12]]
1713 // CHECK2-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, i32 0
1714 // CHECK2-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP12]]
1715 // CHECK2-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
1716 // CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A]], i64 0, i64 [[IDXPROM]]
1717 // CHECK2-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP12]]
1718 // CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
1719 // CHECK2: omp.body.continue:
1720 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
1721 // CHECK2: omp.inner.for.inc:
1722 // CHECK2-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]]
1723 // CHECK2-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
1724 // CHECK2-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]]
1725 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]]
1726 // CHECK2: omp.inner.for.end:
1727 // CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
1728 // CHECK2: omp.loop.exit:
1729 // CHECK2-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP4]])
1730 // CHECK2-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
1731 // CHECK2-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
1732 // CHECK2-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1733 // CHECK2: .omp.final.then:
1734 // CHECK2-NEXT: store i32 123, ptr [[I]], align 4
1735 // CHECK2-NEXT: br label [[DOTOMP_FINAL_DONE]]
1736 // CHECK2: .omp.final.done:
1737 // CHECK2-NEXT: ret void
1740 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41
1741 // CHECK2-SAME: (ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
1742 // CHECK2-NEXT: entry:
1743 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
1744 // CHECK2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
1745 // CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
1746 // CHECK2-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41.omp_outlined, ptr [[TMP0]])
1747 // CHECK2-NEXT: ret void
1750 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41.omp_outlined
1751 // CHECK2-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
1752 // CHECK2-NEXT: entry:
1753 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1754 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1755 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
1756 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
1757 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
1758 // CHECK2-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
1759 // CHECK2-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
1760 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1761 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1762 // CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
1763 // CHECK2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
1764 // CHECK2-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
1765 // CHECK2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
1766 // CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
1767 // CHECK2-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
1768 // CHECK2-NEXT: store i32 122, ptr [[DOTOMP_COMB_UB]], align 4
1769 // CHECK2-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
1770 // CHECK2-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
1771 // CHECK2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
1772 // CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
1773 // CHECK2-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
1774 // CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
1775 // CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
1776 // CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1777 // CHECK2: cond.true:
1778 // CHECK2-NEXT: br label [[COND_END:%.*]]
1779 // CHECK2: cond.false:
1780 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
1781 // CHECK2-NEXT: br label [[COND_END]]
1782 // CHECK2: cond.end:
1783 // CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
1784 // CHECK2-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
1785 // CHECK2-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
1786 // CHECK2-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
1787 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
1788 // CHECK2: omp.inner.for.cond:
1789 // CHECK2-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP17:![0-9]+]]
1790 // CHECK2-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP17]]
1791 // CHECK2-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
1792 // CHECK2-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1793 // CHECK2: omp.inner.for.body:
1794 // CHECK2-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP17]]
1795 // CHECK2-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
1796 // CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP17]]
1797 // CHECK2-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
1798 // CHECK2-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP17]]
1799 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
1800 // CHECK2: omp.inner.for.inc:
1801 // CHECK2-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP17]]
1802 // CHECK2-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP17]]
1803 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
1804 // CHECK2-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP17]]
1805 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP18:![0-9]+]]
1806 // CHECK2: omp.inner.for.end:
1807 // CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
1808 // CHECK2: omp.loop.exit:
1809 // CHECK2-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
1810 // CHECK2-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
1811 // CHECK2-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
1812 // CHECK2-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1813 // CHECK2: .omp.final.then:
1814 // CHECK2-NEXT: store i32 123, ptr [[I]], align 4
1815 // CHECK2-NEXT: br label [[DOTOMP_FINAL_DONE]]
1816 // CHECK2: .omp.final.done:
1817 // CHECK2-NEXT: ret void
1820 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41.omp_outlined.omp_outlined
1821 // CHECK2-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
1822 // CHECK2-NEXT: entry:
1823 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1824 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1825 // CHECK2-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
1826 // CHECK2-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
1827 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
1828 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
1829 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
1830 // CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
1831 // CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
1832 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1833 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1834 // CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
1835 // CHECK2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
1836 // CHECK2-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
1837 // CHECK2-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
1838 // CHECK2-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
1839 // CHECK2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
1840 // CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
1841 // CHECK2-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
1842 // CHECK2-NEXT: store i32 122, ptr [[DOTOMP_UB]], align 4
1843 // CHECK2-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
1844 // CHECK2-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
1845 // CHECK2-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
1846 // CHECK2-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
1847 // CHECK2-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
1848 // CHECK2-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
1849 // CHECK2-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
1850 // CHECK2-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
1851 // CHECK2-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
1852 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
1853 // CHECK2-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
1854 // CHECK2-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
1855 // CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 122
1856 // CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1857 // CHECK2: cond.true:
1858 // CHECK2-NEXT: br label [[COND_END:%.*]]
1859 // CHECK2: cond.false:
1860 // CHECK2-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
1861 // CHECK2-NEXT: br label [[COND_END]]
1862 // CHECK2: cond.end:
1863 // CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
1864 // CHECK2-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
1865 // CHECK2-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
1866 // CHECK2-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4
1867 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
1868 // CHECK2: omp.inner.for.cond:
1869 // CHECK2-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20:![0-9]+]]
1870 // CHECK2-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP20]]
1871 // CHECK2-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
1872 // CHECK2-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1873 // CHECK2: omp.inner.for.body:
1874 // CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]]
1875 // CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
1876 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1877 // CHECK2-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP20]]
1878 // CHECK2-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, i32 0
1879 // CHECK2-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP20]]
1880 // CHECK2-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
1881 // CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A]], i64 0, i64 [[IDXPROM]]
1882 // CHECK2-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP20]]
1883 // CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
1884 // CHECK2: omp.body.continue:
1885 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
1886 // CHECK2: omp.inner.for.inc:
1887 // CHECK2-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]]
1888 // CHECK2-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
1889 // CHECK2-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]]
1890 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]]
1891 // CHECK2: omp.inner.for.end:
1892 // CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
1893 // CHECK2: omp.loop.exit:
1894 // CHECK2-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP4]])
1895 // CHECK2-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
1896 // CHECK2-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
1897 // CHECK2-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1898 // CHECK2: .omp.final.then:
1899 // CHECK2-NEXT: store i32 123, ptr [[I]], align 4
1900 // CHECK2-NEXT: br label [[DOTOMP_FINAL_DONE]]
1901 // CHECK2: .omp.final.done:
1902 // CHECK2-NEXT: ret void
1905 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46
1906 // CHECK2-SAME: (ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
1907 // CHECK2-NEXT: entry:
1908 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
1909 // CHECK2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
1910 // CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
1911 // CHECK2-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46.omp_outlined, ptr [[TMP0]])
1912 // CHECK2-NEXT: ret void
1915 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46.omp_outlined
1916 // CHECK2-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
1917 // CHECK2-NEXT: entry:
1918 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1919 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1920 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
1921 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
1922 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
1923 // CHECK2-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
1924 // CHECK2-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
1925 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1926 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1927 // CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
1928 // CHECK2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
1929 // CHECK2-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
1930 // CHECK2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
1931 // CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
1932 // CHECK2-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
1933 // CHECK2-NEXT: store i32 122, ptr [[DOTOMP_COMB_UB]], align 4
1934 // CHECK2-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
1935 // CHECK2-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
1936 // CHECK2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
1937 // CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
1938 // CHECK2-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
1939 // CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
1940 // CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
1941 // CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1942 // CHECK2: cond.true:
1943 // CHECK2-NEXT: br label [[COND_END:%.*]]
1944 // CHECK2: cond.false:
1945 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
1946 // CHECK2-NEXT: br label [[COND_END]]
1947 // CHECK2: cond.end:
1948 // CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
1949 // CHECK2-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
1950 // CHECK2-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
1951 // CHECK2-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
1952 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
1953 // CHECK2: omp.inner.for.cond:
1954 // CHECK2-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23:![0-9]+]]
1955 // CHECK2-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP23]]
1956 // CHECK2-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
1957 // CHECK2-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1958 // CHECK2: omp.inner.for.body:
1959 // CHECK2-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP23]]
1960 // CHECK2-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
1961 // CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP23]]
1962 // CHECK2-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
1963 // CHECK2-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP23]]
1964 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
1965 // CHECK2: omp.inner.for.inc:
1966 // CHECK2-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]]
1967 // CHECK2-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP23]]
1968 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
1969 // CHECK2-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]]
1970 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]]
1971 // CHECK2: omp.inner.for.end:
1972 // CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
1973 // CHECK2: omp.loop.exit:
1974 // CHECK2-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
1975 // CHECK2-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
1976 // CHECK2-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
1977 // CHECK2-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1978 // CHECK2: .omp.final.then:
1979 // CHECK2-NEXT: store i32 123, ptr [[I]], align 4
1980 // CHECK2-NEXT: br label [[DOTOMP_FINAL_DONE]]
1981 // CHECK2: .omp.final.done:
1982 // CHECK2-NEXT: ret void
1985 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46.omp_outlined.omp_outlined
1986 // CHECK2-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
1987 // CHECK2-NEXT: entry:
1988 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1989 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1990 // CHECK2-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
1991 // CHECK2-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
1992 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
1993 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
1994 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
1995 // CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
1996 // CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
1997 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1998 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1999 // CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
2000 // CHECK2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
2001 // CHECK2-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
2002 // CHECK2-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
2003 // CHECK2-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
2004 // CHECK2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
2005 // CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
2006 // CHECK2-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
2007 // CHECK2-NEXT: store i32 122, ptr [[DOTOMP_UB]], align 4
2008 // CHECK2-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
2009 // CHECK2-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
2010 // CHECK2-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
2011 // CHECK2-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
2012 // CHECK2-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
2013 // CHECK2-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
2014 // CHECK2-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
2015 // CHECK2-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
2016 // CHECK2-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
2017 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
2018 // CHECK2-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP4]], i32 33, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 61)
2019 // CHECK2-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
2020 // CHECK2: omp.dispatch.cond:
2021 // CHECK2-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
2022 // CHECK2-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
2023 // CHECK2-NEXT: [[CONV2:%.*]] = trunc i64 [[TMP6]] to i32
2024 // CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], [[CONV2]]
2025 // CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2026 // CHECK2: cond.true:
2027 // CHECK2-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
2028 // CHECK2-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP7]] to i32
2029 // CHECK2-NEXT: br label [[COND_END:%.*]]
2030 // CHECK2: cond.false:
2031 // CHECK2-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
2032 // CHECK2-NEXT: br label [[COND_END]]
2033 // CHECK2: cond.end:
2034 // CHECK2-NEXT: [[COND:%.*]] = phi i32 [ [[CONV3]], [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ]
2035 // CHECK2-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
2036 // CHECK2-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
2037 // CHECK2-NEXT: store i32 [[TMP9]], ptr [[DOTOMP_IV]], align 4
2038 // CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
2039 // CHECK2-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
2040 // CHECK2-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
2041 // CHECK2-NEXT: br i1 [[CMP4]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
2042 // CHECK2: omp.dispatch.body:
2043 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
2044 // CHECK2: omp.inner.for.cond:
2045 // CHECK2-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26:![0-9]+]]
2046 // CHECK2-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP26]]
2047 // CHECK2-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
2048 // CHECK2-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2049 // CHECK2: omp.inner.for.body:
2050 // CHECK2-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]]
2051 // CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP14]], 1
2052 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2053 // CHECK2-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP26]]
2054 // CHECK2-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, i32 0
2055 // CHECK2-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP26]]
2056 // CHECK2-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP15]] to i64
2057 // CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A]], i64 0, i64 [[IDXPROM]]
2058 // CHECK2-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP26]]
2059 // CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
2060 // CHECK2: omp.body.continue:
2061 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
2062 // CHECK2: omp.inner.for.inc:
2063 // CHECK2-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]]
2064 // CHECK2-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP16]], 1
2065 // CHECK2-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]]
2066 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]]
2067 // CHECK2: omp.inner.for.end:
2068 // CHECK2-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
2069 // CHECK2: omp.dispatch.inc:
2070 // CHECK2-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
2071 // CHECK2-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
2072 // CHECK2-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
2073 // CHECK2-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_LB]], align 4
2074 // CHECK2-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
2075 // CHECK2-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
2076 // CHECK2-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
2077 // CHECK2-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_UB]], align 4
2078 // CHECK2-NEXT: br label [[OMP_DISPATCH_COND]]
2079 // CHECK2: omp.dispatch.end:
2080 // CHECK2-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP4]])
2081 // CHECK2-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
2082 // CHECK2-NEXT: [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0
2083 // CHECK2-NEXT: br i1 [[TMP22]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2084 // CHECK2: .omp.final.then:
2085 // CHECK2-NEXT: store i32 123, ptr [[I]], align 4
2086 // CHECK2-NEXT: br label [[DOTOMP_FINAL_DONE]]
2087 // CHECK2: .omp.final.done:
2088 // CHECK2-NEXT: ret void
2091 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52
2092 // CHECK2-SAME: (ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
2093 // CHECK2-NEXT: entry:
2094 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
2095 // CHECK2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
2096 // CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
2097 // CHECK2-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52.omp_outlined, ptr [[TMP0]])
2098 // CHECK2-NEXT: ret void
2101 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52.omp_outlined
2102 // CHECK2-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
2103 // CHECK2-NEXT: entry:
2104 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2105 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2106 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
2107 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
2108 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
2109 // CHECK2-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
2110 // CHECK2-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
2111 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2112 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2113 // CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
2114 // CHECK2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
2115 // CHECK2-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
2116 // CHECK2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
2117 // CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
2118 // CHECK2-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
2119 // CHECK2-NEXT: store i32 122, ptr [[DOTOMP_COMB_UB]], align 4
2120 // CHECK2-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
2121 // CHECK2-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
2122 // CHECK2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
2123 // CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
2124 // CHECK2-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
2125 // CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
2126 // CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
2127 // CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2128 // CHECK2: cond.true:
2129 // CHECK2-NEXT: br label [[COND_END:%.*]]
2130 // CHECK2: cond.false:
2131 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
2132 // CHECK2-NEXT: br label [[COND_END]]
2133 // CHECK2: cond.end:
2134 // CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
2135 // CHECK2-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
2136 // CHECK2-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
2137 // CHECK2-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
2138 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
2139 // CHECK2: omp.inner.for.cond:
2140 // CHECK2-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29:![0-9]+]]
2141 // CHECK2-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP29]]
2142 // CHECK2-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
2143 // CHECK2-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2144 // CHECK2: omp.inner.for.body:
2145 // CHECK2-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP29]]
2146 // CHECK2-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
2147 // CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP29]]
2148 // CHECK2-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
2149 // CHECK2-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP29]]
2150 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
2151 // CHECK2: omp.inner.for.inc:
2152 // CHECK2-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]]
2153 // CHECK2-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP29]]
2154 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
2155 // CHECK2-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]]
2156 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]]
2157 // CHECK2: omp.inner.for.end:
2158 // CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
2159 // CHECK2: omp.loop.exit:
2160 // CHECK2-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
2161 // CHECK2-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
2162 // CHECK2-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
2163 // CHECK2-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2164 // CHECK2: .omp.final.then:
2165 // CHECK2-NEXT: store i32 123, ptr [[I]], align 4
2166 // CHECK2-NEXT: br label [[DOTOMP_FINAL_DONE]]
2167 // CHECK2: .omp.final.done:
2168 // CHECK2-NEXT: ret void
2171 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52.omp_outlined.omp_outlined
2172 // CHECK2-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
2173 // CHECK2-NEXT: entry:
2174 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2175 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2176 // CHECK2-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
2177 // CHECK2-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
2178 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
2179 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
2180 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
2181 // CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
2182 // CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
2183 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2184 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2185 // CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
2186 // CHECK2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
2187 // CHECK2-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
2188 // CHECK2-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
2189 // CHECK2-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
2190 // CHECK2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
2191 // CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
2192 // CHECK2-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
2193 // CHECK2-NEXT: store i32 122, ptr [[DOTOMP_UB]], align 4
2194 // CHECK2-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
2195 // CHECK2-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
2196 // CHECK2-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
2197 // CHECK2-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
2198 // CHECK2-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
2199 // CHECK2-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
2200 // CHECK2-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
2201 // CHECK2-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
2202 // CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
2203 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
2204 // CHECK2-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
2205 // CHECK2-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
2206 // CHECK2-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB3]], i32 [[TMP6]], i32 1073741859, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 1)
2207 // CHECK2-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
2208 // CHECK2: omp.dispatch.cond:
2209 // CHECK2-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB3]], i32 [[TMP6]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
2210 // CHECK2-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
2211 // CHECK2-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
2212 // CHECK2: omp.dispatch.body:
2213 // CHECK2-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
2214 // CHECK2-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4
2215 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
2216 // CHECK2: omp.inner.for.cond:
2217 // CHECK2-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32:![0-9]+]]
2218 // CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP32]]
2219 // CHECK2-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
2220 // CHECK2-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2221 // CHECK2: omp.inner.for.body:
2222 // CHECK2-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]]
2223 // CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
2224 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2225 // CHECK2-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP32]]
2226 // CHECK2-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, i32 0
2227 // CHECK2-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP32]]
2228 // CHECK2-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP12]] to i64
2229 // CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A]], i64 0, i64 [[IDXPROM]]
2230 // CHECK2-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP32]]
2231 // CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
2232 // CHECK2: omp.body.continue:
2233 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
2234 // CHECK2: omp.inner.for.inc:
2235 // CHECK2-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]]
2236 // CHECK2-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP13]], 1
2237 // CHECK2-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]]
2238 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]]
2239 // CHECK2: omp.inner.for.end:
2240 // CHECK2-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
2241 // CHECK2: omp.dispatch.inc:
2242 // CHECK2-NEXT: br label [[OMP_DISPATCH_COND]]
2243 // CHECK2: omp.dispatch.end:
2244 // CHECK2-NEXT: call void @__kmpc_dispatch_deinit(ptr @[[GLOB3]], i32 [[TMP6]])
2245 // CHECK2-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
2246 // CHECK2-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
2247 // CHECK2-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2248 // CHECK2: .omp.final.then:
2249 // CHECK2-NEXT: store i32 123, ptr [[I]], align 4
2250 // CHECK2-NEXT: br label [[DOTOMP_FINAL_DONE]]
2251 // CHECK2: .omp.final.done:
2252 // CHECK2-NEXT: ret void
2255 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58
2256 // CHECK2-SAME: (ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
2257 // CHECK2-NEXT: entry:
2258 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
2259 // CHECK2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
2260 // CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
2261 // CHECK2-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58.omp_outlined, ptr [[TMP0]])
2262 // CHECK2-NEXT: ret void
2265 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58.omp_outlined
2266 // CHECK2-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
2267 // CHECK2-NEXT: entry:
2268 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2269 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2270 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
2271 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
2272 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
2273 // CHECK2-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
2274 // CHECK2-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
2275 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2276 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2277 // CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
2278 // CHECK2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
2279 // CHECK2-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
2280 // CHECK2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
2281 // CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
2282 // CHECK2-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
2283 // CHECK2-NEXT: store i32 122, ptr [[DOTOMP_COMB_UB]], align 4
2284 // CHECK2-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
2285 // CHECK2-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
2286 // CHECK2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
2287 // CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
2288 // CHECK2-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
2289 // CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
2290 // CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
2291 // CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2292 // CHECK2: cond.true:
2293 // CHECK2-NEXT: br label [[COND_END:%.*]]
2294 // CHECK2: cond.false:
2295 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
2296 // CHECK2-NEXT: br label [[COND_END]]
2297 // CHECK2: cond.end:
2298 // CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
2299 // CHECK2-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
2300 // CHECK2-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
2301 // CHECK2-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
2302 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
2303 // CHECK2: omp.inner.for.cond:
2304 // CHECK2-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35:![0-9]+]]
2305 // CHECK2-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP35]]
2306 // CHECK2-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
2307 // CHECK2-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2308 // CHECK2: omp.inner.for.body:
2309 // CHECK2-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP35]]
2310 // CHECK2-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
2311 // CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP35]]
2312 // CHECK2-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
2313 // CHECK2-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP35]]
2314 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
2315 // CHECK2: omp.inner.for.inc:
2316 // CHECK2-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]]
2317 // CHECK2-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP35]]
2318 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
2319 // CHECK2-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]]
2320 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]]
2321 // CHECK2: omp.inner.for.end:
2322 // CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
2323 // CHECK2: omp.loop.exit:
2324 // CHECK2-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
2325 // CHECK2-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
2326 // CHECK2-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
2327 // CHECK2-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2328 // CHECK2: .omp.final.then:
2329 // CHECK2-NEXT: store i32 123, ptr [[I]], align 4
2330 // CHECK2-NEXT: br label [[DOTOMP_FINAL_DONE]]
2331 // CHECK2: .omp.final.done:
2332 // CHECK2-NEXT: ret void
2335 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58.omp_outlined.omp_outlined
2336 // CHECK2-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
2337 // CHECK2-NEXT: entry:
2338 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2339 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2340 // CHECK2-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
2341 // CHECK2-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
2342 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
2343 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
2344 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
2345 // CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
2346 // CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
2347 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2348 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2349 // CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
2350 // CHECK2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
2351 // CHECK2-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
2352 // CHECK2-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
2353 // CHECK2-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
2354 // CHECK2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
2355 // CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
2356 // CHECK2-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
2357 // CHECK2-NEXT: store i32 122, ptr [[DOTOMP_UB]], align 4
2358 // CHECK2-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
2359 // CHECK2-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
2360 // CHECK2-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
2361 // CHECK2-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
2362 // CHECK2-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
2363 // CHECK2-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
2364 // CHECK2-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
2365 // CHECK2-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
2366 // CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
2367 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
2368 // CHECK2-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
2369 // CHECK2-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
2370 // CHECK2-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB3]], i32 [[TMP6]], i32 1073741859, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 61)
2371 // CHECK2-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
2372 // CHECK2: omp.dispatch.cond:
2373 // CHECK2-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB3]], i32 [[TMP6]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
2374 // CHECK2-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
2375 // CHECK2-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
2376 // CHECK2: omp.dispatch.body:
2377 // CHECK2-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
2378 // CHECK2-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4
2379 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
2380 // CHECK2: omp.inner.for.cond:
2381 // CHECK2-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38:![0-9]+]]
2382 // CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP38]]
2383 // CHECK2-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
2384 // CHECK2-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2385 // CHECK2: omp.inner.for.body:
2386 // CHECK2-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38]]
2387 // CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
2388 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2389 // CHECK2-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP38]]
2390 // CHECK2-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, i32 0
2391 // CHECK2-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP38]]
2392 // CHECK2-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP12]] to i64
2393 // CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A]], i64 0, i64 [[IDXPROM]]
2394 // CHECK2-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP38]]
2395 // CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
2396 // CHECK2: omp.body.continue:
2397 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
2398 // CHECK2: omp.inner.for.inc:
2399 // CHECK2-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38]]
2400 // CHECK2-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP13]], 1
2401 // CHECK2-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38]]
2402 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP39:![0-9]+]]
2403 // CHECK2: omp.inner.for.end:
2404 // CHECK2-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
2405 // CHECK2: omp.dispatch.inc:
2406 // CHECK2-NEXT: br label [[OMP_DISPATCH_COND]]
2407 // CHECK2: omp.dispatch.end:
2408 // CHECK2-NEXT: call void @__kmpc_dispatch_deinit(ptr @[[GLOB3]], i32 [[TMP6]])
2409 // CHECK2-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
2410 // CHECK2-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
2411 // CHECK2-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2412 // CHECK2: .omp.final.then:
2413 // CHECK2-NEXT: store i32 123, ptr [[I]], align 4
2414 // CHECK2-NEXT: br label [[DOTOMP_FINAL_DONE]]
2415 // CHECK2: .omp.final.done:
2416 // CHECK2-NEXT: ret void
2419 // CHECK5-LABEL: define {{[^@]+}}@_Z21teams_template_structv
2420 // CHECK5-SAME: () #[[ATTR0:[0-9]+]] {
2421 // CHECK5-NEXT: entry:
2422 // CHECK5-NEXT: [[V:%.*]] = alloca [[STRUCT_SS:%.*]], align 4
2423 // CHECK5-NEXT: [[CALL:%.*]] = call noundef i32 @_ZN2SSIiLi123ELx456EE3fooEv(ptr noundef nonnull align 4 dereferenceable(496) [[V]])
2424 // CHECK5-NEXT: ret i32 [[CALL]]
2427 // CHECK5-LABEL: define {{[^@]+}}@_ZN2SSIiLi123ELx456EE3fooEv
2428 // CHECK5-SAME: (ptr noundef nonnull align 4 dereferenceable(496) [[THIS:%.*]]) #[[ATTR0]] comdat align 2 {
2429 // CHECK5-NEXT: entry:
2430 // CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
2431 // CHECK5-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 4
2432 // CHECK5-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 4
2433 // CHECK5-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 4
2434 // CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
2435 // CHECK5-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
2436 // CHECK5-NEXT: [[DOTOFFLOAD_BASEPTRS3:%.*]] = alloca [1 x ptr], align 4
2437 // CHECK5-NEXT: [[DOTOFFLOAD_PTRS4:%.*]] = alloca [1 x ptr], align 4
2438 // CHECK5-NEXT: [[DOTOFFLOAD_MAPPERS5:%.*]] = alloca [1 x ptr], align 4
2439 // CHECK5-NEXT: [[_TMP6:%.*]] = alloca i32, align 4
2440 // CHECK5-NEXT: [[KERNEL_ARGS7:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
2441 // CHECK5-NEXT: [[DOTOFFLOAD_BASEPTRS11:%.*]] = alloca [1 x ptr], align 4
2442 // CHECK5-NEXT: [[DOTOFFLOAD_PTRS12:%.*]] = alloca [1 x ptr], align 4
2443 // CHECK5-NEXT: [[DOTOFFLOAD_MAPPERS13:%.*]] = alloca [1 x ptr], align 4
2444 // CHECK5-NEXT: [[_TMP14:%.*]] = alloca i32, align 4
2445 // CHECK5-NEXT: [[KERNEL_ARGS15:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
2446 // CHECK5-NEXT: [[DOTOFFLOAD_BASEPTRS19:%.*]] = alloca [1 x ptr], align 4
2447 // CHECK5-NEXT: [[DOTOFFLOAD_PTRS20:%.*]] = alloca [1 x ptr], align 4
2448 // CHECK5-NEXT: [[DOTOFFLOAD_MAPPERS21:%.*]] = alloca [1 x ptr], align 4
2449 // CHECK5-NEXT: [[_TMP22:%.*]] = alloca i32, align 4
2450 // CHECK5-NEXT: [[KERNEL_ARGS23:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
2451 // CHECK5-NEXT: [[DOTOFFLOAD_BASEPTRS27:%.*]] = alloca [1 x ptr], align 4
2452 // CHECK5-NEXT: [[DOTOFFLOAD_PTRS28:%.*]] = alloca [1 x ptr], align 4
2453 // CHECK5-NEXT: [[DOTOFFLOAD_MAPPERS29:%.*]] = alloca [1 x ptr], align 4
2454 // CHECK5-NEXT: [[_TMP30:%.*]] = alloca i32, align 4
2455 // CHECK5-NEXT: [[KERNEL_ARGS31:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
2456 // CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
2457 // CHECK5-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
2458 // CHECK5-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_SS:%.*]], ptr [[THIS1]], i32 0, i32 0
2459 // CHECK5-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
2460 // CHECK5-NEXT: store ptr [[THIS1]], ptr [[TMP0]], align 4
2461 // CHECK5-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
2462 // CHECK5-NEXT: store ptr [[A]], ptr [[TMP1]], align 4
2463 // CHECK5-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
2464 // CHECK5-NEXT: store ptr null, ptr [[TMP2]], align 4
2465 // CHECK5-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
2466 // CHECK5-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
2467 // CHECK5-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
2468 // CHECK5-NEXT: store i32 3, ptr [[TMP5]], align 4
2469 // CHECK5-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
2470 // CHECK5-NEXT: store i32 1, ptr [[TMP6]], align 4
2471 // CHECK5-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
2472 // CHECK5-NEXT: store ptr [[TMP3]], ptr [[TMP7]], align 4
2473 // CHECK5-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
2474 // CHECK5-NEXT: store ptr [[TMP4]], ptr [[TMP8]], align 4
2475 // CHECK5-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
2476 // CHECK5-NEXT: store ptr @.offload_sizes, ptr [[TMP9]], align 4
2477 // CHECK5-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
2478 // CHECK5-NEXT: store ptr @.offload_maptypes, ptr [[TMP10]], align 4
2479 // CHECK5-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
2480 // CHECK5-NEXT: store ptr null, ptr [[TMP11]], align 4
2481 // CHECK5-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
2482 // CHECK5-NEXT: store ptr null, ptr [[TMP12]], align 4
2483 // CHECK5-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
2484 // CHECK5-NEXT: store i64 123, ptr [[TMP13]], align 8
2485 // CHECK5-NEXT: [[TMP14:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
2486 // CHECK5-NEXT: store i64 0, ptr [[TMP14]], align 8
2487 // CHECK5-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
2488 // CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP15]], align 4
2489 // CHECK5-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
2490 // CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP16]], align 4
2491 // CHECK5-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
2492 // CHECK5-NEXT: store i32 0, ptr [[TMP17]], align 4
2493 // CHECK5-NEXT: [[TMP18:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.region_id, ptr [[KERNEL_ARGS]])
2494 // CHECK5-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
2495 // CHECK5-NEXT: br i1 [[TMP19]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
2496 // CHECK5: omp_offload.failed:
2497 // CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36(ptr [[THIS1]]) #[[ATTR2:[0-9]+]]
2498 // CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT]]
2499 // CHECK5: omp_offload.cont:
2500 // CHECK5-NEXT: [[A2:%.*]] = getelementptr inbounds nuw [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
2501 // CHECK5-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
2502 // CHECK5-NEXT: store ptr [[THIS1]], ptr [[TMP20]], align 4
2503 // CHECK5-NEXT: [[TMP21:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
2504 // CHECK5-NEXT: store ptr [[A2]], ptr [[TMP21]], align 4
2505 // CHECK5-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS5]], i32 0, i32 0
2506 // CHECK5-NEXT: store ptr null, ptr [[TMP22]], align 4
2507 // CHECK5-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
2508 // CHECK5-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
2509 // CHECK5-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 0
2510 // CHECK5-NEXT: store i32 3, ptr [[TMP25]], align 4
2511 // CHECK5-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 1
2512 // CHECK5-NEXT: store i32 1, ptr [[TMP26]], align 4
2513 // CHECK5-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 2
2514 // CHECK5-NEXT: store ptr [[TMP23]], ptr [[TMP27]], align 4
2515 // CHECK5-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 3
2516 // CHECK5-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 4
2517 // CHECK5-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 4
2518 // CHECK5-NEXT: store ptr @.offload_sizes.1, ptr [[TMP29]], align 4
2519 // CHECK5-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 5
2520 // CHECK5-NEXT: store ptr @.offload_maptypes.2, ptr [[TMP30]], align 4
2521 // CHECK5-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 6
2522 // CHECK5-NEXT: store ptr null, ptr [[TMP31]], align 4
2523 // CHECK5-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 7
2524 // CHECK5-NEXT: store ptr null, ptr [[TMP32]], align 4
2525 // CHECK5-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 8
2526 // CHECK5-NEXT: store i64 123, ptr [[TMP33]], align 8
2527 // CHECK5-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 9
2528 // CHECK5-NEXT: store i64 0, ptr [[TMP34]], align 8
2529 // CHECK5-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 10
2530 // CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP35]], align 4
2531 // CHECK5-NEXT: [[TMP36:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 11
2532 // CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP36]], align 4
2533 // CHECK5-NEXT: [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 12
2534 // CHECK5-NEXT: store i32 0, ptr [[TMP37]], align 4
2535 // CHECK5-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41.region_id, ptr [[KERNEL_ARGS7]])
2536 // CHECK5-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
2537 // CHECK5-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED8:%.*]], label [[OMP_OFFLOAD_CONT9:%.*]]
2538 // CHECK5: omp_offload.failed8:
2539 // CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41(ptr [[THIS1]]) #[[ATTR2]]
2540 // CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT9]]
2541 // CHECK5: omp_offload.cont9:
2542 // CHECK5-NEXT: [[A10:%.*]] = getelementptr inbounds nuw [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
2543 // CHECK5-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS11]], i32 0, i32 0
2544 // CHECK5-NEXT: store ptr [[THIS1]], ptr [[TMP40]], align 4
2545 // CHECK5-NEXT: [[TMP41:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS12]], i32 0, i32 0
2546 // CHECK5-NEXT: store ptr [[A10]], ptr [[TMP41]], align 4
2547 // CHECK5-NEXT: [[TMP42:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS13]], i32 0, i32 0
2548 // CHECK5-NEXT: store ptr null, ptr [[TMP42]], align 4
2549 // CHECK5-NEXT: [[TMP43:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS11]], i32 0, i32 0
2550 // CHECK5-NEXT: [[TMP44:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS12]], i32 0, i32 0
2551 // CHECK5-NEXT: [[TMP45:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 0
2552 // CHECK5-NEXT: store i32 3, ptr [[TMP45]], align 4
2553 // CHECK5-NEXT: [[TMP46:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 1
2554 // CHECK5-NEXT: store i32 1, ptr [[TMP46]], align 4
2555 // CHECK5-NEXT: [[TMP47:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 2
2556 // CHECK5-NEXT: store ptr [[TMP43]], ptr [[TMP47]], align 4
2557 // CHECK5-NEXT: [[TMP48:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 3
2558 // CHECK5-NEXT: store ptr [[TMP44]], ptr [[TMP48]], align 4
2559 // CHECK5-NEXT: [[TMP49:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 4
2560 // CHECK5-NEXT: store ptr @.offload_sizes.3, ptr [[TMP49]], align 4
2561 // CHECK5-NEXT: [[TMP50:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 5
2562 // CHECK5-NEXT: store ptr @.offload_maptypes.4, ptr [[TMP50]], align 4
2563 // CHECK5-NEXT: [[TMP51:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 6
2564 // CHECK5-NEXT: store ptr null, ptr [[TMP51]], align 4
2565 // CHECK5-NEXT: [[TMP52:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 7
2566 // CHECK5-NEXT: store ptr null, ptr [[TMP52]], align 4
2567 // CHECK5-NEXT: [[TMP53:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 8
2568 // CHECK5-NEXT: store i64 123, ptr [[TMP53]], align 8
2569 // CHECK5-NEXT: [[TMP54:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 9
2570 // CHECK5-NEXT: store i64 0, ptr [[TMP54]], align 8
2571 // CHECK5-NEXT: [[TMP55:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 10
2572 // CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP55]], align 4
2573 // CHECK5-NEXT: [[TMP56:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 11
2574 // CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP56]], align 4
2575 // CHECK5-NEXT: [[TMP57:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 12
2576 // CHECK5-NEXT: store i32 0, ptr [[TMP57]], align 4
2577 // CHECK5-NEXT: [[TMP58:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46.region_id, ptr [[KERNEL_ARGS15]])
2578 // CHECK5-NEXT: [[TMP59:%.*]] = icmp ne i32 [[TMP58]], 0
2579 // CHECK5-NEXT: br i1 [[TMP59]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]]
2580 // CHECK5: omp_offload.failed16:
2581 // CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46(ptr [[THIS1]]) #[[ATTR2]]
2582 // CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT17]]
2583 // CHECK5: omp_offload.cont17:
2584 // CHECK5-NEXT: [[A18:%.*]] = getelementptr inbounds nuw [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
2585 // CHECK5-NEXT: [[TMP60:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0
2586 // CHECK5-NEXT: store ptr [[THIS1]], ptr [[TMP60]], align 4
2587 // CHECK5-NEXT: [[TMP61:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 0
2588 // CHECK5-NEXT: store ptr [[A18]], ptr [[TMP61]], align 4
2589 // CHECK5-NEXT: [[TMP62:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 0
2590 // CHECK5-NEXT: store ptr null, ptr [[TMP62]], align 4
2591 // CHECK5-NEXT: [[TMP63:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0
2592 // CHECK5-NEXT: [[TMP64:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 0
2593 // CHECK5-NEXT: [[TMP65:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 0
2594 // CHECK5-NEXT: store i32 3, ptr [[TMP65]], align 4
2595 // CHECK5-NEXT: [[TMP66:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 1
2596 // CHECK5-NEXT: store i32 1, ptr [[TMP66]], align 4
2597 // CHECK5-NEXT: [[TMP67:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 2
2598 // CHECK5-NEXT: store ptr [[TMP63]], ptr [[TMP67]], align 4
2599 // CHECK5-NEXT: [[TMP68:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 3
2600 // CHECK5-NEXT: store ptr [[TMP64]], ptr [[TMP68]], align 4
2601 // CHECK5-NEXT: [[TMP69:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 4
2602 // CHECK5-NEXT: store ptr @.offload_sizes.5, ptr [[TMP69]], align 4
2603 // CHECK5-NEXT: [[TMP70:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 5
2604 // CHECK5-NEXT: store ptr @.offload_maptypes.6, ptr [[TMP70]], align 4
2605 // CHECK5-NEXT: [[TMP71:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 6
2606 // CHECK5-NEXT: store ptr null, ptr [[TMP71]], align 4
2607 // CHECK5-NEXT: [[TMP72:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 7
2608 // CHECK5-NEXT: store ptr null, ptr [[TMP72]], align 4
2609 // CHECK5-NEXT: [[TMP73:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 8
2610 // CHECK5-NEXT: store i64 123, ptr [[TMP73]], align 8
2611 // CHECK5-NEXT: [[TMP74:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 9
2612 // CHECK5-NEXT: store i64 0, ptr [[TMP74]], align 8
2613 // CHECK5-NEXT: [[TMP75:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 10
2614 // CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP75]], align 4
2615 // CHECK5-NEXT: [[TMP76:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 11
2616 // CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP76]], align 4
2617 // CHECK5-NEXT: [[TMP77:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 12
2618 // CHECK5-NEXT: store i32 0, ptr [[TMP77]], align 4
2619 // CHECK5-NEXT: [[TMP78:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52.region_id, ptr [[KERNEL_ARGS23]])
2620 // CHECK5-NEXT: [[TMP79:%.*]] = icmp ne i32 [[TMP78]], 0
2621 // CHECK5-NEXT: br i1 [[TMP79]], label [[OMP_OFFLOAD_FAILED24:%.*]], label [[OMP_OFFLOAD_CONT25:%.*]]
2622 // CHECK5: omp_offload.failed24:
2623 // CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52(ptr [[THIS1]]) #[[ATTR2]]
2624 // CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT25]]
2625 // CHECK5: omp_offload.cont25:
2626 // CHECK5-NEXT: [[A26:%.*]] = getelementptr inbounds nuw [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
2627 // CHECK5-NEXT: [[TMP80:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS27]], i32 0, i32 0
2628 // CHECK5-NEXT: store ptr [[THIS1]], ptr [[TMP80]], align 4
2629 // CHECK5-NEXT: [[TMP81:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS28]], i32 0, i32 0
2630 // CHECK5-NEXT: store ptr [[A26]], ptr [[TMP81]], align 4
2631 // CHECK5-NEXT: [[TMP82:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS29]], i32 0, i32 0
2632 // CHECK5-NEXT: store ptr null, ptr [[TMP82]], align 4
2633 // CHECK5-NEXT: [[TMP83:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS27]], i32 0, i32 0
2634 // CHECK5-NEXT: [[TMP84:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS28]], i32 0, i32 0
2635 // CHECK5-NEXT: [[TMP85:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 0
2636 // CHECK5-NEXT: store i32 3, ptr [[TMP85]], align 4
2637 // CHECK5-NEXT: [[TMP86:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 1
2638 // CHECK5-NEXT: store i32 1, ptr [[TMP86]], align 4
2639 // CHECK5-NEXT: [[TMP87:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 2
2640 // CHECK5-NEXT: store ptr [[TMP83]], ptr [[TMP87]], align 4
2641 // CHECK5-NEXT: [[TMP88:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 3
2642 // CHECK5-NEXT: store ptr [[TMP84]], ptr [[TMP88]], align 4
2643 // CHECK5-NEXT: [[TMP89:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 4
2644 // CHECK5-NEXT: store ptr @.offload_sizes.7, ptr [[TMP89]], align 4
2645 // CHECK5-NEXT: [[TMP90:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 5
2646 // CHECK5-NEXT: store ptr @.offload_maptypes.8, ptr [[TMP90]], align 4
2647 // CHECK5-NEXT: [[TMP91:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 6
2648 // CHECK5-NEXT: store ptr null, ptr [[TMP91]], align 4
2649 // CHECK5-NEXT: [[TMP92:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 7
2650 // CHECK5-NEXT: store ptr null, ptr [[TMP92]], align 4
2651 // CHECK5-NEXT: [[TMP93:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 8
2652 // CHECK5-NEXT: store i64 123, ptr [[TMP93]], align 8
2653 // CHECK5-NEXT: [[TMP94:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 9
2654 // CHECK5-NEXT: store i64 0, ptr [[TMP94]], align 8
2655 // CHECK5-NEXT: [[TMP95:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 10
2656 // CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP95]], align 4
2657 // CHECK5-NEXT: [[TMP96:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 11
2658 // CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP96]], align 4
2659 // CHECK5-NEXT: [[TMP97:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 12
2660 // CHECK5-NEXT: store i32 0, ptr [[TMP97]], align 4
2661 // CHECK5-NEXT: [[TMP98:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58.region_id, ptr [[KERNEL_ARGS31]])
2662 // CHECK5-NEXT: [[TMP99:%.*]] = icmp ne i32 [[TMP98]], 0
2663 // CHECK5-NEXT: br i1 [[TMP99]], label [[OMP_OFFLOAD_FAILED32:%.*]], label [[OMP_OFFLOAD_CONT33:%.*]]
2664 // CHECK5: omp_offload.failed32:
2665 // CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58(ptr [[THIS1]]) #[[ATTR2]]
2666 // CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT33]]
2667 // CHECK5: omp_offload.cont33:
2668 // CHECK5-NEXT: [[A34:%.*]] = getelementptr inbounds nuw [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
2669 // CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A34]], i32 0, i32 0
2670 // CHECK5-NEXT: [[TMP100:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
2671 // CHECK5-NEXT: ret i32 [[TMP100]]
2674 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36
2675 // CHECK5-SAME: (ptr noundef [[THIS:%.*]]) #[[ATTR1:[0-9]+]] {
2676 // CHECK5-NEXT: entry:
2677 // CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
2678 // CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
2679 // CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
2680 // CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.omp_outlined, ptr [[TMP0]])
2681 // CHECK5-NEXT: ret void
2684 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.omp_outlined
2685 // CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
2686 // CHECK5-NEXT: entry:
2687 // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
2688 // CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
2689 // CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
2690 // CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
2691 // CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
2692 // CHECK5-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
2693 // CHECK5-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
2694 // CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2695 // CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2696 // CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
2697 // CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
2698 // CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
2699 // CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
2700 // CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
2701 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
2702 // CHECK5-NEXT: store i32 122, ptr [[DOTOMP_COMB_UB]], align 4
2703 // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
2704 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
2705 // CHECK5-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
2706 // CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
2707 // CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
2708 // CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
2709 // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
2710 // CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2711 // CHECK5: cond.true:
2712 // CHECK5-NEXT: br label [[COND_END:%.*]]
2713 // CHECK5: cond.false:
2714 // CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
2715 // CHECK5-NEXT: br label [[COND_END]]
2716 // CHECK5: cond.end:
2717 // CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
2718 // CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
2719 // CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
2720 // CHECK5-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
2721 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
2722 // CHECK5: omp.inner.for.cond:
2723 // CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9:![0-9]+]]
2724 // CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP9]]
2725 // CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
2726 // CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2727 // CHECK5: omp.inner.for.body:
2728 // CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP9]]
2729 // CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP9]]
2730 // CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.omp_outlined.omp_outlined, i32 [[TMP8]], i32 [[TMP9]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP9]]
2731 // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
2732 // CHECK5: omp.inner.for.inc:
2733 // CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]]
2734 // CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP9]]
2735 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
2736 // CHECK5-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]]
2737 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]]
2738 // CHECK5: omp.inner.for.end:
2739 // CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
2740 // CHECK5: omp.loop.exit:
2741 // CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
2742 // CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
2743 // CHECK5-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
2744 // CHECK5-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2745 // CHECK5: .omp.final.then:
2746 // CHECK5-NEXT: store i32 123, ptr [[I]], align 4
2747 // CHECK5-NEXT: br label [[DOTOMP_FINAL_DONE]]
2748 // CHECK5: .omp.final.done:
2749 // CHECK5-NEXT: ret void
2752 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.omp_outlined.omp_outlined
2753 // CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
2754 // CHECK5-NEXT: entry:
2755 // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
2756 // CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
2757 // CHECK5-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
2758 // CHECK5-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
2759 // CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
2760 // CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
2761 // CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
2762 // CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
2763 // CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
2764 // CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2765 // CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2766 // CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
2767 // CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
2768 // CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
2769 // CHECK5-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
2770 // CHECK5-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
2771 // CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
2772 // CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
2773 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
2774 // CHECK5-NEXT: store i32 122, ptr [[DOTOMP_UB]], align 4
2775 // CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
2776 // CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
2777 // CHECK5-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_LB]], align 4
2778 // CHECK5-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4
2779 // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
2780 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
2781 // CHECK5-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
2782 // CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
2783 // CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
2784 // CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
2785 // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 122
2786 // CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2787 // CHECK5: cond.true:
2788 // CHECK5-NEXT: br label [[COND_END:%.*]]
2789 // CHECK5: cond.false:
2790 // CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
2791 // CHECK5-NEXT: br label [[COND_END]]
2792 // CHECK5: cond.end:
2793 // CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
2794 // CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
2795 // CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
2796 // CHECK5-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4
2797 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
2798 // CHECK5: omp.inner.for.cond:
2799 // CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]]
2800 // CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP13]]
2801 // CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
2802 // CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2803 // CHECK5: omp.inner.for.body:
2804 // CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]]
2805 // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
2806 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2807 // CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP13]]
2808 // CHECK5-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, i32 0
2809 // CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP13]]
2810 // CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A]], i32 0, i32 [[TMP11]]
2811 // CHECK5-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP13]]
2812 // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
2813 // CHECK5: omp.body.continue:
2814 // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
2815 // CHECK5: omp.inner.for.inc:
2816 // CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]]
2817 // CHECK5-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP12]], 1
2818 // CHECK5-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]]
2819 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]]
2820 // CHECK5: omp.inner.for.end:
2821 // CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
2822 // CHECK5: omp.loop.exit:
2823 // CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP4]])
2824 // CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
2825 // CHECK5-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
2826 // CHECK5-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2827 // CHECK5: .omp.final.then:
2828 // CHECK5-NEXT: store i32 123, ptr [[I]], align 4
2829 // CHECK5-NEXT: br label [[DOTOMP_FINAL_DONE]]
2830 // CHECK5: .omp.final.done:
2831 // CHECK5-NEXT: ret void
2834 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41
2835 // CHECK5-SAME: (ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
2836 // CHECK5-NEXT: entry:
2837 // CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
2838 // CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
2839 // CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
2840 // CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41.omp_outlined, ptr [[TMP0]])
2841 // CHECK5-NEXT: ret void
2844 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41.omp_outlined
2845 // CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
2846 // CHECK5-NEXT: entry:
2847 // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
2848 // CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
2849 // CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
2850 // CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
2851 // CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
2852 // CHECK5-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
2853 // CHECK5-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
2854 // CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2855 // CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2856 // CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
2857 // CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
2858 // CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
2859 // CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
2860 // CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
2861 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
2862 // CHECK5-NEXT: store i32 122, ptr [[DOTOMP_COMB_UB]], align 4
2863 // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
2864 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
2865 // CHECK5-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
2866 // CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
2867 // CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
2868 // CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
2869 // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
2870 // CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2871 // CHECK5: cond.true:
2872 // CHECK5-NEXT: br label [[COND_END:%.*]]
2873 // CHECK5: cond.false:
2874 // CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
2875 // CHECK5-NEXT: br label [[COND_END]]
2876 // CHECK5: cond.end:
2877 // CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
2878 // CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
2879 // CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
2880 // CHECK5-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
2881 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
2882 // CHECK5: omp.inner.for.cond:
2883 // CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18:![0-9]+]]
2884 // CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP18]]
2885 // CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
2886 // CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2887 // CHECK5: omp.inner.for.body:
2888 // CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP18]]
2889 // CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP18]]
2890 // CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41.omp_outlined.omp_outlined, i32 [[TMP8]], i32 [[TMP9]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP18]]
2891 // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
2892 // CHECK5: omp.inner.for.inc:
2893 // CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
2894 // CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP18]]
2895 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
2896 // CHECK5-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
2897 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
2898 // CHECK5: omp.inner.for.end:
2899 // CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
2900 // CHECK5: omp.loop.exit:
2901 // CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
2902 // CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
2903 // CHECK5-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
2904 // CHECK5-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2905 // CHECK5: .omp.final.then:
2906 // CHECK5-NEXT: store i32 123, ptr [[I]], align 4
2907 // CHECK5-NEXT: br label [[DOTOMP_FINAL_DONE]]
2908 // CHECK5: .omp.final.done:
2909 // CHECK5-NEXT: ret void
2912 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41.omp_outlined.omp_outlined
2913 // CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
2914 // CHECK5-NEXT: entry:
2915 // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
2916 // CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
2917 // CHECK5-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
2918 // CHECK5-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
2919 // CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
2920 // CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
2921 // CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
2922 // CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
2923 // CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
2924 // CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2925 // CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2926 // CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
2927 // CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
2928 // CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
2929 // CHECK5-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
2930 // CHECK5-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
2931 // CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
2932 // CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
2933 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
2934 // CHECK5-NEXT: store i32 122, ptr [[DOTOMP_UB]], align 4
2935 // CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
2936 // CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
2937 // CHECK5-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_LB]], align 4
2938 // CHECK5-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4
2939 // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
2940 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
2941 // CHECK5-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
2942 // CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
2943 // CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
2944 // CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
2945 // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 122
2946 // CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2947 // CHECK5: cond.true:
2948 // CHECK5-NEXT: br label [[COND_END:%.*]]
2949 // CHECK5: cond.false:
2950 // CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
2951 // CHECK5-NEXT: br label [[COND_END]]
2952 // CHECK5: cond.end:
2953 // CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
2954 // CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
2955 // CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
2956 // CHECK5-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4
2957 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
2958 // CHECK5: omp.inner.for.cond:
2959 // CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21:![0-9]+]]
2960 // CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP21]]
2961 // CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
2962 // CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2963 // CHECK5: omp.inner.for.body:
2964 // CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]]
2965 // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
2966 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2967 // CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP21]]
2968 // CHECK5-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, i32 0
2969 // CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP21]]
2970 // CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A]], i32 0, i32 [[TMP11]]
2971 // CHECK5-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP21]]
2972 // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
2973 // CHECK5: omp.body.continue:
2974 // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
2975 // CHECK5: omp.inner.for.inc:
2976 // CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]]
2977 // CHECK5-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP12]], 1
2978 // CHECK5-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]]
2979 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]]
2980 // CHECK5: omp.inner.for.end:
2981 // CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
2982 // CHECK5: omp.loop.exit:
2983 // CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP4]])
2984 // CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
2985 // CHECK5-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
2986 // CHECK5-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2987 // CHECK5: .omp.final.then:
2988 // CHECK5-NEXT: store i32 123, ptr [[I]], align 4
2989 // CHECK5-NEXT: br label [[DOTOMP_FINAL_DONE]]
2990 // CHECK5: .omp.final.done:
2991 // CHECK5-NEXT: ret void
2994 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46
2995 // CHECK5-SAME: (ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
2996 // CHECK5-NEXT: entry:
2997 // CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
2998 // CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
2999 // CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
3000 // CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46.omp_outlined, ptr [[TMP0]])
3001 // CHECK5-NEXT: ret void
3004 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46.omp_outlined
3005 // CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
3006 // CHECK5-NEXT: entry:
3007 // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
3008 // CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
3009 // CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
3010 // CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
3011 // CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
3012 // CHECK5-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
3013 // CHECK5-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
3014 // CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3015 // CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3016 // CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
3017 // CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
3018 // CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
3019 // CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
3020 // CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
3021 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
3022 // CHECK5-NEXT: store i32 122, ptr [[DOTOMP_COMB_UB]], align 4
3023 // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
3024 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
3025 // CHECK5-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
3026 // CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
3027 // CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
3028 // CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
3029 // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
3030 // CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3031 // CHECK5: cond.true:
3032 // CHECK5-NEXT: br label [[COND_END:%.*]]
3033 // CHECK5: cond.false:
3034 // CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
3035 // CHECK5-NEXT: br label [[COND_END]]
3036 // CHECK5: cond.end:
3037 // CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
3038 // CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
3039 // CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
3040 // CHECK5-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
3041 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
3042 // CHECK5: omp.inner.for.cond:
3043 // CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24:![0-9]+]]
3044 // CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP24]]
3045 // CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
3046 // CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3047 // CHECK5: omp.inner.for.body:
3048 // CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP24]]
3049 // CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP24]]
3050 // CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46.omp_outlined.omp_outlined, i32 [[TMP8]], i32 [[TMP9]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP24]]
3051 // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
3052 // CHECK5: omp.inner.for.inc:
3053 // CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
3054 // CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP24]]
3055 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
3056 // CHECK5-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
3057 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
3058 // CHECK5: omp.inner.for.end:
3059 // CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
3060 // CHECK5: omp.loop.exit:
3061 // CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
3062 // CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
3063 // CHECK5-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
3064 // CHECK5-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3065 // CHECK5: .omp.final.then:
3066 // CHECK5-NEXT: store i32 123, ptr [[I]], align 4
3067 // CHECK5-NEXT: br label [[DOTOMP_FINAL_DONE]]
3068 // CHECK5: .omp.final.done:
3069 // CHECK5-NEXT: ret void
3072 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46.omp_outlined.omp_outlined
3073 // CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
3074 // CHECK5-NEXT: entry:
3075 // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
3076 // CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
3077 // CHECK5-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
3078 // CHECK5-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
3079 // CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
3080 // CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
3081 // CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
3082 // CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
3083 // CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
3084 // CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3085 // CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3086 // CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
3087 // CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
3088 // CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
3089 // CHECK5-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
3090 // CHECK5-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
3091 // CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
3092 // CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
3093 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
3094 // CHECK5-NEXT: store i32 122, ptr [[DOTOMP_UB]], align 4
3095 // CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
3096 // CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
3097 // CHECK5-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_LB]], align 4
3098 // CHECK5-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4
3099 // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
3100 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
3101 // CHECK5-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
3102 // CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
3103 // CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP4]], i32 33, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 61)
3104 // CHECK5-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
3105 // CHECK5: omp.dispatch.cond:
3106 // CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
3107 // CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
3108 // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], [[TMP6]]
3109 // CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3110 // CHECK5: cond.true:
3111 // CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
3112 // CHECK5-NEXT: br label [[COND_END:%.*]]
3113 // CHECK5: cond.false:
3114 // CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
3115 // CHECK5-NEXT: br label [[COND_END]]
3116 // CHECK5: cond.end:
3117 // CHECK5-NEXT: [[COND:%.*]] = phi i32 [ [[TMP7]], [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ]
3118 // CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
3119 // CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
3120 // CHECK5-NEXT: store i32 [[TMP9]], ptr [[DOTOMP_IV]], align 4
3121 // CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
3122 // CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
3123 // CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
3124 // CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
3125 // CHECK5: omp.dispatch.body:
3126 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
3127 // CHECK5: omp.inner.for.cond:
3128 // CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27:![0-9]+]]
3129 // CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP27]]
3130 // CHECK5-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
3131 // CHECK5-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3132 // CHECK5: omp.inner.for.body:
3133 // CHECK5-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]]
3134 // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP14]], 1
3135 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
3136 // CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP27]]
3137 // CHECK5-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, i32 0
3138 // CHECK5-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP27]]
3139 // CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A]], i32 0, i32 [[TMP15]]
3140 // CHECK5-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP27]]
3141 // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
3142 // CHECK5: omp.body.continue:
3143 // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
3144 // CHECK5: omp.inner.for.inc:
3145 // CHECK5-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]]
3146 // CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP16]], 1
3147 // CHECK5-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]]
3148 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]]
3149 // CHECK5: omp.inner.for.end:
3150 // CHECK5-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
3151 // CHECK5: omp.dispatch.inc:
3152 // CHECK5-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
3153 // CHECK5-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
3154 // CHECK5-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
3155 // CHECK5-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_LB]], align 4
3156 // CHECK5-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
3157 // CHECK5-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
3158 // CHECK5-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
3159 // CHECK5-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_UB]], align 4
3160 // CHECK5-NEXT: br label [[OMP_DISPATCH_COND]]
3161 // CHECK5: omp.dispatch.end:
3162 // CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP4]])
3163 // CHECK5-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
3164 // CHECK5-NEXT: [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0
3165 // CHECK5-NEXT: br i1 [[TMP22]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3166 // CHECK5: .omp.final.then:
3167 // CHECK5-NEXT: store i32 123, ptr [[I]], align 4
3168 // CHECK5-NEXT: br label [[DOTOMP_FINAL_DONE]]
3169 // CHECK5: .omp.final.done:
3170 // CHECK5-NEXT: ret void
3173 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52
3174 // CHECK5-SAME: (ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
3175 // CHECK5-NEXT: entry:
3176 // CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
3177 // CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
3178 // CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
3179 // CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52.omp_outlined, ptr [[TMP0]])
3180 // CHECK5-NEXT: ret void
3183 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52.omp_outlined
3184 // CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
3185 // CHECK5-NEXT: entry:
3186 // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
3187 // CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
3188 // CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
3189 // CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
3190 // CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
3191 // CHECK5-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
3192 // CHECK5-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
3193 // CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3194 // CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3195 // CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
3196 // CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
3197 // CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
3198 // CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
3199 // CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
3200 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
3201 // CHECK5-NEXT: store i32 122, ptr [[DOTOMP_COMB_UB]], align 4
3202 // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
3203 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
3204 // CHECK5-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
3205 // CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
3206 // CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
3207 // CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
3208 // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
3209 // CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3210 // CHECK5: cond.true:
3211 // CHECK5-NEXT: br label [[COND_END:%.*]]
3212 // CHECK5: cond.false:
3213 // CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
3214 // CHECK5-NEXT: br label [[COND_END]]
3215 // CHECK5: cond.end:
3216 // CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
3217 // CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
3218 // CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
3219 // CHECK5-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
3220 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
3221 // CHECK5: omp.inner.for.cond:
3222 // CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP30:![0-9]+]]
3223 // CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP30]]
3224 // CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
3225 // CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3226 // CHECK5: omp.inner.for.body:
3227 // CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP30]]
3228 // CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP30]]
3229 // CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52.omp_outlined.omp_outlined, i32 [[TMP8]], i32 [[TMP9]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP30]]
3230 // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
3231 // CHECK5: omp.inner.for.inc:
3232 // CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP30]]
3233 // CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP30]]
3234 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
3235 // CHECK5-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP30]]
3236 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]]
3237 // CHECK5: omp.inner.for.end:
3238 // CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
3239 // CHECK5: omp.loop.exit:
3240 // CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
3241 // CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
3242 // CHECK5-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
3243 // CHECK5-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3244 // CHECK5: .omp.final.then:
3245 // CHECK5-NEXT: store i32 123, ptr [[I]], align 4
3246 // CHECK5-NEXT: br label [[DOTOMP_FINAL_DONE]]
3247 // CHECK5: .omp.final.done:
3248 // CHECK5-NEXT: ret void
3251 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52.omp_outlined.omp_outlined
3252 // CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
3253 // CHECK5-NEXT: entry:
3254 // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
3255 // CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
3256 // CHECK5-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
3257 // CHECK5-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
3258 // CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
3259 // CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
3260 // CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
3261 // CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
3262 // CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
3263 // CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3264 // CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3265 // CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
3266 // CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
3267 // CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
3268 // CHECK5-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
3269 // CHECK5-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
3270 // CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
3271 // CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
3272 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
3273 // CHECK5-NEXT: store i32 122, ptr [[DOTOMP_UB]], align 4
3274 // CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
3275 // CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
3276 // CHECK5-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_LB]], align 4
3277 // CHECK5-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4
3278 // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
3279 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
3280 // CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
3281 // CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
3282 // CHECK5-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
3283 // CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
3284 // CHECK5-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB3]], i32 [[TMP6]], i32 35, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 1)
3285 // CHECK5-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
3286 // CHECK5: omp.dispatch.cond:
3287 // CHECK5-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB3]], i32 [[TMP6]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
3288 // CHECK5-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
3289 // CHECK5-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
3290 // CHECK5: omp.dispatch.body:
3291 // CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
3292 // CHECK5-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4
3293 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
3294 // CHECK5: omp.inner.for.cond:
3295 // CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33:![0-9]+]]
3296 // CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP33]]
3297 // CHECK5-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
3298 // CHECK5-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3299 // CHECK5: omp.inner.for.body:
3300 // CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]]
3301 // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
3302 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
3303 // CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP33]]
3304 // CHECK5-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, i32 0
3305 // CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP33]]
3306 // CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A]], i32 0, i32 [[TMP12]]
3307 // CHECK5-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP33]]
3308 // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
3309 // CHECK5: omp.body.continue:
3310 // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
3311 // CHECK5: omp.inner.for.inc:
3312 // CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]]
3313 // CHECK5-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP13]], 1
3314 // CHECK5-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]]
3315 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]]
3316 // CHECK5: omp.inner.for.end:
3317 // CHECK5-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
3318 // CHECK5: omp.dispatch.inc:
3319 // CHECK5-NEXT: br label [[OMP_DISPATCH_COND]]
3320 // CHECK5: omp.dispatch.end:
3321 // CHECK5-NEXT: call void @__kmpc_dispatch_deinit(ptr @[[GLOB3]], i32 [[TMP6]])
3322 // CHECK5-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
3323 // CHECK5-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
3324 // CHECK5-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3325 // CHECK5: .omp.final.then:
3326 // CHECK5-NEXT: store i32 123, ptr [[I]], align 4
3327 // CHECK5-NEXT: br label [[DOTOMP_FINAL_DONE]]
3328 // CHECK5: .omp.final.done:
3329 // CHECK5-NEXT: ret void
3332 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58
3333 // CHECK5-SAME: (ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
3334 // CHECK5-NEXT: entry:
3335 // CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
3336 // CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
3337 // CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
3338 // CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58.omp_outlined, ptr [[TMP0]])
3339 // CHECK5-NEXT: ret void
3342 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58.omp_outlined
3343 // CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
3344 // CHECK5-NEXT: entry:
3345 // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
3346 // CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
3347 // CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
3348 // CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
3349 // CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
3350 // CHECK5-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
3351 // CHECK5-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
3352 // CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3353 // CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3354 // CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
3355 // CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
3356 // CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
3357 // CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
3358 // CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
3359 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
3360 // CHECK5-NEXT: store i32 122, ptr [[DOTOMP_COMB_UB]], align 4
3361 // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
3362 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
3363 // CHECK5-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
3364 // CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
3365 // CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
3366 // CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
3367 // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
3368 // CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3369 // CHECK5: cond.true:
3370 // CHECK5-NEXT: br label [[COND_END:%.*]]
3371 // CHECK5: cond.false:
3372 // CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
3373 // CHECK5-NEXT: br label [[COND_END]]
3374 // CHECK5: cond.end:
3375 // CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
3376 // CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
3377 // CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
3378 // CHECK5-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
3379 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
3380 // CHECK5: omp.inner.for.cond:
3381 // CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36:![0-9]+]]
3382 // CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP36]]
3383 // CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
3384 // CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3385 // CHECK5: omp.inner.for.body:
3386 // CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP36]]
3387 // CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP36]]
3388 // CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58.omp_outlined.omp_outlined, i32 [[TMP8]], i32 [[TMP9]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP36]]
3389 // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
3390 // CHECK5: omp.inner.for.inc:
3391 // CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36]]
3392 // CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP36]]
3393 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
3394 // CHECK5-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36]]
3395 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP37:![0-9]+]]
3396 // CHECK5: omp.inner.for.end:
3397 // CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
3398 // CHECK5: omp.loop.exit:
3399 // CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
3400 // CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
3401 // CHECK5-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
3402 // CHECK5-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3403 // CHECK5: .omp.final.then:
3404 // CHECK5-NEXT: store i32 123, ptr [[I]], align 4
3405 // CHECK5-NEXT: br label [[DOTOMP_FINAL_DONE]]
3406 // CHECK5: .omp.final.done:
3407 // CHECK5-NEXT: ret void
3410 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58.omp_outlined.omp_outlined
3411 // CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
3412 // CHECK5-NEXT: entry:
3413 // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
3414 // CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
3415 // CHECK5-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
3416 // CHECK5-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
3417 // CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
3418 // CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
3419 // CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
3420 // CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
3421 // CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
3422 // CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3423 // CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3424 // CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
3425 // CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
3426 // CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
3427 // CHECK5-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
3428 // CHECK5-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
3429 // CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
3430 // CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
3431 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
3432 // CHECK5-NEXT: store i32 122, ptr [[DOTOMP_UB]], align 4
3433 // CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
3434 // CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
3435 // CHECK5-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_LB]], align 4
3436 // CHECK5-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4
3437 // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
3438 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
3439 // CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
3440 // CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
3441 // CHECK5-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
3442 // CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
3443 // CHECK5-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB3]], i32 [[TMP6]], i32 35, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 61)
3444 // CHECK5-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
3445 // CHECK5: omp.dispatch.cond:
3446 // CHECK5-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB3]], i32 [[TMP6]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
3447 // CHECK5-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
3448 // CHECK5-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
3449 // CHECK5: omp.dispatch.body:
3450 // CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
3451 // CHECK5-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4
3452 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
3453 // CHECK5: omp.inner.for.cond:
3454 // CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39:![0-9]+]]
3455 // CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP39]]
3456 // CHECK5-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
3457 // CHECK5-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3458 // CHECK5: omp.inner.for.body:
3459 // CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39]]
3460 // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
3461 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
3462 // CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP39]]
3463 // CHECK5-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, i32 0
3464 // CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP39]]
3465 // CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A]], i32 0, i32 [[TMP12]]
3466 // CHECK5-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP39]]
3467 // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
3468 // CHECK5: omp.body.continue:
3469 // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
3470 // CHECK5: omp.inner.for.inc:
3471 // CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39]]
3472 // CHECK5-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP13]], 1
3473 // CHECK5-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39]]
3474 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP40:![0-9]+]]
3475 // CHECK5: omp.inner.for.end:
3476 // CHECK5-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
3477 // CHECK5: omp.dispatch.inc:
3478 // CHECK5-NEXT: br label [[OMP_DISPATCH_COND]]
3479 // CHECK5: omp.dispatch.end:
3480 // CHECK5-NEXT: call void @__kmpc_dispatch_deinit(ptr @[[GLOB3]], i32 [[TMP6]])
3481 // CHECK5-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
3482 // CHECK5-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
3483 // CHECK5-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3484 // CHECK5: .omp.final.then:
3485 // CHECK5-NEXT: store i32 123, ptr [[I]], align 4
3486 // CHECK5-NEXT: br label [[DOTOMP_FINAL_DONE]]
3487 // CHECK5: .omp.final.done:
3488 // CHECK5-NEXT: ret void
3491 // CHECK6-LABEL: define {{[^@]+}}@_Z21teams_template_structv
3492 // CHECK6-SAME: () #[[ATTR0:[0-9]+]] {
3493 // CHECK6-NEXT: entry:
3494 // CHECK6-NEXT: [[V:%.*]] = alloca [[STRUCT_SS:%.*]], align 4
3495 // CHECK6-NEXT: [[CALL:%.*]] = call noundef i32 @_ZN2SSIiLi123ELx456EE3fooEv(ptr noundef nonnull align 4 dereferenceable(496) [[V]])
3496 // CHECK6-NEXT: ret i32 [[CALL]]
3499 // CHECK6-LABEL: define {{[^@]+}}@_ZN2SSIiLi123ELx456EE3fooEv
3500 // CHECK6-SAME: (ptr noundef nonnull align 4 dereferenceable(496) [[THIS:%.*]]) #[[ATTR0]] comdat align 2 {
3501 // CHECK6-NEXT: entry:
3502 // CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
3503 // CHECK6-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 4
3504 // CHECK6-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 4
3505 // CHECK6-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 4
3506 // CHECK6-NEXT: [[TMP:%.*]] = alloca i32, align 4
3507 // CHECK6-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
3508 // CHECK6-NEXT: [[DOTOFFLOAD_BASEPTRS3:%.*]] = alloca [1 x ptr], align 4
3509 // CHECK6-NEXT: [[DOTOFFLOAD_PTRS4:%.*]] = alloca [1 x ptr], align 4
3510 // CHECK6-NEXT: [[DOTOFFLOAD_MAPPERS5:%.*]] = alloca [1 x ptr], align 4
3511 // CHECK6-NEXT: [[_TMP6:%.*]] = alloca i32, align 4
3512 // CHECK6-NEXT: [[KERNEL_ARGS7:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
3513 // CHECK6-NEXT: [[DOTOFFLOAD_BASEPTRS11:%.*]] = alloca [1 x ptr], align 4
3514 // CHECK6-NEXT: [[DOTOFFLOAD_PTRS12:%.*]] = alloca [1 x ptr], align 4
3515 // CHECK6-NEXT: [[DOTOFFLOAD_MAPPERS13:%.*]] = alloca [1 x ptr], align 4
3516 // CHECK6-NEXT: [[_TMP14:%.*]] = alloca i32, align 4
3517 // CHECK6-NEXT: [[KERNEL_ARGS15:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
3518 // CHECK6-NEXT: [[DOTOFFLOAD_BASEPTRS19:%.*]] = alloca [1 x ptr], align 4
3519 // CHECK6-NEXT: [[DOTOFFLOAD_PTRS20:%.*]] = alloca [1 x ptr], align 4
3520 // CHECK6-NEXT: [[DOTOFFLOAD_MAPPERS21:%.*]] = alloca [1 x ptr], align 4
3521 // CHECK6-NEXT: [[_TMP22:%.*]] = alloca i32, align 4
3522 // CHECK6-NEXT: [[KERNEL_ARGS23:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
3523 // CHECK6-NEXT: [[DOTOFFLOAD_BASEPTRS27:%.*]] = alloca [1 x ptr], align 4
3524 // CHECK6-NEXT: [[DOTOFFLOAD_PTRS28:%.*]] = alloca [1 x ptr], align 4
3525 // CHECK6-NEXT: [[DOTOFFLOAD_MAPPERS29:%.*]] = alloca [1 x ptr], align 4
3526 // CHECK6-NEXT: [[_TMP30:%.*]] = alloca i32, align 4
3527 // CHECK6-NEXT: [[KERNEL_ARGS31:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
3528 // CHECK6-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
3529 // CHECK6-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
3530 // CHECK6-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_SS:%.*]], ptr [[THIS1]], i32 0, i32 0
3531 // CHECK6-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
3532 // CHECK6-NEXT: store ptr [[THIS1]], ptr [[TMP0]], align 4
3533 // CHECK6-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
3534 // CHECK6-NEXT: store ptr [[A]], ptr [[TMP1]], align 4
3535 // CHECK6-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
3536 // CHECK6-NEXT: store ptr null, ptr [[TMP2]], align 4
3537 // CHECK6-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
3538 // CHECK6-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
3539 // CHECK6-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
3540 // CHECK6-NEXT: store i32 3, ptr [[TMP5]], align 4
3541 // CHECK6-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
3542 // CHECK6-NEXT: store i32 1, ptr [[TMP6]], align 4
3543 // CHECK6-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
3544 // CHECK6-NEXT: store ptr [[TMP3]], ptr [[TMP7]], align 4
3545 // CHECK6-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
3546 // CHECK6-NEXT: store ptr [[TMP4]], ptr [[TMP8]], align 4
3547 // CHECK6-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
3548 // CHECK6-NEXT: store ptr @.offload_sizes, ptr [[TMP9]], align 4
3549 // CHECK6-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
3550 // CHECK6-NEXT: store ptr @.offload_maptypes, ptr [[TMP10]], align 4
3551 // CHECK6-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
3552 // CHECK6-NEXT: store ptr null, ptr [[TMP11]], align 4
3553 // CHECK6-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
3554 // CHECK6-NEXT: store ptr null, ptr [[TMP12]], align 4
3555 // CHECK6-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
3556 // CHECK6-NEXT: store i64 123, ptr [[TMP13]], align 8
3557 // CHECK6-NEXT: [[TMP14:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
3558 // CHECK6-NEXT: store i64 0, ptr [[TMP14]], align 8
3559 // CHECK6-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
3560 // CHECK6-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP15]], align 4
3561 // CHECK6-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
3562 // CHECK6-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP16]], align 4
3563 // CHECK6-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
3564 // CHECK6-NEXT: store i32 0, ptr [[TMP17]], align 4
3565 // CHECK6-NEXT: [[TMP18:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.region_id, ptr [[KERNEL_ARGS]])
3566 // CHECK6-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
3567 // CHECK6-NEXT: br i1 [[TMP19]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
3568 // CHECK6: omp_offload.failed:
3569 // CHECK6-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36(ptr [[THIS1]]) #[[ATTR2:[0-9]+]]
3570 // CHECK6-NEXT: br label [[OMP_OFFLOAD_CONT]]
3571 // CHECK6: omp_offload.cont:
3572 // CHECK6-NEXT: [[A2:%.*]] = getelementptr inbounds nuw [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
3573 // CHECK6-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
3574 // CHECK6-NEXT: store ptr [[THIS1]], ptr [[TMP20]], align 4
3575 // CHECK6-NEXT: [[TMP21:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
3576 // CHECK6-NEXT: store ptr [[A2]], ptr [[TMP21]], align 4
3577 // CHECK6-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS5]], i32 0, i32 0
3578 // CHECK6-NEXT: store ptr null, ptr [[TMP22]], align 4
3579 // CHECK6-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
3580 // CHECK6-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
3581 // CHECK6-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 0
3582 // CHECK6-NEXT: store i32 3, ptr [[TMP25]], align 4
3583 // CHECK6-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 1
3584 // CHECK6-NEXT: store i32 1, ptr [[TMP26]], align 4
3585 // CHECK6-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 2
3586 // CHECK6-NEXT: store ptr [[TMP23]], ptr [[TMP27]], align 4
3587 // CHECK6-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 3
3588 // CHECK6-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 4
3589 // CHECK6-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 4
3590 // CHECK6-NEXT: store ptr @.offload_sizes.1, ptr [[TMP29]], align 4
3591 // CHECK6-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 5
3592 // CHECK6-NEXT: store ptr @.offload_maptypes.2, ptr [[TMP30]], align 4
3593 // CHECK6-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 6
3594 // CHECK6-NEXT: store ptr null, ptr [[TMP31]], align 4
3595 // CHECK6-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 7
3596 // CHECK6-NEXT: store ptr null, ptr [[TMP32]], align 4
3597 // CHECK6-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 8
3598 // CHECK6-NEXT: store i64 123, ptr [[TMP33]], align 8
3599 // CHECK6-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 9
3600 // CHECK6-NEXT: store i64 0, ptr [[TMP34]], align 8
3601 // CHECK6-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 10
3602 // CHECK6-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP35]], align 4
3603 // CHECK6-NEXT: [[TMP36:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 11
3604 // CHECK6-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP36]], align 4
3605 // CHECK6-NEXT: [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 12
3606 // CHECK6-NEXT: store i32 0, ptr [[TMP37]], align 4
3607 // CHECK6-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41.region_id, ptr [[KERNEL_ARGS7]])
3608 // CHECK6-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
3609 // CHECK6-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED8:%.*]], label [[OMP_OFFLOAD_CONT9:%.*]]
3610 // CHECK6: omp_offload.failed8:
3611 // CHECK6-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41(ptr [[THIS1]]) #[[ATTR2]]
3612 // CHECK6-NEXT: br label [[OMP_OFFLOAD_CONT9]]
3613 // CHECK6: omp_offload.cont9:
3614 // CHECK6-NEXT: [[A10:%.*]] = getelementptr inbounds nuw [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
3615 // CHECK6-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS11]], i32 0, i32 0
3616 // CHECK6-NEXT: store ptr [[THIS1]], ptr [[TMP40]], align 4
3617 // CHECK6-NEXT: [[TMP41:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS12]], i32 0, i32 0
3618 // CHECK6-NEXT: store ptr [[A10]], ptr [[TMP41]], align 4
3619 // CHECK6-NEXT: [[TMP42:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS13]], i32 0, i32 0
3620 // CHECK6-NEXT: store ptr null, ptr [[TMP42]], align 4
3621 // CHECK6-NEXT: [[TMP43:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS11]], i32 0, i32 0
3622 // CHECK6-NEXT: [[TMP44:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS12]], i32 0, i32 0
3623 // CHECK6-NEXT: [[TMP45:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 0
3624 // CHECK6-NEXT: store i32 3, ptr [[TMP45]], align 4
3625 // CHECK6-NEXT: [[TMP46:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 1
3626 // CHECK6-NEXT: store i32 1, ptr [[TMP46]], align 4
3627 // CHECK6-NEXT: [[TMP47:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 2
3628 // CHECK6-NEXT: store ptr [[TMP43]], ptr [[TMP47]], align 4
3629 // CHECK6-NEXT: [[TMP48:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 3
3630 // CHECK6-NEXT: store ptr [[TMP44]], ptr [[TMP48]], align 4
3631 // CHECK6-NEXT: [[TMP49:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 4
3632 // CHECK6-NEXT: store ptr @.offload_sizes.3, ptr [[TMP49]], align 4
3633 // CHECK6-NEXT: [[TMP50:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 5
3634 // CHECK6-NEXT: store ptr @.offload_maptypes.4, ptr [[TMP50]], align 4
3635 // CHECK6-NEXT: [[TMP51:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 6
3636 // CHECK6-NEXT: store ptr null, ptr [[TMP51]], align 4
3637 // CHECK6-NEXT: [[TMP52:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 7
3638 // CHECK6-NEXT: store ptr null, ptr [[TMP52]], align 4
3639 // CHECK6-NEXT: [[TMP53:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 8
3640 // CHECK6-NEXT: store i64 123, ptr [[TMP53]], align 8
3641 // CHECK6-NEXT: [[TMP54:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 9
3642 // CHECK6-NEXT: store i64 0, ptr [[TMP54]], align 8
3643 // CHECK6-NEXT: [[TMP55:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 10
3644 // CHECK6-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP55]], align 4
3645 // CHECK6-NEXT: [[TMP56:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 11
3646 // CHECK6-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP56]], align 4
3647 // CHECK6-NEXT: [[TMP57:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 12
3648 // CHECK6-NEXT: store i32 0, ptr [[TMP57]], align 4
3649 // CHECK6-NEXT: [[TMP58:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46.region_id, ptr [[KERNEL_ARGS15]])
3650 // CHECK6-NEXT: [[TMP59:%.*]] = icmp ne i32 [[TMP58]], 0
3651 // CHECK6-NEXT: br i1 [[TMP59]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]]
3652 // CHECK6: omp_offload.failed16:
3653 // CHECK6-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46(ptr [[THIS1]]) #[[ATTR2]]
3654 // CHECK6-NEXT: br label [[OMP_OFFLOAD_CONT17]]
3655 // CHECK6: omp_offload.cont17:
3656 // CHECK6-NEXT: [[A18:%.*]] = getelementptr inbounds nuw [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
3657 // CHECK6-NEXT: [[TMP60:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0
3658 // CHECK6-NEXT: store ptr [[THIS1]], ptr [[TMP60]], align 4
3659 // CHECK6-NEXT: [[TMP61:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 0
3660 // CHECK6-NEXT: store ptr [[A18]], ptr [[TMP61]], align 4
3661 // CHECK6-NEXT: [[TMP62:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 0
3662 // CHECK6-NEXT: store ptr null, ptr [[TMP62]], align 4
3663 // CHECK6-NEXT: [[TMP63:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0
3664 // CHECK6-NEXT: [[TMP64:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 0
3665 // CHECK6-NEXT: [[TMP65:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 0
3666 // CHECK6-NEXT: store i32 3, ptr [[TMP65]], align 4
3667 // CHECK6-NEXT: [[TMP66:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 1
3668 // CHECK6-NEXT: store i32 1, ptr [[TMP66]], align 4
3669 // CHECK6-NEXT: [[TMP67:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 2
3670 // CHECK6-NEXT: store ptr [[TMP63]], ptr [[TMP67]], align 4
3671 // CHECK6-NEXT: [[TMP68:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 3
3672 // CHECK6-NEXT: store ptr [[TMP64]], ptr [[TMP68]], align 4
3673 // CHECK6-NEXT: [[TMP69:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 4
3674 // CHECK6-NEXT: store ptr @.offload_sizes.5, ptr [[TMP69]], align 4
3675 // CHECK6-NEXT: [[TMP70:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 5
3676 // CHECK6-NEXT: store ptr @.offload_maptypes.6, ptr [[TMP70]], align 4
3677 // CHECK6-NEXT: [[TMP71:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 6
3678 // CHECK6-NEXT: store ptr null, ptr [[TMP71]], align 4
3679 // CHECK6-NEXT: [[TMP72:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 7
3680 // CHECK6-NEXT: store ptr null, ptr [[TMP72]], align 4
3681 // CHECK6-NEXT: [[TMP73:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 8
3682 // CHECK6-NEXT: store i64 123, ptr [[TMP73]], align 8
3683 // CHECK6-NEXT: [[TMP74:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 9
3684 // CHECK6-NEXT: store i64 0, ptr [[TMP74]], align 8
3685 // CHECK6-NEXT: [[TMP75:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 10
3686 // CHECK6-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP75]], align 4
3687 // CHECK6-NEXT: [[TMP76:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 11
3688 // CHECK6-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP76]], align 4
3689 // CHECK6-NEXT: [[TMP77:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 12
3690 // CHECK6-NEXT: store i32 0, ptr [[TMP77]], align 4
3691 // CHECK6-NEXT: [[TMP78:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52.region_id, ptr [[KERNEL_ARGS23]])
3692 // CHECK6-NEXT: [[TMP79:%.*]] = icmp ne i32 [[TMP78]], 0
3693 // CHECK6-NEXT: br i1 [[TMP79]], label [[OMP_OFFLOAD_FAILED24:%.*]], label [[OMP_OFFLOAD_CONT25:%.*]]
3694 // CHECK6: omp_offload.failed24:
3695 // CHECK6-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52(ptr [[THIS1]]) #[[ATTR2]]
3696 // CHECK6-NEXT: br label [[OMP_OFFLOAD_CONT25]]
3697 // CHECK6: omp_offload.cont25:
3698 // CHECK6-NEXT: [[A26:%.*]] = getelementptr inbounds nuw [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
3699 // CHECK6-NEXT: [[TMP80:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS27]], i32 0, i32 0
3700 // CHECK6-NEXT: store ptr [[THIS1]], ptr [[TMP80]], align 4
3701 // CHECK6-NEXT: [[TMP81:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS28]], i32 0, i32 0
3702 // CHECK6-NEXT: store ptr [[A26]], ptr [[TMP81]], align 4
3703 // CHECK6-NEXT: [[TMP82:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS29]], i32 0, i32 0
3704 // CHECK6-NEXT: store ptr null, ptr [[TMP82]], align 4
3705 // CHECK6-NEXT: [[TMP83:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS27]], i32 0, i32 0
3706 // CHECK6-NEXT: [[TMP84:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS28]], i32 0, i32 0
3707 // CHECK6-NEXT: [[TMP85:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 0
3708 // CHECK6-NEXT: store i32 3, ptr [[TMP85]], align 4
3709 // CHECK6-NEXT: [[TMP86:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 1
3710 // CHECK6-NEXT: store i32 1, ptr [[TMP86]], align 4
3711 // CHECK6-NEXT: [[TMP87:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 2
3712 // CHECK6-NEXT: store ptr [[TMP83]], ptr [[TMP87]], align 4
3713 // CHECK6-NEXT: [[TMP88:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 3
3714 // CHECK6-NEXT: store ptr [[TMP84]], ptr [[TMP88]], align 4
3715 // CHECK6-NEXT: [[TMP89:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 4
3716 // CHECK6-NEXT: store ptr @.offload_sizes.7, ptr [[TMP89]], align 4
3717 // CHECK6-NEXT: [[TMP90:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 5
3718 // CHECK6-NEXT: store ptr @.offload_maptypes.8, ptr [[TMP90]], align 4
3719 // CHECK6-NEXT: [[TMP91:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 6
3720 // CHECK6-NEXT: store ptr null, ptr [[TMP91]], align 4
3721 // CHECK6-NEXT: [[TMP92:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 7
3722 // CHECK6-NEXT: store ptr null, ptr [[TMP92]], align 4
3723 // CHECK6-NEXT: [[TMP93:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 8
3724 // CHECK6-NEXT: store i64 123, ptr [[TMP93]], align 8
3725 // CHECK6-NEXT: [[TMP94:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 9
3726 // CHECK6-NEXT: store i64 0, ptr [[TMP94]], align 8
3727 // CHECK6-NEXT: [[TMP95:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 10
3728 // CHECK6-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP95]], align 4
3729 // CHECK6-NEXT: [[TMP96:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 11
3730 // CHECK6-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP96]], align 4
3731 // CHECK6-NEXT: [[TMP97:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 12
3732 // CHECK6-NEXT: store i32 0, ptr [[TMP97]], align 4
3733 // CHECK6-NEXT: [[TMP98:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58.region_id, ptr [[KERNEL_ARGS31]])
3734 // CHECK6-NEXT: [[TMP99:%.*]] = icmp ne i32 [[TMP98]], 0
3735 // CHECK6-NEXT: br i1 [[TMP99]], label [[OMP_OFFLOAD_FAILED32:%.*]], label [[OMP_OFFLOAD_CONT33:%.*]]
3736 // CHECK6: omp_offload.failed32:
3737 // CHECK6-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58(ptr [[THIS1]]) #[[ATTR2]]
3738 // CHECK6-NEXT: br label [[OMP_OFFLOAD_CONT33]]
3739 // CHECK6: omp_offload.cont33:
3740 // CHECK6-NEXT: [[A34:%.*]] = getelementptr inbounds nuw [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
3741 // CHECK6-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A34]], i32 0, i32 0
3742 // CHECK6-NEXT: [[TMP100:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
3743 // CHECK6-NEXT: ret i32 [[TMP100]]
3746 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36
3747 // CHECK6-SAME: (ptr noundef [[THIS:%.*]]) #[[ATTR1:[0-9]+]] {
3748 // CHECK6-NEXT: entry:
3749 // CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
3750 // CHECK6-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
3751 // CHECK6-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
3752 // CHECK6-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.omp_outlined, ptr [[TMP0]])
3753 // CHECK6-NEXT: ret void
3756 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.omp_outlined
3757 // CHECK6-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
3758 // CHECK6-NEXT: entry:
3759 // CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
3760 // CHECK6-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
3761 // CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
3762 // CHECK6-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
3763 // CHECK6-NEXT: [[TMP:%.*]] = alloca i32, align 4
3764 // CHECK6-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
3765 // CHECK6-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
3766 // CHECK6-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3767 // CHECK6-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3768 // CHECK6-NEXT: [[I:%.*]] = alloca i32, align 4
3769 // CHECK6-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
3770 // CHECK6-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
3771 // CHECK6-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
3772 // CHECK6-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
3773 // CHECK6-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
3774 // CHECK6-NEXT: store i32 122, ptr [[DOTOMP_COMB_UB]], align 4
3775 // CHECK6-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
3776 // CHECK6-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
3777 // CHECK6-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
3778 // CHECK6-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
3779 // CHECK6-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
3780 // CHECK6-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
3781 // CHECK6-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
3782 // CHECK6-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3783 // CHECK6: cond.true:
3784 // CHECK6-NEXT: br label [[COND_END:%.*]]
3785 // CHECK6: cond.false:
3786 // CHECK6-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
3787 // CHECK6-NEXT: br label [[COND_END]]
3788 // CHECK6: cond.end:
3789 // CHECK6-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
3790 // CHECK6-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
3791 // CHECK6-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
3792 // CHECK6-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
3793 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
3794 // CHECK6: omp.inner.for.cond:
3795 // CHECK6-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9:![0-9]+]]
3796 // CHECK6-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP9]]
3797 // CHECK6-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
3798 // CHECK6-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3799 // CHECK6: omp.inner.for.body:
3800 // CHECK6-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP9]]
3801 // CHECK6-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP9]]
3802 // CHECK6-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.omp_outlined.omp_outlined, i32 [[TMP8]], i32 [[TMP9]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP9]]
3803 // CHECK6-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
3804 // CHECK6: omp.inner.for.inc:
3805 // CHECK6-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]]
3806 // CHECK6-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP9]]
3807 // CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
3808 // CHECK6-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]]
3809 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]]
3810 // CHECK6: omp.inner.for.end:
3811 // CHECK6-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
3812 // CHECK6: omp.loop.exit:
3813 // CHECK6-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
3814 // CHECK6-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
3815 // CHECK6-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
3816 // CHECK6-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3817 // CHECK6: .omp.final.then:
3818 // CHECK6-NEXT: store i32 123, ptr [[I]], align 4
3819 // CHECK6-NEXT: br label [[DOTOMP_FINAL_DONE]]
3820 // CHECK6: .omp.final.done:
3821 // CHECK6-NEXT: ret void
3824 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.omp_outlined.omp_outlined
3825 // CHECK6-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
3826 // CHECK6-NEXT: entry:
3827 // CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
3828 // CHECK6-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
3829 // CHECK6-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
3830 // CHECK6-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
3831 // CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
3832 // CHECK6-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
3833 // CHECK6-NEXT: [[TMP:%.*]] = alloca i32, align 4
3834 // CHECK6-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
3835 // CHECK6-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
3836 // CHECK6-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3837 // CHECK6-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3838 // CHECK6-NEXT: [[I:%.*]] = alloca i32, align 4
3839 // CHECK6-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
3840 // CHECK6-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
3841 // CHECK6-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
3842 // CHECK6-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
3843 // CHECK6-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
3844 // CHECK6-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
3845 // CHECK6-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
3846 // CHECK6-NEXT: store i32 122, ptr [[DOTOMP_UB]], align 4
3847 // CHECK6-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
3848 // CHECK6-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
3849 // CHECK6-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_LB]], align 4
3850 // CHECK6-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4
3851 // CHECK6-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
3852 // CHECK6-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
3853 // CHECK6-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
3854 // CHECK6-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
3855 // CHECK6-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
3856 // CHECK6-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
3857 // CHECK6-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 122
3858 // CHECK6-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3859 // CHECK6: cond.true:
3860 // CHECK6-NEXT: br label [[COND_END:%.*]]
3861 // CHECK6: cond.false:
3862 // CHECK6-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
3863 // CHECK6-NEXT: br label [[COND_END]]
3864 // CHECK6: cond.end:
3865 // CHECK6-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
3866 // CHECK6-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
3867 // CHECK6-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
3868 // CHECK6-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4
3869 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
3870 // CHECK6: omp.inner.for.cond:
3871 // CHECK6-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]]
3872 // CHECK6-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP13]]
3873 // CHECK6-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
3874 // CHECK6-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3875 // CHECK6: omp.inner.for.body:
3876 // CHECK6-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]]
3877 // CHECK6-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
3878 // CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
3879 // CHECK6-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP13]]
3880 // CHECK6-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, i32 0
3881 // CHECK6-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP13]]
3882 // CHECK6-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A]], i32 0, i32 [[TMP11]]
3883 // CHECK6-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP13]]
3884 // CHECK6-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
3885 // CHECK6: omp.body.continue:
3886 // CHECK6-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
3887 // CHECK6: omp.inner.for.inc:
3888 // CHECK6-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]]
3889 // CHECK6-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP12]], 1
3890 // CHECK6-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]]
3891 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]]
3892 // CHECK6: omp.inner.for.end:
3893 // CHECK6-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
3894 // CHECK6: omp.loop.exit:
3895 // CHECK6-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP4]])
3896 // CHECK6-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
3897 // CHECK6-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
3898 // CHECK6-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3899 // CHECK6: .omp.final.then:
3900 // CHECK6-NEXT: store i32 123, ptr [[I]], align 4
3901 // CHECK6-NEXT: br label [[DOTOMP_FINAL_DONE]]
3902 // CHECK6: .omp.final.done:
3903 // CHECK6-NEXT: ret void
3906 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41
3907 // CHECK6-SAME: (ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
3908 // CHECK6-NEXT: entry:
3909 // CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
3910 // CHECK6-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
3911 // CHECK6-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
3912 // CHECK6-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41.omp_outlined, ptr [[TMP0]])
3913 // CHECK6-NEXT: ret void
3916 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41.omp_outlined
3917 // CHECK6-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
3918 // CHECK6-NEXT: entry:
3919 // CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
3920 // CHECK6-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
3921 // CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
3922 // CHECK6-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
3923 // CHECK6-NEXT: [[TMP:%.*]] = alloca i32, align 4
3924 // CHECK6-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
3925 // CHECK6-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
3926 // CHECK6-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3927 // CHECK6-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3928 // CHECK6-NEXT: [[I:%.*]] = alloca i32, align 4
3929 // CHECK6-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
3930 // CHECK6-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
3931 // CHECK6-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
3932 // CHECK6-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
3933 // CHECK6-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
3934 // CHECK6-NEXT: store i32 122, ptr [[DOTOMP_COMB_UB]], align 4
3935 // CHECK6-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
3936 // CHECK6-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
3937 // CHECK6-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
3938 // CHECK6-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
3939 // CHECK6-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
3940 // CHECK6-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
3941 // CHECK6-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
3942 // CHECK6-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3943 // CHECK6: cond.true:
3944 // CHECK6-NEXT: br label [[COND_END:%.*]]
3945 // CHECK6: cond.false:
3946 // CHECK6-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
3947 // CHECK6-NEXT: br label [[COND_END]]
3948 // CHECK6: cond.end:
3949 // CHECK6-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
3950 // CHECK6-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
3951 // CHECK6-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
3952 // CHECK6-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
3953 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
3954 // CHECK6: omp.inner.for.cond:
3955 // CHECK6-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18:![0-9]+]]
3956 // CHECK6-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP18]]
3957 // CHECK6-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
3958 // CHECK6-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3959 // CHECK6: omp.inner.for.body:
3960 // CHECK6-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP18]]
3961 // CHECK6-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP18]]
3962 // CHECK6-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41.omp_outlined.omp_outlined, i32 [[TMP8]], i32 [[TMP9]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP18]]
3963 // CHECK6-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
3964 // CHECK6: omp.inner.for.inc:
3965 // CHECK6-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
3966 // CHECK6-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP18]]
3967 // CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
3968 // CHECK6-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
3969 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
3970 // CHECK6: omp.inner.for.end:
3971 // CHECK6-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
3972 // CHECK6: omp.loop.exit:
3973 // CHECK6-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
3974 // CHECK6-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
3975 // CHECK6-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
3976 // CHECK6-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3977 // CHECK6: .omp.final.then:
3978 // CHECK6-NEXT: store i32 123, ptr [[I]], align 4
3979 // CHECK6-NEXT: br label [[DOTOMP_FINAL_DONE]]
3980 // CHECK6: .omp.final.done:
3981 // CHECK6-NEXT: ret void
3984 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41.omp_outlined.omp_outlined
3985 // CHECK6-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
3986 // CHECK6-NEXT: entry:
3987 // CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
3988 // CHECK6-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
3989 // CHECK6-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
3990 // CHECK6-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
3991 // CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
3992 // CHECK6-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
3993 // CHECK6-NEXT: [[TMP:%.*]] = alloca i32, align 4
3994 // CHECK6-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
3995 // CHECK6-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
3996 // CHECK6-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3997 // CHECK6-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3998 // CHECK6-NEXT: [[I:%.*]] = alloca i32, align 4
3999 // CHECK6-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
4000 // CHECK6-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
4001 // CHECK6-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
4002 // CHECK6-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
4003 // CHECK6-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
4004 // CHECK6-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
4005 // CHECK6-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
4006 // CHECK6-NEXT: store i32 122, ptr [[DOTOMP_UB]], align 4
4007 // CHECK6-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
4008 // CHECK6-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
4009 // CHECK6-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_LB]], align 4
4010 // CHECK6-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4
4011 // CHECK6-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
4012 // CHECK6-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
4013 // CHECK6-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
4014 // CHECK6-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
4015 // CHECK6-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
4016 // CHECK6-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
4017 // CHECK6-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 122
4018 // CHECK6-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
4019 // CHECK6: cond.true:
4020 // CHECK6-NEXT: br label [[COND_END:%.*]]
4021 // CHECK6: cond.false:
4022 // CHECK6-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
4023 // CHECK6-NEXT: br label [[COND_END]]
4024 // CHECK6: cond.end:
4025 // CHECK6-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
4026 // CHECK6-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
4027 // CHECK6-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
4028 // CHECK6-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4
4029 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
4030 // CHECK6: omp.inner.for.cond:
4031 // CHECK6-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21:![0-9]+]]
4032 // CHECK6-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP21]]
4033 // CHECK6-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
4034 // CHECK6-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4035 // CHECK6: omp.inner.for.body:
4036 // CHECK6-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]]
4037 // CHECK6-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
4038 // CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
4039 // CHECK6-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP21]]
4040 // CHECK6-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, i32 0
4041 // CHECK6-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP21]]
4042 // CHECK6-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A]], i32 0, i32 [[TMP11]]
4043 // CHECK6-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP21]]
4044 // CHECK6-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
4045 // CHECK6: omp.body.continue:
4046 // CHECK6-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
4047 // CHECK6: omp.inner.for.inc:
4048 // CHECK6-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]]
4049 // CHECK6-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP12]], 1
4050 // CHECK6-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]]
4051 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]]
4052 // CHECK6: omp.inner.for.end:
4053 // CHECK6-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
4054 // CHECK6: omp.loop.exit:
4055 // CHECK6-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP4]])
4056 // CHECK6-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
4057 // CHECK6-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
4058 // CHECK6-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4059 // CHECK6: .omp.final.then:
4060 // CHECK6-NEXT: store i32 123, ptr [[I]], align 4
4061 // CHECK6-NEXT: br label [[DOTOMP_FINAL_DONE]]
4062 // CHECK6: .omp.final.done:
4063 // CHECK6-NEXT: ret void
4066 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46
4067 // CHECK6-SAME: (ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
4068 // CHECK6-NEXT: entry:
4069 // CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
4070 // CHECK6-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
4071 // CHECK6-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
4072 // CHECK6-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46.omp_outlined, ptr [[TMP0]])
4073 // CHECK6-NEXT: ret void
4076 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46.omp_outlined
4077 // CHECK6-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
4078 // CHECK6-NEXT: entry:
4079 // CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
4080 // CHECK6-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
4081 // CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
4082 // CHECK6-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
4083 // CHECK6-NEXT: [[TMP:%.*]] = alloca i32, align 4
4084 // CHECK6-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
4085 // CHECK6-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
4086 // CHECK6-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4087 // CHECK6-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4088 // CHECK6-NEXT: [[I:%.*]] = alloca i32, align 4
4089 // CHECK6-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
4090 // CHECK6-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
4091 // CHECK6-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
4092 // CHECK6-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
4093 // CHECK6-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
4094 // CHECK6-NEXT: store i32 122, ptr [[DOTOMP_COMB_UB]], align 4
4095 // CHECK6-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
4096 // CHECK6-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
4097 // CHECK6-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
4098 // CHECK6-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
4099 // CHECK6-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
4100 // CHECK6-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
4101 // CHECK6-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
4102 // CHECK6-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
4103 // CHECK6: cond.true:
4104 // CHECK6-NEXT: br label [[COND_END:%.*]]
4105 // CHECK6: cond.false:
4106 // CHECK6-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
4107 // CHECK6-NEXT: br label [[COND_END]]
4108 // CHECK6: cond.end:
4109 // CHECK6-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
4110 // CHECK6-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
4111 // CHECK6-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
4112 // CHECK6-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
4113 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
4114 // CHECK6: omp.inner.for.cond:
4115 // CHECK6-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24:![0-9]+]]
4116 // CHECK6-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP24]]
4117 // CHECK6-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
4118 // CHECK6-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4119 // CHECK6: omp.inner.for.body:
4120 // CHECK6-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP24]]
4121 // CHECK6-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP24]]
4122 // CHECK6-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46.omp_outlined.omp_outlined, i32 [[TMP8]], i32 [[TMP9]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP24]]
4123 // CHECK6-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
4124 // CHECK6: omp.inner.for.inc:
4125 // CHECK6-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
4126 // CHECK6-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP24]]
4127 // CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
4128 // CHECK6-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
4129 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
4130 // CHECK6: omp.inner.for.end:
4131 // CHECK6-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
4132 // CHECK6: omp.loop.exit:
4133 // CHECK6-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
4134 // CHECK6-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
4135 // CHECK6-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
4136 // CHECK6-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4137 // CHECK6: .omp.final.then:
4138 // CHECK6-NEXT: store i32 123, ptr [[I]], align 4
4139 // CHECK6-NEXT: br label [[DOTOMP_FINAL_DONE]]
4140 // CHECK6: .omp.final.done:
4141 // CHECK6-NEXT: ret void
4144 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46.omp_outlined.omp_outlined
4145 // CHECK6-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
4146 // CHECK6-NEXT: entry:
4147 // CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
4148 // CHECK6-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
4149 // CHECK6-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
4150 // CHECK6-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
4151 // CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
4152 // CHECK6-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
4153 // CHECK6-NEXT: [[TMP:%.*]] = alloca i32, align 4
4154 // CHECK6-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
4155 // CHECK6-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
4156 // CHECK6-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4157 // CHECK6-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4158 // CHECK6-NEXT: [[I:%.*]] = alloca i32, align 4
4159 // CHECK6-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
4160 // CHECK6-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
4161 // CHECK6-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
4162 // CHECK6-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
4163 // CHECK6-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
4164 // CHECK6-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
4165 // CHECK6-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
4166 // CHECK6-NEXT: store i32 122, ptr [[DOTOMP_UB]], align 4
4167 // CHECK6-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
4168 // CHECK6-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
4169 // CHECK6-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_LB]], align 4
4170 // CHECK6-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4
4171 // CHECK6-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
4172 // CHECK6-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
4173 // CHECK6-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
4174 // CHECK6-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
4175 // CHECK6-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP4]], i32 33, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 61)
4176 // CHECK6-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
4177 // CHECK6: omp.dispatch.cond:
4178 // CHECK6-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
4179 // CHECK6-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
4180 // CHECK6-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], [[TMP6]]
4181 // CHECK6-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
4182 // CHECK6: cond.true:
4183 // CHECK6-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
4184 // CHECK6-NEXT: br label [[COND_END:%.*]]
4185 // CHECK6: cond.false:
4186 // CHECK6-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
4187 // CHECK6-NEXT: br label [[COND_END]]
4188 // CHECK6: cond.end:
4189 // CHECK6-NEXT: [[COND:%.*]] = phi i32 [ [[TMP7]], [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ]
4190 // CHECK6-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
4191 // CHECK6-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
4192 // CHECK6-NEXT: store i32 [[TMP9]], ptr [[DOTOMP_IV]], align 4
4193 // CHECK6-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
4194 // CHECK6-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
4195 // CHECK6-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
4196 // CHECK6-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
4197 // CHECK6: omp.dispatch.body:
4198 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
4199 // CHECK6: omp.inner.for.cond:
4200 // CHECK6-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27:![0-9]+]]
4201 // CHECK6-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP27]]
4202 // CHECK6-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
4203 // CHECK6-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4204 // CHECK6: omp.inner.for.body:
4205 // CHECK6-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]]
4206 // CHECK6-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP14]], 1
4207 // CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
4208 // CHECK6-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP27]]
4209 // CHECK6-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, i32 0
4210 // CHECK6-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP27]]
4211 // CHECK6-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A]], i32 0, i32 [[TMP15]]
4212 // CHECK6-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP27]]
4213 // CHECK6-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
4214 // CHECK6: omp.body.continue:
4215 // CHECK6-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
4216 // CHECK6: omp.inner.for.inc:
4217 // CHECK6-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]]
4218 // CHECK6-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP16]], 1
4219 // CHECK6-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]]
4220 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]]
4221 // CHECK6: omp.inner.for.end:
4222 // CHECK6-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
4223 // CHECK6: omp.dispatch.inc:
4224 // CHECK6-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
4225 // CHECK6-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
4226 // CHECK6-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
4227 // CHECK6-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_LB]], align 4
4228 // CHECK6-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
4229 // CHECK6-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
4230 // CHECK6-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
4231 // CHECK6-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_UB]], align 4
4232 // CHECK6-NEXT: br label [[OMP_DISPATCH_COND]]
4233 // CHECK6: omp.dispatch.end:
4234 // CHECK6-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP4]])
4235 // CHECK6-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
4236 // CHECK6-NEXT: [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0
4237 // CHECK6-NEXT: br i1 [[TMP22]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4238 // CHECK6: .omp.final.then:
4239 // CHECK6-NEXT: store i32 123, ptr [[I]], align 4
4240 // CHECK6-NEXT: br label [[DOTOMP_FINAL_DONE]]
4241 // CHECK6: .omp.final.done:
4242 // CHECK6-NEXT: ret void
4245 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52
4246 // CHECK6-SAME: (ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
4247 // CHECK6-NEXT: entry:
4248 // CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
4249 // CHECK6-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
4250 // CHECK6-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
4251 // CHECK6-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52.omp_outlined, ptr [[TMP0]])
4252 // CHECK6-NEXT: ret void
4255 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52.omp_outlined
4256 // CHECK6-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
4257 // CHECK6-NEXT: entry:
4258 // CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
4259 // CHECK6-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
4260 // CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
4261 // CHECK6-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
4262 // CHECK6-NEXT: [[TMP:%.*]] = alloca i32, align 4
4263 // CHECK6-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
4264 // CHECK6-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
4265 // CHECK6-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4266 // CHECK6-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4267 // CHECK6-NEXT: [[I:%.*]] = alloca i32, align 4
4268 // CHECK6-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
4269 // CHECK6-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
4270 // CHECK6-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
4271 // CHECK6-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
4272 // CHECK6-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
4273 // CHECK6-NEXT: store i32 122, ptr [[DOTOMP_COMB_UB]], align 4
4274 // CHECK6-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
4275 // CHECK6-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
4276 // CHECK6-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
4277 // CHECK6-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
4278 // CHECK6-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
4279 // CHECK6-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
4280 // CHECK6-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
4281 // CHECK6-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
4282 // CHECK6: cond.true:
4283 // CHECK6-NEXT: br label [[COND_END:%.*]]
4284 // CHECK6: cond.false:
4285 // CHECK6-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
4286 // CHECK6-NEXT: br label [[COND_END]]
4287 // CHECK6: cond.end:
4288 // CHECK6-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
4289 // CHECK6-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
4290 // CHECK6-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
4291 // CHECK6-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
4292 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
4293 // CHECK6: omp.inner.for.cond:
4294 // CHECK6-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP30:![0-9]+]]
4295 // CHECK6-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP30]]
4296 // CHECK6-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
4297 // CHECK6-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4298 // CHECK6: omp.inner.for.body:
4299 // CHECK6-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP30]]
4300 // CHECK6-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP30]]
4301 // CHECK6-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52.omp_outlined.omp_outlined, i32 [[TMP8]], i32 [[TMP9]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP30]]
4302 // CHECK6-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
4303 // CHECK6: omp.inner.for.inc:
4304 // CHECK6-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP30]]
4305 // CHECK6-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP30]]
4306 // CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
4307 // CHECK6-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP30]]
4308 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]]
4309 // CHECK6: omp.inner.for.end:
4310 // CHECK6-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
4311 // CHECK6: omp.loop.exit:
4312 // CHECK6-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
4313 // CHECK6-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
4314 // CHECK6-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
4315 // CHECK6-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4316 // CHECK6: .omp.final.then:
4317 // CHECK6-NEXT: store i32 123, ptr [[I]], align 4
4318 // CHECK6-NEXT: br label [[DOTOMP_FINAL_DONE]]
4319 // CHECK6: .omp.final.done:
4320 // CHECK6-NEXT: ret void
4323 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52.omp_outlined.omp_outlined
4324 // CHECK6-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
4325 // CHECK6-NEXT: entry:
4326 // CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
4327 // CHECK6-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
4328 // CHECK6-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
4329 // CHECK6-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
4330 // CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
4331 // CHECK6-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
4332 // CHECK6-NEXT: [[TMP:%.*]] = alloca i32, align 4
4333 // CHECK6-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
4334 // CHECK6-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
4335 // CHECK6-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4336 // CHECK6-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4337 // CHECK6-NEXT: [[I:%.*]] = alloca i32, align 4
4338 // CHECK6-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
4339 // CHECK6-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
4340 // CHECK6-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
4341 // CHECK6-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
4342 // CHECK6-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
4343 // CHECK6-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
4344 // CHECK6-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
4345 // CHECK6-NEXT: store i32 122, ptr [[DOTOMP_UB]], align 4
4346 // CHECK6-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
4347 // CHECK6-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
4348 // CHECK6-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_LB]], align 4
4349 // CHECK6-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4
4350 // CHECK6-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
4351 // CHECK6-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
4352 // CHECK6-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
4353 // CHECK6-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
4354 // CHECK6-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
4355 // CHECK6-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
4356 // CHECK6-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB3]], i32 [[TMP6]], i32 1073741859, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 1)
4357 // CHECK6-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
4358 // CHECK6: omp.dispatch.cond:
4359 // CHECK6-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB3]], i32 [[TMP6]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
4360 // CHECK6-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
4361 // CHECK6-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
4362 // CHECK6: omp.dispatch.body:
4363 // CHECK6-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
4364 // CHECK6-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4
4365 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
4366 // CHECK6: omp.inner.for.cond:
4367 // CHECK6-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33:![0-9]+]]
4368 // CHECK6-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP33]]
4369 // CHECK6-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
4370 // CHECK6-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4371 // CHECK6: omp.inner.for.body:
4372 // CHECK6-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]]
4373 // CHECK6-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
4374 // CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
4375 // CHECK6-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP33]]
4376 // CHECK6-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, i32 0
4377 // CHECK6-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP33]]
4378 // CHECK6-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A]], i32 0, i32 [[TMP12]]
4379 // CHECK6-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP33]]
4380 // CHECK6-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
4381 // CHECK6: omp.body.continue:
4382 // CHECK6-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
4383 // CHECK6: omp.inner.for.inc:
4384 // CHECK6-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]]
4385 // CHECK6-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP13]], 1
4386 // CHECK6-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]]
4387 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]]
4388 // CHECK6: omp.inner.for.end:
4389 // CHECK6-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
4390 // CHECK6: omp.dispatch.inc:
4391 // CHECK6-NEXT: br label [[OMP_DISPATCH_COND]]
4392 // CHECK6: omp.dispatch.end:
4393 // CHECK6-NEXT: call void @__kmpc_dispatch_deinit(ptr @[[GLOB3]], i32 [[TMP6]])
4394 // CHECK6-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
4395 // CHECK6-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
4396 // CHECK6-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4397 // CHECK6: .omp.final.then:
4398 // CHECK6-NEXT: store i32 123, ptr [[I]], align 4
4399 // CHECK6-NEXT: br label [[DOTOMP_FINAL_DONE]]
4400 // CHECK6: .omp.final.done:
4401 // CHECK6-NEXT: ret void
4404 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58
4405 // CHECK6-SAME: (ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
4406 // CHECK6-NEXT: entry:
4407 // CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
4408 // CHECK6-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
4409 // CHECK6-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
4410 // CHECK6-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58.omp_outlined, ptr [[TMP0]])
4411 // CHECK6-NEXT: ret void
4414 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58.omp_outlined
4415 // CHECK6-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
4416 // CHECK6-NEXT: entry:
4417 // CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
4418 // CHECK6-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
4419 // CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
4420 // CHECK6-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
4421 // CHECK6-NEXT: [[TMP:%.*]] = alloca i32, align 4
4422 // CHECK6-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
4423 // CHECK6-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
4424 // CHECK6-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4425 // CHECK6-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4426 // CHECK6-NEXT: [[I:%.*]] = alloca i32, align 4
4427 // CHECK6-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
4428 // CHECK6-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
4429 // CHECK6-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
4430 // CHECK6-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
4431 // CHECK6-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
4432 // CHECK6-NEXT: store i32 122, ptr [[DOTOMP_COMB_UB]], align 4
4433 // CHECK6-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
4434 // CHECK6-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
4435 // CHECK6-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
4436 // CHECK6-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
4437 // CHECK6-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
4438 // CHECK6-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
4439 // CHECK6-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
4440 // CHECK6-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
4441 // CHECK6: cond.true:
4442 // CHECK6-NEXT: br label [[COND_END:%.*]]
4443 // CHECK6: cond.false:
4444 // CHECK6-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
4445 // CHECK6-NEXT: br label [[COND_END]]
4446 // CHECK6: cond.end:
4447 // CHECK6-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
4448 // CHECK6-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
4449 // CHECK6-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
4450 // CHECK6-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
4451 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
4452 // CHECK6: omp.inner.for.cond:
4453 // CHECK6-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36:![0-9]+]]
4454 // CHECK6-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP36]]
4455 // CHECK6-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
4456 // CHECK6-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4457 // CHECK6: omp.inner.for.body:
4458 // CHECK6-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP36]]
4459 // CHECK6-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP36]]
4460 // CHECK6-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58.omp_outlined.omp_outlined, i32 [[TMP8]], i32 [[TMP9]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP36]]
4461 // CHECK6-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
4462 // CHECK6: omp.inner.for.inc:
4463 // CHECK6-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36]]
4464 // CHECK6-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP36]]
4465 // CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
4466 // CHECK6-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36]]
4467 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP37:![0-9]+]]
4468 // CHECK6: omp.inner.for.end:
4469 // CHECK6-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
4470 // CHECK6: omp.loop.exit:
4471 // CHECK6-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
4472 // CHECK6-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
4473 // CHECK6-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
4474 // CHECK6-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4475 // CHECK6: .omp.final.then:
4476 // CHECK6-NEXT: store i32 123, ptr [[I]], align 4
4477 // CHECK6-NEXT: br label [[DOTOMP_FINAL_DONE]]
4478 // CHECK6: .omp.final.done:
4479 // CHECK6-NEXT: ret void
4482 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58.omp_outlined.omp_outlined
4483 // CHECK6-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
4484 // CHECK6-NEXT: entry:
4485 // CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
4486 // CHECK6-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
4487 // CHECK6-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
4488 // CHECK6-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
4489 // CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
4490 // CHECK6-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
4491 // CHECK6-NEXT: [[TMP:%.*]] = alloca i32, align 4
4492 // CHECK6-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
4493 // CHECK6-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
4494 // CHECK6-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4495 // CHECK6-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4496 // CHECK6-NEXT: [[I:%.*]] = alloca i32, align 4
4497 // CHECK6-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
4498 // CHECK6-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
4499 // CHECK6-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
4500 // CHECK6-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
4501 // CHECK6-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
4502 // CHECK6-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
4503 // CHECK6-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
4504 // CHECK6-NEXT: store i32 122, ptr [[DOTOMP_UB]], align 4
4505 // CHECK6-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
4506 // CHECK6-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
4507 // CHECK6-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_LB]], align 4
4508 // CHECK6-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4
4509 // CHECK6-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
4510 // CHECK6-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
4511 // CHECK6-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
4512 // CHECK6-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
4513 // CHECK6-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
4514 // CHECK6-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
4515 // CHECK6-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB3]], i32 [[TMP6]], i32 1073741859, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 61)
4516 // CHECK6-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
4517 // CHECK6: omp.dispatch.cond:
4518 // CHECK6-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB3]], i32 [[TMP6]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
4519 // CHECK6-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
4520 // CHECK6-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
4521 // CHECK6: omp.dispatch.body:
4522 // CHECK6-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
4523 // CHECK6-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4
4524 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
4525 // CHECK6: omp.inner.for.cond:
4526 // CHECK6-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39:![0-9]+]]
4527 // CHECK6-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP39]]
4528 // CHECK6-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
4529 // CHECK6-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4530 // CHECK6: omp.inner.for.body:
4531 // CHECK6-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39]]
4532 // CHECK6-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
4533 // CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
4534 // CHECK6-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP39]]
4535 // CHECK6-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, i32 0
4536 // CHECK6-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP39]]
4537 // CHECK6-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A]], i32 0, i32 [[TMP12]]
4538 // CHECK6-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP39]]
4539 // CHECK6-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
4540 // CHECK6: omp.body.continue:
4541 // CHECK6-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
4542 // CHECK6: omp.inner.for.inc:
4543 // CHECK6-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39]]
4544 // CHECK6-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP13]], 1
4545 // CHECK6-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39]]
4546 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP40:![0-9]+]]
4547 // CHECK6: omp.inner.for.end:
4548 // CHECK6-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
4549 // CHECK6: omp.dispatch.inc:
4550 // CHECK6-NEXT: br label [[OMP_DISPATCH_COND]]
4551 // CHECK6: omp.dispatch.end:
4552 // CHECK6-NEXT: call void @__kmpc_dispatch_deinit(ptr @[[GLOB3]], i32 [[TMP6]])
4553 // CHECK6-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
4554 // CHECK6-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
4555 // CHECK6-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4556 // CHECK6: .omp.final.then:
4557 // CHECK6-NEXT: store i32 123, ptr [[I]], align 4
4558 // CHECK6-NEXT: br label [[DOTOMP_FINAL_DONE]]
4559 // CHECK6: .omp.final.done:
4560 // CHECK6-NEXT: ret void
4563 // CHECK9-LABEL: define {{[^@]+}}@_Z21teams_template_structv
4564 // CHECK9-SAME: () #[[ATTR0:[0-9]+]] {
4565 // CHECK9-NEXT: entry:
4566 // CHECK9-NEXT: [[V:%.*]] = alloca [[STRUCT_SS:%.*]], align 4
4567 // CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_ZN2SSIiLi123ELx456EE3fooEv(ptr noundef nonnull align 4 dereferenceable(496) [[V]])
4568 // CHECK9-NEXT: ret i32 [[CALL]]
4571 // CHECK9-LABEL: define {{[^@]+}}@_ZN2SSIiLi123ELx456EE3fooEv
4572 // CHECK9-SAME: (ptr noundef nonnull align 4 dereferenceable(496) [[THIS:%.*]]) #[[ATTR0]] comdat {
4573 // CHECK9-NEXT: entry:
4574 // CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
4575 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
4576 // CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
4577 // CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
4578 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
4579 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
4580 // CHECK9-NEXT: [[_TMP3:%.*]] = alloca i32, align 4
4581 // CHECK9-NEXT: [[DOTOMP_LB4:%.*]] = alloca i32, align 4
4582 // CHECK9-NEXT: [[DOTOMP_UB5:%.*]] = alloca i32, align 4
4583 // CHECK9-NEXT: [[DOTOMP_IV6:%.*]] = alloca i32, align 4
4584 // CHECK9-NEXT: [[I7:%.*]] = alloca i32, align 4
4585 // CHECK9-NEXT: [[_TMP20:%.*]] = alloca i32, align 4
4586 // CHECK9-NEXT: [[DOTOMP_LB21:%.*]] = alloca i32, align 4
4587 // CHECK9-NEXT: [[DOTOMP_UB22:%.*]] = alloca i32, align 4
4588 // CHECK9-NEXT: [[DOTOMP_IV23:%.*]] = alloca i32, align 4
4589 // CHECK9-NEXT: [[I24:%.*]] = alloca i32, align 4
4590 // CHECK9-NEXT: [[_TMP37:%.*]] = alloca i32, align 4
4591 // CHECK9-NEXT: [[DOTOMP_LB38:%.*]] = alloca i32, align 4
4592 // CHECK9-NEXT: [[DOTOMP_UB39:%.*]] = alloca i32, align 4
4593 // CHECK9-NEXT: [[DOTOMP_IV40:%.*]] = alloca i32, align 4
4594 // CHECK9-NEXT: [[I41:%.*]] = alloca i32, align 4
4595 // CHECK9-NEXT: [[_TMP54:%.*]] = alloca i32, align 4
4596 // CHECK9-NEXT: [[DOTOMP_LB55:%.*]] = alloca i32, align 4
4597 // CHECK9-NEXT: [[DOTOMP_UB56:%.*]] = alloca i32, align 4
4598 // CHECK9-NEXT: [[DOTOMP_IV57:%.*]] = alloca i32, align 4
4599 // CHECK9-NEXT: [[I58:%.*]] = alloca i32, align 4
4600 // CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
4601 // CHECK9-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
4602 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
4603 // CHECK9-NEXT: store i32 122, ptr [[DOTOMP_UB]], align 4
4604 // CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
4605 // CHECK9-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4
4606 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
4607 // CHECK9: omp.inner.for.cond:
4608 // CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2:![0-9]+]]
4609 // CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP2]]
4610 // CHECK9-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
4611 // CHECK9-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4612 // CHECK9: omp.inner.for.body:
4613 // CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]]
4614 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
4615 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
4616 // CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP2]]
4617 // CHECK9-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_SS:%.*]], ptr [[THIS1]], i32 0, i32 0
4618 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP2]]
4619 // CHECK9-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP4]] to i64
4620 // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A]], i64 0, i64 [[IDXPROM]]
4621 // CHECK9-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP2]]
4622 // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
4623 // CHECK9: omp.body.continue:
4624 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
4625 // CHECK9: omp.inner.for.inc:
4626 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]]
4627 // CHECK9-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP5]], 1
4628 // CHECK9-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]]
4629 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
4630 // CHECK9: omp.inner.for.end:
4631 // CHECK9-NEXT: store i32 123, ptr [[I]], align 4
4632 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB4]], align 4
4633 // CHECK9-NEXT: store i32 122, ptr [[DOTOMP_UB5]], align 4
4634 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB4]], align 4
4635 // CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV6]], align 4
4636 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND8:%.*]]
4637 // CHECK9: omp.inner.for.cond8:
4638 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6:![0-9]+]]
4639 // CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB5]], align 4, !llvm.access.group [[ACC_GRP6]]
4640 // CHECK9-NEXT: [[CMP9:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
4641 // CHECK9-NEXT: br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY10:%.*]], label [[OMP_INNER_FOR_END19:%.*]]
4642 // CHECK9: omp.inner.for.body10:
4643 // CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6]]
4644 // CHECK9-NEXT: [[MUL11:%.*]] = mul nsw i32 [[TMP9]], 1
4645 // CHECK9-NEXT: [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
4646 // CHECK9-NEXT: store i32 [[ADD12]], ptr [[I7]], align 4, !llvm.access.group [[ACC_GRP6]]
4647 // CHECK9-NEXT: [[A13:%.*]] = getelementptr inbounds nuw [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
4648 // CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[I7]], align 4, !llvm.access.group [[ACC_GRP6]]
4649 // CHECK9-NEXT: [[IDXPROM14:%.*]] = sext i32 [[TMP10]] to i64
4650 // CHECK9-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds [123 x i32], ptr [[A13]], i64 0, i64 [[IDXPROM14]]
4651 // CHECK9-NEXT: store i32 0, ptr [[ARRAYIDX15]], align 4, !llvm.access.group [[ACC_GRP6]]
4652 // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE16:%.*]]
4653 // CHECK9: omp.body.continue16:
4654 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC17:%.*]]
4655 // CHECK9: omp.inner.for.inc17:
4656 // CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6]]
4657 // CHECK9-NEXT: [[ADD18:%.*]] = add nsw i32 [[TMP11]], 1
4658 // CHECK9-NEXT: store i32 [[ADD18]], ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6]]
4659 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND8]], !llvm.loop [[LOOP7:![0-9]+]]
4660 // CHECK9: omp.inner.for.end19:
4661 // CHECK9-NEXT: store i32 123, ptr [[I7]], align 4
4662 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB21]], align 4
4663 // CHECK9-NEXT: store i32 122, ptr [[DOTOMP_UB22]], align 4
4664 // CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_LB21]], align 4
4665 // CHECK9-NEXT: store i32 [[TMP12]], ptr [[DOTOMP_IV23]], align 4
4666 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND25:%.*]]
4667 // CHECK9: omp.inner.for.cond25:
4668 // CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV23]], align 4, !llvm.access.group [[ACC_GRP9:![0-9]+]]
4669 // CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB22]], align 4, !llvm.access.group [[ACC_GRP9]]
4670 // CHECK9-NEXT: [[CMP26:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
4671 // CHECK9-NEXT: br i1 [[CMP26]], label [[OMP_INNER_FOR_BODY27:%.*]], label [[OMP_INNER_FOR_END36:%.*]]
4672 // CHECK9: omp.inner.for.body27:
4673 // CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV23]], align 4, !llvm.access.group [[ACC_GRP9]]
4674 // CHECK9-NEXT: [[MUL28:%.*]] = mul nsw i32 [[TMP15]], 1
4675 // CHECK9-NEXT: [[ADD29:%.*]] = add nsw i32 0, [[MUL28]]
4676 // CHECK9-NEXT: store i32 [[ADD29]], ptr [[I24]], align 4, !llvm.access.group [[ACC_GRP9]]
4677 // CHECK9-NEXT: [[A30:%.*]] = getelementptr inbounds nuw [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
4678 // CHECK9-NEXT: [[TMP16:%.*]] = load i32, ptr [[I24]], align 4, !llvm.access.group [[ACC_GRP9]]
4679 // CHECK9-NEXT: [[IDXPROM31:%.*]] = sext i32 [[TMP16]] to i64
4680 // CHECK9-NEXT: [[ARRAYIDX32:%.*]] = getelementptr inbounds [123 x i32], ptr [[A30]], i64 0, i64 [[IDXPROM31]]
4681 // CHECK9-NEXT: store i32 0, ptr [[ARRAYIDX32]], align 4, !llvm.access.group [[ACC_GRP9]]
4682 // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE33:%.*]]
4683 // CHECK9: omp.body.continue33:
4684 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC34:%.*]]
4685 // CHECK9: omp.inner.for.inc34:
4686 // CHECK9-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV23]], align 4, !llvm.access.group [[ACC_GRP9]]
4687 // CHECK9-NEXT: [[ADD35:%.*]] = add nsw i32 [[TMP17]], 1
4688 // CHECK9-NEXT: store i32 [[ADD35]], ptr [[DOTOMP_IV23]], align 4, !llvm.access.group [[ACC_GRP9]]
4689 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND25]], !llvm.loop [[LOOP10:![0-9]+]]
4690 // CHECK9: omp.inner.for.end36:
4691 // CHECK9-NEXT: store i32 123, ptr [[I24]], align 4
4692 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB38]], align 4
4693 // CHECK9-NEXT: store i32 122, ptr [[DOTOMP_UB39]], align 4
4694 // CHECK9-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_LB38]], align 4
4695 // CHECK9-NEXT: store i32 [[TMP18]], ptr [[DOTOMP_IV40]], align 4
4696 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND42:%.*]]
4697 // CHECK9: omp.inner.for.cond42:
4698 // CHECK9-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV40]], align 4, !llvm.access.group [[ACC_GRP12:![0-9]+]]
4699 // CHECK9-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_UB39]], align 4, !llvm.access.group [[ACC_GRP12]]
4700 // CHECK9-NEXT: [[CMP43:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
4701 // CHECK9-NEXT: br i1 [[CMP43]], label [[OMP_INNER_FOR_BODY44:%.*]], label [[OMP_INNER_FOR_END53:%.*]]
4702 // CHECK9: omp.inner.for.body44:
4703 // CHECK9-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IV40]], align 4, !llvm.access.group [[ACC_GRP12]]
4704 // CHECK9-NEXT: [[MUL45:%.*]] = mul nsw i32 [[TMP21]], 1
4705 // CHECK9-NEXT: [[ADD46:%.*]] = add nsw i32 0, [[MUL45]]
4706 // CHECK9-NEXT: store i32 [[ADD46]], ptr [[I41]], align 4, !llvm.access.group [[ACC_GRP12]]
4707 // CHECK9-NEXT: [[A47:%.*]] = getelementptr inbounds nuw [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
4708 // CHECK9-NEXT: [[TMP22:%.*]] = load i32, ptr [[I41]], align 4, !llvm.access.group [[ACC_GRP12]]
4709 // CHECK9-NEXT: [[IDXPROM48:%.*]] = sext i32 [[TMP22]] to i64
4710 // CHECK9-NEXT: [[ARRAYIDX49:%.*]] = getelementptr inbounds [123 x i32], ptr [[A47]], i64 0, i64 [[IDXPROM48]]
4711 // CHECK9-NEXT: store i32 0, ptr [[ARRAYIDX49]], align 4, !llvm.access.group [[ACC_GRP12]]
4712 // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE50:%.*]]
4713 // CHECK9: omp.body.continue50:
4714 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC51:%.*]]
4715 // CHECK9: omp.inner.for.inc51:
4716 // CHECK9-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV40]], align 4, !llvm.access.group [[ACC_GRP12]]
4717 // CHECK9-NEXT: [[ADD52:%.*]] = add nsw i32 [[TMP23]], 1
4718 // CHECK9-NEXT: store i32 [[ADD52]], ptr [[DOTOMP_IV40]], align 4, !llvm.access.group [[ACC_GRP12]]
4719 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND42]], !llvm.loop [[LOOP13:![0-9]+]]
4720 // CHECK9: omp.inner.for.end53:
4721 // CHECK9-NEXT: store i32 123, ptr [[I41]], align 4
4722 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB55]], align 4
4723 // CHECK9-NEXT: store i32 122, ptr [[DOTOMP_UB56]], align 4
4724 // CHECK9-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_LB55]], align 4
4725 // CHECK9-NEXT: store i32 [[TMP24]], ptr [[DOTOMP_IV57]], align 4
4726 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND59:%.*]]
4727 // CHECK9: omp.inner.for.cond59:
4728 // CHECK9-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IV57]], align 4, !llvm.access.group [[ACC_GRP15:![0-9]+]]
4729 // CHECK9-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_UB56]], align 4, !llvm.access.group [[ACC_GRP15]]
4730 // CHECK9-NEXT: [[CMP60:%.*]] = icmp sle i32 [[TMP25]], [[TMP26]]
4731 // CHECK9-NEXT: br i1 [[CMP60]], label [[OMP_INNER_FOR_BODY61:%.*]], label [[OMP_INNER_FOR_END70:%.*]]
4732 // CHECK9: omp.inner.for.body61:
4733 // CHECK9-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV57]], align 4, !llvm.access.group [[ACC_GRP15]]
4734 // CHECK9-NEXT: [[MUL62:%.*]] = mul nsw i32 [[TMP27]], 1
4735 // CHECK9-NEXT: [[ADD63:%.*]] = add nsw i32 0, [[MUL62]]
4736 // CHECK9-NEXT: store i32 [[ADD63]], ptr [[I58]], align 4, !llvm.access.group [[ACC_GRP15]]
4737 // CHECK9-NEXT: [[A64:%.*]] = getelementptr inbounds nuw [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
4738 // CHECK9-NEXT: [[TMP28:%.*]] = load i32, ptr [[I58]], align 4, !llvm.access.group [[ACC_GRP15]]
4739 // CHECK9-NEXT: [[IDXPROM65:%.*]] = sext i32 [[TMP28]] to i64
4740 // CHECK9-NEXT: [[ARRAYIDX66:%.*]] = getelementptr inbounds [123 x i32], ptr [[A64]], i64 0, i64 [[IDXPROM65]]
4741 // CHECK9-NEXT: store i32 0, ptr [[ARRAYIDX66]], align 4, !llvm.access.group [[ACC_GRP15]]
4742 // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE67:%.*]]
4743 // CHECK9: omp.body.continue67:
4744 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC68:%.*]]
4745 // CHECK9: omp.inner.for.inc68:
4746 // CHECK9-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_IV57]], align 4, !llvm.access.group [[ACC_GRP15]]
4747 // CHECK9-NEXT: [[ADD69:%.*]] = add nsw i32 [[TMP29]], 1
4748 // CHECK9-NEXT: store i32 [[ADD69]], ptr [[DOTOMP_IV57]], align 4, !llvm.access.group [[ACC_GRP15]]
4749 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND59]], !llvm.loop [[LOOP16:![0-9]+]]
4750 // CHECK9: omp.inner.for.end70:
4751 // CHECK9-NEXT: store i32 123, ptr [[I58]], align 4
4752 // CHECK9-NEXT: [[A71:%.*]] = getelementptr inbounds nuw [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
4753 // CHECK9-NEXT: [[ARRAYIDX72:%.*]] = getelementptr inbounds [123 x i32], ptr [[A71]], i64 0, i64 0
4754 // CHECK9-NEXT: [[TMP30:%.*]] = load i32, ptr [[ARRAYIDX72]], align 4
4755 // CHECK9-NEXT: ret i32 [[TMP30]]
4758 // CHECK11-LABEL: define {{[^@]+}}@_Z21teams_template_structv
4759 // CHECK11-SAME: () #[[ATTR0:[0-9]+]] {
4760 // CHECK11-NEXT: entry:
4761 // CHECK11-NEXT: [[V:%.*]] = alloca [[STRUCT_SS:%.*]], align 4
4762 // CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_ZN2SSIiLi123ELx456EE3fooEv(ptr noundef nonnull align 4 dereferenceable(496) [[V]])
4763 // CHECK11-NEXT: ret i32 [[CALL]]
4766 // CHECK11-LABEL: define {{[^@]+}}@_ZN2SSIiLi123ELx456EE3fooEv
4767 // CHECK11-SAME: (ptr noundef nonnull align 4 dereferenceable(496) [[THIS:%.*]]) #[[ATTR0]] comdat align 2 {
4768 // CHECK11-NEXT: entry:
4769 // CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
4770 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
4771 // CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
4772 // CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
4773 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
4774 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4
4775 // CHECK11-NEXT: [[_TMP3:%.*]] = alloca i32, align 4
4776 // CHECK11-NEXT: [[DOTOMP_LB4:%.*]] = alloca i32, align 4
4777 // CHECK11-NEXT: [[DOTOMP_UB5:%.*]] = alloca i32, align 4
4778 // CHECK11-NEXT: [[DOTOMP_IV6:%.*]] = alloca i32, align 4
4779 // CHECK11-NEXT: [[I7:%.*]] = alloca i32, align 4
4780 // CHECK11-NEXT: [[_TMP19:%.*]] = alloca i32, align 4
4781 // CHECK11-NEXT: [[DOTOMP_LB20:%.*]] = alloca i32, align 4
4782 // CHECK11-NEXT: [[DOTOMP_UB21:%.*]] = alloca i32, align 4
4783 // CHECK11-NEXT: [[DOTOMP_IV22:%.*]] = alloca i32, align 4
4784 // CHECK11-NEXT: [[I23:%.*]] = alloca i32, align 4
4785 // CHECK11-NEXT: [[_TMP35:%.*]] = alloca i32, align 4
4786 // CHECK11-NEXT: [[DOTOMP_LB36:%.*]] = alloca i32, align 4
4787 // CHECK11-NEXT: [[DOTOMP_UB37:%.*]] = alloca i32, align 4
4788 // CHECK11-NEXT: [[DOTOMP_IV38:%.*]] = alloca i32, align 4
4789 // CHECK11-NEXT: [[I39:%.*]] = alloca i32, align 4
4790 // CHECK11-NEXT: [[_TMP51:%.*]] = alloca i32, align 4
4791 // CHECK11-NEXT: [[DOTOMP_LB52:%.*]] = alloca i32, align 4
4792 // CHECK11-NEXT: [[DOTOMP_UB53:%.*]] = alloca i32, align 4
4793 // CHECK11-NEXT: [[DOTOMP_IV54:%.*]] = alloca i32, align 4
4794 // CHECK11-NEXT: [[I55:%.*]] = alloca i32, align 4
4795 // CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
4796 // CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
4797 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
4798 // CHECK11-NEXT: store i32 122, ptr [[DOTOMP_UB]], align 4
4799 // CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
4800 // CHECK11-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4
4801 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
4802 // CHECK11: omp.inner.for.cond:
4803 // CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]]
4804 // CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP3]]
4805 // CHECK11-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
4806 // CHECK11-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4807 // CHECK11: omp.inner.for.body:
4808 // CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]]
4809 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
4810 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
4811 // CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]]
4812 // CHECK11-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_SS:%.*]], ptr [[THIS1]], i32 0, i32 0
4813 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]]
4814 // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A]], i32 0, i32 [[TMP4]]
4815 // CHECK11-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP3]]
4816 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
4817 // CHECK11: omp.body.continue:
4818 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
4819 // CHECK11: omp.inner.for.inc:
4820 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]]
4821 // CHECK11-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP5]], 1
4822 // CHECK11-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]]
4823 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
4824 // CHECK11: omp.inner.for.end:
4825 // CHECK11-NEXT: store i32 123, ptr [[I]], align 4
4826 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB4]], align 4
4827 // CHECK11-NEXT: store i32 122, ptr [[DOTOMP_UB5]], align 4
4828 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB4]], align 4
4829 // CHECK11-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV6]], align 4
4830 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND8:%.*]]
4831 // CHECK11: omp.inner.for.cond8:
4832 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP7:![0-9]+]]
4833 // CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB5]], align 4, !llvm.access.group [[ACC_GRP7]]
4834 // CHECK11-NEXT: [[CMP9:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
4835 // CHECK11-NEXT: br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY10:%.*]], label [[OMP_INNER_FOR_END18:%.*]]
4836 // CHECK11: omp.inner.for.body10:
4837 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP7]]
4838 // CHECK11-NEXT: [[MUL11:%.*]] = mul nsw i32 [[TMP9]], 1
4839 // CHECK11-NEXT: [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
4840 // CHECK11-NEXT: store i32 [[ADD12]], ptr [[I7]], align 4, !llvm.access.group [[ACC_GRP7]]
4841 // CHECK11-NEXT: [[A13:%.*]] = getelementptr inbounds nuw [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
4842 // CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[I7]], align 4, !llvm.access.group [[ACC_GRP7]]
4843 // CHECK11-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [123 x i32], ptr [[A13]], i32 0, i32 [[TMP10]]
4844 // CHECK11-NEXT: store i32 0, ptr [[ARRAYIDX14]], align 4, !llvm.access.group [[ACC_GRP7]]
4845 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE15:%.*]]
4846 // CHECK11: omp.body.continue15:
4847 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC16:%.*]]
4848 // CHECK11: omp.inner.for.inc16:
4849 // CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP7]]
4850 // CHECK11-NEXT: [[ADD17:%.*]] = add nsw i32 [[TMP11]], 1
4851 // CHECK11-NEXT: store i32 [[ADD17]], ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP7]]
4852 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND8]], !llvm.loop [[LOOP8:![0-9]+]]
4853 // CHECK11: omp.inner.for.end18:
4854 // CHECK11-NEXT: store i32 123, ptr [[I7]], align 4
4855 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB20]], align 4
4856 // CHECK11-NEXT: store i32 122, ptr [[DOTOMP_UB21]], align 4
4857 // CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_LB20]], align 4
4858 // CHECK11-NEXT: store i32 [[TMP12]], ptr [[DOTOMP_IV22]], align 4
4859 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND24:%.*]]
4860 // CHECK11: omp.inner.for.cond24:
4861 // CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV22]], align 4, !llvm.access.group [[ACC_GRP10:![0-9]+]]
4862 // CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB21]], align 4, !llvm.access.group [[ACC_GRP10]]
4863 // CHECK11-NEXT: [[CMP25:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
4864 // CHECK11-NEXT: br i1 [[CMP25]], label [[OMP_INNER_FOR_BODY26:%.*]], label [[OMP_INNER_FOR_END34:%.*]]
4865 // CHECK11: omp.inner.for.body26:
4866 // CHECK11-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV22]], align 4, !llvm.access.group [[ACC_GRP10]]
4867 // CHECK11-NEXT: [[MUL27:%.*]] = mul nsw i32 [[TMP15]], 1
4868 // CHECK11-NEXT: [[ADD28:%.*]] = add nsw i32 0, [[MUL27]]
4869 // CHECK11-NEXT: store i32 [[ADD28]], ptr [[I23]], align 4, !llvm.access.group [[ACC_GRP10]]
4870 // CHECK11-NEXT: [[A29:%.*]] = getelementptr inbounds nuw [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
4871 // CHECK11-NEXT: [[TMP16:%.*]] = load i32, ptr [[I23]], align 4, !llvm.access.group [[ACC_GRP10]]
4872 // CHECK11-NEXT: [[ARRAYIDX30:%.*]] = getelementptr inbounds [123 x i32], ptr [[A29]], i32 0, i32 [[TMP16]]
4873 // CHECK11-NEXT: store i32 0, ptr [[ARRAYIDX30]], align 4, !llvm.access.group [[ACC_GRP10]]
4874 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE31:%.*]]
4875 // CHECK11: omp.body.continue31:
4876 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC32:%.*]]
4877 // CHECK11: omp.inner.for.inc32:
4878 // CHECK11-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV22]], align 4, !llvm.access.group [[ACC_GRP10]]
4879 // CHECK11-NEXT: [[ADD33:%.*]] = add nsw i32 [[TMP17]], 1
4880 // CHECK11-NEXT: store i32 [[ADD33]], ptr [[DOTOMP_IV22]], align 4, !llvm.access.group [[ACC_GRP10]]
4881 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND24]], !llvm.loop [[LOOP11:![0-9]+]]
4882 // CHECK11: omp.inner.for.end34:
4883 // CHECK11-NEXT: store i32 123, ptr [[I23]], align 4
4884 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB36]], align 4
4885 // CHECK11-NEXT: store i32 122, ptr [[DOTOMP_UB37]], align 4
4886 // CHECK11-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_LB36]], align 4
4887 // CHECK11-NEXT: store i32 [[TMP18]], ptr [[DOTOMP_IV38]], align 4
4888 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND40:%.*]]
4889 // CHECK11: omp.inner.for.cond40:
4890 // CHECK11-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV38]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]]
4891 // CHECK11-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_UB37]], align 4, !llvm.access.group [[ACC_GRP13]]
4892 // CHECK11-NEXT: [[CMP41:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
4893 // CHECK11-NEXT: br i1 [[CMP41]], label [[OMP_INNER_FOR_BODY42:%.*]], label [[OMP_INNER_FOR_END50:%.*]]
4894 // CHECK11: omp.inner.for.body42:
4895 // CHECK11-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IV38]], align 4, !llvm.access.group [[ACC_GRP13]]
4896 // CHECK11-NEXT: [[MUL43:%.*]] = mul nsw i32 [[TMP21]], 1
4897 // CHECK11-NEXT: [[ADD44:%.*]] = add nsw i32 0, [[MUL43]]
4898 // CHECK11-NEXT: store i32 [[ADD44]], ptr [[I39]], align 4, !llvm.access.group [[ACC_GRP13]]
4899 // CHECK11-NEXT: [[A45:%.*]] = getelementptr inbounds nuw [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
4900 // CHECK11-NEXT: [[TMP22:%.*]] = load i32, ptr [[I39]], align 4, !llvm.access.group [[ACC_GRP13]]
4901 // CHECK11-NEXT: [[ARRAYIDX46:%.*]] = getelementptr inbounds [123 x i32], ptr [[A45]], i32 0, i32 [[TMP22]]
4902 // CHECK11-NEXT: store i32 0, ptr [[ARRAYIDX46]], align 4, !llvm.access.group [[ACC_GRP13]]
4903 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE47:%.*]]
4904 // CHECK11: omp.body.continue47:
4905 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC48:%.*]]
4906 // CHECK11: omp.inner.for.inc48:
4907 // CHECK11-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV38]], align 4, !llvm.access.group [[ACC_GRP13]]
4908 // CHECK11-NEXT: [[ADD49:%.*]] = add nsw i32 [[TMP23]], 1
4909 // CHECK11-NEXT: store i32 [[ADD49]], ptr [[DOTOMP_IV38]], align 4, !llvm.access.group [[ACC_GRP13]]
4910 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND40]], !llvm.loop [[LOOP14:![0-9]+]]
4911 // CHECK11: omp.inner.for.end50:
4912 // CHECK11-NEXT: store i32 123, ptr [[I39]], align 4
4913 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB52]], align 4
4914 // CHECK11-NEXT: store i32 122, ptr [[DOTOMP_UB53]], align 4
4915 // CHECK11-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_LB52]], align 4
4916 // CHECK11-NEXT: store i32 [[TMP24]], ptr [[DOTOMP_IV54]], align 4
4917 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND56:%.*]]
4918 // CHECK11: omp.inner.for.cond56:
4919 // CHECK11-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IV54]], align 4, !llvm.access.group [[ACC_GRP16:![0-9]+]]
4920 // CHECK11-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_UB53]], align 4, !llvm.access.group [[ACC_GRP16]]
4921 // CHECK11-NEXT: [[CMP57:%.*]] = icmp sle i32 [[TMP25]], [[TMP26]]
4922 // CHECK11-NEXT: br i1 [[CMP57]], label [[OMP_INNER_FOR_BODY58:%.*]], label [[OMP_INNER_FOR_END66:%.*]]
4923 // CHECK11: omp.inner.for.body58:
4924 // CHECK11-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV54]], align 4, !llvm.access.group [[ACC_GRP16]]
4925 // CHECK11-NEXT: [[MUL59:%.*]] = mul nsw i32 [[TMP27]], 1
4926 // CHECK11-NEXT: [[ADD60:%.*]] = add nsw i32 0, [[MUL59]]
4927 // CHECK11-NEXT: store i32 [[ADD60]], ptr [[I55]], align 4, !llvm.access.group [[ACC_GRP16]]
4928 // CHECK11-NEXT: [[A61:%.*]] = getelementptr inbounds nuw [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
4929 // CHECK11-NEXT: [[TMP28:%.*]] = load i32, ptr [[I55]], align 4, !llvm.access.group [[ACC_GRP16]]
4930 // CHECK11-NEXT: [[ARRAYIDX62:%.*]] = getelementptr inbounds [123 x i32], ptr [[A61]], i32 0, i32 [[TMP28]]
4931 // CHECK11-NEXT: store i32 0, ptr [[ARRAYIDX62]], align 4, !llvm.access.group [[ACC_GRP16]]
4932 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE63:%.*]]
4933 // CHECK11: omp.body.continue63:
4934 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC64:%.*]]
4935 // CHECK11: omp.inner.for.inc64:
4936 // CHECK11-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_IV54]], align 4, !llvm.access.group [[ACC_GRP16]]
4937 // CHECK11-NEXT: [[ADD65:%.*]] = add nsw i32 [[TMP29]], 1
4938 // CHECK11-NEXT: store i32 [[ADD65]], ptr [[DOTOMP_IV54]], align 4, !llvm.access.group [[ACC_GRP16]]
4939 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND56]], !llvm.loop [[LOOP17:![0-9]+]]
4940 // CHECK11: omp.inner.for.end66:
4941 // CHECK11-NEXT: store i32 123, ptr [[I55]], align 4
4942 // CHECK11-NEXT: [[A67:%.*]] = getelementptr inbounds nuw [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
4943 // CHECK11-NEXT: [[ARRAYIDX68:%.*]] = getelementptr inbounds [123 x i32], ptr [[A67]], i32 0, i32 0
4944 // CHECK11-NEXT: [[TMP30:%.*]] = load i32, ptr [[ARRAYIDX68]], align 4
4945 // CHECK11-NEXT: ret i32 [[TMP30]]
4948 // CHECK13-LABEL: define {{[^@]+}}@main
4949 // CHECK13-SAME: (i32 noundef signext [[ARGC:%.*]], ptr noundef [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] {
4950 // CHECK13-NEXT: entry:
4951 // CHECK13-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
4952 // CHECK13-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
4953 // CHECK13-NEXT: [[ARGV_ADDR:%.*]] = alloca ptr, align 8
4954 // CHECK13-NEXT: [[N:%.*]] = alloca i32, align 4
4955 // CHECK13-NEXT: [[SAVED_STACK:%.*]] = alloca ptr, align 8
4956 // CHECK13-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
4957 // CHECK13-NEXT: [[M:%.*]] = alloca i32, align 4
4958 // CHECK13-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
4959 // CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x ptr], align 8
4960 // CHECK13-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x ptr], align 8
4961 // CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x ptr], align 8
4962 // CHECK13-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8
4963 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
4964 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
4965 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
4966 // CHECK13-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
4967 // CHECK13-NEXT: [[N_CASTED3:%.*]] = alloca i64, align 8
4968 // CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [3 x ptr], align 8
4969 // CHECK13-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [3 x ptr], align 8
4970 // CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [3 x ptr], align 8
4971 // CHECK13-NEXT: [[DOTOFFLOAD_SIZES7:%.*]] = alloca [3 x i64], align 8
4972 // CHECK13-NEXT: [[_TMP8:%.*]] = alloca i32, align 4
4973 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4
4974 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4
4975 // CHECK13-NEXT: [[KERNEL_ARGS15:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
4976 // CHECK13-NEXT: [[M_CASTED:%.*]] = alloca i64, align 8
4977 // CHECK13-NEXT: [[N_CASTED18:%.*]] = alloca i64, align 8
4978 // CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS19:%.*]] = alloca [4 x ptr], align 8
4979 // CHECK13-NEXT: [[DOTOFFLOAD_PTRS20:%.*]] = alloca [4 x ptr], align 8
4980 // CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS21:%.*]] = alloca [4 x ptr], align 8
4981 // CHECK13-NEXT: [[DOTOFFLOAD_SIZES22:%.*]] = alloca [4 x i64], align 8
4982 // CHECK13-NEXT: [[_TMP23:%.*]] = alloca i32, align 4
4983 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4
4984 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4
4985 // CHECK13-NEXT: [[KERNEL_ARGS30:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
4986 // CHECK13-NEXT: [[N_CASTED33:%.*]] = alloca i64, align 8
4987 // CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS34:%.*]] = alloca [3 x ptr], align 8
4988 // CHECK13-NEXT: [[DOTOFFLOAD_PTRS35:%.*]] = alloca [3 x ptr], align 8
4989 // CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS36:%.*]] = alloca [3 x ptr], align 8
4990 // CHECK13-NEXT: [[DOTOFFLOAD_SIZES37:%.*]] = alloca [3 x i64], align 8
4991 // CHECK13-NEXT: [[_TMP38:%.*]] = alloca i32, align 4
4992 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4
4993 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_40:%.*]] = alloca i32, align 4
4994 // CHECK13-NEXT: [[KERNEL_ARGS45:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
4995 // CHECK13-NEXT: [[M_CASTED48:%.*]] = alloca i64, align 8
4996 // CHECK13-NEXT: [[N_CASTED49:%.*]] = alloca i64, align 8
4997 // CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS50:%.*]] = alloca [4 x ptr], align 8
4998 // CHECK13-NEXT: [[DOTOFFLOAD_PTRS51:%.*]] = alloca [4 x ptr], align 8
4999 // CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS52:%.*]] = alloca [4 x ptr], align 8
5000 // CHECK13-NEXT: [[DOTOFFLOAD_SIZES53:%.*]] = alloca [4 x i64], align 8
5001 // CHECK13-NEXT: [[_TMP54:%.*]] = alloca i32, align 4
5002 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_55:%.*]] = alloca i32, align 4
5003 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_56:%.*]] = alloca i32, align 4
5004 // CHECK13-NEXT: [[KERNEL_ARGS61:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
5005 // CHECK13-NEXT: store i32 0, ptr [[RETVAL]], align 4
5006 // CHECK13-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4
5007 // CHECK13-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8
5008 // CHECK13-NEXT: store i32 100, ptr [[N]], align 4
5009 // CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4
5010 // CHECK13-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
5011 // CHECK13-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0()
5012 // CHECK13-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8
5013 // CHECK13-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4
5014 // CHECK13-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8
5015 // CHECK13-NEXT: store i32 10, ptr [[M]], align 4
5016 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[N]], align 4
5017 // CHECK13-NEXT: store i32 [[TMP3]], ptr [[N_CASTED]], align 4
5018 // CHECK13-NEXT: [[TMP4:%.*]] = load i64, ptr [[N_CASTED]], align 8
5019 // CHECK13-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP1]], 4
5020 // CHECK13-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[DOTOFFLOAD_SIZES]], ptr align 8 @.offload_sizes, i64 24, i1 false)
5021 // CHECK13-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
5022 // CHECK13-NEXT: store i64 [[TMP4]], ptr [[TMP6]], align 8
5023 // CHECK13-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
5024 // CHECK13-NEXT: store i64 [[TMP4]], ptr [[TMP7]], align 8
5025 // CHECK13-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
5026 // CHECK13-NEXT: store ptr null, ptr [[TMP8]], align 8
5027 // CHECK13-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
5028 // CHECK13-NEXT: store i64 [[TMP1]], ptr [[TMP9]], align 8
5029 // CHECK13-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1
5030 // CHECK13-NEXT: store i64 [[TMP1]], ptr [[TMP10]], align 8
5031 // CHECK13-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
5032 // CHECK13-NEXT: store ptr null, ptr [[TMP11]], align 8
5033 // CHECK13-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
5034 // CHECK13-NEXT: store ptr [[VLA]], ptr [[TMP12]], align 8
5035 // CHECK13-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2
5036 // CHECK13-NEXT: store ptr [[VLA]], ptr [[TMP13]], align 8
5037 // CHECK13-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 2
5038 // CHECK13-NEXT: store i64 [[TMP5]], ptr [[TMP14]], align 8
5039 // CHECK13-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
5040 // CHECK13-NEXT: store ptr null, ptr [[TMP15]], align 8
5041 // CHECK13-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
5042 // CHECK13-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
5043 // CHECK13-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 0
5044 // CHECK13-NEXT: [[TMP19:%.*]] = load i32, ptr [[N]], align 4
5045 // CHECK13-NEXT: store i32 [[TMP19]], ptr [[DOTCAPTURE_EXPR_]], align 4
5046 // CHECK13-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
5047 // CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP20]], 0
5048 // CHECK13-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
5049 // CHECK13-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
5050 // CHECK13-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
5051 // CHECK13-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
5052 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP21]], 1
5053 // CHECK13-NEXT: [[TMP22:%.*]] = zext i32 [[ADD]] to i64
5054 // CHECK13-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
5055 // CHECK13-NEXT: store i32 3, ptr [[TMP23]], align 4
5056 // CHECK13-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
5057 // CHECK13-NEXT: store i32 3, ptr [[TMP24]], align 4
5058 // CHECK13-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
5059 // CHECK13-NEXT: store ptr [[TMP16]], ptr [[TMP25]], align 8
5060 // CHECK13-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
5061 // CHECK13-NEXT: store ptr [[TMP17]], ptr [[TMP26]], align 8
5062 // CHECK13-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
5063 // CHECK13-NEXT: store ptr [[TMP18]], ptr [[TMP27]], align 8
5064 // CHECK13-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
5065 // CHECK13-NEXT: store ptr @.offload_maptypes, ptr [[TMP28]], align 8
5066 // CHECK13-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
5067 // CHECK13-NEXT: store ptr null, ptr [[TMP29]], align 8
5068 // CHECK13-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
5069 // CHECK13-NEXT: store ptr null, ptr [[TMP30]], align 8
5070 // CHECK13-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
5071 // CHECK13-NEXT: store i64 [[TMP22]], ptr [[TMP31]], align 8
5072 // CHECK13-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
5073 // CHECK13-NEXT: store i64 0, ptr [[TMP32]], align 8
5074 // CHECK13-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
5075 // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP33]], align 4
5076 // CHECK13-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
5077 // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP34]], align 4
5078 // CHECK13-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
5079 // CHECK13-NEXT: store i32 0, ptr [[TMP35]], align 4
5080 // CHECK13-NEXT: [[TMP36:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.region_id, ptr [[KERNEL_ARGS]])
5081 // CHECK13-NEXT: [[TMP37:%.*]] = icmp ne i32 [[TMP36]], 0
5082 // CHECK13-NEXT: br i1 [[TMP37]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
5083 // CHECK13: omp_offload.failed:
5084 // CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154(i64 [[TMP4]], i64 [[TMP1]], ptr [[VLA]]) #[[ATTR3:[0-9]+]]
5085 // CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT]]
5086 // CHECK13: omp_offload.cont:
5087 // CHECK13-NEXT: [[TMP38:%.*]] = load i32, ptr [[N]], align 4
5088 // CHECK13-NEXT: store i32 [[TMP38]], ptr [[N_CASTED3]], align 4
5089 // CHECK13-NEXT: [[TMP39:%.*]] = load i64, ptr [[N_CASTED3]], align 8
5090 // CHECK13-NEXT: [[TMP40:%.*]] = mul nuw i64 [[TMP1]], 4
5091 // CHECK13-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[DOTOFFLOAD_SIZES7]], ptr align 8 @.offload_sizes.1, i64 24, i1 false)
5092 // CHECK13-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
5093 // CHECK13-NEXT: store i64 [[TMP39]], ptr [[TMP41]], align 8
5094 // CHECK13-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
5095 // CHECK13-NEXT: store i64 [[TMP39]], ptr [[TMP42]], align 8
5096 // CHECK13-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS6]], i64 0, i64 0
5097 // CHECK13-NEXT: store ptr null, ptr [[TMP43]], align 8
5098 // CHECK13-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1
5099 // CHECK13-NEXT: store i64 [[TMP1]], ptr [[TMP44]], align 8
5100 // CHECK13-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS5]], i32 0, i32 1
5101 // CHECK13-NEXT: store i64 [[TMP1]], ptr [[TMP45]], align 8
5102 // CHECK13-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS6]], i64 0, i64 1
5103 // CHECK13-NEXT: store ptr null, ptr [[TMP46]], align 8
5104 // CHECK13-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2
5105 // CHECK13-NEXT: store ptr [[VLA]], ptr [[TMP47]], align 8
5106 // CHECK13-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS5]], i32 0, i32 2
5107 // CHECK13-NEXT: store ptr [[VLA]], ptr [[TMP48]], align 8
5108 // CHECK13-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES7]], i32 0, i32 2
5109 // CHECK13-NEXT: store i64 [[TMP40]], ptr [[TMP49]], align 8
5110 // CHECK13-NEXT: [[TMP50:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS6]], i64 0, i64 2
5111 // CHECK13-NEXT: store ptr null, ptr [[TMP50]], align 8
5112 // CHECK13-NEXT: [[TMP51:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
5113 // CHECK13-NEXT: [[TMP52:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
5114 // CHECK13-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES7]], i32 0, i32 0
5115 // CHECK13-NEXT: [[TMP54:%.*]] = load i32, ptr [[N]], align 4
5116 // CHECK13-NEXT: store i32 [[TMP54]], ptr [[DOTCAPTURE_EXPR_9]], align 4
5117 // CHECK13-NEXT: [[TMP55:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_9]], align 4
5118 // CHECK13-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP55]], 0
5119 // CHECK13-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1
5120 // CHECK13-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1
5121 // CHECK13-NEXT: store i32 [[SUB13]], ptr [[DOTCAPTURE_EXPR_10]], align 4
5122 // CHECK13-NEXT: [[TMP56:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_10]], align 4
5123 // CHECK13-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP56]], 1
5124 // CHECK13-NEXT: [[TMP57:%.*]] = zext i32 [[ADD14]] to i64
5125 // CHECK13-NEXT: [[TMP58:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 0
5126 // CHECK13-NEXT: store i32 3, ptr [[TMP58]], align 4
5127 // CHECK13-NEXT: [[TMP59:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 1
5128 // CHECK13-NEXT: store i32 3, ptr [[TMP59]], align 4
5129 // CHECK13-NEXT: [[TMP60:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 2
5130 // CHECK13-NEXT: store ptr [[TMP51]], ptr [[TMP60]], align 8
5131 // CHECK13-NEXT: [[TMP61:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 3
5132 // CHECK13-NEXT: store ptr [[TMP52]], ptr [[TMP61]], align 8
5133 // CHECK13-NEXT: [[TMP62:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 4
5134 // CHECK13-NEXT: store ptr [[TMP53]], ptr [[TMP62]], align 8
5135 // CHECK13-NEXT: [[TMP63:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 5
5136 // CHECK13-NEXT: store ptr @.offload_maptypes.2, ptr [[TMP63]], align 8
5137 // CHECK13-NEXT: [[TMP64:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 6
5138 // CHECK13-NEXT: store ptr null, ptr [[TMP64]], align 8
5139 // CHECK13-NEXT: [[TMP65:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 7
5140 // CHECK13-NEXT: store ptr null, ptr [[TMP65]], align 8
5141 // CHECK13-NEXT: [[TMP66:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 8
5142 // CHECK13-NEXT: store i64 [[TMP57]], ptr [[TMP66]], align 8
5143 // CHECK13-NEXT: [[TMP67:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 9
5144 // CHECK13-NEXT: store i64 0, ptr [[TMP67]], align 8
5145 // CHECK13-NEXT: [[TMP68:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 10
5146 // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP68]], align 4
5147 // CHECK13-NEXT: [[TMP69:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 11
5148 // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP69]], align 4
5149 // CHECK13-NEXT: [[TMP70:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 12
5150 // CHECK13-NEXT: store i32 0, ptr [[TMP70]], align 4
5151 // CHECK13-NEXT: [[TMP71:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.region_id, ptr [[KERNEL_ARGS15]])
5152 // CHECK13-NEXT: [[TMP72:%.*]] = icmp ne i32 [[TMP71]], 0
5153 // CHECK13-NEXT: br i1 [[TMP72]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]]
5154 // CHECK13: omp_offload.failed16:
5155 // CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159(i64 [[TMP39]], i64 [[TMP1]], ptr [[VLA]]) #[[ATTR3]]
5156 // CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT17]]
5157 // CHECK13: omp_offload.cont17:
5158 // CHECK13-NEXT: [[TMP73:%.*]] = load i32, ptr [[M]], align 4
5159 // CHECK13-NEXT: store i32 [[TMP73]], ptr [[M_CASTED]], align 4
5160 // CHECK13-NEXT: [[TMP74:%.*]] = load i64, ptr [[M_CASTED]], align 8
5161 // CHECK13-NEXT: [[TMP75:%.*]] = load i32, ptr [[N]], align 4
5162 // CHECK13-NEXT: store i32 [[TMP75]], ptr [[N_CASTED18]], align 4
5163 // CHECK13-NEXT: [[TMP76:%.*]] = load i64, ptr [[N_CASTED18]], align 8
5164 // CHECK13-NEXT: [[TMP77:%.*]] = mul nuw i64 [[TMP1]], 4
5165 // CHECK13-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[DOTOFFLOAD_SIZES22]], ptr align 8 @.offload_sizes.3, i64 32, i1 false)
5166 // CHECK13-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0
5167 // CHECK13-NEXT: store i64 [[TMP74]], ptr [[TMP78]], align 8
5168 // CHECK13-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 0
5169 // CHECK13-NEXT: store i64 [[TMP74]], ptr [[TMP79]], align 8
5170 // CHECK13-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS21]], i64 0, i64 0
5171 // CHECK13-NEXT: store ptr null, ptr [[TMP80]], align 8
5172 // CHECK13-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 1
5173 // CHECK13-NEXT: store i64 [[TMP76]], ptr [[TMP81]], align 8
5174 // CHECK13-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 1
5175 // CHECK13-NEXT: store i64 [[TMP76]], ptr [[TMP82]], align 8
5176 // CHECK13-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS21]], i64 0, i64 1
5177 // CHECK13-NEXT: store ptr null, ptr [[TMP83]], align 8
5178 // CHECK13-NEXT: [[TMP84:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 2
5179 // CHECK13-NEXT: store i64 [[TMP1]], ptr [[TMP84]], align 8
5180 // CHECK13-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 2
5181 // CHECK13-NEXT: store i64 [[TMP1]], ptr [[TMP85]], align 8
5182 // CHECK13-NEXT: [[TMP86:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS21]], i64 0, i64 2
5183 // CHECK13-NEXT: store ptr null, ptr [[TMP86]], align 8
5184 // CHECK13-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 3
5185 // CHECK13-NEXT: store ptr [[VLA]], ptr [[TMP87]], align 8
5186 // CHECK13-NEXT: [[TMP88:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 3
5187 // CHECK13-NEXT: store ptr [[VLA]], ptr [[TMP88]], align 8
5188 // CHECK13-NEXT: [[TMP89:%.*]] = getelementptr inbounds [4 x i64], ptr [[DOTOFFLOAD_SIZES22]], i32 0, i32 3
5189 // CHECK13-NEXT: store i64 [[TMP77]], ptr [[TMP89]], align 8
5190 // CHECK13-NEXT: [[TMP90:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS21]], i64 0, i64 3
5191 // CHECK13-NEXT: store ptr null, ptr [[TMP90]], align 8
5192 // CHECK13-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0
5193 // CHECK13-NEXT: [[TMP92:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 0
5194 // CHECK13-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i64], ptr [[DOTOFFLOAD_SIZES22]], i32 0, i32 0
5195 // CHECK13-NEXT: [[TMP94:%.*]] = load i32, ptr [[N]], align 4
5196 // CHECK13-NEXT: store i32 [[TMP94]], ptr [[DOTCAPTURE_EXPR_24]], align 4
5197 // CHECK13-NEXT: [[TMP95:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_24]], align 4
5198 // CHECK13-NEXT: [[SUB26:%.*]] = sub nsw i32 [[TMP95]], 0
5199 // CHECK13-NEXT: [[DIV27:%.*]] = sdiv i32 [[SUB26]], 1
5200 // CHECK13-NEXT: [[SUB28:%.*]] = sub nsw i32 [[DIV27]], 1
5201 // CHECK13-NEXT: store i32 [[SUB28]], ptr [[DOTCAPTURE_EXPR_25]], align 4
5202 // CHECK13-NEXT: [[TMP96:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_25]], align 4
5203 // CHECK13-NEXT: [[ADD29:%.*]] = add nsw i32 [[TMP96]], 1
5204 // CHECK13-NEXT: [[TMP97:%.*]] = zext i32 [[ADD29]] to i64
5205 // CHECK13-NEXT: [[TMP98:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 0
5206 // CHECK13-NEXT: store i32 3, ptr [[TMP98]], align 4
5207 // CHECK13-NEXT: [[TMP99:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 1
5208 // CHECK13-NEXT: store i32 4, ptr [[TMP99]], align 4
5209 // CHECK13-NEXT: [[TMP100:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 2
5210 // CHECK13-NEXT: store ptr [[TMP91]], ptr [[TMP100]], align 8
5211 // CHECK13-NEXT: [[TMP101:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 3
5212 // CHECK13-NEXT: store ptr [[TMP92]], ptr [[TMP101]], align 8
5213 // CHECK13-NEXT: [[TMP102:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 4
5214 // CHECK13-NEXT: store ptr [[TMP93]], ptr [[TMP102]], align 8
5215 // CHECK13-NEXT: [[TMP103:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 5
5216 // CHECK13-NEXT: store ptr @.offload_maptypes.4, ptr [[TMP103]], align 8
5217 // CHECK13-NEXT: [[TMP104:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 6
5218 // CHECK13-NEXT: store ptr null, ptr [[TMP104]], align 8
5219 // CHECK13-NEXT: [[TMP105:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 7
5220 // CHECK13-NEXT: store ptr null, ptr [[TMP105]], align 8
5221 // CHECK13-NEXT: [[TMP106:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 8
5222 // CHECK13-NEXT: store i64 [[TMP97]], ptr [[TMP106]], align 8
5223 // CHECK13-NEXT: [[TMP107:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 9
5224 // CHECK13-NEXT: store i64 0, ptr [[TMP107]], align 8
5225 // CHECK13-NEXT: [[TMP108:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 10
5226 // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP108]], align 4
5227 // CHECK13-NEXT: [[TMP109:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 11
5228 // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP109]], align 4
5229 // CHECK13-NEXT: [[TMP110:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 12
5230 // CHECK13-NEXT: store i32 0, ptr [[TMP110]], align 4
5231 // CHECK13-NEXT: [[TMP111:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.region_id, ptr [[KERNEL_ARGS30]])
5232 // CHECK13-NEXT: [[TMP112:%.*]] = icmp ne i32 [[TMP111]], 0
5233 // CHECK13-NEXT: br i1 [[TMP112]], label [[OMP_OFFLOAD_FAILED31:%.*]], label [[OMP_OFFLOAD_CONT32:%.*]]
5234 // CHECK13: omp_offload.failed31:
5235 // CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164(i64 [[TMP74]], i64 [[TMP76]], i64 [[TMP1]], ptr [[VLA]]) #[[ATTR3]]
5236 // CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT32]]
5237 // CHECK13: omp_offload.cont32:
5238 // CHECK13-NEXT: [[TMP113:%.*]] = load i32, ptr [[N]], align 4
5239 // CHECK13-NEXT: store i32 [[TMP113]], ptr [[N_CASTED33]], align 4
5240 // CHECK13-NEXT: [[TMP114:%.*]] = load i64, ptr [[N_CASTED33]], align 8
5241 // CHECK13-NEXT: [[TMP115:%.*]] = mul nuw i64 [[TMP1]], 4
5242 // CHECK13-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[DOTOFFLOAD_SIZES37]], ptr align 8 @.offload_sizes.5, i64 24, i1 false)
5243 // CHECK13-NEXT: [[TMP116:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 0
5244 // CHECK13-NEXT: store i64 [[TMP114]], ptr [[TMP116]], align 8
5245 // CHECK13-NEXT: [[TMP117:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS35]], i32 0, i32 0
5246 // CHECK13-NEXT: store i64 [[TMP114]], ptr [[TMP117]], align 8
5247 // CHECK13-NEXT: [[TMP118:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 0
5248 // CHECK13-NEXT: store ptr null, ptr [[TMP118]], align 8
5249 // CHECK13-NEXT: [[TMP119:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 1
5250 // CHECK13-NEXT: store i64 [[TMP1]], ptr [[TMP119]], align 8
5251 // CHECK13-NEXT: [[TMP120:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS35]], i32 0, i32 1
5252 // CHECK13-NEXT: store i64 [[TMP1]], ptr [[TMP120]], align 8
5253 // CHECK13-NEXT: [[TMP121:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 1
5254 // CHECK13-NEXT: store ptr null, ptr [[TMP121]], align 8
5255 // CHECK13-NEXT: [[TMP122:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 2
5256 // CHECK13-NEXT: store ptr [[VLA]], ptr [[TMP122]], align 8
5257 // CHECK13-NEXT: [[TMP123:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS35]], i32 0, i32 2
5258 // CHECK13-NEXT: store ptr [[VLA]], ptr [[TMP123]], align 8
5259 // CHECK13-NEXT: [[TMP124:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES37]], i32 0, i32 2
5260 // CHECK13-NEXT: store i64 [[TMP115]], ptr [[TMP124]], align 8
5261 // CHECK13-NEXT: [[TMP125:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 2
5262 // CHECK13-NEXT: store ptr null, ptr [[TMP125]], align 8
5263 // CHECK13-NEXT: [[TMP126:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 0
5264 // CHECK13-NEXT: [[TMP127:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS35]], i32 0, i32 0
5265 // CHECK13-NEXT: [[TMP128:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES37]], i32 0, i32 0
5266 // CHECK13-NEXT: [[TMP129:%.*]] = load i32, ptr [[N]], align 4
5267 // CHECK13-NEXT: store i32 [[TMP129]], ptr [[DOTCAPTURE_EXPR_39]], align 4
5268 // CHECK13-NEXT: [[TMP130:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_39]], align 4
5269 // CHECK13-NEXT: [[SUB41:%.*]] = sub nsw i32 [[TMP130]], 0
5270 // CHECK13-NEXT: [[DIV42:%.*]] = sdiv i32 [[SUB41]], 1
5271 // CHECK13-NEXT: [[SUB43:%.*]] = sub nsw i32 [[DIV42]], 1
5272 // CHECK13-NEXT: store i32 [[SUB43]], ptr [[DOTCAPTURE_EXPR_40]], align 4
5273 // CHECK13-NEXT: [[TMP131:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_40]], align 4
5274 // CHECK13-NEXT: [[ADD44:%.*]] = add nsw i32 [[TMP131]], 1
5275 // CHECK13-NEXT: [[TMP132:%.*]] = zext i32 [[ADD44]] to i64
5276 // CHECK13-NEXT: [[TMP133:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 0
5277 // CHECK13-NEXT: store i32 3, ptr [[TMP133]], align 4
5278 // CHECK13-NEXT: [[TMP134:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 1
5279 // CHECK13-NEXT: store i32 3, ptr [[TMP134]], align 4
5280 // CHECK13-NEXT: [[TMP135:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 2
5281 // CHECK13-NEXT: store ptr [[TMP126]], ptr [[TMP135]], align 8
5282 // CHECK13-NEXT: [[TMP136:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 3
5283 // CHECK13-NEXT: store ptr [[TMP127]], ptr [[TMP136]], align 8
5284 // CHECK13-NEXT: [[TMP137:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 4
5285 // CHECK13-NEXT: store ptr [[TMP128]], ptr [[TMP137]], align 8
5286 // CHECK13-NEXT: [[TMP138:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 5
5287 // CHECK13-NEXT: store ptr @.offload_maptypes.6, ptr [[TMP138]], align 8
5288 // CHECK13-NEXT: [[TMP139:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 6
5289 // CHECK13-NEXT: store ptr null, ptr [[TMP139]], align 8
5290 // CHECK13-NEXT: [[TMP140:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 7
5291 // CHECK13-NEXT: store ptr null, ptr [[TMP140]], align 8
5292 // CHECK13-NEXT: [[TMP141:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 8
5293 // CHECK13-NEXT: store i64 [[TMP132]], ptr [[TMP141]], align 8
5294 // CHECK13-NEXT: [[TMP142:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 9
5295 // CHECK13-NEXT: store i64 0, ptr [[TMP142]], align 8
5296 // CHECK13-NEXT: [[TMP143:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 10
5297 // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP143]], align 4
5298 // CHECK13-NEXT: [[TMP144:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 11
5299 // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP144]], align 4
5300 // CHECK13-NEXT: [[TMP145:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 12
5301 // CHECK13-NEXT: store i32 0, ptr [[TMP145]], align 4
5302 // CHECK13-NEXT: [[TMP146:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.region_id, ptr [[KERNEL_ARGS45]])
5303 // CHECK13-NEXT: [[TMP147:%.*]] = icmp ne i32 [[TMP146]], 0
5304 // CHECK13-NEXT: br i1 [[TMP147]], label [[OMP_OFFLOAD_FAILED46:%.*]], label [[OMP_OFFLOAD_CONT47:%.*]]
5305 // CHECK13: omp_offload.failed46:
5306 // CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169(i64 [[TMP114]], i64 [[TMP1]], ptr [[VLA]]) #[[ATTR3]]
5307 // CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT47]]
5308 // CHECK13: omp_offload.cont47:
5309 // CHECK13-NEXT: [[TMP148:%.*]] = load i32, ptr [[M]], align 4
5310 // CHECK13-NEXT: store i32 [[TMP148]], ptr [[M_CASTED48]], align 4
5311 // CHECK13-NEXT: [[TMP149:%.*]] = load i64, ptr [[M_CASTED48]], align 8
5312 // CHECK13-NEXT: [[TMP150:%.*]] = load i32, ptr [[N]], align 4
5313 // CHECK13-NEXT: store i32 [[TMP150]], ptr [[N_CASTED49]], align 4
5314 // CHECK13-NEXT: [[TMP151:%.*]] = load i64, ptr [[N_CASTED49]], align 8
5315 // CHECK13-NEXT: [[TMP152:%.*]] = mul nuw i64 [[TMP1]], 4
5316 // CHECK13-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[DOTOFFLOAD_SIZES53]], ptr align 8 @.offload_sizes.7, i64 32, i1 false)
5317 // CHECK13-NEXT: [[TMP153:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0
5318 // CHECK13-NEXT: store i64 [[TMP149]], ptr [[TMP153]], align 8
5319 // CHECK13-NEXT: [[TMP154:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS51]], i32 0, i32 0
5320 // CHECK13-NEXT: store i64 [[TMP149]], ptr [[TMP154]], align 8
5321 // CHECK13-NEXT: [[TMP155:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 0
5322 // CHECK13-NEXT: store ptr null, ptr [[TMP155]], align 8
5323 // CHECK13-NEXT: [[TMP156:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 1
5324 // CHECK13-NEXT: store i64 [[TMP151]], ptr [[TMP156]], align 8
5325 // CHECK13-NEXT: [[TMP157:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS51]], i32 0, i32 1
5326 // CHECK13-NEXT: store i64 [[TMP151]], ptr [[TMP157]], align 8
5327 // CHECK13-NEXT: [[TMP158:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 1
5328 // CHECK13-NEXT: store ptr null, ptr [[TMP158]], align 8
5329 // CHECK13-NEXT: [[TMP159:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 2
5330 // CHECK13-NEXT: store i64 [[TMP1]], ptr [[TMP159]], align 8
5331 // CHECK13-NEXT: [[TMP160:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS51]], i32 0, i32 2
5332 // CHECK13-NEXT: store i64 [[TMP1]], ptr [[TMP160]], align 8
5333 // CHECK13-NEXT: [[TMP161:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 2
5334 // CHECK13-NEXT: store ptr null, ptr [[TMP161]], align 8
5335 // CHECK13-NEXT: [[TMP162:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 3
5336 // CHECK13-NEXT: store ptr [[VLA]], ptr [[TMP162]], align 8
5337 // CHECK13-NEXT: [[TMP163:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS51]], i32 0, i32 3
5338 // CHECK13-NEXT: store ptr [[VLA]], ptr [[TMP163]], align 8
5339 // CHECK13-NEXT: [[TMP164:%.*]] = getelementptr inbounds [4 x i64], ptr [[DOTOFFLOAD_SIZES53]], i32 0, i32 3
5340 // CHECK13-NEXT: store i64 [[TMP152]], ptr [[TMP164]], align 8
5341 // CHECK13-NEXT: [[TMP165:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 3
5342 // CHECK13-NEXT: store ptr null, ptr [[TMP165]], align 8
5343 // CHECK13-NEXT: [[TMP166:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0
5344 // CHECK13-NEXT: [[TMP167:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS51]], i32 0, i32 0
5345 // CHECK13-NEXT: [[TMP168:%.*]] = getelementptr inbounds [4 x i64], ptr [[DOTOFFLOAD_SIZES53]], i32 0, i32 0
5346 // CHECK13-NEXT: [[TMP169:%.*]] = load i32, ptr [[N]], align 4
5347 // CHECK13-NEXT: store i32 [[TMP169]], ptr [[DOTCAPTURE_EXPR_55]], align 4
5348 // CHECK13-NEXT: [[TMP170:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_55]], align 4
5349 // CHECK13-NEXT: [[SUB57:%.*]] = sub nsw i32 [[TMP170]], 0
5350 // CHECK13-NEXT: [[DIV58:%.*]] = sdiv i32 [[SUB57]], 1
5351 // CHECK13-NEXT: [[SUB59:%.*]] = sub nsw i32 [[DIV58]], 1
5352 // CHECK13-NEXT: store i32 [[SUB59]], ptr [[DOTCAPTURE_EXPR_56]], align 4
5353 // CHECK13-NEXT: [[TMP171:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_56]], align 4
5354 // CHECK13-NEXT: [[ADD60:%.*]] = add nsw i32 [[TMP171]], 1
5355 // CHECK13-NEXT: [[TMP172:%.*]] = zext i32 [[ADD60]] to i64
5356 // CHECK13-NEXT: [[TMP173:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 0
5357 // CHECK13-NEXT: store i32 3, ptr [[TMP173]], align 4
5358 // CHECK13-NEXT: [[TMP174:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 1
5359 // CHECK13-NEXT: store i32 4, ptr [[TMP174]], align 4
5360 // CHECK13-NEXT: [[TMP175:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 2
5361 // CHECK13-NEXT: store ptr [[TMP166]], ptr [[TMP175]], align 8
5362 // CHECK13-NEXT: [[TMP176:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 3
5363 // CHECK13-NEXT: store ptr [[TMP167]], ptr [[TMP176]], align 8
5364 // CHECK13-NEXT: [[TMP177:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 4
5365 // CHECK13-NEXT: store ptr [[TMP168]], ptr [[TMP177]], align 8
5366 // CHECK13-NEXT: [[TMP178:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 5
5367 // CHECK13-NEXT: store ptr @.offload_maptypes.8, ptr [[TMP178]], align 8
5368 // CHECK13-NEXT: [[TMP179:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 6
5369 // CHECK13-NEXT: store ptr null, ptr [[TMP179]], align 8
5370 // CHECK13-NEXT: [[TMP180:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 7
5371 // CHECK13-NEXT: store ptr null, ptr [[TMP180]], align 8
5372 // CHECK13-NEXT: [[TMP181:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 8
5373 // CHECK13-NEXT: store i64 [[TMP172]], ptr [[TMP181]], align 8
5374 // CHECK13-NEXT: [[TMP182:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 9
5375 // CHECK13-NEXT: store i64 0, ptr [[TMP182]], align 8
5376 // CHECK13-NEXT: [[TMP183:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 10
5377 // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP183]], align 4
5378 // CHECK13-NEXT: [[TMP184:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 11
5379 // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP184]], align 4
5380 // CHECK13-NEXT: [[TMP185:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 12
5381 // CHECK13-NEXT: store i32 0, ptr [[TMP185]], align 4
5382 // CHECK13-NEXT: [[TMP186:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.region_id, ptr [[KERNEL_ARGS61]])
5383 // CHECK13-NEXT: [[TMP187:%.*]] = icmp ne i32 [[TMP186]], 0
5384 // CHECK13-NEXT: br i1 [[TMP187]], label [[OMP_OFFLOAD_FAILED62:%.*]], label [[OMP_OFFLOAD_CONT63:%.*]]
5385 // CHECK13: omp_offload.failed62:
5386 // CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174(i64 [[TMP149]], i64 [[TMP151]], i64 [[TMP1]], ptr [[VLA]]) #[[ATTR3]]
5387 // CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT63]]
5388 // CHECK13: omp_offload.cont63:
5389 // CHECK13-NEXT: [[TMP188:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4
5390 // CHECK13-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP188]])
5391 // CHECK13-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4
5392 // CHECK13-NEXT: [[TMP189:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8
5393 // CHECK13-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP189]])
5394 // CHECK13-NEXT: [[TMP190:%.*]] = load i32, ptr [[RETVAL]], align 4
5395 // CHECK13-NEXT: ret i32 [[TMP190]]
5398 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154
5399 // CHECK13-SAME: (i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2:[0-9]+]] {
5400 // CHECK13-NEXT: entry:
5401 // CHECK13-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
5402 // CHECK13-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
5403 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
5404 // CHECK13-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
5405 // CHECK13-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
5406 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
5407 // CHECK13-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
5408 // CHECK13-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8
5409 // CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.omp_outlined, ptr [[N_ADDR]], i64 [[TMP0]], ptr [[TMP1]])
5410 // CHECK13-NEXT: ret void
5413 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.omp_outlined
5414 // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
5415 // CHECK13-NEXT: entry:
5416 // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
5417 // CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
5418 // CHECK13-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 8
5419 // CHECK13-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
5420 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
5421 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
5422 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
5423 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
5424 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
5425 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
5426 // CHECK13-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
5427 // CHECK13-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
5428 // CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5429 // CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5430 // CHECK13-NEXT: [[I3:%.*]] = alloca i32, align 4
5431 // CHECK13-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
5432 // CHECK13-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
5433 // CHECK13-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 8
5434 // CHECK13-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
5435 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
5436 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 8
5437 // CHECK13-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
5438 // CHECK13-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 8
5439 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
5440 // CHECK13-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4
5441 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
5442 // CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
5443 // CHECK13-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
5444 // CHECK13-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
5445 // CHECK13-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
5446 // CHECK13-NEXT: store i32 0, ptr [[I]], align 4
5447 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
5448 // CHECK13-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
5449 // CHECK13-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
5450 // CHECK13: omp.precond.then:
5451 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
5452 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
5453 // CHECK13-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_COMB_UB]], align 4
5454 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
5455 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
5456 // CHECK13-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
5457 // CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
5458 // CHECK13-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
5459 // CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
5460 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
5461 // CHECK13-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
5462 // CHECK13-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5463 // CHECK13: cond.true:
5464 // CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
5465 // CHECK13-NEXT: br label [[COND_END:%.*]]
5466 // CHECK13: cond.false:
5467 // CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
5468 // CHECK13-NEXT: br label [[COND_END]]
5469 // CHECK13: cond.end:
5470 // CHECK13-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
5471 // CHECK13-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
5472 // CHECK13-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
5473 // CHECK13-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4
5474 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
5475 // CHECK13: omp.inner.for.cond:
5476 // CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]]
5477 // CHECK13-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP13]]
5478 // CHECK13-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
5479 // CHECK13-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5480 // CHECK13: omp.inner.for.body:
5481 // CHECK13-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP13]]
5482 // CHECK13-NEXT: [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
5483 // CHECK13-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP13]]
5484 // CHECK13-NEXT: [[TMP19:%.*]] = zext i32 [[TMP18]] to i64
5485 // CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 5, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.omp_outlined.omp_outlined, i64 [[TMP17]], i64 [[TMP19]], ptr [[TMP0]], i64 [[TMP1]], ptr [[TMP2]]), !llvm.access.group [[ACC_GRP13]]
5486 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
5487 // CHECK13: omp.inner.for.inc:
5488 // CHECK13-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]]
5489 // CHECK13-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP13]]
5490 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
5491 // CHECK13-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]]
5492 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]]
5493 // CHECK13: omp.inner.for.end:
5494 // CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
5495 // CHECK13: omp.loop.exit:
5496 // CHECK13-NEXT: [[TMP22:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
5497 // CHECK13-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4
5498 // CHECK13-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP23]])
5499 // CHECK13-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
5500 // CHECK13-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
5501 // CHECK13-NEXT: br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5502 // CHECK13: .omp.final.then:
5503 // CHECK13-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
5504 // CHECK13-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP26]], 0
5505 // CHECK13-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
5506 // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
5507 // CHECK13-NEXT: [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
5508 // CHECK13-NEXT: store i32 [[ADD8]], ptr [[I3]], align 4
5509 // CHECK13-NEXT: br label [[DOTOMP_FINAL_DONE]]
5510 // CHECK13: .omp.final.done:
5511 // CHECK13-NEXT: br label [[OMP_PRECOND_END]]
5512 // CHECK13: omp.precond.end:
5513 // CHECK13-NEXT: ret void
5516 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.omp_outlined.omp_outlined
5517 // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
5518 // CHECK13-NEXT: entry:
5519 // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
5520 // CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
5521 // CHECK13-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
5522 // CHECK13-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
5523 // CHECK13-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 8
5524 // CHECK13-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
5525 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
5526 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
5527 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
5528 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
5529 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
5530 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
5531 // CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
5532 // CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
5533 // CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5534 // CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5535 // CHECK13-NEXT: [[I4:%.*]] = alloca i32, align 4
5536 // CHECK13-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
5537 // CHECK13-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
5538 // CHECK13-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
5539 // CHECK13-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
5540 // CHECK13-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 8
5541 // CHECK13-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
5542 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
5543 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 8
5544 // CHECK13-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
5545 // CHECK13-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 8
5546 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
5547 // CHECK13-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4
5548 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
5549 // CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
5550 // CHECK13-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
5551 // CHECK13-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
5552 // CHECK13-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
5553 // CHECK13-NEXT: store i32 0, ptr [[I]], align 4
5554 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
5555 // CHECK13-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
5556 // CHECK13-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
5557 // CHECK13: omp.precond.then:
5558 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
5559 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
5560 // CHECK13-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4
5561 // CHECK13-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
5562 // CHECK13-NEXT: [[CONV:%.*]] = trunc i64 [[TMP7]] to i32
5563 // CHECK13-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
5564 // CHECK13-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP8]] to i32
5565 // CHECK13-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
5566 // CHECK13-NEXT: store i32 [[CONV3]], ptr [[DOTOMP_UB]], align 4
5567 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
5568 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
5569 // CHECK13-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
5570 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4
5571 // CHECK13-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP10]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
5572 // CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
5573 // CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
5574 // CHECK13-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP11]], [[TMP12]]
5575 // CHECK13-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5576 // CHECK13: cond.true:
5577 // CHECK13-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
5578 // CHECK13-NEXT: br label [[COND_END:%.*]]
5579 // CHECK13: cond.false:
5580 // CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
5581 // CHECK13-NEXT: br label [[COND_END]]
5582 // CHECK13: cond.end:
5583 // CHECK13-NEXT: [[COND:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ]
5584 // CHECK13-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
5585 // CHECK13-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
5586 // CHECK13-NEXT: store i32 [[TMP15]], ptr [[DOTOMP_IV]], align 4
5587 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
5588 // CHECK13: omp.inner.for.cond:
5589 // CHECK13-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP17:![0-9]+]]
5590 // CHECK13-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP17]]
5591 // CHECK13-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
5592 // CHECK13-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5593 // CHECK13: omp.inner.for.body:
5594 // CHECK13-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP17]]
5595 // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
5596 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
5597 // CHECK13-NEXT: store i32 [[ADD]], ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP17]]
5598 // CHECK13-NEXT: [[TMP19:%.*]] = load i32, ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP17]]
5599 // CHECK13-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP19]] to i64
5600 // CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i64 [[IDXPROM]]
5601 // CHECK13-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP17]]
5602 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
5603 // CHECK13: omp.body.continue:
5604 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
5605 // CHECK13: omp.inner.for.inc:
5606 // CHECK13-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP17]]
5607 // CHECK13-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP20]], 1
5608 // CHECK13-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP17]]
5609 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP18:![0-9]+]]
5610 // CHECK13: omp.inner.for.end:
5611 // CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
5612 // CHECK13: omp.loop.exit:
5613 // CHECK13-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
5614 // CHECK13-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
5615 // CHECK13-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP22]])
5616 // CHECK13-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
5617 // CHECK13-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
5618 // CHECK13-NEXT: br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5619 // CHECK13: .omp.final.then:
5620 // CHECK13-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
5621 // CHECK13-NEXT: [[SUB8:%.*]] = sub nsw i32 [[TMP25]], 0
5622 // CHECK13-NEXT: [[DIV9:%.*]] = sdiv i32 [[SUB8]], 1
5623 // CHECK13-NEXT: [[MUL10:%.*]] = mul nsw i32 [[DIV9]], 1
5624 // CHECK13-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]]
5625 // CHECK13-NEXT: store i32 [[ADD11]], ptr [[I4]], align 4
5626 // CHECK13-NEXT: br label [[DOTOMP_FINAL_DONE]]
5627 // CHECK13: .omp.final.done:
5628 // CHECK13-NEXT: br label [[OMP_PRECOND_END]]
5629 // CHECK13: omp.precond.end:
5630 // CHECK13-NEXT: ret void
5633 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159
5634 // CHECK13-SAME: (i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
5635 // CHECK13-NEXT: entry:
5636 // CHECK13-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
5637 // CHECK13-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
5638 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
5639 // CHECK13-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
5640 // CHECK13-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
5641 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
5642 // CHECK13-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
5643 // CHECK13-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8
5644 // CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.omp_outlined, ptr [[N_ADDR]], i64 [[TMP0]], ptr [[TMP1]])
5645 // CHECK13-NEXT: ret void
5648 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.omp_outlined
5649 // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
5650 // CHECK13-NEXT: entry:
5651 // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
5652 // CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
5653 // CHECK13-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 8
5654 // CHECK13-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
5655 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
5656 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
5657 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
5658 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
5659 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
5660 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
5661 // CHECK13-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
5662 // CHECK13-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
5663 // CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5664 // CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5665 // CHECK13-NEXT: [[I3:%.*]] = alloca i32, align 4
5666 // CHECK13-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
5667 // CHECK13-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
5668 // CHECK13-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 8
5669 // CHECK13-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
5670 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
5671 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 8
5672 // CHECK13-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
5673 // CHECK13-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 8
5674 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
5675 // CHECK13-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4
5676 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
5677 // CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
5678 // CHECK13-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
5679 // CHECK13-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
5680 // CHECK13-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
5681 // CHECK13-NEXT: store i32 0, ptr [[I]], align 4
5682 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
5683 // CHECK13-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
5684 // CHECK13-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
5685 // CHECK13: omp.precond.then:
5686 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
5687 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
5688 // CHECK13-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_COMB_UB]], align 4
5689 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
5690 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
5691 // CHECK13-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
5692 // CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
5693 // CHECK13-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
5694 // CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
5695 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
5696 // CHECK13-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
5697 // CHECK13-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5698 // CHECK13: cond.true:
5699 // CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
5700 // CHECK13-NEXT: br label [[COND_END:%.*]]
5701 // CHECK13: cond.false:
5702 // CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
5703 // CHECK13-NEXT: br label [[COND_END]]
5704 // CHECK13: cond.end:
5705 // CHECK13-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
5706 // CHECK13-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
5707 // CHECK13-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
5708 // CHECK13-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4
5709 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
5710 // CHECK13: omp.inner.for.cond:
5711 // CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22:![0-9]+]]
5712 // CHECK13-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP22]]
5713 // CHECK13-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
5714 // CHECK13-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5715 // CHECK13: omp.inner.for.body:
5716 // CHECK13-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP22]]
5717 // CHECK13-NEXT: [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
5718 // CHECK13-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP22]]
5719 // CHECK13-NEXT: [[TMP19:%.*]] = zext i32 [[TMP18]] to i64
5720 // CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 5, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.omp_outlined.omp_outlined, i64 [[TMP17]], i64 [[TMP19]], ptr [[TMP0]], i64 [[TMP1]], ptr [[TMP2]]), !llvm.access.group [[ACC_GRP22]]
5721 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
5722 // CHECK13: omp.inner.for.inc:
5723 // CHECK13-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]]
5724 // CHECK13-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP22]]
5725 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
5726 // CHECK13-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]]
5727 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]]
5728 // CHECK13: omp.inner.for.end:
5729 // CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
5730 // CHECK13: omp.loop.exit:
5731 // CHECK13-NEXT: [[TMP22:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
5732 // CHECK13-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4
5733 // CHECK13-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP23]])
5734 // CHECK13-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
5735 // CHECK13-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
5736 // CHECK13-NEXT: br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5737 // CHECK13: .omp.final.then:
5738 // CHECK13-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
5739 // CHECK13-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP26]], 0
5740 // CHECK13-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
5741 // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
5742 // CHECK13-NEXT: [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
5743 // CHECK13-NEXT: store i32 [[ADD8]], ptr [[I3]], align 4
5744 // CHECK13-NEXT: br label [[DOTOMP_FINAL_DONE]]
5745 // CHECK13: .omp.final.done:
5746 // CHECK13-NEXT: br label [[OMP_PRECOND_END]]
5747 // CHECK13: omp.precond.end:
5748 // CHECK13-NEXT: ret void
5751 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.omp_outlined.omp_outlined
5752 // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
5753 // CHECK13-NEXT: entry:
5754 // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
5755 // CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
5756 // CHECK13-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
5757 // CHECK13-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
5758 // CHECK13-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 8
5759 // CHECK13-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
5760 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
5761 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
5762 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
5763 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
5764 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
5765 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
5766 // CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
5767 // CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
5768 // CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5769 // CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5770 // CHECK13-NEXT: [[I4:%.*]] = alloca i32, align 4
5771 // CHECK13-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
5772 // CHECK13-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
5773 // CHECK13-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
5774 // CHECK13-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
5775 // CHECK13-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 8
5776 // CHECK13-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
5777 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
5778 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 8
5779 // CHECK13-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
5780 // CHECK13-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 8
5781 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
5782 // CHECK13-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4
5783 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
5784 // CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
5785 // CHECK13-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
5786 // CHECK13-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
5787 // CHECK13-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
5788 // CHECK13-NEXT: store i32 0, ptr [[I]], align 4
5789 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
5790 // CHECK13-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
5791 // CHECK13-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
5792 // CHECK13: omp.precond.then:
5793 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
5794 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
5795 // CHECK13-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4
5796 // CHECK13-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
5797 // CHECK13-NEXT: [[CONV:%.*]] = trunc i64 [[TMP7]] to i32
5798 // CHECK13-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
5799 // CHECK13-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP8]] to i32
5800 // CHECK13-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
5801 // CHECK13-NEXT: store i32 [[CONV3]], ptr [[DOTOMP_UB]], align 4
5802 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
5803 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
5804 // CHECK13-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
5805 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4
5806 // CHECK13-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP10]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
5807 // CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
5808 // CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
5809 // CHECK13-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP11]], [[TMP12]]
5810 // CHECK13-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5811 // CHECK13: cond.true:
5812 // CHECK13-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
5813 // CHECK13-NEXT: br label [[COND_END:%.*]]
5814 // CHECK13: cond.false:
5815 // CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
5816 // CHECK13-NEXT: br label [[COND_END]]
5817 // CHECK13: cond.end:
5818 // CHECK13-NEXT: [[COND:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ]
5819 // CHECK13-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
5820 // CHECK13-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
5821 // CHECK13-NEXT: store i32 [[TMP15]], ptr [[DOTOMP_IV]], align 4
5822 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
5823 // CHECK13: omp.inner.for.cond:
5824 // CHECK13-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25:![0-9]+]]
5825 // CHECK13-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP25]]
5826 // CHECK13-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
5827 // CHECK13-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5828 // CHECK13: omp.inner.for.body:
5829 // CHECK13-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]]
5830 // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
5831 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
5832 // CHECK13-NEXT: store i32 [[ADD]], ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP25]]
5833 // CHECK13-NEXT: [[TMP19:%.*]] = load i32, ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP25]]
5834 // CHECK13-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP19]] to i64
5835 // CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i64 [[IDXPROM]]
5836 // CHECK13-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP25]]
5837 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
5838 // CHECK13: omp.body.continue:
5839 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
5840 // CHECK13: omp.inner.for.inc:
5841 // CHECK13-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]]
5842 // CHECK13-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP20]], 1
5843 // CHECK13-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]]
5844 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]]
5845 // CHECK13: omp.inner.for.end:
5846 // CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
5847 // CHECK13: omp.loop.exit:
5848 // CHECK13-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
5849 // CHECK13-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
5850 // CHECK13-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP22]])
5851 // CHECK13-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
5852 // CHECK13-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
5853 // CHECK13-NEXT: br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5854 // CHECK13: .omp.final.then:
5855 // CHECK13-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
5856 // CHECK13-NEXT: [[SUB8:%.*]] = sub nsw i32 [[TMP25]], 0
5857 // CHECK13-NEXT: [[DIV9:%.*]] = sdiv i32 [[SUB8]], 1
5858 // CHECK13-NEXT: [[MUL10:%.*]] = mul nsw i32 [[DIV9]], 1
5859 // CHECK13-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]]
5860 // CHECK13-NEXT: store i32 [[ADD11]], ptr [[I4]], align 4
5861 // CHECK13-NEXT: br label [[DOTOMP_FINAL_DONE]]
5862 // CHECK13: .omp.final.done:
5863 // CHECK13-NEXT: br label [[OMP_PRECOND_END]]
5864 // CHECK13: omp.precond.end:
5865 // CHECK13-NEXT: ret void
5868 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164
5869 // CHECK13-SAME: (i64 noundef [[M:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
5870 // CHECK13-NEXT: entry:
5871 // CHECK13-NEXT: [[M_ADDR:%.*]] = alloca i64, align 8
5872 // CHECK13-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
5873 // CHECK13-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
5874 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
5875 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
5876 // CHECK13-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
5877 // CHECK13-NEXT: store i64 [[M]], ptr [[M_ADDR]], align 8
5878 // CHECK13-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
5879 // CHECK13-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
5880 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
5881 // CHECK13-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
5882 // CHECK13-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8
5883 // CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[M_ADDR]], align 4
5884 // CHECK13-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 4
5885 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
5886 // CHECK13-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
5887 // CHECK13-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8
5888 // CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.omp_outlined, ptr [[N_ADDR]], i64 [[TMP0]], ptr [[TMP1]], i64 [[TMP4]])
5889 // CHECK13-NEXT: ret void
5892 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.omp_outlined
5893 // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
5894 // CHECK13-NEXT: entry:
5895 // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
5896 // CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
5897 // CHECK13-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 8
5898 // CHECK13-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
5899 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
5900 // CHECK13-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
5901 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
5902 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
5903 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
5904 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
5905 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
5906 // CHECK13-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
5907 // CHECK13-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
5908 // CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5909 // CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5910 // CHECK13-NEXT: [[I4:%.*]] = alloca i32, align 4
5911 // CHECK13-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
5912 // CHECK13-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
5913 // CHECK13-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
5914 // CHECK13-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 8
5915 // CHECK13-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
5916 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
5917 // CHECK13-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
5918 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 8
5919 // CHECK13-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
5920 // CHECK13-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 8
5921 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
5922 // CHECK13-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_1]], align 4
5923 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
5924 // CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
5925 // CHECK13-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
5926 // CHECK13-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
5927 // CHECK13-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_2]], align 4
5928 // CHECK13-NEXT: store i32 0, ptr [[I]], align 4
5929 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
5930 // CHECK13-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
5931 // CHECK13-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
5932 // CHECK13: omp.precond.then:
5933 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
5934 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
5935 // CHECK13-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_COMB_UB]], align 4
5936 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
5937 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
5938 // CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
5939 // CHECK13-NEXT: [[TMP8:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
5940 // CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4
5941 // CHECK13-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP9]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[TMP7]])
5942 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
5943 // CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
5944 // CHECK13-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
5945 // CHECK13-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5946 // CHECK13: cond.true:
5947 // CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
5948 // CHECK13-NEXT: br label [[COND_END:%.*]]
5949 // CHECK13: cond.false:
5950 // CHECK13-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
5951 // CHECK13-NEXT: br label [[COND_END]]
5952 // CHECK13: cond.end:
5953 // CHECK13-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
5954 // CHECK13-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
5955 // CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
5956 // CHECK13-NEXT: store i32 [[TMP14]], ptr [[DOTOMP_IV]], align 4
5957 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
5958 // CHECK13: omp.inner.for.cond:
5959 // CHECK13-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28:![0-9]+]]
5960 // CHECK13-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP28]]
5961 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP16]], 1
5962 // CHECK13-NEXT: [[CMP6:%.*]] = icmp slt i32 [[TMP15]], [[ADD]]
5963 // CHECK13-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5964 // CHECK13: omp.inner.for.body:
5965 // CHECK13-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP28]]
5966 // CHECK13-NEXT: [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
5967 // CHECK13-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP28]]
5968 // CHECK13-NEXT: [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
5969 // CHECK13-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group [[ACC_GRP28]]
5970 // CHECK13-NEXT: store i32 [[TMP21]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group [[ACC_GRP28]]
5971 // CHECK13-NEXT: [[TMP22:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group [[ACC_GRP28]]
5972 // CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 6, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.omp_outlined.omp_outlined, i64 [[TMP18]], i64 [[TMP20]], ptr [[TMP0]], i64 [[TMP1]], ptr [[TMP2]], i64 [[TMP22]]), !llvm.access.group [[ACC_GRP28]]
5973 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
5974 // CHECK13: omp.inner.for.inc:
5975 // CHECK13-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
5976 // CHECK13-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP28]]
5977 // CHECK13-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
5978 // CHECK13-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
5979 // CHECK13-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP28]]
5980 // CHECK13-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP28]]
5981 // CHECK13-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
5982 // CHECK13-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP28]]
5983 // CHECK13-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP28]]
5984 // CHECK13-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP28]]
5985 // CHECK13-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP27]], [[TMP28]]
5986 // CHECK13-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP28]]
5987 // CHECK13-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP28]]
5988 // CHECK13-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP28]]
5989 // CHECK13-NEXT: [[CMP10:%.*]] = icmp sgt i32 [[TMP29]], [[TMP30]]
5990 // CHECK13-NEXT: br i1 [[CMP10]], label [[COND_TRUE11:%.*]], label [[COND_FALSE12:%.*]]
5991 // CHECK13: cond.true11:
5992 // CHECK13-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP28]]
5993 // CHECK13-NEXT: br label [[COND_END13:%.*]]
5994 // CHECK13: cond.false12:
5995 // CHECK13-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP28]]
5996 // CHECK13-NEXT: br label [[COND_END13]]
5997 // CHECK13: cond.end13:
5998 // CHECK13-NEXT: [[COND14:%.*]] = phi i32 [ [[TMP31]], [[COND_TRUE11]] ], [ [[TMP32]], [[COND_FALSE12]] ]
5999 // CHECK13-NEXT: store i32 [[COND14]], ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP28]]
6000 // CHECK13-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP28]]
6001 // CHECK13-NEXT: store i32 [[TMP33]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
6002 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]]
6003 // CHECK13: omp.inner.for.end:
6004 // CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
6005 // CHECK13: omp.loop.exit:
6006 // CHECK13-NEXT: [[TMP34:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
6007 // CHECK13-NEXT: [[TMP35:%.*]] = load i32, ptr [[TMP34]], align 4
6008 // CHECK13-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP35]])
6009 // CHECK13-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
6010 // CHECK13-NEXT: [[TMP37:%.*]] = icmp ne i32 [[TMP36]], 0
6011 // CHECK13-NEXT: br i1 [[TMP37]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6012 // CHECK13: .omp.final.then:
6013 // CHECK13-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
6014 // CHECK13-NEXT: [[SUB15:%.*]] = sub nsw i32 [[TMP38]], 0
6015 // CHECK13-NEXT: [[DIV16:%.*]] = sdiv i32 [[SUB15]], 1
6016 // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV16]], 1
6017 // CHECK13-NEXT: [[ADD17:%.*]] = add nsw i32 0, [[MUL]]
6018 // CHECK13-NEXT: store i32 [[ADD17]], ptr [[I4]], align 4
6019 // CHECK13-NEXT: br label [[DOTOMP_FINAL_DONE]]
6020 // CHECK13: .omp.final.done:
6021 // CHECK13-NEXT: br label [[OMP_PRECOND_END]]
6022 // CHECK13: omp.precond.end:
6023 // CHECK13-NEXT: ret void
6026 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.omp_outlined.omp_outlined
6027 // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
6028 // CHECK13-NEXT: entry:
6029 // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
6030 // CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
6031 // CHECK13-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
6032 // CHECK13-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
6033 // CHECK13-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 8
6034 // CHECK13-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
6035 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
6036 // CHECK13-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
6037 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
6038 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
6039 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
6040 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
6041 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
6042 // CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
6043 // CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
6044 // CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6045 // CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6046 // CHECK13-NEXT: [[I5:%.*]] = alloca i32, align 4
6047 // CHECK13-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
6048 // CHECK13-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
6049 // CHECK13-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
6050 // CHECK13-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
6051 // CHECK13-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 8
6052 // CHECK13-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
6053 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
6054 // CHECK13-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
6055 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 8
6056 // CHECK13-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
6057 // CHECK13-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 8
6058 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
6059 // CHECK13-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_1]], align 4
6060 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
6061 // CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
6062 // CHECK13-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
6063 // CHECK13-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
6064 // CHECK13-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_2]], align 4
6065 // CHECK13-NEXT: store i32 0, ptr [[I]], align 4
6066 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
6067 // CHECK13-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
6068 // CHECK13-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
6069 // CHECK13: omp.precond.then:
6070 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
6071 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
6072 // CHECK13-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4
6073 // CHECK13-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
6074 // CHECK13-NEXT: [[CONV:%.*]] = trunc i64 [[TMP7]] to i32
6075 // CHECK13-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
6076 // CHECK13-NEXT: [[CONV4:%.*]] = trunc i64 [[TMP8]] to i32
6077 // CHECK13-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
6078 // CHECK13-NEXT: store i32 [[CONV4]], ptr [[DOTOMP_UB]], align 4
6079 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
6080 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
6081 // CHECK13-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
6082 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4
6083 // CHECK13-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP10]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
6084 // CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
6085 // CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
6086 // CHECK13-NEXT: [[CMP6:%.*]] = icmp sgt i32 [[TMP11]], [[TMP12]]
6087 // CHECK13-NEXT: br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
6088 // CHECK13: cond.true:
6089 // CHECK13-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
6090 // CHECK13-NEXT: br label [[COND_END:%.*]]
6091 // CHECK13: cond.false:
6092 // CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
6093 // CHECK13-NEXT: br label [[COND_END]]
6094 // CHECK13: cond.end:
6095 // CHECK13-NEXT: [[COND:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ]
6096 // CHECK13-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
6097 // CHECK13-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
6098 // CHECK13-NEXT: store i32 [[TMP15]], ptr [[DOTOMP_IV]], align 4
6099 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
6100 // CHECK13: omp.inner.for.cond:
6101 // CHECK13-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31:![0-9]+]]
6102 // CHECK13-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP31]]
6103 // CHECK13-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
6104 // CHECK13-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6105 // CHECK13: omp.inner.for.body:
6106 // CHECK13-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]]
6107 // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
6108 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
6109 // CHECK13-NEXT: store i32 [[ADD]], ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP31]]
6110 // CHECK13-NEXT: [[TMP19:%.*]] = load i32, ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP31]]
6111 // CHECK13-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP19]] to i64
6112 // CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i64 [[IDXPROM]]
6113 // CHECK13-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP31]]
6114 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
6115 // CHECK13: omp.body.continue:
6116 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
6117 // CHECK13: omp.inner.for.inc:
6118 // CHECK13-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]]
6119 // CHECK13-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP20]], 1
6120 // CHECK13-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]]
6121 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP32:![0-9]+]]
6122 // CHECK13: omp.inner.for.end:
6123 // CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
6124 // CHECK13: omp.loop.exit:
6125 // CHECK13-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
6126 // CHECK13-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
6127 // CHECK13-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP22]])
6128 // CHECK13-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
6129 // CHECK13-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
6130 // CHECK13-NEXT: br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6131 // CHECK13: .omp.final.then:
6132 // CHECK13-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
6133 // CHECK13-NEXT: [[SUB9:%.*]] = sub nsw i32 [[TMP25]], 0
6134 // CHECK13-NEXT: [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1
6135 // CHECK13-NEXT: [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1
6136 // CHECK13-NEXT: [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
6137 // CHECK13-NEXT: store i32 [[ADD12]], ptr [[I5]], align 4
6138 // CHECK13-NEXT: br label [[DOTOMP_FINAL_DONE]]
6139 // CHECK13: .omp.final.done:
6140 // CHECK13-NEXT: br label [[OMP_PRECOND_END]]
6141 // CHECK13: omp.precond.end:
6142 // CHECK13-NEXT: ret void
6145 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169
6146 // CHECK13-SAME: (i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
6147 // CHECK13-NEXT: entry:
6148 // CHECK13-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
6149 // CHECK13-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
6150 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
6151 // CHECK13-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
6152 // CHECK13-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
6153 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
6154 // CHECK13-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
6155 // CHECK13-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8
6156 // CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.omp_outlined, ptr [[N_ADDR]], i64 [[TMP0]], ptr [[TMP1]])
6157 // CHECK13-NEXT: ret void
6160 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.omp_outlined
6161 // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
6162 // CHECK13-NEXT: entry:
6163 // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
6164 // CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
6165 // CHECK13-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 8
6166 // CHECK13-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
6167 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
6168 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
6169 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
6170 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
6171 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
6172 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
6173 // CHECK13-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
6174 // CHECK13-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
6175 // CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6176 // CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6177 // CHECK13-NEXT: [[I3:%.*]] = alloca i32, align 4
6178 // CHECK13-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
6179 // CHECK13-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
6180 // CHECK13-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 8
6181 // CHECK13-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
6182 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
6183 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 8
6184 // CHECK13-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
6185 // CHECK13-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 8
6186 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
6187 // CHECK13-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4
6188 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
6189 // CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
6190 // CHECK13-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
6191 // CHECK13-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
6192 // CHECK13-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
6193 // CHECK13-NEXT: store i32 0, ptr [[I]], align 4
6194 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
6195 // CHECK13-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
6196 // CHECK13-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
6197 // CHECK13: omp.precond.then:
6198 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
6199 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
6200 // CHECK13-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_COMB_UB]], align 4
6201 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
6202 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
6203 // CHECK13-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
6204 // CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
6205 // CHECK13-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
6206 // CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
6207 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
6208 // CHECK13-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
6209 // CHECK13-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
6210 // CHECK13: cond.true:
6211 // CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
6212 // CHECK13-NEXT: br label [[COND_END:%.*]]
6213 // CHECK13: cond.false:
6214 // CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
6215 // CHECK13-NEXT: br label [[COND_END]]
6216 // CHECK13: cond.end:
6217 // CHECK13-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
6218 // CHECK13-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
6219 // CHECK13-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
6220 // CHECK13-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4
6221 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
6222 // CHECK13: omp.inner.for.cond:
6223 // CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34:![0-9]+]]
6224 // CHECK13-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP34]]
6225 // CHECK13-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
6226 // CHECK13-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6227 // CHECK13: omp.inner.for.body:
6228 // CHECK13-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP34]]
6229 // CHECK13-NEXT: [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
6230 // CHECK13-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP34]]
6231 // CHECK13-NEXT: [[TMP19:%.*]] = zext i32 [[TMP18]] to i64
6232 // CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 5, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.omp_outlined.omp_outlined, i64 [[TMP17]], i64 [[TMP19]], ptr [[TMP0]], i64 [[TMP1]], ptr [[TMP2]]), !llvm.access.group [[ACC_GRP34]]
6233 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
6234 // CHECK13: omp.inner.for.inc:
6235 // CHECK13-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]]
6236 // CHECK13-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP34]]
6237 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
6238 // CHECK13-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]]
6239 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP35:![0-9]+]]
6240 // CHECK13: omp.inner.for.end:
6241 // CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
6242 // CHECK13: omp.loop.exit:
6243 // CHECK13-NEXT: [[TMP22:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
6244 // CHECK13-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4
6245 // CHECK13-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP23]])
6246 // CHECK13-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
6247 // CHECK13-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
6248 // CHECK13-NEXT: br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6249 // CHECK13: .omp.final.then:
6250 // CHECK13-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
6251 // CHECK13-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP26]], 0
6252 // CHECK13-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
6253 // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
6254 // CHECK13-NEXT: [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
6255 // CHECK13-NEXT: store i32 [[ADD8]], ptr [[I3]], align 4
6256 // CHECK13-NEXT: br label [[DOTOMP_FINAL_DONE]]
6257 // CHECK13: .omp.final.done:
6258 // CHECK13-NEXT: br label [[OMP_PRECOND_END]]
6259 // CHECK13: omp.precond.end:
6260 // CHECK13-NEXT: ret void
6263 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.omp_outlined.omp_outlined
6264 // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
6265 // CHECK13-NEXT: entry:
6266 // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
6267 // CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
6268 // CHECK13-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
6269 // CHECK13-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
6270 // CHECK13-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 8
6271 // CHECK13-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
6272 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
6273 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
6274 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
6275 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
6276 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
6277 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
6278 // CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
6279 // CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
6280 // CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6281 // CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6282 // CHECK13-NEXT: [[I4:%.*]] = alloca i32, align 4
6283 // CHECK13-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
6284 // CHECK13-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
6285 // CHECK13-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
6286 // CHECK13-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
6287 // CHECK13-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 8
6288 // CHECK13-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
6289 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
6290 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 8
6291 // CHECK13-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
6292 // CHECK13-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 8
6293 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
6294 // CHECK13-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4
6295 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
6296 // CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
6297 // CHECK13-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
6298 // CHECK13-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
6299 // CHECK13-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
6300 // CHECK13-NEXT: store i32 0, ptr [[I]], align 4
6301 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
6302 // CHECK13-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
6303 // CHECK13-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
6304 // CHECK13: omp.precond.then:
6305 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
6306 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
6307 // CHECK13-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4
6308 // CHECK13-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
6309 // CHECK13-NEXT: [[CONV:%.*]] = trunc i64 [[TMP7]] to i32
6310 // CHECK13-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
6311 // CHECK13-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP8]] to i32
6312 // CHECK13-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
6313 // CHECK13-NEXT: store i32 [[CONV3]], ptr [[DOTOMP_UB]], align 4
6314 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
6315 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
6316 // CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
6317 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
6318 // CHECK13-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
6319 // CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP11]], align 4
6320 // CHECK13-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB3]], i32 [[TMP12]], i32 35, i32 [[TMP9]], i32 [[TMP10]], i32 1, i32 1)
6321 // CHECK13-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
6322 // CHECK13: omp.dispatch.cond:
6323 // CHECK13-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
6324 // CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 4
6325 // CHECK13-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB3]], i32 [[TMP14]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
6326 // CHECK13-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP15]], 0
6327 // CHECK13-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
6328 // CHECK13: omp.dispatch.body:
6329 // CHECK13-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
6330 // CHECK13-NEXT: store i32 [[TMP16]], ptr [[DOTOMP_IV]], align 4
6331 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
6332 // CHECK13: omp.inner.for.cond:
6333 // CHECK13-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP37:![0-9]+]]
6334 // CHECK13-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP37]]
6335 // CHECK13-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
6336 // CHECK13-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6337 // CHECK13: omp.inner.for.body:
6338 // CHECK13-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP37]]
6339 // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
6340 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
6341 // CHECK13-NEXT: store i32 [[ADD]], ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP37]]
6342 // CHECK13-NEXT: [[TMP20:%.*]] = load i32, ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP37]]
6343 // CHECK13-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP20]] to i64
6344 // CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i64 [[IDXPROM]]
6345 // CHECK13-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP37]]
6346 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
6347 // CHECK13: omp.body.continue:
6348 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
6349 // CHECK13: omp.inner.for.inc:
6350 // CHECK13-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP37]]
6351 // CHECK13-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP21]], 1
6352 // CHECK13-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP37]]
6353 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP38:![0-9]+]]
6354 // CHECK13: omp.inner.for.end:
6355 // CHECK13-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
6356 // CHECK13: omp.dispatch.inc:
6357 // CHECK13-NEXT: br label [[OMP_DISPATCH_COND]]
6358 // CHECK13: omp.dispatch.end:
6359 // CHECK13-NEXT: [[TMP22:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
6360 // CHECK13-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4
6361 // CHECK13-NEXT: call void @__kmpc_dispatch_deinit(ptr @[[GLOB3]], i32 [[TMP23]])
6362 // CHECK13-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
6363 // CHECK13-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
6364 // CHECK13-NEXT: br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6365 // CHECK13: .omp.final.then:
6366 // CHECK13-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
6367 // CHECK13-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP26]], 0
6368 // CHECK13-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
6369 // CHECK13-NEXT: [[MUL9:%.*]] = mul nsw i32 [[DIV8]], 1
6370 // CHECK13-NEXT: [[ADD10:%.*]] = add nsw i32 0, [[MUL9]]
6371 // CHECK13-NEXT: store i32 [[ADD10]], ptr [[I4]], align 4
6372 // CHECK13-NEXT: br label [[DOTOMP_FINAL_DONE]]
6373 // CHECK13: .omp.final.done:
6374 // CHECK13-NEXT: br label [[OMP_PRECOND_END]]
6375 // CHECK13: omp.precond.end:
6376 // CHECK13-NEXT: ret void
6379 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174
6380 // CHECK13-SAME: (i64 noundef [[M:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
6381 // CHECK13-NEXT: entry:
6382 // CHECK13-NEXT: [[M_ADDR:%.*]] = alloca i64, align 8
6383 // CHECK13-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
6384 // CHECK13-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
6385 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
6386 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
6387 // CHECK13-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
6388 // CHECK13-NEXT: store i64 [[M]], ptr [[M_ADDR]], align 8
6389 // CHECK13-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
6390 // CHECK13-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
6391 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
6392 // CHECK13-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
6393 // CHECK13-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8
6394 // CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[M_ADDR]], align 4
6395 // CHECK13-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 4
6396 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
6397 // CHECK13-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
6398 // CHECK13-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8
6399 // CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.omp_outlined, ptr [[N_ADDR]], i64 [[TMP0]], ptr [[TMP1]], i64 [[TMP4]])
6400 // CHECK13-NEXT: ret void
6403 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.omp_outlined
6404 // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
6405 // CHECK13-NEXT: entry:
6406 // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
6407 // CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
6408 // CHECK13-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 8
6409 // CHECK13-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
6410 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
6411 // CHECK13-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
6412 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
6413 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
6414 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
6415 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
6416 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
6417 // CHECK13-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
6418 // CHECK13-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
6419 // CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6420 // CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6421 // CHECK13-NEXT: [[I4:%.*]] = alloca i32, align 4
6422 // CHECK13-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
6423 // CHECK13-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
6424 // CHECK13-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
6425 // CHECK13-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 8
6426 // CHECK13-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
6427 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
6428 // CHECK13-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
6429 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 8
6430 // CHECK13-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
6431 // CHECK13-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 8
6432 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
6433 // CHECK13-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_1]], align 4
6434 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
6435 // CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
6436 // CHECK13-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
6437 // CHECK13-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
6438 // CHECK13-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_2]], align 4
6439 // CHECK13-NEXT: store i32 0, ptr [[I]], align 4
6440 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
6441 // CHECK13-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
6442 // CHECK13-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
6443 // CHECK13: omp.precond.then:
6444 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
6445 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
6446 // CHECK13-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_COMB_UB]], align 4
6447 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
6448 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
6449 // CHECK13-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
6450 // CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
6451 // CHECK13-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
6452 // CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
6453 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
6454 // CHECK13-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
6455 // CHECK13-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
6456 // CHECK13: cond.true:
6457 // CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
6458 // CHECK13-NEXT: br label [[COND_END:%.*]]
6459 // CHECK13: cond.false:
6460 // CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
6461 // CHECK13-NEXT: br label [[COND_END]]
6462 // CHECK13: cond.end:
6463 // CHECK13-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
6464 // CHECK13-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
6465 // CHECK13-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
6466 // CHECK13-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4
6467 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
6468 // CHECK13: omp.inner.for.cond:
6469 // CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP40:![0-9]+]]
6470 // CHECK13-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP40]]
6471 // CHECK13-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
6472 // CHECK13-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6473 // CHECK13: omp.inner.for.body:
6474 // CHECK13-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP40]]
6475 // CHECK13-NEXT: [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
6476 // CHECK13-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP40]]
6477 // CHECK13-NEXT: [[TMP19:%.*]] = zext i32 [[TMP18]] to i64
6478 // CHECK13-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group [[ACC_GRP40]]
6479 // CHECK13-NEXT: store i32 [[TMP20]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group [[ACC_GRP40]]
6480 // CHECK13-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group [[ACC_GRP40]]
6481 // CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 6, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.omp_outlined.omp_outlined, i64 [[TMP17]], i64 [[TMP19]], ptr [[TMP0]], i64 [[TMP1]], ptr [[TMP2]], i64 [[TMP21]]), !llvm.access.group [[ACC_GRP40]]
6482 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
6483 // CHECK13: omp.inner.for.inc:
6484 // CHECK13-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP40]]
6485 // CHECK13-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP40]]
6486 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
6487 // CHECK13-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP40]]
6488 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP41:![0-9]+]]
6489 // CHECK13: omp.inner.for.end:
6490 // CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
6491 // CHECK13: omp.loop.exit:
6492 // CHECK13-NEXT: [[TMP24:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
6493 // CHECK13-NEXT: [[TMP25:%.*]] = load i32, ptr [[TMP24]], align 4
6494 // CHECK13-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP25]])
6495 // CHECK13-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
6496 // CHECK13-NEXT: [[TMP27:%.*]] = icmp ne i32 [[TMP26]], 0
6497 // CHECK13-NEXT: br i1 [[TMP27]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6498 // CHECK13: .omp.final.then:
6499 // CHECK13-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
6500 // CHECK13-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP28]], 0
6501 // CHECK13-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
6502 // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
6503 // CHECK13-NEXT: [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
6504 // CHECK13-NEXT: store i32 [[ADD9]], ptr [[I4]], align 4
6505 // CHECK13-NEXT: br label [[DOTOMP_FINAL_DONE]]
6506 // CHECK13: .omp.final.done:
6507 // CHECK13-NEXT: br label [[OMP_PRECOND_END]]
6508 // CHECK13: omp.precond.end:
6509 // CHECK13-NEXT: ret void
6512 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.omp_outlined.omp_outlined
6513 // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
6514 // CHECK13-NEXT: entry:
6515 // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
6516 // CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
6517 // CHECK13-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
6518 // CHECK13-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
6519 // CHECK13-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 8
6520 // CHECK13-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
6521 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
6522 // CHECK13-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
6523 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
6524 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
6525 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
6526 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
6527 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
6528 // CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
6529 // CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
6530 // CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6531 // CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6532 // CHECK13-NEXT: [[I5:%.*]] = alloca i32, align 4
6533 // CHECK13-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
6534 // CHECK13-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
6535 // CHECK13-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
6536 // CHECK13-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
6537 // CHECK13-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 8
6538 // CHECK13-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
6539 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
6540 // CHECK13-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
6541 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 8
6542 // CHECK13-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
6543 // CHECK13-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 8
6544 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
6545 // CHECK13-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_1]], align 4
6546 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
6547 // CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
6548 // CHECK13-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
6549 // CHECK13-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
6550 // CHECK13-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_2]], align 4
6551 // CHECK13-NEXT: store i32 0, ptr [[I]], align 4
6552 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
6553 // CHECK13-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
6554 // CHECK13-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
6555 // CHECK13: omp.precond.then:
6556 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
6557 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
6558 // CHECK13-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4
6559 // CHECK13-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
6560 // CHECK13-NEXT: [[CONV:%.*]] = trunc i64 [[TMP7]] to i32
6561 // CHECK13-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
6562 // CHECK13-NEXT: [[CONV4:%.*]] = trunc i64 [[TMP8]] to i32
6563 // CHECK13-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
6564 // CHECK13-NEXT: store i32 [[CONV4]], ptr [[DOTOMP_UB]], align 4
6565 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
6566 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
6567 // CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
6568 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
6569 // CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
6570 // CHECK13-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
6571 // CHECK13-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4
6572 // CHECK13-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB3]], i32 [[TMP13]], i32 35, i32 [[TMP10]], i32 [[TMP11]], i32 1, i32 [[TMP9]])
6573 // CHECK13-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
6574 // CHECK13: omp.dispatch.cond:
6575 // CHECK13-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
6576 // CHECK13-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 4
6577 // CHECK13-NEXT: [[TMP16:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB3]], i32 [[TMP15]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
6578 // CHECK13-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP16]], 0
6579 // CHECK13-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
6580 // CHECK13: omp.dispatch.body:
6581 // CHECK13-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
6582 // CHECK13-NEXT: store i32 [[TMP17]], ptr [[DOTOMP_IV]], align 4
6583 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
6584 // CHECK13: omp.inner.for.cond:
6585 // CHECK13-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP43:![0-9]+]]
6586 // CHECK13-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP43]]
6587 // CHECK13-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
6588 // CHECK13-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6589 // CHECK13: omp.inner.for.body:
6590 // CHECK13-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP43]]
6591 // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP20]], 1
6592 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
6593 // CHECK13-NEXT: store i32 [[ADD]], ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP43]]
6594 // CHECK13-NEXT: [[TMP21:%.*]] = load i32, ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP43]]
6595 // CHECK13-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
6596 // CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i64 [[IDXPROM]]
6597 // CHECK13-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP43]]
6598 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
6599 // CHECK13: omp.body.continue:
6600 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
6601 // CHECK13: omp.inner.for.inc:
6602 // CHECK13-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP43]]
6603 // CHECK13-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP22]], 1
6604 // CHECK13-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP43]]
6605 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP44:![0-9]+]]
6606 // CHECK13: omp.inner.for.end:
6607 // CHECK13-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
6608 // CHECK13: omp.dispatch.inc:
6609 // CHECK13-NEXT: br label [[OMP_DISPATCH_COND]]
6610 // CHECK13: omp.dispatch.end:
6611 // CHECK13-NEXT: [[TMP23:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
6612 // CHECK13-NEXT: [[TMP24:%.*]] = load i32, ptr [[TMP23]], align 4
6613 // CHECK13-NEXT: call void @__kmpc_dispatch_deinit(ptr @[[GLOB3]], i32 [[TMP24]])
6614 // CHECK13-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
6615 // CHECK13-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
6616 // CHECK13-NEXT: br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6617 // CHECK13: .omp.final.then:
6618 // CHECK13-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
6619 // CHECK13-NEXT: [[SUB8:%.*]] = sub nsw i32 [[TMP27]], 0
6620 // CHECK13-NEXT: [[DIV9:%.*]] = sdiv i32 [[SUB8]], 1
6621 // CHECK13-NEXT: [[MUL10:%.*]] = mul nsw i32 [[DIV9]], 1
6622 // CHECK13-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]]
6623 // CHECK13-NEXT: store i32 [[ADD11]], ptr [[I5]], align 4
6624 // CHECK13-NEXT: br label [[DOTOMP_FINAL_DONE]]
6625 // CHECK13: .omp.final.done:
6626 // CHECK13-NEXT: br label [[OMP_PRECOND_END]]
6627 // CHECK13: omp.precond.end:
6628 // CHECK13-NEXT: ret void
6631 // CHECK13-LABEL: define {{[^@]+}}@_Z5tmainIiLi10EEiT_
6632 // CHECK13-SAME: (i32 noundef signext [[ARGC:%.*]]) #[[ATTR5:[0-9]+]] comdat {
6633 // CHECK13-NEXT: entry:
6634 // CHECK13-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
6635 // CHECK13-NEXT: [[A:%.*]] = alloca [10 x i32], align 4
6636 // CHECK13-NEXT: [[M:%.*]] = alloca i32, align 4
6637 // CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8
6638 // CHECK13-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8
6639 // CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8
6640 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
6641 // CHECK13-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
6642 // CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS1:%.*]] = alloca [1 x ptr], align 8
6643 // CHECK13-NEXT: [[DOTOFFLOAD_PTRS2:%.*]] = alloca [1 x ptr], align 8
6644 // CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS3:%.*]] = alloca [1 x ptr], align 8
6645 // CHECK13-NEXT: [[_TMP4:%.*]] = alloca i32, align 4
6646 // CHECK13-NEXT: [[KERNEL_ARGS5:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
6647 // CHECK13-NEXT: [[M_CASTED:%.*]] = alloca i64, align 8
6648 // CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS8:%.*]] = alloca [2 x ptr], align 8
6649 // CHECK13-NEXT: [[DOTOFFLOAD_PTRS9:%.*]] = alloca [2 x ptr], align 8
6650 // CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS10:%.*]] = alloca [2 x ptr], align 8
6651 // CHECK13-NEXT: [[_TMP11:%.*]] = alloca i32, align 4
6652 // CHECK13-NEXT: [[KERNEL_ARGS12:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
6653 // CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS15:%.*]] = alloca [1 x ptr], align 8
6654 // CHECK13-NEXT: [[DOTOFFLOAD_PTRS16:%.*]] = alloca [1 x ptr], align 8
6655 // CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS17:%.*]] = alloca [1 x ptr], align 8
6656 // CHECK13-NEXT: [[_TMP18:%.*]] = alloca i32, align 4
6657 // CHECK13-NEXT: [[KERNEL_ARGS19:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
6658 // CHECK13-NEXT: [[M_CASTED22:%.*]] = alloca i64, align 8
6659 // CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS23:%.*]] = alloca [2 x ptr], align 8
6660 // CHECK13-NEXT: [[DOTOFFLOAD_PTRS24:%.*]] = alloca [2 x ptr], align 8
6661 // CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS25:%.*]] = alloca [2 x ptr], align 8
6662 // CHECK13-NEXT: [[_TMP26:%.*]] = alloca i32, align 4
6663 // CHECK13-NEXT: [[KERNEL_ARGS27:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
6664 // CHECK13-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4
6665 // CHECK13-NEXT: store i32 10, ptr [[M]], align 4
6666 // CHECK13-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
6667 // CHECK13-NEXT: store ptr [[A]], ptr [[TMP0]], align 8
6668 // CHECK13-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
6669 // CHECK13-NEXT: store ptr [[A]], ptr [[TMP1]], align 8
6670 // CHECK13-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
6671 // CHECK13-NEXT: store ptr null, ptr [[TMP2]], align 8
6672 // CHECK13-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
6673 // CHECK13-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
6674 // CHECK13-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
6675 // CHECK13-NEXT: store i32 3, ptr [[TMP5]], align 4
6676 // CHECK13-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
6677 // CHECK13-NEXT: store i32 1, ptr [[TMP6]], align 4
6678 // CHECK13-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
6679 // CHECK13-NEXT: store ptr [[TMP3]], ptr [[TMP7]], align 8
6680 // CHECK13-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
6681 // CHECK13-NEXT: store ptr [[TMP4]], ptr [[TMP8]], align 8
6682 // CHECK13-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
6683 // CHECK13-NEXT: store ptr @.offload_sizes.9, ptr [[TMP9]], align 8
6684 // CHECK13-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
6685 // CHECK13-NEXT: store ptr @.offload_maptypes.10, ptr [[TMP10]], align 8
6686 // CHECK13-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
6687 // CHECK13-NEXT: store ptr null, ptr [[TMP11]], align 8
6688 // CHECK13-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
6689 // CHECK13-NEXT: store ptr null, ptr [[TMP12]], align 8
6690 // CHECK13-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
6691 // CHECK13-NEXT: store i64 10, ptr [[TMP13]], align 8
6692 // CHECK13-NEXT: [[TMP14:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
6693 // CHECK13-NEXT: store i64 0, ptr [[TMP14]], align 8
6694 // CHECK13-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
6695 // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP15]], align 4
6696 // CHECK13-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
6697 // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP16]], align 4
6698 // CHECK13-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
6699 // CHECK13-NEXT: store i32 0, ptr [[TMP17]], align 4
6700 // CHECK13-NEXT: [[TMP18:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.region_id, ptr [[KERNEL_ARGS]])
6701 // CHECK13-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
6702 // CHECK13-NEXT: br i1 [[TMP19]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
6703 // CHECK13: omp_offload.failed:
6704 // CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122(ptr [[A]]) #[[ATTR3]]
6705 // CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT]]
6706 // CHECK13: omp_offload.cont:
6707 // CHECK13-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
6708 // CHECK13-NEXT: store ptr [[A]], ptr [[TMP20]], align 8
6709 // CHECK13-NEXT: [[TMP21:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
6710 // CHECK13-NEXT: store ptr [[A]], ptr [[TMP21]], align 8
6711 // CHECK13-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS3]], i64 0, i64 0
6712 // CHECK13-NEXT: store ptr null, ptr [[TMP22]], align 8
6713 // CHECK13-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
6714 // CHECK13-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
6715 // CHECK13-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 0
6716 // CHECK13-NEXT: store i32 3, ptr [[TMP25]], align 4
6717 // CHECK13-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 1
6718 // CHECK13-NEXT: store i32 1, ptr [[TMP26]], align 4
6719 // CHECK13-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 2
6720 // CHECK13-NEXT: store ptr [[TMP23]], ptr [[TMP27]], align 8
6721 // CHECK13-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 3
6722 // CHECK13-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 8
6723 // CHECK13-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 4
6724 // CHECK13-NEXT: store ptr @.offload_sizes.11, ptr [[TMP29]], align 8
6725 // CHECK13-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 5
6726 // CHECK13-NEXT: store ptr @.offload_maptypes.12, ptr [[TMP30]], align 8
6727 // CHECK13-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 6
6728 // CHECK13-NEXT: store ptr null, ptr [[TMP31]], align 8
6729 // CHECK13-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 7
6730 // CHECK13-NEXT: store ptr null, ptr [[TMP32]], align 8
6731 // CHECK13-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 8
6732 // CHECK13-NEXT: store i64 10, ptr [[TMP33]], align 8
6733 // CHECK13-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 9
6734 // CHECK13-NEXT: store i64 0, ptr [[TMP34]], align 8
6735 // CHECK13-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 10
6736 // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP35]], align 4
6737 // CHECK13-NEXT: [[TMP36:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 11
6738 // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP36]], align 4
6739 // CHECK13-NEXT: [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 12
6740 // CHECK13-NEXT: store i32 0, ptr [[TMP37]], align 4
6741 // CHECK13-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.region_id, ptr [[KERNEL_ARGS5]])
6742 // CHECK13-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
6743 // CHECK13-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED6:%.*]], label [[OMP_OFFLOAD_CONT7:%.*]]
6744 // CHECK13: omp_offload.failed6:
6745 // CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127(ptr [[A]]) #[[ATTR3]]
6746 // CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT7]]
6747 // CHECK13: omp_offload.cont7:
6748 // CHECK13-NEXT: [[TMP40:%.*]] = load i32, ptr [[M]], align 4
6749 // CHECK13-NEXT: store i32 [[TMP40]], ptr [[M_CASTED]], align 4
6750 // CHECK13-NEXT: [[TMP41:%.*]] = load i64, ptr [[M_CASTED]], align 8
6751 // CHECK13-NEXT: [[TMP42:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS8]], i32 0, i32 0
6752 // CHECK13-NEXT: store i64 [[TMP41]], ptr [[TMP42]], align 8
6753 // CHECK13-NEXT: [[TMP43:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS9]], i32 0, i32 0
6754 // CHECK13-NEXT: store i64 [[TMP41]], ptr [[TMP43]], align 8
6755 // CHECK13-NEXT: [[TMP44:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS10]], i64 0, i64 0
6756 // CHECK13-NEXT: store ptr null, ptr [[TMP44]], align 8
6757 // CHECK13-NEXT: [[TMP45:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS8]], i32 0, i32 1
6758 // CHECK13-NEXT: store ptr [[A]], ptr [[TMP45]], align 8
6759 // CHECK13-NEXT: [[TMP46:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS9]], i32 0, i32 1
6760 // CHECK13-NEXT: store ptr [[A]], ptr [[TMP46]], align 8
6761 // CHECK13-NEXT: [[TMP47:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS10]], i64 0, i64 1
6762 // CHECK13-NEXT: store ptr null, ptr [[TMP47]], align 8
6763 // CHECK13-NEXT: [[TMP48:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS8]], i32 0, i32 0
6764 // CHECK13-NEXT: [[TMP49:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS9]], i32 0, i32 0
6765 // CHECK13-NEXT: [[TMP50:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 0
6766 // CHECK13-NEXT: store i32 3, ptr [[TMP50]], align 4
6767 // CHECK13-NEXT: [[TMP51:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 1
6768 // CHECK13-NEXT: store i32 2, ptr [[TMP51]], align 4
6769 // CHECK13-NEXT: [[TMP52:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 2
6770 // CHECK13-NEXT: store ptr [[TMP48]], ptr [[TMP52]], align 8
6771 // CHECK13-NEXT: [[TMP53:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 3
6772 // CHECK13-NEXT: store ptr [[TMP49]], ptr [[TMP53]], align 8
6773 // CHECK13-NEXT: [[TMP54:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 4
6774 // CHECK13-NEXT: store ptr @.offload_sizes.13, ptr [[TMP54]], align 8
6775 // CHECK13-NEXT: [[TMP55:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 5
6776 // CHECK13-NEXT: store ptr @.offload_maptypes.14, ptr [[TMP55]], align 8
6777 // CHECK13-NEXT: [[TMP56:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 6
6778 // CHECK13-NEXT: store ptr null, ptr [[TMP56]], align 8
6779 // CHECK13-NEXT: [[TMP57:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 7
6780 // CHECK13-NEXT: store ptr null, ptr [[TMP57]], align 8
6781 // CHECK13-NEXT: [[TMP58:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 8
6782 // CHECK13-NEXT: store i64 10, ptr [[TMP58]], align 8
6783 // CHECK13-NEXT: [[TMP59:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 9
6784 // CHECK13-NEXT: store i64 0, ptr [[TMP59]], align 8
6785 // CHECK13-NEXT: [[TMP60:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 10
6786 // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP60]], align 4
6787 // CHECK13-NEXT: [[TMP61:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 11
6788 // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP61]], align 4
6789 // CHECK13-NEXT: [[TMP62:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 12
6790 // CHECK13-NEXT: store i32 0, ptr [[TMP62]], align 4
6791 // CHECK13-NEXT: [[TMP63:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.region_id, ptr [[KERNEL_ARGS12]])
6792 // CHECK13-NEXT: [[TMP64:%.*]] = icmp ne i32 [[TMP63]], 0
6793 // CHECK13-NEXT: br i1 [[TMP64]], label [[OMP_OFFLOAD_FAILED13:%.*]], label [[OMP_OFFLOAD_CONT14:%.*]]
6794 // CHECK13: omp_offload.failed13:
6795 // CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132(i64 [[TMP41]], ptr [[A]]) #[[ATTR3]]
6796 // CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT14]]
6797 // CHECK13: omp_offload.cont14:
6798 // CHECK13-NEXT: [[TMP65:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS15]], i32 0, i32 0
6799 // CHECK13-NEXT: store ptr [[A]], ptr [[TMP65]], align 8
6800 // CHECK13-NEXT: [[TMP66:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS16]], i32 0, i32 0
6801 // CHECK13-NEXT: store ptr [[A]], ptr [[TMP66]], align 8
6802 // CHECK13-NEXT: [[TMP67:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS17]], i64 0, i64 0
6803 // CHECK13-NEXT: store ptr null, ptr [[TMP67]], align 8
6804 // CHECK13-NEXT: [[TMP68:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS15]], i32 0, i32 0
6805 // CHECK13-NEXT: [[TMP69:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS16]], i32 0, i32 0
6806 // CHECK13-NEXT: [[TMP70:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 0
6807 // CHECK13-NEXT: store i32 3, ptr [[TMP70]], align 4
6808 // CHECK13-NEXT: [[TMP71:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 1
6809 // CHECK13-NEXT: store i32 1, ptr [[TMP71]], align 4
6810 // CHECK13-NEXT: [[TMP72:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 2
6811 // CHECK13-NEXT: store ptr [[TMP68]], ptr [[TMP72]], align 8
6812 // CHECK13-NEXT: [[TMP73:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 3
6813 // CHECK13-NEXT: store ptr [[TMP69]], ptr [[TMP73]], align 8
6814 // CHECK13-NEXT: [[TMP74:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 4
6815 // CHECK13-NEXT: store ptr @.offload_sizes.15, ptr [[TMP74]], align 8
6816 // CHECK13-NEXT: [[TMP75:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 5
6817 // CHECK13-NEXT: store ptr @.offload_maptypes.16, ptr [[TMP75]], align 8
6818 // CHECK13-NEXT: [[TMP76:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 6
6819 // CHECK13-NEXT: store ptr null, ptr [[TMP76]], align 8
6820 // CHECK13-NEXT: [[TMP77:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 7
6821 // CHECK13-NEXT: store ptr null, ptr [[TMP77]], align 8
6822 // CHECK13-NEXT: [[TMP78:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 8
6823 // CHECK13-NEXT: store i64 10, ptr [[TMP78]], align 8
6824 // CHECK13-NEXT: [[TMP79:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 9
6825 // CHECK13-NEXT: store i64 0, ptr [[TMP79]], align 8
6826 // CHECK13-NEXT: [[TMP80:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 10
6827 // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP80]], align 4
6828 // CHECK13-NEXT: [[TMP81:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 11
6829 // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP81]], align 4
6830 // CHECK13-NEXT: [[TMP82:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 12
6831 // CHECK13-NEXT: store i32 0, ptr [[TMP82]], align 4
6832 // CHECK13-NEXT: [[TMP83:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.region_id, ptr [[KERNEL_ARGS19]])
6833 // CHECK13-NEXT: [[TMP84:%.*]] = icmp ne i32 [[TMP83]], 0
6834 // CHECK13-NEXT: br i1 [[TMP84]], label [[OMP_OFFLOAD_FAILED20:%.*]], label [[OMP_OFFLOAD_CONT21:%.*]]
6835 // CHECK13: omp_offload.failed20:
6836 // CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137(ptr [[A]]) #[[ATTR3]]
6837 // CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT21]]
6838 // CHECK13: omp_offload.cont21:
6839 // CHECK13-NEXT: [[TMP85:%.*]] = load i32, ptr [[M]], align 4
6840 // CHECK13-NEXT: store i32 [[TMP85]], ptr [[M_CASTED22]], align 4
6841 // CHECK13-NEXT: [[TMP86:%.*]] = load i64, ptr [[M_CASTED22]], align 8
6842 // CHECK13-NEXT: [[TMP87:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 0
6843 // CHECK13-NEXT: store i64 [[TMP86]], ptr [[TMP87]], align 8
6844 // CHECK13-NEXT: [[TMP88:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS24]], i32 0, i32 0
6845 // CHECK13-NEXT: store i64 [[TMP86]], ptr [[TMP88]], align 8
6846 // CHECK13-NEXT: [[TMP89:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS25]], i64 0, i64 0
6847 // CHECK13-NEXT: store ptr null, ptr [[TMP89]], align 8
6848 // CHECK13-NEXT: [[TMP90:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 1
6849 // CHECK13-NEXT: store ptr [[A]], ptr [[TMP90]], align 8
6850 // CHECK13-NEXT: [[TMP91:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS24]], i32 0, i32 1
6851 // CHECK13-NEXT: store ptr [[A]], ptr [[TMP91]], align 8
6852 // CHECK13-NEXT: [[TMP92:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS25]], i64 0, i64 1
6853 // CHECK13-NEXT: store ptr null, ptr [[TMP92]], align 8
6854 // CHECK13-NEXT: [[TMP93:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 0
6855 // CHECK13-NEXT: [[TMP94:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS24]], i32 0, i32 0
6856 // CHECK13-NEXT: [[TMP95:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 0
6857 // CHECK13-NEXT: store i32 3, ptr [[TMP95]], align 4
6858 // CHECK13-NEXT: [[TMP96:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 1
6859 // CHECK13-NEXT: store i32 2, ptr [[TMP96]], align 4
6860 // CHECK13-NEXT: [[TMP97:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 2
6861 // CHECK13-NEXT: store ptr [[TMP93]], ptr [[TMP97]], align 8
6862 // CHECK13-NEXT: [[TMP98:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 3
6863 // CHECK13-NEXT: store ptr [[TMP94]], ptr [[TMP98]], align 8
6864 // CHECK13-NEXT: [[TMP99:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 4
6865 // CHECK13-NEXT: store ptr @.offload_sizes.17, ptr [[TMP99]], align 8
6866 // CHECK13-NEXT: [[TMP100:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 5
6867 // CHECK13-NEXT: store ptr @.offload_maptypes.18, ptr [[TMP100]], align 8
6868 // CHECK13-NEXT: [[TMP101:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 6
6869 // CHECK13-NEXT: store ptr null, ptr [[TMP101]], align 8
6870 // CHECK13-NEXT: [[TMP102:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 7
6871 // CHECK13-NEXT: store ptr null, ptr [[TMP102]], align 8
6872 // CHECK13-NEXT: [[TMP103:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 8
6873 // CHECK13-NEXT: store i64 10, ptr [[TMP103]], align 8
6874 // CHECK13-NEXT: [[TMP104:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 9
6875 // CHECK13-NEXT: store i64 0, ptr [[TMP104]], align 8
6876 // CHECK13-NEXT: [[TMP105:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 10
6877 // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP105]], align 4
6878 // CHECK13-NEXT: [[TMP106:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 11
6879 // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP106]], align 4
6880 // CHECK13-NEXT: [[TMP107:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 12
6881 // CHECK13-NEXT: store i32 0, ptr [[TMP107]], align 4
6882 // CHECK13-NEXT: [[TMP108:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.region_id, ptr [[KERNEL_ARGS27]])
6883 // CHECK13-NEXT: [[TMP109:%.*]] = icmp ne i32 [[TMP108]], 0
6884 // CHECK13-NEXT: br i1 [[TMP109]], label [[OMP_OFFLOAD_FAILED28:%.*]], label [[OMP_OFFLOAD_CONT29:%.*]]
6885 // CHECK13: omp_offload.failed28:
6886 // CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142(i64 [[TMP86]], ptr [[A]]) #[[ATTR3]]
6887 // CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT29]]
6888 // CHECK13: omp_offload.cont29:
6889 // CHECK13-NEXT: ret i32 0
6892 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122
6893 // CHECK13-SAME: (ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
6894 // CHECK13-NEXT: entry:
6895 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
6896 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
6897 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
6898 // CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.omp_outlined, ptr [[TMP0]])
6899 // CHECK13-NEXT: ret void
6902 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.omp_outlined
6903 // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
6904 // CHECK13-NEXT: entry:
6905 // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
6906 // CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
6907 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
6908 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
6909 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
6910 // CHECK13-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
6911 // CHECK13-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
6912 // CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6913 // CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6914 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
6915 // CHECK13-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
6916 // CHECK13-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
6917 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
6918 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
6919 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
6920 // CHECK13-NEXT: store i32 9, ptr [[DOTOMP_COMB_UB]], align 4
6921 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
6922 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
6923 // CHECK13-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
6924 // CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
6925 // CHECK13-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
6926 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
6927 // CHECK13-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
6928 // CHECK13-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
6929 // CHECK13: cond.true:
6930 // CHECK13-NEXT: br label [[COND_END:%.*]]
6931 // CHECK13: cond.false:
6932 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
6933 // CHECK13-NEXT: br label [[COND_END]]
6934 // CHECK13: cond.end:
6935 // CHECK13-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
6936 // CHECK13-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
6937 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
6938 // CHECK13-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
6939 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
6940 // CHECK13: omp.inner.for.cond:
6941 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP46:![0-9]+]]
6942 // CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP46]]
6943 // CHECK13-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
6944 // CHECK13-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6945 // CHECK13: omp.inner.for.body:
6946 // CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP46]]
6947 // CHECK13-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
6948 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP46]]
6949 // CHECK13-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
6950 // CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP46]]
6951 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
6952 // CHECK13: omp.inner.for.inc:
6953 // CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP46]]
6954 // CHECK13-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP46]]
6955 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
6956 // CHECK13-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP46]]
6957 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP47:![0-9]+]]
6958 // CHECK13: omp.inner.for.end:
6959 // CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
6960 // CHECK13: omp.loop.exit:
6961 // CHECK13-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
6962 // CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
6963 // CHECK13-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
6964 // CHECK13-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6965 // CHECK13: .omp.final.then:
6966 // CHECK13-NEXT: store i32 10, ptr [[I]], align 4
6967 // CHECK13-NEXT: br label [[DOTOMP_FINAL_DONE]]
6968 // CHECK13: .omp.final.done:
6969 // CHECK13-NEXT: ret void
6972 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.omp_outlined.omp_outlined
6973 // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
6974 // CHECK13-NEXT: entry:
6975 // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
6976 // CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
6977 // CHECK13-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
6978 // CHECK13-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
6979 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
6980 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
6981 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
6982 // CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
6983 // CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
6984 // CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6985 // CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6986 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
6987 // CHECK13-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
6988 // CHECK13-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
6989 // CHECK13-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
6990 // CHECK13-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
6991 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
6992 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
6993 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
6994 // CHECK13-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
6995 // CHECK13-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
6996 // CHECK13-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
6997 // CHECK13-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
6998 // CHECK13-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
6999 // CHECK13-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
7000 // CHECK13-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
7001 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
7002 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
7003 // CHECK13-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
7004 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
7005 // CHECK13-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
7006 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
7007 // CHECK13-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 9
7008 // CHECK13-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
7009 // CHECK13: cond.true:
7010 // CHECK13-NEXT: br label [[COND_END:%.*]]
7011 // CHECK13: cond.false:
7012 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
7013 // CHECK13-NEXT: br label [[COND_END]]
7014 // CHECK13: cond.end:
7015 // CHECK13-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
7016 // CHECK13-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
7017 // CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
7018 // CHECK13-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4
7019 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
7020 // CHECK13: omp.inner.for.cond:
7021 // CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP49:![0-9]+]]
7022 // CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP49]]
7023 // CHECK13-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
7024 // CHECK13-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7025 // CHECK13: omp.inner.for.body:
7026 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP49]]
7027 // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
7028 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
7029 // CHECK13-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP49]]
7030 // CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP49]]
7031 // CHECK13-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
7032 // CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
7033 // CHECK13-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP49]]
7034 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
7035 // CHECK13: omp.body.continue:
7036 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
7037 // CHECK13: omp.inner.for.inc:
7038 // CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP49]]
7039 // CHECK13-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
7040 // CHECK13-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP49]]
7041 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP50:![0-9]+]]
7042 // CHECK13: omp.inner.for.end:
7043 // CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
7044 // CHECK13: omp.loop.exit:
7045 // CHECK13-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP4]])
7046 // CHECK13-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
7047 // CHECK13-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
7048 // CHECK13-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7049 // CHECK13: .omp.final.then:
7050 // CHECK13-NEXT: store i32 10, ptr [[I]], align 4
7051 // CHECK13-NEXT: br label [[DOTOMP_FINAL_DONE]]
7052 // CHECK13: .omp.final.done:
7053 // CHECK13-NEXT: ret void
7056 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127
7057 // CHECK13-SAME: (ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
7058 // CHECK13-NEXT: entry:
7059 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
7060 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
7061 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
7062 // CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.omp_outlined, ptr [[TMP0]])
7063 // CHECK13-NEXT: ret void
7066 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.omp_outlined
7067 // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
7068 // CHECK13-NEXT: entry:
7069 // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
7070 // CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
7071 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
7072 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
7073 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
7074 // CHECK13-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
7075 // CHECK13-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
7076 // CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7077 // CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7078 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
7079 // CHECK13-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
7080 // CHECK13-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
7081 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
7082 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
7083 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
7084 // CHECK13-NEXT: store i32 9, ptr [[DOTOMP_COMB_UB]], align 4
7085 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
7086 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
7087 // CHECK13-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
7088 // CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
7089 // CHECK13-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
7090 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
7091 // CHECK13-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
7092 // CHECK13-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
7093 // CHECK13: cond.true:
7094 // CHECK13-NEXT: br label [[COND_END:%.*]]
7095 // CHECK13: cond.false:
7096 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
7097 // CHECK13-NEXT: br label [[COND_END]]
7098 // CHECK13: cond.end:
7099 // CHECK13-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
7100 // CHECK13-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
7101 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
7102 // CHECK13-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
7103 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
7104 // CHECK13: omp.inner.for.cond:
7105 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP52:![0-9]+]]
7106 // CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP52]]
7107 // CHECK13-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
7108 // CHECK13-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7109 // CHECK13: omp.inner.for.body:
7110 // CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP52]]
7111 // CHECK13-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
7112 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP52]]
7113 // CHECK13-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
7114 // CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP52]]
7115 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
7116 // CHECK13: omp.inner.for.inc:
7117 // CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP52]]
7118 // CHECK13-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP52]]
7119 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
7120 // CHECK13-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP52]]
7121 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP53:![0-9]+]]
7122 // CHECK13: omp.inner.for.end:
7123 // CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
7124 // CHECK13: omp.loop.exit:
7125 // CHECK13-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
7126 // CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
7127 // CHECK13-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
7128 // CHECK13-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7129 // CHECK13: .omp.final.then:
7130 // CHECK13-NEXT: store i32 10, ptr [[I]], align 4
7131 // CHECK13-NEXT: br label [[DOTOMP_FINAL_DONE]]
7132 // CHECK13: .omp.final.done:
7133 // CHECK13-NEXT: ret void
7136 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.omp_outlined.omp_outlined
7137 // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
7138 // CHECK13-NEXT: entry:
7139 // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
7140 // CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
7141 // CHECK13-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
7142 // CHECK13-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
7143 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
7144 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
7145 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
7146 // CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
7147 // CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
7148 // CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7149 // CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7150 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
7151 // CHECK13-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
7152 // CHECK13-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
7153 // CHECK13-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
7154 // CHECK13-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
7155 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
7156 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
7157 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
7158 // CHECK13-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
7159 // CHECK13-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
7160 // CHECK13-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
7161 // CHECK13-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
7162 // CHECK13-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
7163 // CHECK13-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
7164 // CHECK13-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
7165 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
7166 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
7167 // CHECK13-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
7168 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
7169 // CHECK13-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
7170 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
7171 // CHECK13-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 9
7172 // CHECK13-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
7173 // CHECK13: cond.true:
7174 // CHECK13-NEXT: br label [[COND_END:%.*]]
7175 // CHECK13: cond.false:
7176 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
7177 // CHECK13-NEXT: br label [[COND_END]]
7178 // CHECK13: cond.end:
7179 // CHECK13-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
7180 // CHECK13-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
7181 // CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
7182 // CHECK13-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4
7183 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
7184 // CHECK13: omp.inner.for.cond:
7185 // CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP55:![0-9]+]]
7186 // CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP55]]
7187 // CHECK13-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
7188 // CHECK13-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7189 // CHECK13: omp.inner.for.body:
7190 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP55]]
7191 // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
7192 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
7193 // CHECK13-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP55]]
7194 // CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP55]]
7195 // CHECK13-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
7196 // CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
7197 // CHECK13-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP55]]
7198 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
7199 // CHECK13: omp.body.continue:
7200 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
7201 // CHECK13: omp.inner.for.inc:
7202 // CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP55]]
7203 // CHECK13-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
7204 // CHECK13-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP55]]
7205 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP56:![0-9]+]]
7206 // CHECK13: omp.inner.for.end:
7207 // CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
7208 // CHECK13: omp.loop.exit:
7209 // CHECK13-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP4]])
7210 // CHECK13-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
7211 // CHECK13-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
7212 // CHECK13-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7213 // CHECK13: .omp.final.then:
7214 // CHECK13-NEXT: store i32 10, ptr [[I]], align 4
7215 // CHECK13-NEXT: br label [[DOTOMP_FINAL_DONE]]
7216 // CHECK13: .omp.final.done:
7217 // CHECK13-NEXT: ret void
7220 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132
7221 // CHECK13-SAME: (i64 noundef [[M:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
7222 // CHECK13-NEXT: entry:
7223 // CHECK13-NEXT: [[M_ADDR:%.*]] = alloca i64, align 8
7224 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
7225 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
7226 // CHECK13-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
7227 // CHECK13-NEXT: store i64 [[M]], ptr [[M_ADDR]], align 8
7228 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
7229 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
7230 // CHECK13-NEXT: [[TMP1:%.*]] = load i32, ptr [[M_ADDR]], align 4
7231 // CHECK13-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
7232 // CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
7233 // CHECK13-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
7234 // CHECK13-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8
7235 // CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.omp_outlined, ptr [[TMP0]], i64 [[TMP3]])
7236 // CHECK13-NEXT: ret void
7239 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.omp_outlined
7240 // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
7241 // CHECK13-NEXT: entry:
7242 // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
7243 // CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
7244 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
7245 // CHECK13-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
7246 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
7247 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
7248 // CHECK13-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
7249 // CHECK13-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
7250 // CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7251 // CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7252 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
7253 // CHECK13-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
7254 // CHECK13-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
7255 // CHECK13-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
7256 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
7257 // CHECK13-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
7258 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
7259 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
7260 // CHECK13-NEXT: store i32 9, ptr [[DOTOMP_COMB_UB]], align 4
7261 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
7262 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
7263 // CHECK13-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
7264 // CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
7265 // CHECK13-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
7266 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
7267 // CHECK13-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
7268 // CHECK13-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
7269 // CHECK13: cond.true:
7270 // CHECK13-NEXT: br label [[COND_END:%.*]]
7271 // CHECK13: cond.false:
7272 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
7273 // CHECK13-NEXT: br label [[COND_END]]
7274 // CHECK13: cond.end:
7275 // CHECK13-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
7276 // CHECK13-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
7277 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
7278 // CHECK13-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
7279 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
7280 // CHECK13: omp.inner.for.cond:
7281 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP58:![0-9]+]]
7282 // CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP58]]
7283 // CHECK13-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
7284 // CHECK13-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7285 // CHECK13: omp.inner.for.body:
7286 // CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP58]]
7287 // CHECK13-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
7288 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP58]]
7289 // CHECK13-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
7290 // CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group [[ACC_GRP58]]
7291 // CHECK13-NEXT: store i32 [[TMP12]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group [[ACC_GRP58]]
7292 // CHECK13-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group [[ACC_GRP58]]
7293 // CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]], ptr [[TMP0]], i64 [[TMP13]]), !llvm.access.group [[ACC_GRP58]]
7294 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
7295 // CHECK13: omp.inner.for.inc:
7296 // CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP58]]
7297 // CHECK13-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP58]]
7298 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
7299 // CHECK13-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP58]]
7300 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP59:![0-9]+]]
7301 // CHECK13: omp.inner.for.end:
7302 // CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
7303 // CHECK13: omp.loop.exit:
7304 // CHECK13-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
7305 // CHECK13-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
7306 // CHECK13-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
7307 // CHECK13-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7308 // CHECK13: .omp.final.then:
7309 // CHECK13-NEXT: store i32 10, ptr [[I]], align 4
7310 // CHECK13-NEXT: br label [[DOTOMP_FINAL_DONE]]
7311 // CHECK13: .omp.final.done:
7312 // CHECK13-NEXT: ret void
7315 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.omp_outlined.omp_outlined
7316 // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
7317 // CHECK13-NEXT: entry:
7318 // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
7319 // CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
7320 // CHECK13-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
7321 // CHECK13-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
7322 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
7323 // CHECK13-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
7324 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
7325 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
7326 // CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
7327 // CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
7328 // CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7329 // CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7330 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
7331 // CHECK13-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
7332 // CHECK13-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
7333 // CHECK13-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
7334 // CHECK13-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
7335 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
7336 // CHECK13-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
7337 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
7338 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
7339 // CHECK13-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
7340 // CHECK13-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
7341 // CHECK13-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
7342 // CHECK13-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
7343 // CHECK13-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
7344 // CHECK13-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
7345 // CHECK13-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
7346 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
7347 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
7348 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
7349 // CHECK13-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
7350 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4
7351 // CHECK13-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP5]], i32 33, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[TMP3]])
7352 // CHECK13-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
7353 // CHECK13: omp.dispatch.cond:
7354 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
7355 // CHECK13-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
7356 // CHECK13-NEXT: [[CONV2:%.*]] = trunc i64 [[TMP7]] to i32
7357 // CHECK13-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], [[CONV2]]
7358 // CHECK13-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
7359 // CHECK13: cond.true:
7360 // CHECK13-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
7361 // CHECK13-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP8]] to i32
7362 // CHECK13-NEXT: br label [[COND_END:%.*]]
7363 // CHECK13: cond.false:
7364 // CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
7365 // CHECK13-NEXT: br label [[COND_END]]
7366 // CHECK13: cond.end:
7367 // CHECK13-NEXT: [[COND:%.*]] = phi i32 [ [[CONV3]], [[COND_TRUE]] ], [ [[TMP9]], [[COND_FALSE]] ]
7368 // CHECK13-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
7369 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
7370 // CHECK13-NEXT: store i32 [[TMP10]], ptr [[DOTOMP_IV]], align 4
7371 // CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
7372 // CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
7373 // CHECK13-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]]
7374 // CHECK13-NEXT: br i1 [[CMP4]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
7375 // CHECK13: omp.dispatch.body:
7376 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
7377 // CHECK13: omp.inner.for.cond:
7378 // CHECK13-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP61:![0-9]+]]
7379 // CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP61]]
7380 // CHECK13-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
7381 // CHECK13-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7382 // CHECK13: omp.inner.for.body:
7383 // CHECK13-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP61]]
7384 // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1
7385 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
7386 // CHECK13-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP61]]
7387 // CHECK13-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP61]]
7388 // CHECK13-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP16]] to i64
7389 // CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
7390 // CHECK13-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP61]]
7391 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
7392 // CHECK13: omp.body.continue:
7393 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
7394 // CHECK13: omp.inner.for.inc:
7395 // CHECK13-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP61]]
7396 // CHECK13-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP17]], 1
7397 // CHECK13-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP61]]
7398 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP62:![0-9]+]]
7399 // CHECK13: omp.inner.for.end:
7400 // CHECK13-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
7401 // CHECK13: omp.dispatch.inc:
7402 // CHECK13-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
7403 // CHECK13-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
7404 // CHECK13-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
7405 // CHECK13-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_LB]], align 4
7406 // CHECK13-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
7407 // CHECK13-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
7408 // CHECK13-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
7409 // CHECK13-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_UB]], align 4
7410 // CHECK13-NEXT: br label [[OMP_DISPATCH_COND]]
7411 // CHECK13: omp.dispatch.end:
7412 // CHECK13-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP5]])
7413 // CHECK13-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
7414 // CHECK13-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
7415 // CHECK13-NEXT: br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7416 // CHECK13: .omp.final.then:
7417 // CHECK13-NEXT: store i32 10, ptr [[I]], align 4
7418 // CHECK13-NEXT: br label [[DOTOMP_FINAL_DONE]]
7419 // CHECK13: .omp.final.done:
7420 // CHECK13-NEXT: ret void
7423 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137
7424 // CHECK13-SAME: (ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
7425 // CHECK13-NEXT: entry:
7426 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
7427 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
7428 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
7429 // CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.omp_outlined, ptr [[TMP0]])
7430 // CHECK13-NEXT: ret void
7433 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.omp_outlined
7434 // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
7435 // CHECK13-NEXT: entry:
7436 // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
7437 // CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
7438 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
7439 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
7440 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
7441 // CHECK13-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
7442 // CHECK13-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
7443 // CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7444 // CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7445 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
7446 // CHECK13-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
7447 // CHECK13-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
7448 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
7449 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
7450 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
7451 // CHECK13-NEXT: store i32 9, ptr [[DOTOMP_COMB_UB]], align 4
7452 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
7453 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
7454 // CHECK13-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
7455 // CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
7456 // CHECK13-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
7457 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
7458 // CHECK13-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
7459 // CHECK13-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
7460 // CHECK13: cond.true:
7461 // CHECK13-NEXT: br label [[COND_END:%.*]]
7462 // CHECK13: cond.false:
7463 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
7464 // CHECK13-NEXT: br label [[COND_END]]
7465 // CHECK13: cond.end:
7466 // CHECK13-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
7467 // CHECK13-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
7468 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
7469 // CHECK13-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
7470 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
7471 // CHECK13: omp.inner.for.cond:
7472 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP64:![0-9]+]]
7473 // CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP64]]
7474 // CHECK13-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
7475 // CHECK13-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7476 // CHECK13: omp.inner.for.body:
7477 // CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP64]]
7478 // CHECK13-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
7479 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP64]]
7480 // CHECK13-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
7481 // CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP64]]
7482 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
7483 // CHECK13: omp.inner.for.inc:
7484 // CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP64]]
7485 // CHECK13-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP64]]
7486 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
7487 // CHECK13-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP64]]
7488 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP65:![0-9]+]]
7489 // CHECK13: omp.inner.for.end:
7490 // CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
7491 // CHECK13: omp.loop.exit:
7492 // CHECK13-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
7493 // CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
7494 // CHECK13-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
7495 // CHECK13-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7496 // CHECK13: .omp.final.then:
7497 // CHECK13-NEXT: store i32 10, ptr [[I]], align 4
7498 // CHECK13-NEXT: br label [[DOTOMP_FINAL_DONE]]
7499 // CHECK13: .omp.final.done:
7500 // CHECK13-NEXT: ret void
7503 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.omp_outlined.omp_outlined
7504 // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
7505 // CHECK13-NEXT: entry:
7506 // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
7507 // CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
7508 // CHECK13-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
7509 // CHECK13-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
7510 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
7511 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
7512 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
7513 // CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
7514 // CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
7515 // CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7516 // CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7517 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
7518 // CHECK13-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
7519 // CHECK13-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
7520 // CHECK13-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
7521 // CHECK13-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
7522 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
7523 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
7524 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
7525 // CHECK13-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
7526 // CHECK13-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
7527 // CHECK13-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
7528 // CHECK13-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
7529 // CHECK13-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
7530 // CHECK13-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
7531 // CHECK13-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
7532 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
7533 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
7534 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
7535 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
7536 // CHECK13-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
7537 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
7538 // CHECK13-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB3]], i32 [[TMP6]], i32 35, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 1)
7539 // CHECK13-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
7540 // CHECK13: omp.dispatch.cond:
7541 // CHECK13-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB3]], i32 [[TMP6]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
7542 // CHECK13-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
7543 // CHECK13-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
7544 // CHECK13: omp.dispatch.body:
7545 // CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
7546 // CHECK13-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4
7547 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
7548 // CHECK13: omp.inner.for.cond:
7549 // CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP67:![0-9]+]]
7550 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP67]]
7551 // CHECK13-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
7552 // CHECK13-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7553 // CHECK13: omp.inner.for.body:
7554 // CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP67]]
7555 // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
7556 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
7557 // CHECK13-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP67]]
7558 // CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP67]]
7559 // CHECK13-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP12]] to i64
7560 // CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
7561 // CHECK13-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP67]]
7562 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
7563 // CHECK13: omp.body.continue:
7564 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
7565 // CHECK13: omp.inner.for.inc:
7566 // CHECK13-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP67]]
7567 // CHECK13-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP13]], 1
7568 // CHECK13-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP67]]
7569 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP68:![0-9]+]]
7570 // CHECK13: omp.inner.for.end:
7571 // CHECK13-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
7572 // CHECK13: omp.dispatch.inc:
7573 // CHECK13-NEXT: br label [[OMP_DISPATCH_COND]]
7574 // CHECK13: omp.dispatch.end:
7575 // CHECK13-NEXT: call void @__kmpc_dispatch_deinit(ptr @[[GLOB3]], i32 [[TMP6]])
7576 // CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
7577 // CHECK13-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
7578 // CHECK13-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7579 // CHECK13: .omp.final.then:
7580 // CHECK13-NEXT: store i32 10, ptr [[I]], align 4
7581 // CHECK13-NEXT: br label [[DOTOMP_FINAL_DONE]]
7582 // CHECK13: .omp.final.done:
7583 // CHECK13-NEXT: ret void
7586 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142
7587 // CHECK13-SAME: (i64 noundef [[M:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
7588 // CHECK13-NEXT: entry:
7589 // CHECK13-NEXT: [[M_ADDR:%.*]] = alloca i64, align 8
7590 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
7591 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
7592 // CHECK13-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
7593 // CHECK13-NEXT: store i64 [[M]], ptr [[M_ADDR]], align 8
7594 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
7595 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
7596 // CHECK13-NEXT: [[TMP1:%.*]] = load i32, ptr [[M_ADDR]], align 4
7597 // CHECK13-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
7598 // CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
7599 // CHECK13-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
7600 // CHECK13-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8
7601 // CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.omp_outlined, ptr [[TMP0]], i64 [[TMP3]])
7602 // CHECK13-NEXT: ret void
7605 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.omp_outlined
7606 // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
7607 // CHECK13-NEXT: entry:
7608 // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
7609 // CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
7610 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
7611 // CHECK13-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
7612 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
7613 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
7614 // CHECK13-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
7615 // CHECK13-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
7616 // CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7617 // CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7618 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
7619 // CHECK13-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
7620 // CHECK13-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
7621 // CHECK13-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
7622 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
7623 // CHECK13-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
7624 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
7625 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
7626 // CHECK13-NEXT: store i32 9, ptr [[DOTOMP_COMB_UB]], align 4
7627 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
7628 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
7629 // CHECK13-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
7630 // CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
7631 // CHECK13-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
7632 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
7633 // CHECK13-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
7634 // CHECK13-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
7635 // CHECK13: cond.true:
7636 // CHECK13-NEXT: br label [[COND_END:%.*]]
7637 // CHECK13: cond.false:
7638 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
7639 // CHECK13-NEXT: br label [[COND_END]]
7640 // CHECK13: cond.end:
7641 // CHECK13-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
7642 // CHECK13-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
7643 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
7644 // CHECK13-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
7645 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
7646 // CHECK13: omp.inner.for.cond:
7647 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP70:![0-9]+]]
7648 // CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP70]]
7649 // CHECK13-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
7650 // CHECK13-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7651 // CHECK13: omp.inner.for.body:
7652 // CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP70]]
7653 // CHECK13-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
7654 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP70]]
7655 // CHECK13-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
7656 // CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group [[ACC_GRP70]]
7657 // CHECK13-NEXT: store i32 [[TMP12]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group [[ACC_GRP70]]
7658 // CHECK13-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group [[ACC_GRP70]]
7659 // CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]], ptr [[TMP0]], i64 [[TMP13]]), !llvm.access.group [[ACC_GRP70]]
7660 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
7661 // CHECK13: omp.inner.for.inc:
7662 // CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP70]]
7663 // CHECK13-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP70]]
7664 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
7665 // CHECK13-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP70]]
7666 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP71:![0-9]+]]
7667 // CHECK13: omp.inner.for.end:
7668 // CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
7669 // CHECK13: omp.loop.exit:
7670 // CHECK13-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
7671 // CHECK13-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
7672 // CHECK13-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
7673 // CHECK13-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7674 // CHECK13: .omp.final.then:
7675 // CHECK13-NEXT: store i32 10, ptr [[I]], align 4
7676 // CHECK13-NEXT: br label [[DOTOMP_FINAL_DONE]]
7677 // CHECK13: .omp.final.done:
7678 // CHECK13-NEXT: ret void
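//
// Note on the outlined function above: this is the teams/distribute level of the
// combined construct. The distribute loop is statically scheduled
// (__kmpc_for_static_init_4 with schedule id 92, the distribute-static kind in the
// runtime's encoding), its upper bound is clamped to the constant trip count of 9,
// and each iteration hands the 32-bit bounds, zero-extended to i64, to the parallel
// level through __kmpc_fork_call. A minimal sketch of that hand-off (helper names
// are illustrative only):
//
//   __kmpc_for_static_init_4(&loc, gtid, 92, &last, &lb, &ub, &stride, 1, 1);
//   if (ub > 9) ub = 9;                              // cond.true / cond.false / cond.end
//   for (int iv = lb; iv <= ub; iv += stride)        // omp.inner.for.*
//     __kmpc_fork_call(&loc, /*nargs=*/4, parallel_outlined,
//                      (long)lb, (long)ub, a, chunk_casted);
//   __kmpc_for_static_fini(&loc, gtid);
//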
7681 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.omp_outlined.omp_outlined
7682 // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
7683 // CHECK13-NEXT: entry:
7684 // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
7685 // CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
7686 // CHECK13-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
7687 // CHECK13-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
7688 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
7689 // CHECK13-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
7690 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
7691 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
7692 // CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
7693 // CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
7694 // CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7695 // CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7696 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
7697 // CHECK13-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
7698 // CHECK13-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
7699 // CHECK13-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
7700 // CHECK13-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
7701 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
7702 // CHECK13-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
7703 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
7704 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
7705 // CHECK13-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
7706 // CHECK13-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
7707 // CHECK13-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
7708 // CHECK13-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
7709 // CHECK13-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
7710 // CHECK13-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
7711 // CHECK13-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
7712 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
7713 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
7714 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
7715 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
7716 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
7717 // CHECK13-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
7718 // CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 4
7719 // CHECK13-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB3]], i32 [[TMP7]], i32 35, i32 [[TMP4]], i32 [[TMP5]], i32 1, i32 [[TMP3]])
7720 // CHECK13-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
7721 // CHECK13: omp.dispatch.cond:
7722 // CHECK13-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB3]], i32 [[TMP7]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
7723 // CHECK13-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP8]], 0
7724 // CHECK13-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
7725 // CHECK13: omp.dispatch.body:
7726 // CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
7727 // CHECK13-NEXT: store i32 [[TMP9]], ptr [[DOTOMP_IV]], align 4
7728 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
7729 // CHECK13: omp.inner.for.cond:
7730 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP73:![0-9]+]]
7731 // CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP73]]
7732 // CHECK13-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
7733 // CHECK13-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7734 // CHECK13: omp.inner.for.body:
7735 // CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP73]]
7736 // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
7737 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
7738 // CHECK13-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP73]]
7739 // CHECK13-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP73]]
7740 // CHECK13-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64
7741 // CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
7742 // CHECK13-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP73]]
7743 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
7744 // CHECK13: omp.body.continue:
7745 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
7746 // CHECK13: omp.inner.for.inc:
7747 // CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP73]]
7748 // CHECK13-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP14]], 1
7749 // CHECK13-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP73]]
7750 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP74:![0-9]+]]
7751 // CHECK13: omp.inner.for.end:
7752 // CHECK13-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
7753 // CHECK13: omp.dispatch.inc:
7754 // CHECK13-NEXT: br label [[OMP_DISPATCH_COND]]
7755 // CHECK13: omp.dispatch.end:
7756 // CHECK13-NEXT: call void @__kmpc_dispatch_deinit(ptr @[[GLOB3]], i32 [[TMP7]])
7757 // CHECK13-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
7758 // CHECK13-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0
7759 // CHECK13-NEXT: br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7760 // CHECK13: .omp.final.then:
7761 // CHECK13-NEXT: store i32 10, ptr [[I]], align 4
7762 // CHECK13-NEXT: br label [[DOTOMP_FINAL_DONE]]
7763 // CHECK13: .omp.final.done:
7764 // CHECK13-NEXT: ret void
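//
// Note on the innermost outlined bodies above: they exercise the runtime's
// dynamic-dispatch protocol for schedule(dynamic, chunk). The worker calls
// __kmpc_dispatch_init_4 with schedule id 35 (dynamic, chunked; the chunk is the
// captured expression), then repeatedly pulls work until __kmpc_dispatch_next_4
// returns 0, and finally calls __kmpc_dispatch_deinit. A minimal sketch of that
// driver loop, using only the entry points that appear in the checks:
//
//   __kmpc_dispatch_init_4(&loc, gtid, 35, lb, ub, /*stride=*/1, chunk);
//   while (__kmpc_dispatch_next_4(&loc, gtid, &last, &lb, &ub, &stride)) {
//     for (int iv = lb; iv <= ub; ++iv)              // omp.inner.for.*
//       a[iv] = 0;                                   // loop body from the checks above
//   }
//   __kmpc_dispatch_deinit(&loc, gtid);
//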
7767 // CHECK14-LABEL: define {{[^@]+}}@main
7768 // CHECK14-SAME: (i32 noundef signext [[ARGC:%.*]], ptr noundef [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] {
7769 // CHECK14-NEXT: entry:
7770 // CHECK14-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
7771 // CHECK14-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
7772 // CHECK14-NEXT: [[ARGV_ADDR:%.*]] = alloca ptr, align 8
7773 // CHECK14-NEXT: [[N:%.*]] = alloca i32, align 4
7774 // CHECK14-NEXT: [[SAVED_STACK:%.*]] = alloca ptr, align 8
7775 // CHECK14-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
7776 // CHECK14-NEXT: [[M:%.*]] = alloca i32, align 4
7777 // CHECK14-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
7778 // CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x ptr], align 8
7779 // CHECK14-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x ptr], align 8
7780 // CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x ptr], align 8
7781 // CHECK14-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8
7782 // CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
7783 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
7784 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
7785 // CHECK14-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
7786 // CHECK14-NEXT: [[N_CASTED3:%.*]] = alloca i64, align 8
7787 // CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [3 x ptr], align 8
7788 // CHECK14-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [3 x ptr], align 8
7789 // CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [3 x ptr], align 8
7790 // CHECK14-NEXT: [[DOTOFFLOAD_SIZES7:%.*]] = alloca [3 x i64], align 8
7791 // CHECK14-NEXT: [[_TMP8:%.*]] = alloca i32, align 4
7792 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4
7793 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4
7794 // CHECK14-NEXT: [[KERNEL_ARGS15:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
7795 // CHECK14-NEXT: [[M_CASTED:%.*]] = alloca i64, align 8
7796 // CHECK14-NEXT: [[N_CASTED18:%.*]] = alloca i64, align 8
7797 // CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS19:%.*]] = alloca [4 x ptr], align 8
7798 // CHECK14-NEXT: [[DOTOFFLOAD_PTRS20:%.*]] = alloca [4 x ptr], align 8
7799 // CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS21:%.*]] = alloca [4 x ptr], align 8
7800 // CHECK14-NEXT: [[DOTOFFLOAD_SIZES22:%.*]] = alloca [4 x i64], align 8
7801 // CHECK14-NEXT: [[_TMP23:%.*]] = alloca i32, align 4
7802 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4
7803 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4
7804 // CHECK14-NEXT: [[KERNEL_ARGS30:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
7805 // CHECK14-NEXT: [[N_CASTED33:%.*]] = alloca i64, align 8
7806 // CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS34:%.*]] = alloca [3 x ptr], align 8
7807 // CHECK14-NEXT: [[DOTOFFLOAD_PTRS35:%.*]] = alloca [3 x ptr], align 8
7808 // CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS36:%.*]] = alloca [3 x ptr], align 8
7809 // CHECK14-NEXT: [[DOTOFFLOAD_SIZES37:%.*]] = alloca [3 x i64], align 8
7810 // CHECK14-NEXT: [[_TMP38:%.*]] = alloca i32, align 4
7811 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4
7812 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_40:%.*]] = alloca i32, align 4
7813 // CHECK14-NEXT: [[KERNEL_ARGS45:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
7814 // CHECK14-NEXT: [[M_CASTED48:%.*]] = alloca i64, align 8
7815 // CHECK14-NEXT: [[N_CASTED49:%.*]] = alloca i64, align 8
7816 // CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS50:%.*]] = alloca [4 x ptr], align 8
7817 // CHECK14-NEXT: [[DOTOFFLOAD_PTRS51:%.*]] = alloca [4 x ptr], align 8
7818 // CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS52:%.*]] = alloca [4 x ptr], align 8
7819 // CHECK14-NEXT: [[DOTOFFLOAD_SIZES53:%.*]] = alloca [4 x i64], align 8
7820 // CHECK14-NEXT: [[_TMP54:%.*]] = alloca i32, align 4
7821 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_55:%.*]] = alloca i32, align 4
7822 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_56:%.*]] = alloca i32, align 4
7823 // CHECK14-NEXT: [[KERNEL_ARGS61:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
7824 // CHECK14-NEXT: store i32 0, ptr [[RETVAL]], align 4
7825 // CHECK14-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4
7826 // CHECK14-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8
7827 // CHECK14-NEXT: store i32 100, ptr [[N]], align 4
7828 // CHECK14-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4
7829 // CHECK14-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
7830 // CHECK14-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0()
7831 // CHECK14-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8
7832 // CHECK14-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4
7833 // CHECK14-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8
7834 // CHECK14-NEXT: store i32 10, ptr [[M]], align 4
7835 // CHECK14-NEXT: [[TMP3:%.*]] = load i32, ptr [[N]], align 4
7836 // CHECK14-NEXT: store i32 [[TMP3]], ptr [[N_CASTED]], align 4
7837 // CHECK14-NEXT: [[TMP4:%.*]] = load i64, ptr [[N_CASTED]], align 8
7838 // CHECK14-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP1]], 4
7839 // CHECK14-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[DOTOFFLOAD_SIZES]], ptr align 8 @.offload_sizes, i64 24, i1 false)
7840 // CHECK14-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
7841 // CHECK14-NEXT: store i64 [[TMP4]], ptr [[TMP6]], align 8
7842 // CHECK14-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
7843 // CHECK14-NEXT: store i64 [[TMP4]], ptr [[TMP7]], align 8
7844 // CHECK14-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
7845 // CHECK14-NEXT: store ptr null, ptr [[TMP8]], align 8
7846 // CHECK14-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
7847 // CHECK14-NEXT: store i64 [[TMP1]], ptr [[TMP9]], align 8
7848 // CHECK14-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1
7849 // CHECK14-NEXT: store i64 [[TMP1]], ptr [[TMP10]], align 8
7850 // CHECK14-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
7851 // CHECK14-NEXT: store ptr null, ptr [[TMP11]], align 8
7852 // CHECK14-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
7853 // CHECK14-NEXT: store ptr [[VLA]], ptr [[TMP12]], align 8
7854 // CHECK14-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2
7855 // CHECK14-NEXT: store ptr [[VLA]], ptr [[TMP13]], align 8
7856 // CHECK14-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 2
7857 // CHECK14-NEXT: store i64 [[TMP5]], ptr [[TMP14]], align 8
7858 // CHECK14-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
7859 // CHECK14-NEXT: store ptr null, ptr [[TMP15]], align 8
7860 // CHECK14-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
7861 // CHECK14-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
7862 // CHECK14-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 0
7863 // CHECK14-NEXT: [[TMP19:%.*]] = load i32, ptr [[N]], align 4
7864 // CHECK14-NEXT: store i32 [[TMP19]], ptr [[DOTCAPTURE_EXPR_]], align 4
7865 // CHECK14-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
7866 // CHECK14-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP20]], 0
7867 // CHECK14-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
7868 // CHECK14-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
7869 // CHECK14-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
7870 // CHECK14-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
7871 // CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP21]], 1
7872 // CHECK14-NEXT: [[TMP22:%.*]] = zext i32 [[ADD]] to i64
7873 // CHECK14-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
7874 // CHECK14-NEXT: store i32 3, ptr [[TMP23]], align 4
7875 // CHECK14-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
7876 // CHECK14-NEXT: store i32 3, ptr [[TMP24]], align 4
7877 // CHECK14-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
7878 // CHECK14-NEXT: store ptr [[TMP16]], ptr [[TMP25]], align 8
7879 // CHECK14-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
7880 // CHECK14-NEXT: store ptr [[TMP17]], ptr [[TMP26]], align 8
7881 // CHECK14-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
7882 // CHECK14-NEXT: store ptr [[TMP18]], ptr [[TMP27]], align 8
7883 // CHECK14-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
7884 // CHECK14-NEXT: store ptr @.offload_maptypes, ptr [[TMP28]], align 8
7885 // CHECK14-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
7886 // CHECK14-NEXT: store ptr null, ptr [[TMP29]], align 8
7887 // CHECK14-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
7888 // CHECK14-NEXT: store ptr null, ptr [[TMP30]], align 8
7889 // CHECK14-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
7890 // CHECK14-NEXT: store i64 [[TMP22]], ptr [[TMP31]], align 8
7891 // CHECK14-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
7892 // CHECK14-NEXT: store i64 0, ptr [[TMP32]], align 8
7893 // CHECK14-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
7894 // CHECK14-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP33]], align 4
7895 // CHECK14-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
7896 // CHECK14-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP34]], align 4
7897 // CHECK14-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
7898 // CHECK14-NEXT: store i32 0, ptr [[TMP35]], align 4
7899 // CHECK14-NEXT: [[TMP36:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.region_id, ptr [[KERNEL_ARGS]])
7900 // CHECK14-NEXT: [[TMP37:%.*]] = icmp ne i32 [[TMP36]], 0
7901 // CHECK14-NEXT: br i1 [[TMP37]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
7902 // CHECK14: omp_offload.failed:
7903 // CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154(i64 [[TMP4]], i64 [[TMP1]], ptr [[VLA]]) #[[ATTR3:[0-9]+]]
7904 // CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT]]
7905 // CHECK14: omp_offload.cont:
7906 // CHECK14-NEXT: [[TMP38:%.*]] = load i32, ptr [[N]], align 4
7907 // CHECK14-NEXT: store i32 [[TMP38]], ptr [[N_CASTED3]], align 4
7908 // CHECK14-NEXT: [[TMP39:%.*]] = load i64, ptr [[N_CASTED3]], align 8
7909 // CHECK14-NEXT: [[TMP40:%.*]] = mul nuw i64 [[TMP1]], 4
7910 // CHECK14-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[DOTOFFLOAD_SIZES7]], ptr align 8 @.offload_sizes.1, i64 24, i1 false)
7911 // CHECK14-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
7912 // CHECK14-NEXT: store i64 [[TMP39]], ptr [[TMP41]], align 8
7913 // CHECK14-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
7914 // CHECK14-NEXT: store i64 [[TMP39]], ptr [[TMP42]], align 8
7915 // CHECK14-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS6]], i64 0, i64 0
7916 // CHECK14-NEXT: store ptr null, ptr [[TMP43]], align 8
7917 // CHECK14-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1
7918 // CHECK14-NEXT: store i64 [[TMP1]], ptr [[TMP44]], align 8
7919 // CHECK14-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS5]], i32 0, i32 1
7920 // CHECK14-NEXT: store i64 [[TMP1]], ptr [[TMP45]], align 8
7921 // CHECK14-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS6]], i64 0, i64 1
7922 // CHECK14-NEXT: store ptr null, ptr [[TMP46]], align 8
7923 // CHECK14-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2
7924 // CHECK14-NEXT: store ptr [[VLA]], ptr [[TMP47]], align 8
7925 // CHECK14-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS5]], i32 0, i32 2
7926 // CHECK14-NEXT: store ptr [[VLA]], ptr [[TMP48]], align 8
7927 // CHECK14-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES7]], i32 0, i32 2
7928 // CHECK14-NEXT: store i64 [[TMP40]], ptr [[TMP49]], align 8
7929 // CHECK14-NEXT: [[TMP50:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS6]], i64 0, i64 2
7930 // CHECK14-NEXT: store ptr null, ptr [[TMP50]], align 8
7931 // CHECK14-NEXT: [[TMP51:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
7932 // CHECK14-NEXT: [[TMP52:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
7933 // CHECK14-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES7]], i32 0, i32 0
7934 // CHECK14-NEXT: [[TMP54:%.*]] = load i32, ptr [[N]], align 4
7935 // CHECK14-NEXT: store i32 [[TMP54]], ptr [[DOTCAPTURE_EXPR_9]], align 4
7936 // CHECK14-NEXT: [[TMP55:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_9]], align 4
7937 // CHECK14-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP55]], 0
7938 // CHECK14-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1
7939 // CHECK14-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1
7940 // CHECK14-NEXT: store i32 [[SUB13]], ptr [[DOTCAPTURE_EXPR_10]], align 4
7941 // CHECK14-NEXT: [[TMP56:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_10]], align 4
7942 // CHECK14-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP56]], 1
7943 // CHECK14-NEXT: [[TMP57:%.*]] = zext i32 [[ADD14]] to i64
7944 // CHECK14-NEXT: [[TMP58:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 0
7945 // CHECK14-NEXT: store i32 3, ptr [[TMP58]], align 4
7946 // CHECK14-NEXT: [[TMP59:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 1
7947 // CHECK14-NEXT: store i32 3, ptr [[TMP59]], align 4
7948 // CHECK14-NEXT: [[TMP60:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 2
7949 // CHECK14-NEXT: store ptr [[TMP51]], ptr [[TMP60]], align 8
7950 // CHECK14-NEXT: [[TMP61:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 3
7951 // CHECK14-NEXT: store ptr [[TMP52]], ptr [[TMP61]], align 8
7952 // CHECK14-NEXT: [[TMP62:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 4
7953 // CHECK14-NEXT: store ptr [[TMP53]], ptr [[TMP62]], align 8
7954 // CHECK14-NEXT: [[TMP63:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 5
7955 // CHECK14-NEXT: store ptr @.offload_maptypes.2, ptr [[TMP63]], align 8
7956 // CHECK14-NEXT: [[TMP64:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 6
7957 // CHECK14-NEXT: store ptr null, ptr [[TMP64]], align 8
7958 // CHECK14-NEXT: [[TMP65:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 7
7959 // CHECK14-NEXT: store ptr null, ptr [[TMP65]], align 8
7960 // CHECK14-NEXT: [[TMP66:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 8
7961 // CHECK14-NEXT: store i64 [[TMP57]], ptr [[TMP66]], align 8
7962 // CHECK14-NEXT: [[TMP67:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 9
7963 // CHECK14-NEXT: store i64 0, ptr [[TMP67]], align 8
7964 // CHECK14-NEXT: [[TMP68:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 10
7965 // CHECK14-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP68]], align 4
7966 // CHECK14-NEXT: [[TMP69:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 11
7967 // CHECK14-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP69]], align 4
7968 // CHECK14-NEXT: [[TMP70:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 12
7969 // CHECK14-NEXT: store i32 0, ptr [[TMP70]], align 4
7970 // CHECK14-NEXT: [[TMP71:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.region_id, ptr [[KERNEL_ARGS15]])
7971 // CHECK14-NEXT: [[TMP72:%.*]] = icmp ne i32 [[TMP71]], 0
7972 // CHECK14-NEXT: br i1 [[TMP72]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]]
7973 // CHECK14: omp_offload.failed16:
7974 // CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159(i64 [[TMP39]], i64 [[TMP1]], ptr [[VLA]]) #[[ATTR3]]
7975 // CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT17]]
7976 // CHECK14: omp_offload.cont17:
7977 // CHECK14-NEXT: [[TMP73:%.*]] = load i32, ptr [[M]], align 4
7978 // CHECK14-NEXT: store i32 [[TMP73]], ptr [[M_CASTED]], align 4
7979 // CHECK14-NEXT: [[TMP74:%.*]] = load i64, ptr [[M_CASTED]], align 8
7980 // CHECK14-NEXT: [[TMP75:%.*]] = load i32, ptr [[N]], align 4
7981 // CHECK14-NEXT: store i32 [[TMP75]], ptr [[N_CASTED18]], align 4
7982 // CHECK14-NEXT: [[TMP76:%.*]] = load i64, ptr [[N_CASTED18]], align 8
7983 // CHECK14-NEXT: [[TMP77:%.*]] = mul nuw i64 [[TMP1]], 4
7984 // CHECK14-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[DOTOFFLOAD_SIZES22]], ptr align 8 @.offload_sizes.3, i64 32, i1 false)
7985 // CHECK14-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0
7986 // CHECK14-NEXT: store i64 [[TMP74]], ptr [[TMP78]], align 8
7987 // CHECK14-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 0
7988 // CHECK14-NEXT: store i64 [[TMP74]], ptr [[TMP79]], align 8
7989 // CHECK14-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS21]], i64 0, i64 0
7990 // CHECK14-NEXT: store ptr null, ptr [[TMP80]], align 8
7991 // CHECK14-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 1
7992 // CHECK14-NEXT: store i64 [[TMP76]], ptr [[TMP81]], align 8
7993 // CHECK14-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 1
7994 // CHECK14-NEXT: store i64 [[TMP76]], ptr [[TMP82]], align 8
7995 // CHECK14-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS21]], i64 0, i64 1
7996 // CHECK14-NEXT: store ptr null, ptr [[TMP83]], align 8
7997 // CHECK14-NEXT: [[TMP84:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 2
7998 // CHECK14-NEXT: store i64 [[TMP1]], ptr [[TMP84]], align 8
7999 // CHECK14-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 2
8000 // CHECK14-NEXT: store i64 [[TMP1]], ptr [[TMP85]], align 8
8001 // CHECK14-NEXT: [[TMP86:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS21]], i64 0, i64 2
8002 // CHECK14-NEXT: store ptr null, ptr [[TMP86]], align 8
8003 // CHECK14-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 3
8004 // CHECK14-NEXT: store ptr [[VLA]], ptr [[TMP87]], align 8
8005 // CHECK14-NEXT: [[TMP88:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 3
8006 // CHECK14-NEXT: store ptr [[VLA]], ptr [[TMP88]], align 8
8007 // CHECK14-NEXT: [[TMP89:%.*]] = getelementptr inbounds [4 x i64], ptr [[DOTOFFLOAD_SIZES22]], i32 0, i32 3
8008 // CHECK14-NEXT: store i64 [[TMP77]], ptr [[TMP89]], align 8
8009 // CHECK14-NEXT: [[TMP90:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS21]], i64 0, i64 3
8010 // CHECK14-NEXT: store ptr null, ptr [[TMP90]], align 8
8011 // CHECK14-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0
8012 // CHECK14-NEXT: [[TMP92:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 0
8013 // CHECK14-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i64], ptr [[DOTOFFLOAD_SIZES22]], i32 0, i32 0
8014 // CHECK14-NEXT: [[TMP94:%.*]] = load i32, ptr [[N]], align 4
8015 // CHECK14-NEXT: store i32 [[TMP94]], ptr [[DOTCAPTURE_EXPR_24]], align 4
8016 // CHECK14-NEXT: [[TMP95:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_24]], align 4
8017 // CHECK14-NEXT: [[SUB26:%.*]] = sub nsw i32 [[TMP95]], 0
8018 // CHECK14-NEXT: [[DIV27:%.*]] = sdiv i32 [[SUB26]], 1
8019 // CHECK14-NEXT: [[SUB28:%.*]] = sub nsw i32 [[DIV27]], 1
8020 // CHECK14-NEXT: store i32 [[SUB28]], ptr [[DOTCAPTURE_EXPR_25]], align 4
8021 // CHECK14-NEXT: [[TMP96:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_25]], align 4
8022 // CHECK14-NEXT: [[ADD29:%.*]] = add nsw i32 [[TMP96]], 1
8023 // CHECK14-NEXT: [[TMP97:%.*]] = zext i32 [[ADD29]] to i64
8024 // CHECK14-NEXT: [[TMP98:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 0
8025 // CHECK14-NEXT: store i32 3, ptr [[TMP98]], align 4
8026 // CHECK14-NEXT: [[TMP99:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 1
8027 // CHECK14-NEXT: store i32 4, ptr [[TMP99]], align 4
8028 // CHECK14-NEXT: [[TMP100:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 2
8029 // CHECK14-NEXT: store ptr [[TMP91]], ptr [[TMP100]], align 8
8030 // CHECK14-NEXT: [[TMP101:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 3
8031 // CHECK14-NEXT: store ptr [[TMP92]], ptr [[TMP101]], align 8
8032 // CHECK14-NEXT: [[TMP102:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 4
8033 // CHECK14-NEXT: store ptr [[TMP93]], ptr [[TMP102]], align 8
8034 // CHECK14-NEXT: [[TMP103:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 5
8035 // CHECK14-NEXT: store ptr @.offload_maptypes.4, ptr [[TMP103]], align 8
8036 // CHECK14-NEXT: [[TMP104:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 6
8037 // CHECK14-NEXT: store ptr null, ptr [[TMP104]], align 8
8038 // CHECK14-NEXT: [[TMP105:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 7
8039 // CHECK14-NEXT: store ptr null, ptr [[TMP105]], align 8
8040 // CHECK14-NEXT: [[TMP106:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 8
8041 // CHECK14-NEXT: store i64 [[TMP97]], ptr [[TMP106]], align 8
8042 // CHECK14-NEXT: [[TMP107:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 9
8043 // CHECK14-NEXT: store i64 0, ptr [[TMP107]], align 8
8044 // CHECK14-NEXT: [[TMP108:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 10
8045 // CHECK14-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP108]], align 4
8046 // CHECK14-NEXT: [[TMP109:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 11
8047 // CHECK14-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP109]], align 4
8048 // CHECK14-NEXT: [[TMP110:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 12
8049 // CHECK14-NEXT: store i32 0, ptr [[TMP110]], align 4
8050 // CHECK14-NEXT: [[TMP111:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.region_id, ptr [[KERNEL_ARGS30]])
8051 // CHECK14-NEXT: [[TMP112:%.*]] = icmp ne i32 [[TMP111]], 0
8052 // CHECK14-NEXT: br i1 [[TMP112]], label [[OMP_OFFLOAD_FAILED31:%.*]], label [[OMP_OFFLOAD_CONT32:%.*]]
8053 // CHECK14: omp_offload.failed31:
8054 // CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164(i64 [[TMP74]], i64 [[TMP76]], i64 [[TMP1]], ptr [[VLA]]) #[[ATTR3]]
8055 // CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT32]]
8056 // CHECK14: omp_offload.cont32:
8057 // CHECK14-NEXT: [[TMP113:%.*]] = load i32, ptr [[N]], align 4
8058 // CHECK14-NEXT: store i32 [[TMP113]], ptr [[N_CASTED33]], align 4
8059 // CHECK14-NEXT: [[TMP114:%.*]] = load i64, ptr [[N_CASTED33]], align 8
8060 // CHECK14-NEXT: [[TMP115:%.*]] = mul nuw i64 [[TMP1]], 4
8061 // CHECK14-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[DOTOFFLOAD_SIZES37]], ptr align 8 @.offload_sizes.5, i64 24, i1 false)
8062 // CHECK14-NEXT: [[TMP116:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 0
8063 // CHECK14-NEXT: store i64 [[TMP114]], ptr [[TMP116]], align 8
8064 // CHECK14-NEXT: [[TMP117:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS35]], i32 0, i32 0
8065 // CHECK14-NEXT: store i64 [[TMP114]], ptr [[TMP117]], align 8
8066 // CHECK14-NEXT: [[TMP118:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 0
8067 // CHECK14-NEXT: store ptr null, ptr [[TMP118]], align 8
8068 // CHECK14-NEXT: [[TMP119:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 1
8069 // CHECK14-NEXT: store i64 [[TMP1]], ptr [[TMP119]], align 8
8070 // CHECK14-NEXT: [[TMP120:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS35]], i32 0, i32 1
8071 // CHECK14-NEXT: store i64 [[TMP1]], ptr [[TMP120]], align 8
8072 // CHECK14-NEXT: [[TMP121:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 1
8073 // CHECK14-NEXT: store ptr null, ptr [[TMP121]], align 8
8074 // CHECK14-NEXT: [[TMP122:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 2
8075 // CHECK14-NEXT: store ptr [[VLA]], ptr [[TMP122]], align 8
8076 // CHECK14-NEXT: [[TMP123:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS35]], i32 0, i32 2
8077 // CHECK14-NEXT: store ptr [[VLA]], ptr [[TMP123]], align 8
8078 // CHECK14-NEXT: [[TMP124:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES37]], i32 0, i32 2
8079 // CHECK14-NEXT: store i64 [[TMP115]], ptr [[TMP124]], align 8
8080 // CHECK14-NEXT: [[TMP125:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 2
8081 // CHECK14-NEXT: store ptr null, ptr [[TMP125]], align 8
8082 // CHECK14-NEXT: [[TMP126:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 0
8083 // CHECK14-NEXT: [[TMP127:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS35]], i32 0, i32 0
8084 // CHECK14-NEXT: [[TMP128:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES37]], i32 0, i32 0
8085 // CHECK14-NEXT: [[TMP129:%.*]] = load i32, ptr [[N]], align 4
8086 // CHECK14-NEXT: store i32 [[TMP129]], ptr [[DOTCAPTURE_EXPR_39]], align 4
8087 // CHECK14-NEXT: [[TMP130:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_39]], align 4
8088 // CHECK14-NEXT: [[SUB41:%.*]] = sub nsw i32 [[TMP130]], 0
8089 // CHECK14-NEXT: [[DIV42:%.*]] = sdiv i32 [[SUB41]], 1
8090 // CHECK14-NEXT: [[SUB43:%.*]] = sub nsw i32 [[DIV42]], 1
8091 // CHECK14-NEXT: store i32 [[SUB43]], ptr [[DOTCAPTURE_EXPR_40]], align 4
8092 // CHECK14-NEXT: [[TMP131:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_40]], align 4
8093 // CHECK14-NEXT: [[ADD44:%.*]] = add nsw i32 [[TMP131]], 1
8094 // CHECK14-NEXT: [[TMP132:%.*]] = zext i32 [[ADD44]] to i64
8095 // CHECK14-NEXT: [[TMP133:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 0
8096 // CHECK14-NEXT: store i32 3, ptr [[TMP133]], align 4
8097 // CHECK14-NEXT: [[TMP134:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 1
8098 // CHECK14-NEXT: store i32 3, ptr [[TMP134]], align 4
8099 // CHECK14-NEXT: [[TMP135:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 2
8100 // CHECK14-NEXT: store ptr [[TMP126]], ptr [[TMP135]], align 8
8101 // CHECK14-NEXT: [[TMP136:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 3
8102 // CHECK14-NEXT: store ptr [[TMP127]], ptr [[TMP136]], align 8
8103 // CHECK14-NEXT: [[TMP137:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 4
8104 // CHECK14-NEXT: store ptr [[TMP128]], ptr [[TMP137]], align 8
8105 // CHECK14-NEXT: [[TMP138:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 5
8106 // CHECK14-NEXT: store ptr @.offload_maptypes.6, ptr [[TMP138]], align 8
8107 // CHECK14-NEXT: [[TMP139:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 6
8108 // CHECK14-NEXT: store ptr null, ptr [[TMP139]], align 8
8109 // CHECK14-NEXT: [[TMP140:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 7
8110 // CHECK14-NEXT: store ptr null, ptr [[TMP140]], align 8
8111 // CHECK14-NEXT: [[TMP141:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 8
8112 // CHECK14-NEXT: store i64 [[TMP132]], ptr [[TMP141]], align 8
8113 // CHECK14-NEXT: [[TMP142:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 9
8114 // CHECK14-NEXT: store i64 0, ptr [[TMP142]], align 8
8115 // CHECK14-NEXT: [[TMP143:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 10
8116 // CHECK14-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP143]], align 4
8117 // CHECK14-NEXT: [[TMP144:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 11
8118 // CHECK14-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP144]], align 4
8119 // CHECK14-NEXT: [[TMP145:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 12
8120 // CHECK14-NEXT: store i32 0, ptr [[TMP145]], align 4
8121 // CHECK14-NEXT: [[TMP146:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.region_id, ptr [[KERNEL_ARGS45]])
8122 // CHECK14-NEXT: [[TMP147:%.*]] = icmp ne i32 [[TMP146]], 0
8123 // CHECK14-NEXT: br i1 [[TMP147]], label [[OMP_OFFLOAD_FAILED46:%.*]], label [[OMP_OFFLOAD_CONT47:%.*]]
8124 // CHECK14: omp_offload.failed46:
8125 // CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169(i64 [[TMP114]], i64 [[TMP1]], ptr [[VLA]]) #[[ATTR3]]
8126 // CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT47]]
8127 // CHECK14: omp_offload.cont47:
8128 // CHECK14-NEXT: [[TMP148:%.*]] = load i32, ptr [[M]], align 4
8129 // CHECK14-NEXT: store i32 [[TMP148]], ptr [[M_CASTED48]], align 4
8130 // CHECK14-NEXT: [[TMP149:%.*]] = load i64, ptr [[M_CASTED48]], align 8
8131 // CHECK14-NEXT: [[TMP150:%.*]] = load i32, ptr [[N]], align 4
8132 // CHECK14-NEXT: store i32 [[TMP150]], ptr [[N_CASTED49]], align 4
8133 // CHECK14-NEXT: [[TMP151:%.*]] = load i64, ptr [[N_CASTED49]], align 8
8134 // CHECK14-NEXT: [[TMP152:%.*]] = mul nuw i64 [[TMP1]], 4
8135 // CHECK14-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[DOTOFFLOAD_SIZES53]], ptr align 8 @.offload_sizes.7, i64 32, i1 false)
8136 // CHECK14-NEXT: [[TMP153:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0
8137 // CHECK14-NEXT: store i64 [[TMP149]], ptr [[TMP153]], align 8
8138 // CHECK14-NEXT: [[TMP154:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS51]], i32 0, i32 0
8139 // CHECK14-NEXT: store i64 [[TMP149]], ptr [[TMP154]], align 8
8140 // CHECK14-NEXT: [[TMP155:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 0
8141 // CHECK14-NEXT: store ptr null, ptr [[TMP155]], align 8
8142 // CHECK14-NEXT: [[TMP156:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 1
8143 // CHECK14-NEXT: store i64 [[TMP151]], ptr [[TMP156]], align 8
8144 // CHECK14-NEXT: [[TMP157:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS51]], i32 0, i32 1
8145 // CHECK14-NEXT: store i64 [[TMP151]], ptr [[TMP157]], align 8
8146 // CHECK14-NEXT: [[TMP158:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 1
8147 // CHECK14-NEXT: store ptr null, ptr [[TMP158]], align 8
8148 // CHECK14-NEXT: [[TMP159:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 2
8149 // CHECK14-NEXT: store i64 [[TMP1]], ptr [[TMP159]], align 8
8150 // CHECK14-NEXT: [[TMP160:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS51]], i32 0, i32 2
8151 // CHECK14-NEXT: store i64 [[TMP1]], ptr [[TMP160]], align 8
8152 // CHECK14-NEXT: [[TMP161:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 2
8153 // CHECK14-NEXT: store ptr null, ptr [[TMP161]], align 8
8154 // CHECK14-NEXT: [[TMP162:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 3
8155 // CHECK14-NEXT: store ptr [[VLA]], ptr [[TMP162]], align 8
8156 // CHECK14-NEXT: [[TMP163:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS51]], i32 0, i32 3
8157 // CHECK14-NEXT: store ptr [[VLA]], ptr [[TMP163]], align 8
8158 // CHECK14-NEXT: [[TMP164:%.*]] = getelementptr inbounds [4 x i64], ptr [[DOTOFFLOAD_SIZES53]], i32 0, i32 3
8159 // CHECK14-NEXT: store i64 [[TMP152]], ptr [[TMP164]], align 8
8160 // CHECK14-NEXT: [[TMP165:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 3
8161 // CHECK14-NEXT: store ptr null, ptr [[TMP165]], align 8
8162 // CHECK14-NEXT: [[TMP166:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0
8163 // CHECK14-NEXT: [[TMP167:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS51]], i32 0, i32 0
8164 // CHECK14-NEXT: [[TMP168:%.*]] = getelementptr inbounds [4 x i64], ptr [[DOTOFFLOAD_SIZES53]], i32 0, i32 0
8165 // CHECK14-NEXT: [[TMP169:%.*]] = load i32, ptr [[N]], align 4
8166 // CHECK14-NEXT: store i32 [[TMP169]], ptr [[DOTCAPTURE_EXPR_55]], align 4
8167 // CHECK14-NEXT: [[TMP170:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_55]], align 4
8168 // CHECK14-NEXT: [[SUB57:%.*]] = sub nsw i32 [[TMP170]], 0
8169 // CHECK14-NEXT: [[DIV58:%.*]] = sdiv i32 [[SUB57]], 1
8170 // CHECK14-NEXT: [[SUB59:%.*]] = sub nsw i32 [[DIV58]], 1
8171 // CHECK14-NEXT: store i32 [[SUB59]], ptr [[DOTCAPTURE_EXPR_56]], align 4
8172 // CHECK14-NEXT: [[TMP171:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_56]], align 4
8173 // CHECK14-NEXT: [[ADD60:%.*]] = add nsw i32 [[TMP171]], 1
8174 // CHECK14-NEXT: [[TMP172:%.*]] = zext i32 [[ADD60]] to i64
8175 // CHECK14-NEXT: [[TMP173:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 0
8176 // CHECK14-NEXT: store i32 3, ptr [[TMP173]], align 4
8177 // CHECK14-NEXT: [[TMP174:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 1
8178 // CHECK14-NEXT: store i32 4, ptr [[TMP174]], align 4
8179 // CHECK14-NEXT: [[TMP175:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 2
8180 // CHECK14-NEXT: store ptr [[TMP166]], ptr [[TMP175]], align 8
8181 // CHECK14-NEXT: [[TMP176:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 3
8182 // CHECK14-NEXT: store ptr [[TMP167]], ptr [[TMP176]], align 8
8183 // CHECK14-NEXT: [[TMP177:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 4
8184 // CHECK14-NEXT: store ptr [[TMP168]], ptr [[TMP177]], align 8
8185 // CHECK14-NEXT: [[TMP178:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 5
8186 // CHECK14-NEXT: store ptr @.offload_maptypes.8, ptr [[TMP178]], align 8
8187 // CHECK14-NEXT: [[TMP179:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 6
8188 // CHECK14-NEXT: store ptr null, ptr [[TMP179]], align 8
8189 // CHECK14-NEXT: [[TMP180:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 7
8190 // CHECK14-NEXT: store ptr null, ptr [[TMP180]], align 8
8191 // CHECK14-NEXT: [[TMP181:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 8
8192 // CHECK14-NEXT: store i64 [[TMP172]], ptr [[TMP181]], align 8
8193 // CHECK14-NEXT: [[TMP182:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 9
8194 // CHECK14-NEXT: store i64 0, ptr [[TMP182]], align 8
8195 // CHECK14-NEXT: [[TMP183:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 10
8196 // CHECK14-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP183]], align 4
8197 // CHECK14-NEXT: [[TMP184:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 11
8198 // CHECK14-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP184]], align 4
8199 // CHECK14-NEXT: [[TMP185:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 12
8200 // CHECK14-NEXT: store i32 0, ptr [[TMP185]], align 4
8201 // CHECK14-NEXT: [[TMP186:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.region_id, ptr [[KERNEL_ARGS61]])
8202 // CHECK14-NEXT: [[TMP187:%.*]] = icmp ne i32 [[TMP186]], 0
8203 // CHECK14-NEXT: br i1 [[TMP187]], label [[OMP_OFFLOAD_FAILED62:%.*]], label [[OMP_OFFLOAD_CONT63:%.*]]
8204 // CHECK14: omp_offload.failed62:
8205 // CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174(i64 [[TMP149]], i64 [[TMP151]], i64 [[TMP1]], ptr [[VLA]]) #[[ATTR3]]
8206 // CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT63]]
8207 // CHECK14: omp_offload.cont63:
8208 // CHECK14-NEXT: [[TMP188:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4
8209 // CHECK14-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP188]])
8210 // CHECK14-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4
8211 // CHECK14-NEXT: [[TMP189:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8
8212 // CHECK14-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP189]])
8213 // CHECK14-NEXT: [[TMP190:%.*]] = load i32, ptr [[RETVAL]], align 4
8214 // CHECK14-NEXT: ret i32 [[TMP190]]
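//
// Note on @main above: each of the five target regions follows the same host launch
// protocol. The captured values are packed into parallel base-pointer / pointer /
// size / mapper arrays, a __tgt_kernel_arguments record is filled field by field
// (version 3, the argument count, pointers to those arrays, the map-type table, and
// the computed trip count), and __tgt_target_kernel is invoked; a non-zero return
// selects the host fallback entry. A compressed sketch of that pattern (field and
// helper names are abbreviations of what the stores above spell out, not exact
// declarations):
//
//   args.Version = 3;  args.NumArgs = 3 /* or 4 when m is also captured */;
//   args.BasePtrs = baseptrs;  args.Ptrs = ptrs;
//   args.Sizes = sizes;        args.MapTypes = maptypes;
//   args.Tripcount = ((n - 0) / 1 - 1) + 1;          // the zext'd ADD stored above
//   if (__tgt_target_kernel(&loc, /*device=*/-1, /*teams=*/0, /*threads=*/0,
//                           &region_id, &args))
//     host_fallback(n_casted, vla_size, vla);        // __omp_offloading_..._main_lNNN
//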
8217 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154
8218 // CHECK14-SAME: (i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2:[0-9]+]] {
8219 // CHECK14-NEXT: entry:
8220 // CHECK14-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
8221 // CHECK14-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
8222 // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
8223 // CHECK14-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
8224 // CHECK14-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
8225 // CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
8226 // CHECK14-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
8227 // CHECK14-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8
8228 // CHECK14-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.omp_outlined, ptr [[N_ADDR]], i64 [[TMP0]], ptr [[TMP1]])
8229 // CHECK14-NEXT: ret void
8232 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.omp_outlined
8233 // CHECK14-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
8234 // CHECK14-NEXT: entry:
8235 // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
8236 // CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
8237 // CHECK14-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 8
8238 // CHECK14-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
8239 // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
8240 // CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
8241 // CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
8242 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
8243 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
8244 // CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
8245 // CHECK14-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
8246 // CHECK14-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
8247 // CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
8248 // CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
8249 // CHECK14-NEXT: [[I3:%.*]] = alloca i32, align 4
8250 // CHECK14-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
8251 // CHECK14-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
8252 // CHECK14-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 8
8253 // CHECK14-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
8254 // CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
8255 // CHECK14-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 8
8256 // CHECK14-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
8257 // CHECK14-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 8
8258 // CHECK14-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
8259 // CHECK14-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4
8260 // CHECK14-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
8261 // CHECK14-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
8262 // CHECK14-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
8263 // CHECK14-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
8264 // CHECK14-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
8265 // CHECK14-NEXT: store i32 0, ptr [[I]], align 4
8266 // CHECK14-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
8267 // CHECK14-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
8268 // CHECK14-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
8269 // CHECK14: omp.precond.then:
8270 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
8271 // CHECK14-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
8272 // CHECK14-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_COMB_UB]], align 4
8273 // CHECK14-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
8274 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
8275 // CHECK14-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
8276 // CHECK14-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
8277 // CHECK14-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
8278 // CHECK14-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
8279 // CHECK14-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
8280 // CHECK14-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
8281 // CHECK14-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
8282 // CHECK14: cond.true:
8283 // CHECK14-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
8284 // CHECK14-NEXT: br label [[COND_END:%.*]]
8285 // CHECK14: cond.false:
8286 // CHECK14-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
8287 // CHECK14-NEXT: br label [[COND_END]]
8288 // CHECK14: cond.end:
8289 // CHECK14-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
8290 // CHECK14-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
8291 // CHECK14-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
8292 // CHECK14-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4
8293 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
8294 // CHECK14: omp.inner.for.cond:
8295 // CHECK14-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]]
8296 // CHECK14-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP13]]
8297 // CHECK14-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
8298 // CHECK14-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8299 // CHECK14: omp.inner.for.body:
8300 // CHECK14-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP13]]
8301 // CHECK14-NEXT: [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
8302 // CHECK14-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP13]]
8303 // CHECK14-NEXT: [[TMP19:%.*]] = zext i32 [[TMP18]] to i64
8304 // CHECK14-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 5, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.omp_outlined.omp_outlined, i64 [[TMP17]], i64 [[TMP19]], ptr [[TMP0]], i64 [[TMP1]], ptr [[TMP2]]), !llvm.access.group [[ACC_GRP13]]
8305 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
8306 // CHECK14: omp.inner.for.inc:
8307 // CHECK14-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]]
8308 // CHECK14-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP13]]
8309 // CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
8310 // CHECK14-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]]
8311 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]]
8312 // CHECK14: omp.inner.for.end:
8313 // CHECK14-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
8314 // CHECK14: omp.loop.exit:
8315 // CHECK14-NEXT: [[TMP22:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
8316 // CHECK14-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4
8317 // CHECK14-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP23]])
8318 // CHECK14-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
8319 // CHECK14-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
8320 // CHECK14-NEXT: br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
8321 // CHECK14: .omp.final.then:
8322 // CHECK14-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
8323 // CHECK14-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP26]], 0
8324 // CHECK14-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
8325 // CHECK14-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
8326 // CHECK14-NEXT: [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
8327 // CHECK14-NEXT: store i32 [[ADD8]], ptr [[I3]], align 4
8328 // CHECK14-NEXT: br label [[DOTOMP_FINAL_DONE]]
8329 // CHECK14: .omp.final.done:
8330 // CHECK14-NEXT: br label [[OMP_PRECOND_END]]
8331 // CHECK14: omp.precond.end:
8332 // CHECK14-NEXT: ret void
8335 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.omp_outlined.omp_outlined
8336 // CHECK14-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
8337 // CHECK14-NEXT: entry:
8338 // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
8339 // CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
8340 // CHECK14-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
8341 // CHECK14-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
8342 // CHECK14-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 8
8343 // CHECK14-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
8344 // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
8345 // CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
8346 // CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
8347 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
8348 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
8349 // CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
8350 // CHECK14-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
8351 // CHECK14-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
8352 // CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
8353 // CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
8354 // CHECK14-NEXT: [[I4:%.*]] = alloca i32, align 4
8355 // CHECK14-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
8356 // CHECK14-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
8357 // CHECK14-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
8358 // CHECK14-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
8359 // CHECK14-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 8
8360 // CHECK14-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
8361 // CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
8362 // CHECK14-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 8
8363 // CHECK14-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
8364 // CHECK14-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 8
8365 // CHECK14-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
8366 // CHECK14-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4
8367 // CHECK14-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
8368 // CHECK14-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
8369 // CHECK14-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
8370 // CHECK14-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
8371 // CHECK14-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
8372 // CHECK14-NEXT: store i32 0, ptr [[I]], align 4
8373 // CHECK14-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
8374 // CHECK14-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
8375 // CHECK14-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
8376 // CHECK14: omp.precond.then:
8377 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
8378 // CHECK14-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
8379 // CHECK14-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4
8380 // CHECK14-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
8381 // CHECK14-NEXT: [[CONV:%.*]] = trunc i64 [[TMP7]] to i32
8382 // CHECK14-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
8383 // CHECK14-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP8]] to i32
8384 // CHECK14-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
8385 // CHECK14-NEXT: store i32 [[CONV3]], ptr [[DOTOMP_UB]], align 4
8386 // CHECK14-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
8387 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
8388 // CHECK14-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
8389 // CHECK14-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4
8390 // CHECK14-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP10]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
8391 // CHECK14-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
8392 // CHECK14-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
8393 // CHECK14-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP11]], [[TMP12]]
8394 // CHECK14-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
8395 // CHECK14: cond.true:
8396 // CHECK14-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
8397 // CHECK14-NEXT: br label [[COND_END:%.*]]
8398 // CHECK14: cond.false:
8399 // CHECK14-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
8400 // CHECK14-NEXT: br label [[COND_END]]
8401 // CHECK14: cond.end:
8402 // CHECK14-NEXT: [[COND:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ]
8403 // CHECK14-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
8404 // CHECK14-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
8405 // CHECK14-NEXT: store i32 [[TMP15]], ptr [[DOTOMP_IV]], align 4
8406 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
8407 // CHECK14: omp.inner.for.cond:
8408 // CHECK14-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP17:![0-9]+]]
8409 // CHECK14-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP17]]
8410 // CHECK14-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
8411 // CHECK14-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8412 // CHECK14: omp.inner.for.body:
8413 // CHECK14-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP17]]
8414 // CHECK14-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
8415 // CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
8416 // CHECK14-NEXT: store i32 [[ADD]], ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP17]]
8417 // CHECK14-NEXT: [[TMP19:%.*]] = load i32, ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP17]]
8418 // CHECK14-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP19]] to i64
8419 // CHECK14-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i64 [[IDXPROM]]
8420 // CHECK14-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP17]]
8421 // CHECK14-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
8422 // CHECK14: omp.body.continue:
8423 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
8424 // CHECK14: omp.inner.for.inc:
8425 // CHECK14-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP17]]
8426 // CHECK14-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP20]], 1
8427 // CHECK14-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP17]]
8428 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP18:![0-9]+]]
8429 // CHECK14: omp.inner.for.end:
8430 // CHECK14-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
8431 // CHECK14: omp.loop.exit:
8432 // CHECK14-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
8433 // CHECK14-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
8434 // CHECK14-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP22]])
8435 // CHECK14-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
8436 // CHECK14-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
8437 // CHECK14-NEXT: br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
8438 // CHECK14: .omp.final.then:
8439 // CHECK14-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
8440 // CHECK14-NEXT: [[SUB8:%.*]] = sub nsw i32 [[TMP25]], 0
8441 // CHECK14-NEXT: [[DIV9:%.*]] = sdiv i32 [[SUB8]], 1
8442 // CHECK14-NEXT: [[MUL10:%.*]] = mul nsw i32 [[DIV9]], 1
8443 // CHECK14-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]]
8444 // CHECK14-NEXT: store i32 [[ADD11]], ptr [[I4]], align 4
8445 // CHECK14-NEXT: br label [[DOTOMP_FINAL_DONE]]
8446 // CHECK14: .omp.final.done:
8447 // CHECK14-NEXT: br label [[OMP_PRECOND_END]]
8448 // CHECK14: omp.precond.end:
8449 // CHECK14-NEXT: ret void
8452 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159
8453 // CHECK14-SAME: (i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
8454 // CHECK14-NEXT: entry:
8455 // CHECK14-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
8456 // CHECK14-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
8457 // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
8458 // CHECK14-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
8459 // CHECK14-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
8460 // CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
8461 // CHECK14-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
8462 // CHECK14-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8
8463 // CHECK14-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.omp_outlined, ptr [[N_ADDR]], i64 [[TMP0]], ptr [[TMP1]])
8464 // CHECK14-NEXT: ret void
8467 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.omp_outlined
8468 // CHECK14-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
8469 // CHECK14-NEXT: entry:
8470 // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
8471 // CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
8472 // CHECK14-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 8
8473 // CHECK14-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
8474 // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
8475 // CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
8476 // CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
8477 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
8478 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
8479 // CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
8480 // CHECK14-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
8481 // CHECK14-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
8482 // CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
8483 // CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
8484 // CHECK14-NEXT: [[I3:%.*]] = alloca i32, align 4
8485 // CHECK14-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
8486 // CHECK14-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
8487 // CHECK14-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 8
8488 // CHECK14-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
8489 // CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
8490 // CHECK14-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 8
8491 // CHECK14-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
8492 // CHECK14-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 8
8493 // CHECK14-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
8494 // CHECK14-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4
8495 // CHECK14-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
8496 // CHECK14-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
8497 // CHECK14-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
8498 // CHECK14-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
8499 // CHECK14-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
8500 // CHECK14-NEXT: store i32 0, ptr [[I]], align 4
8501 // CHECK14-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
8502 // CHECK14-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
8503 // CHECK14-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
8504 // CHECK14: omp.precond.then:
8505 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
8506 // CHECK14-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
8507 // CHECK14-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_COMB_UB]], align 4
8508 // CHECK14-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
8509 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
8510 // CHECK14-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
8511 // CHECK14-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
8512 // CHECK14-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
8513 // CHECK14-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
8514 // CHECK14-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
8515 // CHECK14-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
8516 // CHECK14-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
8517 // CHECK14: cond.true:
8518 // CHECK14-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
8519 // CHECK14-NEXT: br label [[COND_END:%.*]]
8520 // CHECK14: cond.false:
8521 // CHECK14-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
8522 // CHECK14-NEXT: br label [[COND_END]]
8523 // CHECK14: cond.end:
8524 // CHECK14-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
8525 // CHECK14-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
8526 // CHECK14-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
8527 // CHECK14-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4
8528 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
8529 // CHECK14: omp.inner.for.cond:
8530 // CHECK14-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22:![0-9]+]]
8531 // CHECK14-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP22]]
8532 // CHECK14-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
8533 // CHECK14-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8534 // CHECK14: omp.inner.for.body:
8535 // CHECK14-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP22]]
8536 // CHECK14-NEXT: [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
8537 // CHECK14-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP22]]
8538 // CHECK14-NEXT: [[TMP19:%.*]] = zext i32 [[TMP18]] to i64
8539 // CHECK14-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 5, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.omp_outlined.omp_outlined, i64 [[TMP17]], i64 [[TMP19]], ptr [[TMP0]], i64 [[TMP1]], ptr [[TMP2]]), !llvm.access.group [[ACC_GRP22]]
8540 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
8541 // CHECK14: omp.inner.for.inc:
8542 // CHECK14-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]]
8543 // CHECK14-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP22]]
8544 // CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
8545 // CHECK14-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]]
8546 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]]
8547 // CHECK14: omp.inner.for.end:
8548 // CHECK14-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
8549 // CHECK14: omp.loop.exit:
8550 // CHECK14-NEXT: [[TMP22:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
8551 // CHECK14-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4
8552 // CHECK14-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP23]])
8553 // CHECK14-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
8554 // CHECK14-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
8555 // CHECK14-NEXT: br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
8556 // CHECK14: .omp.final.then:
8557 // CHECK14-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
8558 // CHECK14-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP26]], 0
8559 // CHECK14-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
8560 // CHECK14-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
8561 // CHECK14-NEXT: [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
8562 // CHECK14-NEXT: store i32 [[ADD8]], ptr [[I3]], align 4
8563 // CHECK14-NEXT: br label [[DOTOMP_FINAL_DONE]]
8564 // CHECK14: .omp.final.done:
8565 // CHECK14-NEXT: br label [[OMP_PRECOND_END]]
8566 // CHECK14: omp.precond.end:
8567 // CHECK14-NEXT: ret void
8570 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.omp_outlined.omp_outlined
8571 // CHECK14-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
8572 // CHECK14-NEXT: entry:
8573 // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
8574 // CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
8575 // CHECK14-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
8576 // CHECK14-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
8577 // CHECK14-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 8
8578 // CHECK14-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
8579 // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
8580 // CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
8581 // CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
8582 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
8583 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
8584 // CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
8585 // CHECK14-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
8586 // CHECK14-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
8587 // CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
8588 // CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
8589 // CHECK14-NEXT: [[I4:%.*]] = alloca i32, align 4
8590 // CHECK14-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
8591 // CHECK14-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
8592 // CHECK14-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
8593 // CHECK14-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
8594 // CHECK14-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 8
8595 // CHECK14-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
8596 // CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
8597 // CHECK14-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 8
8598 // CHECK14-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
8599 // CHECK14-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 8
8600 // CHECK14-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
8601 // CHECK14-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4
8602 // CHECK14-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
8603 // CHECK14-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
8604 // CHECK14-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
8605 // CHECK14-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
8606 // CHECK14-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
8607 // CHECK14-NEXT: store i32 0, ptr [[I]], align 4
8608 // CHECK14-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
8609 // CHECK14-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
8610 // CHECK14-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
8611 // CHECK14: omp.precond.then:
8612 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
8613 // CHECK14-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
8614 // CHECK14-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4
8615 // CHECK14-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
8616 // CHECK14-NEXT: [[CONV:%.*]] = trunc i64 [[TMP7]] to i32
8617 // CHECK14-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
8618 // CHECK14-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP8]] to i32
8619 // CHECK14-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
8620 // CHECK14-NEXT: store i32 [[CONV3]], ptr [[DOTOMP_UB]], align 4
8621 // CHECK14-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
8622 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
8623 // CHECK14-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
8624 // CHECK14-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4
8625 // CHECK14-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP10]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
8626 // CHECK14-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
8627 // CHECK14-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
8628 // CHECK14-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP11]], [[TMP12]]
8629 // CHECK14-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
8630 // CHECK14: cond.true:
8631 // CHECK14-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
8632 // CHECK14-NEXT: br label [[COND_END:%.*]]
8633 // CHECK14: cond.false:
8634 // CHECK14-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
8635 // CHECK14-NEXT: br label [[COND_END]]
8636 // CHECK14: cond.end:
8637 // CHECK14-NEXT: [[COND:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ]
8638 // CHECK14-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
8639 // CHECK14-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
8640 // CHECK14-NEXT: store i32 [[TMP15]], ptr [[DOTOMP_IV]], align 4
8641 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
8642 // CHECK14: omp.inner.for.cond:
8643 // CHECK14-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25:![0-9]+]]
8644 // CHECK14-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP25]]
8645 // CHECK14-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
8646 // CHECK14-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8647 // CHECK14: omp.inner.for.body:
8648 // CHECK14-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]]
8649 // CHECK14-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
8650 // CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
8651 // CHECK14-NEXT: store i32 [[ADD]], ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP25]]
8652 // CHECK14-NEXT: [[TMP19:%.*]] = load i32, ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP25]]
8653 // CHECK14-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP19]] to i64
8654 // CHECK14-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i64 [[IDXPROM]]
8655 // CHECK14-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP25]]
8656 // CHECK14-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
8657 // CHECK14: omp.body.continue:
8658 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
8659 // CHECK14: omp.inner.for.inc:
8660 // CHECK14-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]]
8661 // CHECK14-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP20]], 1
8662 // CHECK14-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]]
8663 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]]
8664 // CHECK14: omp.inner.for.end:
8665 // CHECK14-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
8666 // CHECK14: omp.loop.exit:
8667 // CHECK14-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
8668 // CHECK14-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
8669 // CHECK14-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP22]])
8670 // CHECK14-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
8671 // CHECK14-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
8672 // CHECK14-NEXT: br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
8673 // CHECK14: .omp.final.then:
8674 // CHECK14-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
8675 // CHECK14-NEXT: [[SUB8:%.*]] = sub nsw i32 [[TMP25]], 0
8676 // CHECK14-NEXT: [[DIV9:%.*]] = sdiv i32 [[SUB8]], 1
8677 // CHECK14-NEXT: [[MUL10:%.*]] = mul nsw i32 [[DIV9]], 1
8678 // CHECK14-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]]
8679 // CHECK14-NEXT: store i32 [[ADD11]], ptr [[I4]], align 4
8680 // CHECK14-NEXT: br label [[DOTOMP_FINAL_DONE]]
8681 // CHECK14: .omp.final.done:
8682 // CHECK14-NEXT: br label [[OMP_PRECOND_END]]
8683 // CHECK14: omp.precond.end:
8684 // CHECK14-NEXT: ret void
8687 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164
8688 // CHECK14-SAME: (i64 noundef [[M:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
8689 // CHECK14-NEXT: entry:
8690 // CHECK14-NEXT: [[M_ADDR:%.*]] = alloca i64, align 8
8691 // CHECK14-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
8692 // CHECK14-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
8693 // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
8694 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
8695 // CHECK14-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
8696 // CHECK14-NEXT: store i64 [[M]], ptr [[M_ADDR]], align 8
8697 // CHECK14-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
8698 // CHECK14-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
8699 // CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
8700 // CHECK14-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
8701 // CHECK14-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8
8702 // CHECK14-NEXT: [[TMP2:%.*]] = load i32, ptr [[M_ADDR]], align 4
8703 // CHECK14-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 4
8704 // CHECK14-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
8705 // CHECK14-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
8706 // CHECK14-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8
8707 // CHECK14-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.omp_outlined, ptr [[N_ADDR]], i64 [[TMP0]], ptr [[TMP1]], i64 [[TMP4]])
8708 // CHECK14-NEXT: ret void
8711 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.omp_outlined
8712 // CHECK14-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
8713 // CHECK14-NEXT: entry:
8714 // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
8715 // CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
8716 // CHECK14-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 8
8717 // CHECK14-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
8718 // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
8719 // CHECK14-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
8720 // CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
8721 // CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
8722 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
8723 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
8724 // CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
8725 // CHECK14-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
8726 // CHECK14-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
8727 // CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
8728 // CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
8729 // CHECK14-NEXT: [[I4:%.*]] = alloca i32, align 4
8730 // CHECK14-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
8731 // CHECK14-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
8732 // CHECK14-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
8733 // CHECK14-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 8
8734 // CHECK14-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
8735 // CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
8736 // CHECK14-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
8737 // CHECK14-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 8
8738 // CHECK14-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
8739 // CHECK14-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 8
8740 // CHECK14-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
8741 // CHECK14-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_1]], align 4
8742 // CHECK14-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
8743 // CHECK14-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
8744 // CHECK14-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
8745 // CHECK14-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
8746 // CHECK14-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_2]], align 4
8747 // CHECK14-NEXT: store i32 0, ptr [[I]], align 4
8748 // CHECK14-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
8749 // CHECK14-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
8750 // CHECK14-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
8751 // CHECK14: omp.precond.then:
8752 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
8753 // CHECK14-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
8754 // CHECK14-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_COMB_UB]], align 4
8755 // CHECK14-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
8756 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
8757 // CHECK14-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
8758 // CHECK14-NEXT: [[TMP8:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
8759 // CHECK14-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4
8760 // CHECK14-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP9]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[TMP7]])
8761 // CHECK14-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
8762 // CHECK14-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
8763 // CHECK14-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
8764 // CHECK14-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
8765 // CHECK14: cond.true:
8766 // CHECK14-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
8767 // CHECK14-NEXT: br label [[COND_END:%.*]]
8768 // CHECK14: cond.false:
8769 // CHECK14-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
8770 // CHECK14-NEXT: br label [[COND_END]]
8771 // CHECK14: cond.end:
8772 // CHECK14-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
8773 // CHECK14-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
8774 // CHECK14-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
8775 // CHECK14-NEXT: store i32 [[TMP14]], ptr [[DOTOMP_IV]], align 4
8776 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
8777 // CHECK14: omp.inner.for.cond:
8778 // CHECK14-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28:![0-9]+]]
8779 // CHECK14-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP28]]
8780 // CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP16]], 1
8781 // CHECK14-NEXT: [[CMP6:%.*]] = icmp slt i32 [[TMP15]], [[ADD]]
8782 // CHECK14-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8783 // CHECK14: omp.inner.for.body:
8784 // CHECK14-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP28]]
8785 // CHECK14-NEXT: [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
8786 // CHECK14-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP28]]
8787 // CHECK14-NEXT: [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
8788 // CHECK14-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group [[ACC_GRP28]]
8789 // CHECK14-NEXT: store i32 [[TMP21]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group [[ACC_GRP28]]
8790 // CHECK14-NEXT: [[TMP22:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group [[ACC_GRP28]]
8791 // CHECK14-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 6, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.omp_outlined.omp_outlined, i64 [[TMP18]], i64 [[TMP20]], ptr [[TMP0]], i64 [[TMP1]], ptr [[TMP2]], i64 [[TMP22]]), !llvm.access.group [[ACC_GRP28]]
8792 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
8793 // CHECK14: omp.inner.for.inc:
8794 // CHECK14-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
8795 // CHECK14-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP28]]
8796 // CHECK14-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
8797 // CHECK14-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
8798 // CHECK14-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP28]]
8799 // CHECK14-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP28]]
8800 // CHECK14-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
8801 // CHECK14-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP28]]
8802 // CHECK14-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP28]]
8803 // CHECK14-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP28]]
8804 // CHECK14-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP27]], [[TMP28]]
8805 // CHECK14-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP28]]
8806 // CHECK14-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP28]]
8807 // CHECK14-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP28]]
8808 // CHECK14-NEXT: [[CMP10:%.*]] = icmp sgt i32 [[TMP29]], [[TMP30]]
8809 // CHECK14-NEXT: br i1 [[CMP10]], label [[COND_TRUE11:%.*]], label [[COND_FALSE12:%.*]]
8810 // CHECK14: cond.true11:
8811 // CHECK14-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP28]]
8812 // CHECK14-NEXT: br label [[COND_END13:%.*]]
8813 // CHECK14: cond.false12:
8814 // CHECK14-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP28]]
8815 // CHECK14-NEXT: br label [[COND_END13]]
8816 // CHECK14: cond.end13:
8817 // CHECK14-NEXT: [[COND14:%.*]] = phi i32 [ [[TMP31]], [[COND_TRUE11]] ], [ [[TMP32]], [[COND_FALSE12]] ]
8818 // CHECK14-NEXT: store i32 [[COND14]], ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP28]]
8819 // CHECK14-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP28]]
8820 // CHECK14-NEXT: store i32 [[TMP33]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
8821 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]]
8822 // CHECK14: omp.inner.for.end:
8823 // CHECK14-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
8824 // CHECK14: omp.loop.exit:
8825 // CHECK14-NEXT: [[TMP34:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
8826 // CHECK14-NEXT: [[TMP35:%.*]] = load i32, ptr [[TMP34]], align 4
8827 // CHECK14-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP35]])
8828 // CHECK14-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
8829 // CHECK14-NEXT: [[TMP37:%.*]] = icmp ne i32 [[TMP36]], 0
8830 // CHECK14-NEXT: br i1 [[TMP37]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
8831 // CHECK14: .omp.final.then:
8832 // CHECK14-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
8833 // CHECK14-NEXT: [[SUB15:%.*]] = sub nsw i32 [[TMP38]], 0
8834 // CHECK14-NEXT: [[DIV16:%.*]] = sdiv i32 [[SUB15]], 1
8835 // CHECK14-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV16]], 1
8836 // CHECK14-NEXT: [[ADD17:%.*]] = add nsw i32 0, [[MUL]]
8837 // CHECK14-NEXT: store i32 [[ADD17]], ptr [[I4]], align 4
8838 // CHECK14-NEXT: br label [[DOTOMP_FINAL_DONE]]
8839 // CHECK14: .omp.final.done:
8840 // CHECK14-NEXT: br label [[OMP_PRECOND_END]]
8841 // CHECK14: omp.precond.end:
8842 // CHECK14-NEXT: ret void
8845 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.omp_outlined.omp_outlined
8846 // CHECK14-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
8847 // CHECK14-NEXT: entry:
8848 // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
8849 // CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
8850 // CHECK14-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
8851 // CHECK14-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
8852 // CHECK14-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 8
8853 // CHECK14-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
8854 // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
8855 // CHECK14-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
8856 // CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
8857 // CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
8858 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
8859 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
8860 // CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
8861 // CHECK14-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
8862 // CHECK14-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
8863 // CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
8864 // CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
8865 // CHECK14-NEXT: [[I5:%.*]] = alloca i32, align 4
8866 // CHECK14-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
8867 // CHECK14-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
8868 // CHECK14-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
8869 // CHECK14-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
8870 // CHECK14-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 8
8871 // CHECK14-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
8872 // CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
8873 // CHECK14-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
8874 // CHECK14-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 8
8875 // CHECK14-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
8876 // CHECK14-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 8
8877 // CHECK14-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
8878 // CHECK14-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_1]], align 4
8879 // CHECK14-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
8880 // CHECK14-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
8881 // CHECK14-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
8882 // CHECK14-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
8883 // CHECK14-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_2]], align 4
8884 // CHECK14-NEXT: store i32 0, ptr [[I]], align 4
8885 // CHECK14-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
8886 // CHECK14-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
8887 // CHECK14-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
8888 // CHECK14: omp.precond.then:
8889 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
8890 // CHECK14-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
8891 // CHECK14-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4
8892 // CHECK14-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
8893 // CHECK14-NEXT: [[CONV:%.*]] = trunc i64 [[TMP7]] to i32
8894 // CHECK14-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
8895 // CHECK14-NEXT: [[CONV4:%.*]] = trunc i64 [[TMP8]] to i32
8896 // CHECK14-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
8897 // CHECK14-NEXT: store i32 [[CONV4]], ptr [[DOTOMP_UB]], align 4
8898 // CHECK14-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
8899 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
8900 // CHECK14-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
8901 // CHECK14-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4
8902 // CHECK14-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP10]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
8903 // CHECK14-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
8904 // CHECK14-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
8905 // CHECK14-NEXT: [[CMP6:%.*]] = icmp sgt i32 [[TMP11]], [[TMP12]]
8906 // CHECK14-NEXT: br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
8907 // CHECK14: cond.true:
8908 // CHECK14-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
8909 // CHECK14-NEXT: br label [[COND_END:%.*]]
8910 // CHECK14: cond.false:
8911 // CHECK14-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
8912 // CHECK14-NEXT: br label [[COND_END]]
8913 // CHECK14: cond.end:
8914 // CHECK14-NEXT: [[COND:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ]
8915 // CHECK14-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
8916 // CHECK14-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
8917 // CHECK14-NEXT: store i32 [[TMP15]], ptr [[DOTOMP_IV]], align 4
8918 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
8919 // CHECK14: omp.inner.for.cond:
8920 // CHECK14-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31:![0-9]+]]
8921 // CHECK14-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP31]]
8922 // CHECK14-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
8923 // CHECK14-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8924 // CHECK14: omp.inner.for.body:
8925 // CHECK14-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]]
8926 // CHECK14-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
8927 // CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
8928 // CHECK14-NEXT: store i32 [[ADD]], ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP31]]
8929 // CHECK14-NEXT: [[TMP19:%.*]] = load i32, ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP31]]
8930 // CHECK14-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP19]] to i64
8931 // CHECK14-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i64 [[IDXPROM]]
8932 // CHECK14-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP31]]
8933 // CHECK14-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
8934 // CHECK14: omp.body.continue:
8935 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
8936 // CHECK14: omp.inner.for.inc:
8937 // CHECK14-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]]
8938 // CHECK14-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP20]], 1
8939 // CHECK14-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]]
8940 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP32:![0-9]+]]
8941 // CHECK14: omp.inner.for.end:
8942 // CHECK14-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
8943 // CHECK14: omp.loop.exit:
8944 // CHECK14-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
8945 // CHECK14-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
8946 // CHECK14-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP22]])
8947 // CHECK14-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
8948 // CHECK14-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
8949 // CHECK14-NEXT: br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
8950 // CHECK14: .omp.final.then:
8951 // CHECK14-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
8952 // CHECK14-NEXT: [[SUB9:%.*]] = sub nsw i32 [[TMP25]], 0
8953 // CHECK14-NEXT: [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1
8954 // CHECK14-NEXT: [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1
8955 // CHECK14-NEXT: [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
8956 // CHECK14-NEXT: store i32 [[ADD12]], ptr [[I5]], align 4
8957 // CHECK14-NEXT: br label [[DOTOMP_FINAL_DONE]]
8958 // CHECK14: .omp.final.done:
8959 // CHECK14-NEXT: br label [[OMP_PRECOND_END]]
8960 // CHECK14: omp.precond.end:
8961 // CHECK14-NEXT: ret void
8964 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169
8965 // CHECK14-SAME: (i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
8966 // CHECK14-NEXT: entry:
8967 // CHECK14-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
8968 // CHECK14-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
8969 // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
8970 // CHECK14-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
8971 // CHECK14-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
8972 // CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
8973 // CHECK14-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
8974 // CHECK14-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8
8975 // CHECK14-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.omp_outlined, ptr [[N_ADDR]], i64 [[TMP0]], ptr [[TMP1]])
8976 // CHECK14-NEXT: ret void
8979 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.omp_outlined
8980 // CHECK14-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
8981 // CHECK14-NEXT: entry:
8982 // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
8983 // CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
8984 // CHECK14-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 8
8985 // CHECK14-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
8986 // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
8987 // CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
8988 // CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
8989 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
8990 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
8991 // CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
8992 // CHECK14-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
8993 // CHECK14-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
8994 // CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
8995 // CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
8996 // CHECK14-NEXT: [[I3:%.*]] = alloca i32, align 4
8997 // CHECK14-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
8998 // CHECK14-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
8999 // CHECK14-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 8
9000 // CHECK14-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
9001 // CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
9002 // CHECK14-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 8
9003 // CHECK14-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
9004 // CHECK14-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 8
9005 // CHECK14-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
9006 // CHECK14-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4
9007 // CHECK14-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
9008 // CHECK14-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
9009 // CHECK14-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
9010 // CHECK14-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
9011 // CHECK14-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
9012 // CHECK14-NEXT: store i32 0, ptr [[I]], align 4
9013 // CHECK14-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
9014 // CHECK14-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
9015 // CHECK14-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
9016 // CHECK14: omp.precond.then:
9017 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
9018 // CHECK14-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
9019 // CHECK14-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_COMB_UB]], align 4
9020 // CHECK14-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
9021 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
9022 // CHECK14-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
9023 // CHECK14-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
9024 // CHECK14-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
9025 // CHECK14-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
9026 // CHECK14-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
9027 // CHECK14-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
9028 // CHECK14-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
9029 // CHECK14: cond.true:
9030 // CHECK14-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
9031 // CHECK14-NEXT: br label [[COND_END:%.*]]
9032 // CHECK14: cond.false:
9033 // CHECK14-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
9034 // CHECK14-NEXT: br label [[COND_END]]
9035 // CHECK14: cond.end:
9036 // CHECK14-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
9037 // CHECK14-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
9038 // CHECK14-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
9039 // CHECK14-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4
9040 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
9041 // CHECK14: omp.inner.for.cond:
9042 // CHECK14-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34:![0-9]+]]
9043 // CHECK14-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP34]]
9044 // CHECK14-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
9045 // CHECK14-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
9046 // CHECK14: omp.inner.for.body:
9047 // CHECK14-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP34]]
9048 // CHECK14-NEXT: [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
9049 // CHECK14-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP34]]
9050 // CHECK14-NEXT: [[TMP19:%.*]] = zext i32 [[TMP18]] to i64
9051 // CHECK14-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 5, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.omp_outlined.omp_outlined, i64 [[TMP17]], i64 [[TMP19]], ptr [[TMP0]], i64 [[TMP1]], ptr [[TMP2]]), !llvm.access.group [[ACC_GRP34]]
9052 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
9053 // CHECK14: omp.inner.for.inc:
9054 // CHECK14-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]]
9055 // CHECK14-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP34]]
9056 // CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
9057 // CHECK14-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]]
9058 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP35:![0-9]+]]
9059 // CHECK14: omp.inner.for.end:
9060 // CHECK14-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
9061 // CHECK14: omp.loop.exit:
9062 // CHECK14-NEXT: [[TMP22:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
9063 // CHECK14-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4
9064 // CHECK14-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP23]])
9065 // CHECK14-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
9066 // CHECK14-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
9067 // CHECK14-NEXT: br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
9068 // CHECK14: .omp.final.then:
9069 // CHECK14-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
9070 // CHECK14-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP26]], 0
9071 // CHECK14-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
9072 // CHECK14-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
9073 // CHECK14-NEXT: [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
9074 // CHECK14-NEXT: store i32 [[ADD8]], ptr [[I3]], align 4
9075 // CHECK14-NEXT: br label [[DOTOMP_FINAL_DONE]]
9076 // CHECK14: .omp.final.done:
9077 // CHECK14-NEXT: br label [[OMP_PRECOND_END]]
9078 // CHECK14: omp.precond.end:
9079 // CHECK14-NEXT: ret void
9082 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.omp_outlined.omp_outlined
9083 // CHECK14-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
9084 // CHECK14-NEXT: entry:
9085 // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
9086 // CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
9087 // CHECK14-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
9088 // CHECK14-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
9089 // CHECK14-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 8
9090 // CHECK14-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
9091 // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
9092 // CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
9093 // CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
9094 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
9095 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
9096 // CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
9097 // CHECK14-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
9098 // CHECK14-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
9099 // CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
9100 // CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
9101 // CHECK14-NEXT: [[I4:%.*]] = alloca i32, align 4
9102 // CHECK14-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
9103 // CHECK14-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
9104 // CHECK14-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
9105 // CHECK14-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
9106 // CHECK14-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 8
9107 // CHECK14-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
9108 // CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
9109 // CHECK14-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 8
9110 // CHECK14-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
9111 // CHECK14-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 8
9112 // CHECK14-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
9113 // CHECK14-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4
9114 // CHECK14-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
9115 // CHECK14-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
9116 // CHECK14-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
9117 // CHECK14-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
9118 // CHECK14-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
9119 // CHECK14-NEXT: store i32 0, ptr [[I]], align 4
9120 // CHECK14-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
9121 // CHECK14-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
9122 // CHECK14-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
9123 // CHECK14: omp.precond.then:
9124 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
9125 // CHECK14-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
9126 // CHECK14-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4
9127 // CHECK14-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
9128 // CHECK14-NEXT: [[CONV:%.*]] = trunc i64 [[TMP7]] to i32
9129 // CHECK14-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
9130 // CHECK14-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP8]] to i32
9131 // CHECK14-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
9132 // CHECK14-NEXT: store i32 [[CONV3]], ptr [[DOTOMP_UB]], align 4
9133 // CHECK14-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
9134 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
9135 // CHECK14-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
9136 // CHECK14-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
9137 // CHECK14-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
9138 // CHECK14-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP11]], align 4
9139 // CHECK14-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB3]], i32 [[TMP12]], i32 1073741859, i32 [[TMP9]], i32 [[TMP10]], i32 1, i32 1)
9140 // CHECK14-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
9141 // CHECK14: omp.dispatch.cond:
9142 // CHECK14-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
9143 // CHECK14-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 4
9144 // CHECK14-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB3]], i32 [[TMP14]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
9145 // CHECK14-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP15]], 0
9146 // CHECK14-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
9147 // CHECK14: omp.dispatch.body:
9148 // CHECK14-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
9149 // CHECK14-NEXT: store i32 [[TMP16]], ptr [[DOTOMP_IV]], align 4
9150 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
9151 // CHECK14: omp.inner.for.cond:
9152 // CHECK14-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP37:![0-9]+]]
9153 // CHECK14-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP37]]
9154 // CHECK14-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
9155 // CHECK14-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
9156 // CHECK14: omp.inner.for.body:
9157 // CHECK14-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP37]]
9158 // CHECK14-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
9159 // CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
9160 // CHECK14-NEXT: store i32 [[ADD]], ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP37]]
9161 // CHECK14-NEXT: [[TMP20:%.*]] = load i32, ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP37]]
9162 // CHECK14-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP20]] to i64
9163 // CHECK14-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i64 [[IDXPROM]]
9164 // CHECK14-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP37]]
9165 // CHECK14-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
9166 // CHECK14: omp.body.continue:
9167 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
9168 // CHECK14: omp.inner.for.inc:
9169 // CHECK14-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP37]]
9170 // CHECK14-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP21]], 1
9171 // CHECK14-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP37]]
9172 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP38:![0-9]+]]
9173 // CHECK14: omp.inner.for.end:
9174 // CHECK14-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
9175 // CHECK14: omp.dispatch.inc:
9176 // CHECK14-NEXT: br label [[OMP_DISPATCH_COND]]
9177 // CHECK14: omp.dispatch.end:
9178 // CHECK14-NEXT: [[TMP22:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
9179 // CHECK14-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4
9180 // CHECK14-NEXT: call void @__kmpc_dispatch_deinit(ptr @[[GLOB3]], i32 [[TMP23]])
9181 // CHECK14-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
9182 // CHECK14-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
9183 // CHECK14-NEXT: br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
9184 // CHECK14: .omp.final.then:
9185 // CHECK14-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
9186 // CHECK14-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP26]], 0
9187 // CHECK14-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
9188 // CHECK14-NEXT: [[MUL9:%.*]] = mul nsw i32 [[DIV8]], 1
9189 // CHECK14-NEXT: [[ADD10:%.*]] = add nsw i32 0, [[MUL9]]
9190 // CHECK14-NEXT: store i32 [[ADD10]], ptr [[I4]], align 4
9191 // CHECK14-NEXT: br label [[DOTOMP_FINAL_DONE]]
9192 // CHECK14: .omp.final.done:
9193 // CHECK14-NEXT: br label [[OMP_PRECOND_END]]
9194 // CHECK14: omp.precond.end:
9195 // CHECK14-NEXT: ret void
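// The l174 target entry below mirrors l169 but additionally captures 'm' into
// .capture_expr. and forwards it through __kmpc_fork_teams, so the chunk size used
// by the inner dispatch loop comes from that captured value rather than a literal.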
9198 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174
9199 // CHECK14-SAME: (i64 noundef [[M:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
9200 // CHECK14-NEXT: entry:
9201 // CHECK14-NEXT: [[M_ADDR:%.*]] = alloca i64, align 8
9202 // CHECK14-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
9203 // CHECK14-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
9204 // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
9205 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
9206 // CHECK14-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
9207 // CHECK14-NEXT: store i64 [[M]], ptr [[M_ADDR]], align 8
9208 // CHECK14-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
9209 // CHECK14-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
9210 // CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
9211 // CHECK14-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
9212 // CHECK14-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8
9213 // CHECK14-NEXT: [[TMP2:%.*]] = load i32, ptr [[M_ADDR]], align 4
9214 // CHECK14-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 4
9215 // CHECK14-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
9216 // CHECK14-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
9217 // CHECK14-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8
9218 // CHECK14-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.omp_outlined, ptr [[N_ADDR]], i64 [[TMP0]], ptr [[TMP1]], i64 [[TMP4]])
9219 // CHECK14-NEXT: ret void
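// Teams-level outlined function for l174: a statically scheduled distribute loop
// (__kmpc_for_static_init_4 with schedule 92) that re-loads the captured chunk value
// on every outer iteration and forks the six-argument worksharing routine.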
9222 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.omp_outlined
9223 // CHECK14-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
9224 // CHECK14-NEXT: entry:
9225 // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
9226 // CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
9227 // CHECK14-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 8
9228 // CHECK14-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
9229 // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
9230 // CHECK14-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
9231 // CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
9232 // CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
9233 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
9234 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
9235 // CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
9236 // CHECK14-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
9237 // CHECK14-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
9238 // CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
9239 // CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
9240 // CHECK14-NEXT: [[I4:%.*]] = alloca i32, align 4
9241 // CHECK14-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
9242 // CHECK14-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
9243 // CHECK14-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
9244 // CHECK14-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 8
9245 // CHECK14-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
9246 // CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
9247 // CHECK14-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
9248 // CHECK14-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 8
9249 // CHECK14-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
9250 // CHECK14-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 8
9251 // CHECK14-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
9252 // CHECK14-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_1]], align 4
9253 // CHECK14-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
9254 // CHECK14-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
9255 // CHECK14-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
9256 // CHECK14-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
9257 // CHECK14-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_2]], align 4
9258 // CHECK14-NEXT: store i32 0, ptr [[I]], align 4
9259 // CHECK14-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
9260 // CHECK14-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
9261 // CHECK14-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
9262 // CHECK14: omp.precond.then:
9263 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
9264 // CHECK14-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
9265 // CHECK14-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_COMB_UB]], align 4
9266 // CHECK14-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
9267 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
9268 // CHECK14-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
9269 // CHECK14-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
9270 // CHECK14-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
9271 // CHECK14-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
9272 // CHECK14-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
9273 // CHECK14-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
9274 // CHECK14-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
9275 // CHECK14: cond.true:
9276 // CHECK14-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
9277 // CHECK14-NEXT: br label [[COND_END:%.*]]
9278 // CHECK14: cond.false:
9279 // CHECK14-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
9280 // CHECK14-NEXT: br label [[COND_END]]
9281 // CHECK14: cond.end:
9282 // CHECK14-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
9283 // CHECK14-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
9284 // CHECK14-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
9285 // CHECK14-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4
9286 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
9287 // CHECK14: omp.inner.for.cond:
9288 // CHECK14-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP40:![0-9]+]]
9289 // CHECK14-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP40]]
9290 // CHECK14-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
9291 // CHECK14-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
9292 // CHECK14: omp.inner.for.body:
9293 // CHECK14-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP40]]
9294 // CHECK14-NEXT: [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
9295 // CHECK14-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP40]]
9296 // CHECK14-NEXT: [[TMP19:%.*]] = zext i32 [[TMP18]] to i64
9297 // CHECK14-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group [[ACC_GRP40]]
9298 // CHECK14-NEXT: store i32 [[TMP20]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group [[ACC_GRP40]]
9299 // CHECK14-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group [[ACC_GRP40]]
9300 // CHECK14-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 6, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.omp_outlined.omp_outlined, i64 [[TMP17]], i64 [[TMP19]], ptr [[TMP0]], i64 [[TMP1]], ptr [[TMP2]], i64 [[TMP21]]), !llvm.access.group [[ACC_GRP40]]
9301 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
9302 // CHECK14: omp.inner.for.inc:
9303 // CHECK14-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP40]]
9304 // CHECK14-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP40]]
9305 // CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
9306 // CHECK14-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP40]]
9307 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP41:![0-9]+]]
9308 // CHECK14: omp.inner.for.end:
9309 // CHECK14-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
9310 // CHECK14: omp.loop.exit:
9311 // CHECK14-NEXT: [[TMP24:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
9312 // CHECK14-NEXT: [[TMP25:%.*]] = load i32, ptr [[TMP24]], align 4
9313 // CHECK14-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP25]])
9314 // CHECK14-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
9315 // CHECK14-NEXT: [[TMP27:%.*]] = icmp ne i32 [[TMP26]], 0
9316 // CHECK14-NEXT: br i1 [[TMP27]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
9317 // CHECK14: .omp.final.then:
9318 // CHECK14-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
9319 // CHECK14-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP28]], 0
9320 // CHECK14-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
9321 // CHECK14-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
9322 // CHECK14-NEXT: [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
9323 // CHECK14-NEXT: store i32 [[ADD9]], ptr [[I4]], align 4
9324 // CHECK14-NEXT: br label [[DOTOMP_FINAL_DONE]]
9325 // CHECK14: .omp.final.done:
9326 // CHECK14-NEXT: br label [[OMP_PRECOND_END]]
9327 // CHECK14: omp.precond.end:
9328 // CHECK14-NEXT: ret void
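// Worksharing body for l174: same control flow as the l169 variant above, except that
// __kmpc_dispatch_init_4 receives the forwarded capture of 'm' as its chunk argument
// instead of 1.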
9331 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.omp_outlined.omp_outlined
9332 // CHECK14-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
9333 // CHECK14-NEXT: entry:
9334 // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
9335 // CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
9336 // CHECK14-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
9337 // CHECK14-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
9338 // CHECK14-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 8
9339 // CHECK14-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
9340 // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
9341 // CHECK14-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
9342 // CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
9343 // CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
9344 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
9345 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
9346 // CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
9347 // CHECK14-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
9348 // CHECK14-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
9349 // CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
9350 // CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
9351 // CHECK14-NEXT: [[I5:%.*]] = alloca i32, align 4
9352 // CHECK14-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
9353 // CHECK14-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
9354 // CHECK14-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
9355 // CHECK14-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
9356 // CHECK14-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 8
9357 // CHECK14-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
9358 // CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
9359 // CHECK14-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
9360 // CHECK14-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 8
9361 // CHECK14-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
9362 // CHECK14-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 8
9363 // CHECK14-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
9364 // CHECK14-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_1]], align 4
9365 // CHECK14-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
9366 // CHECK14-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
9367 // CHECK14-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
9368 // CHECK14-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
9369 // CHECK14-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_2]], align 4
9370 // CHECK14-NEXT: store i32 0, ptr [[I]], align 4
9371 // CHECK14-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
9372 // CHECK14-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
9373 // CHECK14-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
9374 // CHECK14: omp.precond.then:
9375 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
9376 // CHECK14-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
9377 // CHECK14-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4
9378 // CHECK14-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
9379 // CHECK14-NEXT: [[CONV:%.*]] = trunc i64 [[TMP7]] to i32
9380 // CHECK14-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
9381 // CHECK14-NEXT: [[CONV4:%.*]] = trunc i64 [[TMP8]] to i32
9382 // CHECK14-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
9383 // CHECK14-NEXT: store i32 [[CONV4]], ptr [[DOTOMP_UB]], align 4
9384 // CHECK14-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
9385 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
9386 // CHECK14-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
9387 // CHECK14-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
9388 // CHECK14-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
9389 // CHECK14-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
9390 // CHECK14-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4
9391 // CHECK14-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB3]], i32 [[TMP13]], i32 1073741859, i32 [[TMP10]], i32 [[TMP11]], i32 1, i32 [[TMP9]])
9392 // CHECK14-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
9393 // CHECK14: omp.dispatch.cond:
9394 // CHECK14-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
9395 // CHECK14-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 4
9396 // CHECK14-NEXT: [[TMP16:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB3]], i32 [[TMP15]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
9397 // CHECK14-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP16]], 0
9398 // CHECK14-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
9399 // CHECK14: omp.dispatch.body:
9400 // CHECK14-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
9401 // CHECK14-NEXT: store i32 [[TMP17]], ptr [[DOTOMP_IV]], align 4
9402 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
9403 // CHECK14: omp.inner.for.cond:
9404 // CHECK14-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP43:![0-9]+]]
9405 // CHECK14-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP43]]
9406 // CHECK14-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
9407 // CHECK14-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
9408 // CHECK14: omp.inner.for.body:
9409 // CHECK14-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP43]]
9410 // CHECK14-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP20]], 1
9411 // CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
9412 // CHECK14-NEXT: store i32 [[ADD]], ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP43]]
9413 // CHECK14-NEXT: [[TMP21:%.*]] = load i32, ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP43]]
9414 // CHECK14-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
9415 // CHECK14-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i64 [[IDXPROM]]
9416 // CHECK14-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP43]]
9417 // CHECK14-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
9418 // CHECK14: omp.body.continue:
9419 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
9420 // CHECK14: omp.inner.for.inc:
9421 // CHECK14-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP43]]
9422 // CHECK14-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP22]], 1
9423 // CHECK14-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP43]]
9424 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP44:![0-9]+]]
9425 // CHECK14: omp.inner.for.end:
9426 // CHECK14-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
9427 // CHECK14: omp.dispatch.inc:
9428 // CHECK14-NEXT: br label [[OMP_DISPATCH_COND]]
9429 // CHECK14: omp.dispatch.end:
9430 // CHECK14-NEXT: [[TMP23:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
9431 // CHECK14-NEXT: [[TMP24:%.*]] = load i32, ptr [[TMP23]], align 4
9432 // CHECK14-NEXT: call void @__kmpc_dispatch_deinit(ptr @[[GLOB3]], i32 [[TMP24]])
9433 // CHECK14-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
9434 // CHECK14-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
9435 // CHECK14-NEXT: br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
9436 // CHECK14: .omp.final.then:
9437 // CHECK14-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
9438 // CHECK14-NEXT: [[SUB8:%.*]] = sub nsw i32 [[TMP27]], 0
9439 // CHECK14-NEXT: [[DIV9:%.*]] = sdiv i32 [[SUB8]], 1
9440 // CHECK14-NEXT: [[MUL10:%.*]] = mul nsw i32 [[DIV9]], 1
9441 // CHECK14-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]]
9442 // CHECK14-NEXT: store i32 [[ADD11]], ptr [[I5]], align 4
9443 // CHECK14-NEXT: br label [[DOTOMP_FINAL_DONE]]
9444 // CHECK14: .omp.final.done:
9445 // CHECK14-NEXT: br label [[OMP_PRECOND_END]]
9446 // CHECK14: omp.precond.end:
9447 // CHECK14-NEXT: ret void
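// The tmain<int, 10> instantiation checked below launches five target kernels (the
// entries are mangled with source lines 122, 127, 132, 137 and 142). Each launch fills
// in a __tgt_kernel_arguments block with a trip count of 10, maps the [10 x i32] array
// 'a' (the l132 and l142 kernels also pass the casted 'm'), calls __tgt_target_kernel,
// and falls back to calling the host entry directly if offloading fails.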
9450 // CHECK14-LABEL: define {{[^@]+}}@_Z5tmainIiLi10EEiT_
9451 // CHECK14-SAME: (i32 noundef signext [[ARGC:%.*]]) #[[ATTR5:[0-9]+]] comdat {
9452 // CHECK14-NEXT: entry:
9453 // CHECK14-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
9454 // CHECK14-NEXT: [[A:%.*]] = alloca [10 x i32], align 4
9455 // CHECK14-NEXT: [[M:%.*]] = alloca i32, align 4
9456 // CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8
9457 // CHECK14-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8
9458 // CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8
9459 // CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
9460 // CHECK14-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
9461 // CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS1:%.*]] = alloca [1 x ptr], align 8
9462 // CHECK14-NEXT: [[DOTOFFLOAD_PTRS2:%.*]] = alloca [1 x ptr], align 8
9463 // CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS3:%.*]] = alloca [1 x ptr], align 8
9464 // CHECK14-NEXT: [[_TMP4:%.*]] = alloca i32, align 4
9465 // CHECK14-NEXT: [[KERNEL_ARGS5:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
9466 // CHECK14-NEXT: [[M_CASTED:%.*]] = alloca i64, align 8
9467 // CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS8:%.*]] = alloca [2 x ptr], align 8
9468 // CHECK14-NEXT: [[DOTOFFLOAD_PTRS9:%.*]] = alloca [2 x ptr], align 8
9469 // CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS10:%.*]] = alloca [2 x ptr], align 8
9470 // CHECK14-NEXT: [[_TMP11:%.*]] = alloca i32, align 4
9471 // CHECK14-NEXT: [[KERNEL_ARGS12:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
9472 // CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS15:%.*]] = alloca [1 x ptr], align 8
9473 // CHECK14-NEXT: [[DOTOFFLOAD_PTRS16:%.*]] = alloca [1 x ptr], align 8
9474 // CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS17:%.*]] = alloca [1 x ptr], align 8
9475 // CHECK14-NEXT: [[_TMP18:%.*]] = alloca i32, align 4
9476 // CHECK14-NEXT: [[KERNEL_ARGS19:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
9477 // CHECK14-NEXT: [[M_CASTED22:%.*]] = alloca i64, align 8
9478 // CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS23:%.*]] = alloca [2 x ptr], align 8
9479 // CHECK14-NEXT: [[DOTOFFLOAD_PTRS24:%.*]] = alloca [2 x ptr], align 8
9480 // CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS25:%.*]] = alloca [2 x ptr], align 8
9481 // CHECK14-NEXT: [[_TMP26:%.*]] = alloca i32, align 4
9482 // CHECK14-NEXT: [[KERNEL_ARGS27:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
9483 // CHECK14-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4
9484 // CHECK14-NEXT: store i32 10, ptr [[M]], align 4
9485 // CHECK14-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
9486 // CHECK14-NEXT: store ptr [[A]], ptr [[TMP0]], align 8
9487 // CHECK14-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
9488 // CHECK14-NEXT: store ptr [[A]], ptr [[TMP1]], align 8
9489 // CHECK14-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
9490 // CHECK14-NEXT: store ptr null, ptr [[TMP2]], align 8
9491 // CHECK14-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
9492 // CHECK14-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
9493 // CHECK14-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
9494 // CHECK14-NEXT: store i32 3, ptr [[TMP5]], align 4
9495 // CHECK14-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
9496 // CHECK14-NEXT: store i32 1, ptr [[TMP6]], align 4
9497 // CHECK14-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
9498 // CHECK14-NEXT: store ptr [[TMP3]], ptr [[TMP7]], align 8
9499 // CHECK14-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
9500 // CHECK14-NEXT: store ptr [[TMP4]], ptr [[TMP8]], align 8
9501 // CHECK14-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
9502 // CHECK14-NEXT: store ptr @.offload_sizes.9, ptr [[TMP9]], align 8
9503 // CHECK14-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
9504 // CHECK14-NEXT: store ptr @.offload_maptypes.10, ptr [[TMP10]], align 8
9505 // CHECK14-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
9506 // CHECK14-NEXT: store ptr null, ptr [[TMP11]], align 8
9507 // CHECK14-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
9508 // CHECK14-NEXT: store ptr null, ptr [[TMP12]], align 8
9509 // CHECK14-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
9510 // CHECK14-NEXT: store i64 10, ptr [[TMP13]], align 8
9511 // CHECK14-NEXT: [[TMP14:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
9512 // CHECK14-NEXT: store i64 0, ptr [[TMP14]], align 8
9513 // CHECK14-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
9514 // CHECK14-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP15]], align 4
9515 // CHECK14-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
9516 // CHECK14-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP16]], align 4
9517 // CHECK14-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
9518 // CHECK14-NEXT: store i32 0, ptr [[TMP17]], align 4
9519 // CHECK14-NEXT: [[TMP18:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.region_id, ptr [[KERNEL_ARGS]])
9520 // CHECK14-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
9521 // CHECK14-NEXT: br i1 [[TMP19]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
9522 // CHECK14: omp_offload.failed:
9523 // CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122(ptr [[A]]) #[[ATTR3]]
9524 // CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT]]
9525 // CHECK14: omp_offload.cont:
9526 // CHECK14-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
9527 // CHECK14-NEXT: store ptr [[A]], ptr [[TMP20]], align 8
9528 // CHECK14-NEXT: [[TMP21:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
9529 // CHECK14-NEXT: store ptr [[A]], ptr [[TMP21]], align 8
9530 // CHECK14-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS3]], i64 0, i64 0
9531 // CHECK14-NEXT: store ptr null, ptr [[TMP22]], align 8
9532 // CHECK14-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
9533 // CHECK14-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
9534 // CHECK14-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 0
9535 // CHECK14-NEXT: store i32 3, ptr [[TMP25]], align 4
9536 // CHECK14-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 1
9537 // CHECK14-NEXT: store i32 1, ptr [[TMP26]], align 4
9538 // CHECK14-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 2
9539 // CHECK14-NEXT: store ptr [[TMP23]], ptr [[TMP27]], align 8
9540 // CHECK14-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 3
9541 // CHECK14-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 8
9542 // CHECK14-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 4
9543 // CHECK14-NEXT: store ptr @.offload_sizes.11, ptr [[TMP29]], align 8
9544 // CHECK14-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 5
9545 // CHECK14-NEXT: store ptr @.offload_maptypes.12, ptr [[TMP30]], align 8
9546 // CHECK14-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 6
9547 // CHECK14-NEXT: store ptr null, ptr [[TMP31]], align 8
9548 // CHECK14-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 7
9549 // CHECK14-NEXT: store ptr null, ptr [[TMP32]], align 8
9550 // CHECK14-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 8
9551 // CHECK14-NEXT: store i64 10, ptr [[TMP33]], align 8
9552 // CHECK14-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 9
9553 // CHECK14-NEXT: store i64 0, ptr [[TMP34]], align 8
9554 // CHECK14-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 10
9555 // CHECK14-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP35]], align 4
9556 // CHECK14-NEXT: [[TMP36:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 11
9557 // CHECK14-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP36]], align 4
9558 // CHECK14-NEXT: [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 12
9559 // CHECK14-NEXT: store i32 0, ptr [[TMP37]], align 4
9560 // CHECK14-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.region_id, ptr [[KERNEL_ARGS5]])
9561 // CHECK14-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
9562 // CHECK14-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED6:%.*]], label [[OMP_OFFLOAD_CONT7:%.*]]
9563 // CHECK14: omp_offload.failed6:
9564 // CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127(ptr [[A]]) #[[ATTR3]]
9565 // CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT7]]
9566 // CHECK14: omp_offload.cont7:
9567 // CHECK14-NEXT: [[TMP40:%.*]] = load i32, ptr [[M]], align 4
9568 // CHECK14-NEXT: store i32 [[TMP40]], ptr [[M_CASTED]], align 4
9569 // CHECK14-NEXT: [[TMP41:%.*]] = load i64, ptr [[M_CASTED]], align 8
9570 // CHECK14-NEXT: [[TMP42:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS8]], i32 0, i32 0
9571 // CHECK14-NEXT: store i64 [[TMP41]], ptr [[TMP42]], align 8
9572 // CHECK14-NEXT: [[TMP43:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS9]], i32 0, i32 0
9573 // CHECK14-NEXT: store i64 [[TMP41]], ptr [[TMP43]], align 8
9574 // CHECK14-NEXT: [[TMP44:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS10]], i64 0, i64 0
9575 // CHECK14-NEXT: store ptr null, ptr [[TMP44]], align 8
9576 // CHECK14-NEXT: [[TMP45:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS8]], i32 0, i32 1
9577 // CHECK14-NEXT: store ptr [[A]], ptr [[TMP45]], align 8
9578 // CHECK14-NEXT: [[TMP46:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS9]], i32 0, i32 1
9579 // CHECK14-NEXT: store ptr [[A]], ptr [[TMP46]], align 8
9580 // CHECK14-NEXT: [[TMP47:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS10]], i64 0, i64 1
9581 // CHECK14-NEXT: store ptr null, ptr [[TMP47]], align 8
9582 // CHECK14-NEXT: [[TMP48:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS8]], i32 0, i32 0
9583 // CHECK14-NEXT: [[TMP49:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS9]], i32 0, i32 0
9584 // CHECK14-NEXT: [[TMP50:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 0
9585 // CHECK14-NEXT: store i32 3, ptr [[TMP50]], align 4
9586 // CHECK14-NEXT: [[TMP51:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 1
9587 // CHECK14-NEXT: store i32 2, ptr [[TMP51]], align 4
9588 // CHECK14-NEXT: [[TMP52:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 2
9589 // CHECK14-NEXT: store ptr [[TMP48]], ptr [[TMP52]], align 8
9590 // CHECK14-NEXT: [[TMP53:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 3
9591 // CHECK14-NEXT: store ptr [[TMP49]], ptr [[TMP53]], align 8
9592 // CHECK14-NEXT: [[TMP54:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 4
9593 // CHECK14-NEXT: store ptr @.offload_sizes.13, ptr [[TMP54]], align 8
9594 // CHECK14-NEXT: [[TMP55:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 5
9595 // CHECK14-NEXT: store ptr @.offload_maptypes.14, ptr [[TMP55]], align 8
9596 // CHECK14-NEXT: [[TMP56:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 6
9597 // CHECK14-NEXT: store ptr null, ptr [[TMP56]], align 8
9598 // CHECK14-NEXT: [[TMP57:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 7
9599 // CHECK14-NEXT: store ptr null, ptr [[TMP57]], align 8
9600 // CHECK14-NEXT: [[TMP58:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 8
9601 // CHECK14-NEXT: store i64 10, ptr [[TMP58]], align 8
9602 // CHECK14-NEXT: [[TMP59:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 9
9603 // CHECK14-NEXT: store i64 0, ptr [[TMP59]], align 8
9604 // CHECK14-NEXT: [[TMP60:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 10
9605 // CHECK14-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP60]], align 4
9606 // CHECK14-NEXT: [[TMP61:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 11
9607 // CHECK14-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP61]], align 4
9608 // CHECK14-NEXT: [[TMP62:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 12
9609 // CHECK14-NEXT: store i32 0, ptr [[TMP62]], align 4
9610 // CHECK14-NEXT: [[TMP63:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.region_id, ptr [[KERNEL_ARGS12]])
9611 // CHECK14-NEXT: [[TMP64:%.*]] = icmp ne i32 [[TMP63]], 0
9612 // CHECK14-NEXT: br i1 [[TMP64]], label [[OMP_OFFLOAD_FAILED13:%.*]], label [[OMP_OFFLOAD_CONT14:%.*]]
9613 // CHECK14: omp_offload.failed13:
9614 // CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132(i64 [[TMP41]], ptr [[A]]) #[[ATTR3]]
9615 // CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT14]]
9616 // CHECK14: omp_offload.cont14:
9617 // CHECK14-NEXT: [[TMP65:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS15]], i32 0, i32 0
9618 // CHECK14-NEXT: store ptr [[A]], ptr [[TMP65]], align 8
9619 // CHECK14-NEXT: [[TMP66:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS16]], i32 0, i32 0
9620 // CHECK14-NEXT: store ptr [[A]], ptr [[TMP66]], align 8
9621 // CHECK14-NEXT: [[TMP67:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS17]], i64 0, i64 0
9622 // CHECK14-NEXT: store ptr null, ptr [[TMP67]], align 8
9623 // CHECK14-NEXT: [[TMP68:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS15]], i32 0, i32 0
9624 // CHECK14-NEXT: [[TMP69:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS16]], i32 0, i32 0
9625 // CHECK14-NEXT: [[TMP70:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 0
9626 // CHECK14-NEXT: store i32 3, ptr [[TMP70]], align 4
9627 // CHECK14-NEXT: [[TMP71:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 1
9628 // CHECK14-NEXT: store i32 1, ptr [[TMP71]], align 4
9629 // CHECK14-NEXT: [[TMP72:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 2
9630 // CHECK14-NEXT: store ptr [[TMP68]], ptr [[TMP72]], align 8
9631 // CHECK14-NEXT: [[TMP73:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 3
9632 // CHECK14-NEXT: store ptr [[TMP69]], ptr [[TMP73]], align 8
9633 // CHECK14-NEXT: [[TMP74:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 4
9634 // CHECK14-NEXT: store ptr @.offload_sizes.15, ptr [[TMP74]], align 8
9635 // CHECK14-NEXT: [[TMP75:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 5
9636 // CHECK14-NEXT: store ptr @.offload_maptypes.16, ptr [[TMP75]], align 8
9637 // CHECK14-NEXT: [[TMP76:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 6
9638 // CHECK14-NEXT: store ptr null, ptr [[TMP76]], align 8
9639 // CHECK14-NEXT: [[TMP77:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 7
9640 // CHECK14-NEXT: store ptr null, ptr [[TMP77]], align 8
9641 // CHECK14-NEXT: [[TMP78:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 8
9642 // CHECK14-NEXT: store i64 10, ptr [[TMP78]], align 8
9643 // CHECK14-NEXT: [[TMP79:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 9
9644 // CHECK14-NEXT: store i64 0, ptr [[TMP79]], align 8
9645 // CHECK14-NEXT: [[TMP80:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 10
9646 // CHECK14-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP80]], align 4
9647 // CHECK14-NEXT: [[TMP81:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 11
9648 // CHECK14-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP81]], align 4
9649 // CHECK14-NEXT: [[TMP82:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 12
9650 // CHECK14-NEXT: store i32 0, ptr [[TMP82]], align 4
9651 // CHECK14-NEXT: [[TMP83:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.region_id, ptr [[KERNEL_ARGS19]])
9652 // CHECK14-NEXT: [[TMP84:%.*]] = icmp ne i32 [[TMP83]], 0
9653 // CHECK14-NEXT: br i1 [[TMP84]], label [[OMP_OFFLOAD_FAILED20:%.*]], label [[OMP_OFFLOAD_CONT21:%.*]]
9654 // CHECK14: omp_offload.failed20:
9655 // CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137(ptr [[A]]) #[[ATTR3]]
9656 // CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT21]]
9657 // CHECK14: omp_offload.cont21:
9658 // CHECK14-NEXT: [[TMP85:%.*]] = load i32, ptr [[M]], align 4
9659 // CHECK14-NEXT: store i32 [[TMP85]], ptr [[M_CASTED22]], align 4
9660 // CHECK14-NEXT: [[TMP86:%.*]] = load i64, ptr [[M_CASTED22]], align 8
9661 // CHECK14-NEXT: [[TMP87:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 0
9662 // CHECK14-NEXT: store i64 [[TMP86]], ptr [[TMP87]], align 8
9663 // CHECK14-NEXT: [[TMP88:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS24]], i32 0, i32 0
9664 // CHECK14-NEXT: store i64 [[TMP86]], ptr [[TMP88]], align 8
9665 // CHECK14-NEXT: [[TMP89:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS25]], i64 0, i64 0
9666 // CHECK14-NEXT: store ptr null, ptr [[TMP89]], align 8
9667 // CHECK14-NEXT: [[TMP90:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 1
9668 // CHECK14-NEXT: store ptr [[A]], ptr [[TMP90]], align 8
9669 // CHECK14-NEXT: [[TMP91:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS24]], i32 0, i32 1
9670 // CHECK14-NEXT: store ptr [[A]], ptr [[TMP91]], align 8
9671 // CHECK14-NEXT: [[TMP92:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS25]], i64 0, i64 1
9672 // CHECK14-NEXT: store ptr null, ptr [[TMP92]], align 8
9673 // CHECK14-NEXT: [[TMP93:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 0
9674 // CHECK14-NEXT: [[TMP94:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS24]], i32 0, i32 0
9675 // CHECK14-NEXT: [[TMP95:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 0
9676 // CHECK14-NEXT: store i32 3, ptr [[TMP95]], align 4
9677 // CHECK14-NEXT: [[TMP96:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 1
9678 // CHECK14-NEXT: store i32 2, ptr [[TMP96]], align 4
9679 // CHECK14-NEXT: [[TMP97:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 2
9680 // CHECK14-NEXT: store ptr [[TMP93]], ptr [[TMP97]], align 8
9681 // CHECK14-NEXT: [[TMP98:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 3
9682 // CHECK14-NEXT: store ptr [[TMP94]], ptr [[TMP98]], align 8
9683 // CHECK14-NEXT: [[TMP99:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 4
9684 // CHECK14-NEXT: store ptr @.offload_sizes.17, ptr [[TMP99]], align 8
9685 // CHECK14-NEXT: [[TMP100:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 5
9686 // CHECK14-NEXT: store ptr @.offload_maptypes.18, ptr [[TMP100]], align 8
9687 // CHECK14-NEXT: [[TMP101:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 6
9688 // CHECK14-NEXT: store ptr null, ptr [[TMP101]], align 8
9689 // CHECK14-NEXT: [[TMP102:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 7
9690 // CHECK14-NEXT: store ptr null, ptr [[TMP102]], align 8
9691 // CHECK14-NEXT: [[TMP103:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 8
9692 // CHECK14-NEXT: store i64 10, ptr [[TMP103]], align 8
9693 // CHECK14-NEXT: [[TMP104:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 9
9694 // CHECK14-NEXT: store i64 0, ptr [[TMP104]], align 8
9695 // CHECK14-NEXT: [[TMP105:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 10
9696 // CHECK14-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP105]], align 4
9697 // CHECK14-NEXT: [[TMP106:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 11
9698 // CHECK14-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP106]], align 4
9699 // CHECK14-NEXT: [[TMP107:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 12
9700 // CHECK14-NEXT: store i32 0, ptr [[TMP107]], align 4
9701 // CHECK14-NEXT: [[TMP108:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.region_id, ptr [[KERNEL_ARGS27]])
9702 // CHECK14-NEXT: [[TMP109:%.*]] = icmp ne i32 [[TMP108]], 0
9703 // CHECK14-NEXT: br i1 [[TMP109]], label [[OMP_OFFLOAD_FAILED28:%.*]], label [[OMP_OFFLOAD_CONT29:%.*]]
9704 // CHECK14: omp_offload.failed28:
9705 // CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142(i64 [[TMP86]], ptr [[A]]) #[[ATTR3]]
9706 // CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT29]]
9707 // CHECK14: omp_offload.cont29:
9708 // CHECK14-NEXT: ret i32 0
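// The l122 kernel takes only the array by reference; its teams-level outlined function
// below runs a statically scheduled distribute over the constant trip count of 10
// (upper bound clamped to 9) and forks a three-argument worksharing call.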
9711 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122
9712 // CHECK14-SAME: (ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
9713 // CHECK14-NEXT: entry:
9714 // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
9715 // CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
9716 // CHECK14-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
9717 // CHECK14-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.omp_outlined, ptr [[TMP0]])
9718 // CHECK14-NEXT: ret void
9721 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.omp_outlined
9722 // CHECK14-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
9723 // CHECK14-NEXT: entry:
9724 // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
9725 // CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
9726 // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
9727 // CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
9728 // CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
9729 // CHECK14-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
9730 // CHECK14-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
9731 // CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
9732 // CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
9733 // CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
9734 // CHECK14-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
9735 // CHECK14-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
9736 // CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
9737 // CHECK14-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
9738 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
9739 // CHECK14-NEXT: store i32 9, ptr [[DOTOMP_COMB_UB]], align 4
9740 // CHECK14-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
9741 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
9742 // CHECK14-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
9743 // CHECK14-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
9744 // CHECK14-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
9745 // CHECK14-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
9746 // CHECK14-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
9747 // CHECK14-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
9748 // CHECK14: cond.true:
9749 // CHECK14-NEXT: br label [[COND_END:%.*]]
9750 // CHECK14: cond.false:
9751 // CHECK14-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
9752 // CHECK14-NEXT: br label [[COND_END]]
9753 // CHECK14: cond.end:
9754 // CHECK14-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
9755 // CHECK14-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
9756 // CHECK14-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
9757 // CHECK14-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
9758 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
9759 // CHECK14: omp.inner.for.cond:
9760 // CHECK14-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP46:![0-9]+]]
9761 // CHECK14-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP46]]
9762 // CHECK14-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
9763 // CHECK14-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
9764 // CHECK14: omp.inner.for.body:
9765 // CHECK14-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP46]]
9766 // CHECK14-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
9767 // CHECK14-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP46]]
9768 // CHECK14-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
9769 // CHECK14-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP46]]
9770 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
9771 // CHECK14: omp.inner.for.inc:
9772 // CHECK14-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP46]]
9773 // CHECK14-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP46]]
9774 // CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
9775 // CHECK14-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP46]]
9776 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP47:![0-9]+]]
9777 // CHECK14: omp.inner.for.end:
9778 // CHECK14-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
9779 // CHECK14: omp.loop.exit:
9780 // CHECK14-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
9781 // CHECK14-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
9782 // CHECK14-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
9783 // CHECK14-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
9784 // CHECK14: .omp.final.then:
9785 // CHECK14-NEXT: store i32 10, ptr [[I]], align 4
9786 // CHECK14-NEXT: br label [[DOTOMP_FINAL_DONE]]
9787 // CHECK14: .omp.final.done:
9788 // CHECK14-NEXT: ret void
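// Inner worksharing routine for l122: plain static scheduling (__kmpc_for_static_init_4
// with schedule 34) over the inherited bounds, storing zero into the [10 x i32] array
// element for each iteration.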
9791 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.omp_outlined.omp_outlined
9792 // CHECK14-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
9793 // CHECK14-NEXT: entry:
9794 // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
9795 // CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
9796 // CHECK14-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
9797 // CHECK14-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
9798 // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
9799 // CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
9800 // CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
9801 // CHECK14-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
9802 // CHECK14-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
9803 // CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
9804 // CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
9805 // CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
9806 // CHECK14-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
9807 // CHECK14-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
9808 // CHECK14-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
9809 // CHECK14-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
9810 // CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
9811 // CHECK14-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
9812 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
9813 // CHECK14-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
9814 // CHECK14-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
9815 // CHECK14-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
9816 // CHECK14-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
9817 // CHECK14-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
9818 // CHECK14-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
9819 // CHECK14-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
9820 // CHECK14-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
9821 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
9822 // CHECK14-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
9823 // CHECK14-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
9824 // CHECK14-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
9825 // CHECK14-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
9826 // CHECK14-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 9
9827 // CHECK14-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
9828 // CHECK14: cond.true:
9829 // CHECK14-NEXT: br label [[COND_END:%.*]]
9830 // CHECK14: cond.false:
9831 // CHECK14-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
9832 // CHECK14-NEXT: br label [[COND_END]]
9833 // CHECK14: cond.end:
9834 // CHECK14-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
9835 // CHECK14-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
9836 // CHECK14-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
9837 // CHECK14-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4
9838 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
9839 // CHECK14: omp.inner.for.cond:
9840 // CHECK14-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP49:![0-9]+]]
9841 // CHECK14-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP49]]
9842 // CHECK14-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
9843 // CHECK14-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
9844 // CHECK14: omp.inner.for.body:
9845 // CHECK14-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP49]]
9846 // CHECK14-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
9847 // CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
9848 // CHECK14-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP49]]
9849 // CHECK14-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP49]]
9850 // CHECK14-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
9851 // CHECK14-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
9852 // CHECK14-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP49]]
9853 // CHECK14-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
9854 // CHECK14: omp.body.continue:
9855 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
9856 // CHECK14: omp.inner.for.inc:
9857 // CHECK14-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP49]]
9858 // CHECK14-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
9859 // CHECK14-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP49]]
9860 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP50:![0-9]+]]
9861 // CHECK14: omp.inner.for.end:
9862 // CHECK14-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
9863 // CHECK14: omp.loop.exit:
9864 // CHECK14-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP4]])
9865 // CHECK14-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
9866 // CHECK14-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
9867 // CHECK14-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
9868 // CHECK14: .omp.final.then:
9869 // CHECK14-NEXT: store i32 10, ptr [[I]], align 4
9870 // CHECK14-NEXT: br label [[DOTOMP_FINAL_DONE]]
9871 // CHECK14: .omp.final.done:
9872 // CHECK14-NEXT: ret void
9875 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127
9876 // CHECK14-SAME: (ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
9877 // CHECK14-NEXT: entry:
9878 // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
9879 // CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
9880 // CHECK14-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
9881 // CHECK14-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.omp_outlined, ptr [[TMP0]])
9882 // CHECK14-NEXT: ret void
9885 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.omp_outlined
9886 // CHECK14-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
9887 // CHECK14-NEXT: entry:
9888 // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
9889 // CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
9890 // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
9891 // CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
9892 // CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
9893 // CHECK14-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
9894 // CHECK14-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
9895 // CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
9896 // CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
9897 // CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
9898 // CHECK14-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
9899 // CHECK14-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
9900 // CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
9901 // CHECK14-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
9902 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
9903 // CHECK14-NEXT: store i32 9, ptr [[DOTOMP_COMB_UB]], align 4
9904 // CHECK14-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
9905 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
9906 // CHECK14-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
9907 // CHECK14-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
9908 // CHECK14-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
9909 // CHECK14-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
9910 // CHECK14-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
9911 // CHECK14-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
9912 // CHECK14: cond.true:
9913 // CHECK14-NEXT: br label [[COND_END:%.*]]
9914 // CHECK14: cond.false:
9915 // CHECK14-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
9916 // CHECK14-NEXT: br label [[COND_END]]
9917 // CHECK14: cond.end:
9918 // CHECK14-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
9919 // CHECK14-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
9920 // CHECK14-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
9921 // CHECK14-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
9922 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
9923 // CHECK14: omp.inner.for.cond:
9924 // CHECK14-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP52:![0-9]+]]
9925 // CHECK14-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP52]]
9926 // CHECK14-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
9927 // CHECK14-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
9928 // CHECK14: omp.inner.for.body:
9929 // CHECK14-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP52]]
9930 // CHECK14-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
9931 // CHECK14-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP52]]
9932 // CHECK14-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
9933 // CHECK14-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP52]]
9934 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
9935 // CHECK14: omp.inner.for.inc:
9936 // CHECK14-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP52]]
9937 // CHECK14-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP52]]
9938 // CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
9939 // CHECK14-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP52]]
9940 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP53:![0-9]+]]
9941 // CHECK14: omp.inner.for.end:
9942 // CHECK14-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
9943 // CHECK14: omp.loop.exit:
9944 // CHECK14-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
9945 // CHECK14-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
9946 // CHECK14-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
9947 // CHECK14-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
9948 // CHECK14: .omp.final.then:
9949 // CHECK14-NEXT: store i32 10, ptr [[I]], align 4
9950 // CHECK14-NEXT: br label [[DOTOMP_FINAL_DONE]]
9951 // CHECK14: .omp.final.done:
9952 // CHECK14-NEXT: ret void
9955 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.omp_outlined.omp_outlined
9956 // CHECK14-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
9957 // CHECK14-NEXT: entry:
9958 // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
9959 // CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
9960 // CHECK14-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
9961 // CHECK14-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
9962 // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
9963 // CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
9964 // CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
9965 // CHECK14-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
9966 // CHECK14-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
9967 // CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
9968 // CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
9969 // CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
9970 // CHECK14-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
9971 // CHECK14-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
9972 // CHECK14-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
9973 // CHECK14-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
9974 // CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
9975 // CHECK14-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
9976 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
9977 // CHECK14-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
9978 // CHECK14-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
9979 // CHECK14-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
9980 // CHECK14-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
9981 // CHECK14-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
9982 // CHECK14-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
9983 // CHECK14-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
9984 // CHECK14-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
9985 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
9986 // CHECK14-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
9987 // CHECK14-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
9988 // CHECK14-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
9989 // CHECK14-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
9990 // CHECK14-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 9
9991 // CHECK14-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
9992 // CHECK14: cond.true:
9993 // CHECK14-NEXT: br label [[COND_END:%.*]]
9994 // CHECK14: cond.false:
9995 // CHECK14-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
9996 // CHECK14-NEXT: br label [[COND_END]]
9997 // CHECK14: cond.end:
9998 // CHECK14-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
9999 // CHECK14-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
10000 // CHECK14-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
10001 // CHECK14-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4
10002 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
10003 // CHECK14: omp.inner.for.cond:
10004 // CHECK14-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP55:![0-9]+]]
10005 // CHECK14-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP55]]
10006 // CHECK14-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
10007 // CHECK14-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10008 // CHECK14: omp.inner.for.body:
10009 // CHECK14-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP55]]
10010 // CHECK14-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
10011 // CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
10012 // CHECK14-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP55]]
10013 // CHECK14-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP55]]
10014 // CHECK14-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
10015 // CHECK14-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
10016 // CHECK14-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP55]]
10017 // CHECK14-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
10018 // CHECK14: omp.body.continue:
10019 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
10020 // CHECK14: omp.inner.for.inc:
10021 // CHECK14-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP55]]
10022 // CHECK14-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
10023 // CHECK14-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP55]]
10024 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP56:![0-9]+]]
10025 // CHECK14: omp.inner.for.end:
10026 // CHECK14-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
10027 // CHECK14: omp.loop.exit:
10028 // CHECK14-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP4]])
10029 // CHECK14-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
10030 // CHECK14-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
10031 // CHECK14-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
10032 // CHECK14: .omp.final.then:
10033 // CHECK14-NEXT: store i32 10, ptr [[I]], align 4
10034 // CHECK14-NEXT: br label [[DOTOMP_FINAL_DONE]]
10035 // CHECK14: .omp.final.done:
10036 // CHECK14-NEXT: ret void
10039 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132
10040 // CHECK14-SAME: (i64 noundef [[M:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
10041 // CHECK14-NEXT: entry:
10042 // CHECK14-NEXT: [[M_ADDR:%.*]] = alloca i64, align 8
10043 // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
10044 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
10045 // CHECK14-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
10046 // CHECK14-NEXT: store i64 [[M]], ptr [[M_ADDR]], align 8
10047 // CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
10048 // CHECK14-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
10049 // CHECK14-NEXT: [[TMP1:%.*]] = load i32, ptr [[M_ADDR]], align 4
10050 // CHECK14-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
10051 // CHECK14-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
10052 // CHECK14-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
10053 // CHECK14-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8
10054 // CHECK14-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.omp_outlined, ptr [[TMP0]], i64 [[TMP3]])
10055 // CHECK14-NEXT: ret void
10058 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.omp_outlined
10059 // CHECK14-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
10060 // CHECK14-NEXT: entry:
10061 // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
10062 // CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
10063 // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
10064 // CHECK14-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
10065 // CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
10066 // CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
10067 // CHECK14-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
10068 // CHECK14-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
10069 // CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10070 // CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10071 // CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
10072 // CHECK14-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
10073 // CHECK14-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
10074 // CHECK14-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
10075 // CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
10076 // CHECK14-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
10077 // CHECK14-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
10078 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
10079 // CHECK14-NEXT: store i32 9, ptr [[DOTOMP_COMB_UB]], align 4
10080 // CHECK14-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
10081 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
10082 // CHECK14-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
10083 // CHECK14-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
10084 // CHECK14-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
10085 // CHECK14-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
10086 // CHECK14-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
10087 // CHECK14-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
10088 // CHECK14: cond.true:
10089 // CHECK14-NEXT: br label [[COND_END:%.*]]
10090 // CHECK14: cond.false:
10091 // CHECK14-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
10092 // CHECK14-NEXT: br label [[COND_END]]
10093 // CHECK14: cond.end:
10094 // CHECK14-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
10095 // CHECK14-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
10096 // CHECK14-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
10097 // CHECK14-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
10098 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
10099 // CHECK14: omp.inner.for.cond:
10100 // CHECK14-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP58:![0-9]+]]
10101 // CHECK14-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP58]]
10102 // CHECK14-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
10103 // CHECK14-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10104 // CHECK14: omp.inner.for.body:
10105 // CHECK14-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP58]]
10106 // CHECK14-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
10107 // CHECK14-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP58]]
10108 // CHECK14-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
10109 // CHECK14-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group [[ACC_GRP58]]
10110 // CHECK14-NEXT: store i32 [[TMP12]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group [[ACC_GRP58]]
10111 // CHECK14-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group [[ACC_GRP58]]
10112 // CHECK14-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]], ptr [[TMP0]], i64 [[TMP13]]), !llvm.access.group [[ACC_GRP58]]
10113 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
10114 // CHECK14: omp.inner.for.inc:
10115 // CHECK14-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP58]]
10116 // CHECK14-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP58]]
10117 // CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
10118 // CHECK14-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP58]]
10119 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP59:![0-9]+]]
10120 // CHECK14: omp.inner.for.end:
10121 // CHECK14-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
10122 // CHECK14: omp.loop.exit:
10123 // CHECK14-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
10124 // CHECK14-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
10125 // CHECK14-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
10126 // CHECK14-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
10127 // CHECK14: .omp.final.then:
10128 // CHECK14-NEXT: store i32 10, ptr [[I]], align 4
10129 // CHECK14-NEXT: br label [[DOTOMP_FINAL_DONE]]
10130 // CHECK14: .omp.final.done:
10131 // CHECK14-NEXT: ret void
10134 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.omp_outlined.omp_outlined
10135 // CHECK14-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
10136 // CHECK14-NEXT: entry:
10137 // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
10138 // CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
10139 // CHECK14-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
10140 // CHECK14-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
10141 // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
10142 // CHECK14-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
10143 // CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
10144 // CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
10145 // CHECK14-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
10146 // CHECK14-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
10147 // CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10148 // CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10149 // CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
10150 // CHECK14-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
10151 // CHECK14-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
10152 // CHECK14-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
10153 // CHECK14-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
10154 // CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
10155 // CHECK14-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
10156 // CHECK14-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
10157 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
10158 // CHECK14-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
10159 // CHECK14-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
10160 // CHECK14-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
10161 // CHECK14-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
10162 // CHECK14-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
10163 // CHECK14-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
10164 // CHECK14-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
10165 // CHECK14-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
10166 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
10167 // CHECK14-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
10168 // CHECK14-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
10169 // CHECK14-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4
10170 // CHECK14-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP5]], i32 33, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[TMP3]])
10171 // CHECK14-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
10172 // CHECK14: omp.dispatch.cond:
10173 // CHECK14-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
10174 // CHECK14-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
10175 // CHECK14-NEXT: [[CONV2:%.*]] = trunc i64 [[TMP7]] to i32
10176 // CHECK14-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], [[CONV2]]
10177 // CHECK14-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
10178 // CHECK14: cond.true:
10179 // CHECK14-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
10180 // CHECK14-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP8]] to i32
10181 // CHECK14-NEXT: br label [[COND_END:%.*]]
10182 // CHECK14: cond.false:
10183 // CHECK14-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
10184 // CHECK14-NEXT: br label [[COND_END]]
10185 // CHECK14: cond.end:
10186 // CHECK14-NEXT: [[COND:%.*]] = phi i32 [ [[CONV3]], [[COND_TRUE]] ], [ [[TMP9]], [[COND_FALSE]] ]
10187 // CHECK14-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
10188 // CHECK14-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
10189 // CHECK14-NEXT: store i32 [[TMP10]], ptr [[DOTOMP_IV]], align 4
10190 // CHECK14-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
10191 // CHECK14-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
10192 // CHECK14-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]]
10193 // CHECK14-NEXT: br i1 [[CMP4]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
10194 // CHECK14: omp.dispatch.body:
10195 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
10196 // CHECK14: omp.inner.for.cond:
10197 // CHECK14-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP61:![0-9]+]]
10198 // CHECK14-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP61]]
10199 // CHECK14-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
10200 // CHECK14-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10201 // CHECK14: omp.inner.for.body:
10202 // CHECK14-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP61]]
10203 // CHECK14-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1
10204 // CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
10205 // CHECK14-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP61]]
10206 // CHECK14-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP61]]
10207 // CHECK14-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP16]] to i64
10208 // CHECK14-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
10209 // CHECK14-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP61]]
10210 // CHECK14-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
10211 // CHECK14: omp.body.continue:
10212 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
10213 // CHECK14: omp.inner.for.inc:
10214 // CHECK14-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP61]]
10215 // CHECK14-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP17]], 1
10216 // CHECK14-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP61]]
10217 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP62:![0-9]+]]
10218 // CHECK14: omp.inner.for.end:
10219 // CHECK14-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
10220 // CHECK14: omp.dispatch.inc:
10221 // CHECK14-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
10222 // CHECK14-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
10223 // CHECK14-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
10224 // CHECK14-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_LB]], align 4
10225 // CHECK14-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
10226 // CHECK14-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
10227 // CHECK14-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
10228 // CHECK14-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_UB]], align 4
10229 // CHECK14-NEXT: br label [[OMP_DISPATCH_COND]]
10230 // CHECK14: omp.dispatch.end:
10231 // CHECK14-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP5]])
10232 // CHECK14-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
10233 // CHECK14-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
10234 // CHECK14-NEXT: br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
10235 // CHECK14: .omp.final.then:
10236 // CHECK14-NEXT: store i32 10, ptr [[I]], align 4
10237 // CHECK14-NEXT: br label [[DOTOMP_FINAL_DONE]]
10238 // CHECK14: .omp.final.done:
10239 // CHECK14-NEXT: ret void
10242 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137
10243 // CHECK14-SAME: (ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
10244 // CHECK14-NEXT: entry:
10245 // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
10246 // CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
10247 // CHECK14-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
10248 // CHECK14-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.omp_outlined, ptr [[TMP0]])
10249 // CHECK14-NEXT: ret void
10252 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.omp_outlined
10253 // CHECK14-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
10254 // CHECK14-NEXT: entry:
10255 // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
10256 // CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
10257 // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
10258 // CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
10259 // CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
10260 // CHECK14-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
10261 // CHECK14-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
10262 // CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10263 // CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10264 // CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
10265 // CHECK14-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
10266 // CHECK14-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
10267 // CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
10268 // CHECK14-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
10269 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
10270 // CHECK14-NEXT: store i32 9, ptr [[DOTOMP_COMB_UB]], align 4
10271 // CHECK14-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
10272 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
10273 // CHECK14-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
10274 // CHECK14-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
10275 // CHECK14-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
10276 // CHECK14-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
10277 // CHECK14-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
10278 // CHECK14-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
10279 // CHECK14: cond.true:
10280 // CHECK14-NEXT: br label [[COND_END:%.*]]
10281 // CHECK14: cond.false:
10282 // CHECK14-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
10283 // CHECK14-NEXT: br label [[COND_END]]
10284 // CHECK14: cond.end:
10285 // CHECK14-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
10286 // CHECK14-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
10287 // CHECK14-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
10288 // CHECK14-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
10289 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
10290 // CHECK14: omp.inner.for.cond:
10291 // CHECK14-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP64:![0-9]+]]
10292 // CHECK14-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP64]]
10293 // CHECK14-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
10294 // CHECK14-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10295 // CHECK14: omp.inner.for.body:
10296 // CHECK14-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP64]]
10297 // CHECK14-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
10298 // CHECK14-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP64]]
10299 // CHECK14-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
10300 // CHECK14-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP64]]
10301 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
10302 // CHECK14: omp.inner.for.inc:
10303 // CHECK14-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP64]]
10304 // CHECK14-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP64]]
10305 // CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
10306 // CHECK14-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP64]]
10307 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP65:![0-9]+]]
10308 // CHECK14: omp.inner.for.end:
10309 // CHECK14-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
10310 // CHECK14: omp.loop.exit:
10311 // CHECK14-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
10312 // CHECK14-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
10313 // CHECK14-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
10314 // CHECK14-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
10315 // CHECK14: .omp.final.then:
10316 // CHECK14-NEXT: store i32 10, ptr [[I]], align 4
10317 // CHECK14-NEXT: br label [[DOTOMP_FINAL_DONE]]
10318 // CHECK14: .omp.final.done:
10319 // CHECK14-NEXT: ret void
10322 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.omp_outlined.omp_outlined
10323 // CHECK14-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
10324 // CHECK14-NEXT: entry:
10325 // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
10326 // CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
10327 // CHECK14-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
10328 // CHECK14-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
10329 // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
10330 // CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
10331 // CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
10332 // CHECK14-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
10333 // CHECK14-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
10334 // CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10335 // CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10336 // CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
10337 // CHECK14-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
10338 // CHECK14-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
10339 // CHECK14-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
10340 // CHECK14-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
10341 // CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
10342 // CHECK14-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
10343 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
10344 // CHECK14-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
10345 // CHECK14-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
10346 // CHECK14-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
10347 // CHECK14-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
10348 // CHECK14-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
10349 // CHECK14-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
10350 // CHECK14-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
10351 // CHECK14-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
10352 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
10353 // CHECK14-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
10354 // CHECK14-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
10355 // CHECK14-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
10356 // CHECK14-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
10357 // CHECK14-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB3]], i32 [[TMP6]], i32 1073741859, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 1)
10358 // CHECK14-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
10359 // CHECK14: omp.dispatch.cond:
10360 // CHECK14-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB3]], i32 [[TMP6]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
10361 // CHECK14-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
10362 // CHECK14-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
10363 // CHECK14: omp.dispatch.body:
10364 // CHECK14-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
10365 // CHECK14-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4
10366 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
10367 // CHECK14: omp.inner.for.cond:
10368 // CHECK14-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP67:![0-9]+]]
10369 // CHECK14-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP67]]
10370 // CHECK14-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
10371 // CHECK14-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10372 // CHECK14: omp.inner.for.body:
10373 // CHECK14-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP67]]
10374 // CHECK14-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
10375 // CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
10376 // CHECK14-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP67]]
10377 // CHECK14-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP67]]
10378 // CHECK14-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP12]] to i64
10379 // CHECK14-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
10380 // CHECK14-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP67]]
10381 // CHECK14-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
10382 // CHECK14: omp.body.continue:
10383 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
10384 // CHECK14: omp.inner.for.inc:
10385 // CHECK14-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP67]]
10386 // CHECK14-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP13]], 1
10387 // CHECK14-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP67]]
10388 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP68:![0-9]+]]
10389 // CHECK14: omp.inner.for.end:
10390 // CHECK14-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
10391 // CHECK14: omp.dispatch.inc:
10392 // CHECK14-NEXT: br label [[OMP_DISPATCH_COND]]
10393 // CHECK14: omp.dispatch.end:
10394 // CHECK14-NEXT: call void @__kmpc_dispatch_deinit(ptr @[[GLOB3]], i32 [[TMP6]])
10395 // CHECK14-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
10396 // CHECK14-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
10397 // CHECK14-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
10398 // CHECK14: .omp.final.then:
10399 // CHECK14-NEXT: store i32 10, ptr [[I]], align 4
10400 // CHECK14-NEXT: br label [[DOTOMP_FINAL_DONE]]
10401 // CHECK14: .omp.final.done:
10402 // CHECK14-NEXT: ret void
10405 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142
10406 // CHECK14-SAME: (i64 noundef [[M:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
10407 // CHECK14-NEXT: entry:
10408 // CHECK14-NEXT: [[M_ADDR:%.*]] = alloca i64, align 8
10409 // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
10410 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
10411 // CHECK14-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
10412 // CHECK14-NEXT: store i64 [[M]], ptr [[M_ADDR]], align 8
10413 // CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
10414 // CHECK14-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
10415 // CHECK14-NEXT: [[TMP1:%.*]] = load i32, ptr [[M_ADDR]], align 4
10416 // CHECK14-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
10417 // CHECK14-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
10418 // CHECK14-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
10419 // CHECK14-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8
10420 // CHECK14-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.omp_outlined, ptr [[TMP0]], i64 [[TMP3]])
10421 // CHECK14-NEXT: ret void
10424 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.omp_outlined
10425 // CHECK14-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
10426 // CHECK14-NEXT: entry:
10427 // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
10428 // CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
10429 // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
10430 // CHECK14-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
10431 // CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
10432 // CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
10433 // CHECK14-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
10434 // CHECK14-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
10435 // CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10436 // CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10437 // CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
10438 // CHECK14-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
10439 // CHECK14-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
10440 // CHECK14-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
10441 // CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
10442 // CHECK14-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
10443 // CHECK14-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
10444 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
10445 // CHECK14-NEXT: store i32 9, ptr [[DOTOMP_COMB_UB]], align 4
10446 // CHECK14-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
10447 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
10448 // CHECK14-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
10449 // CHECK14-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
10450 // CHECK14-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
10451 // CHECK14-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
10452 // CHECK14-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
10453 // CHECK14-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
10454 // CHECK14: cond.true:
10455 // CHECK14-NEXT: br label [[COND_END:%.*]]
10456 // CHECK14: cond.false:
10457 // CHECK14-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
10458 // CHECK14-NEXT: br label [[COND_END]]
10459 // CHECK14: cond.end:
10460 // CHECK14-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
10461 // CHECK14-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
10462 // CHECK14-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
10463 // CHECK14-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
10464 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
10465 // CHECK14: omp.inner.for.cond:
10466 // CHECK14-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP70:![0-9]+]]
10467 // CHECK14-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP70]]
10468 // CHECK14-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
10469 // CHECK14-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10470 // CHECK14: omp.inner.for.body:
10471 // CHECK14-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP70]]
10472 // CHECK14-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
10473 // CHECK14-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP70]]
10474 // CHECK14-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
10475 // CHECK14-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group [[ACC_GRP70]]
10476 // CHECK14-NEXT: store i32 [[TMP12]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group [[ACC_GRP70]]
10477 // CHECK14-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group [[ACC_GRP70]]
10478 // CHECK14-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]], ptr [[TMP0]], i64 [[TMP13]]), !llvm.access.group [[ACC_GRP70]]
10479 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
10480 // CHECK14: omp.inner.for.inc:
10481 // CHECK14-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP70]]
10482 // CHECK14-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP70]]
10483 // CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
10484 // CHECK14-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP70]]
10485 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP71:![0-9]+]]
10486 // CHECK14: omp.inner.for.end:
10487 // CHECK14-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
10488 // CHECK14: omp.loop.exit:
10489 // CHECK14-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
10490 // CHECK14-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
10491 // CHECK14-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
10492 // CHECK14-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
10493 // CHECK14: .omp.final.then:
10494 // CHECK14-NEXT: store i32 10, ptr [[I]], align 4
10495 // CHECK14-NEXT: br label [[DOTOMP_FINAL_DONE]]
10496 // CHECK14: .omp.final.done:
10497 // CHECK14-NEXT: ret void
10500 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.omp_outlined.omp_outlined
10501 // CHECK14-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
10502 // CHECK14-NEXT: entry:
10503 // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
10504 // CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
10505 // CHECK14-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
10506 // CHECK14-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
10507 // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
10508 // CHECK14-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
10509 // CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
10510 // CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
10511 // CHECK14-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
10512 // CHECK14-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
10513 // CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
10514 // CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
10515 // CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
10516 // CHECK14-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
10517 // CHECK14-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
10518 // CHECK14-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
10519 // CHECK14-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
10520 // CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
10521 // CHECK14-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
10522 // CHECK14-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
10523 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
10524 // CHECK14-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
10525 // CHECK14-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
10526 // CHECK14-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
10527 // CHECK14-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
10528 // CHECK14-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
10529 // CHECK14-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
10530 // CHECK14-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
10531 // CHECK14-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
10532 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
10533 // CHECK14-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
10534 // CHECK14-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
10535 // CHECK14-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
10536 // CHECK14-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
10537 // CHECK14-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 4
10538 // CHECK14-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB3]], i32 [[TMP7]], i32 1073741859, i32 [[TMP4]], i32 [[TMP5]], i32 1, i32 [[TMP3]])
10539 // CHECK14-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
10540 // CHECK14: omp.dispatch.cond:
10541 // CHECK14-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB3]], i32 [[TMP7]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
10542 // CHECK14-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP8]], 0
10543 // CHECK14-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
10544 // CHECK14: omp.dispatch.body:
10545 // CHECK14-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
10546 // CHECK14-NEXT: store i32 [[TMP9]], ptr [[DOTOMP_IV]], align 4
10547 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
10548 // CHECK14: omp.inner.for.cond:
10549 // CHECK14-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP73:![0-9]+]]
10550 // CHECK14-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP73]]
10551 // CHECK14-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
10552 // CHECK14-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
10553 // CHECK14: omp.inner.for.body:
10554 // CHECK14-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP73]]
10555 // CHECK14-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
10556 // CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
10557 // CHECK14-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP73]]
10558 // CHECK14-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP73]]
10559 // CHECK14-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64
10560 // CHECK14-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
10561 // CHECK14-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP73]]
10562 // CHECK14-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
10563 // CHECK14: omp.body.continue:
10564 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
10565 // CHECK14: omp.inner.for.inc:
10566 // CHECK14-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP73]]
10567 // CHECK14-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP14]], 1
10568 // CHECK14-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP73]]
10569 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP74:![0-9]+]]
10570 // CHECK14: omp.inner.for.end:
10571 // CHECK14-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
10572 // CHECK14: omp.dispatch.inc:
10573 // CHECK14-NEXT: br label [[OMP_DISPATCH_COND]]
10574 // CHECK14: omp.dispatch.end:
10575 // CHECK14-NEXT: call void @__kmpc_dispatch_deinit(ptr @[[GLOB3]], i32 [[TMP7]])
10576 // CHECK14-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
10577 // CHECK14-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0
10578 // CHECK14-NEXT: br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
10579 // CHECK14: .omp.final.then:
10580 // CHECK14-NEXT: store i32 10, ptr [[I]], align 4
10581 // CHECK14-NEXT: br label [[DOTOMP_FINAL_DONE]]
10582 // CHECK14: .omp.final.done:
10583 // CHECK14-NEXT: ret void
10586 // CHECK17-LABEL: define {{[^@]+}}@main
10587 // CHECK17-SAME: (i32 noundef [[ARGC:%.*]], ptr noundef [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] {
10588 // CHECK17-NEXT: entry:
10589 // CHECK17-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
10590 // CHECK17-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
10591 // CHECK17-NEXT: [[ARGV_ADDR:%.*]] = alloca ptr, align 4
10592 // CHECK17-NEXT: [[N:%.*]] = alloca i32, align 4
10593 // CHECK17-NEXT: [[SAVED_STACK:%.*]] = alloca ptr, align 4
10594 // CHECK17-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4
10595 // CHECK17-NEXT: [[M:%.*]] = alloca i32, align 4
10596 // CHECK17-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
10597 // CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x ptr], align 4
10598 // CHECK17-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x ptr], align 4
10599 // CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x ptr], align 4
10600 // CHECK17-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4
10601 // CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
10602 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
10603 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
10604 // CHECK17-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
10605 // CHECK17-NEXT: [[N_CASTED3:%.*]] = alloca i32, align 4
10606 // CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [3 x ptr], align 4
10607 // CHECK17-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [3 x ptr], align 4
10608 // CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [3 x ptr], align 4
10609 // CHECK17-NEXT: [[DOTOFFLOAD_SIZES7:%.*]] = alloca [3 x i64], align 4
10610 // CHECK17-NEXT: [[_TMP8:%.*]] = alloca i32, align 4
10611 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4
10612 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4
10613 // CHECK17-NEXT: [[KERNEL_ARGS15:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
10614 // CHECK17-NEXT: [[M_CASTED:%.*]] = alloca i32, align 4
10615 // CHECK17-NEXT: [[N_CASTED18:%.*]] = alloca i32, align 4
10616 // CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS19:%.*]] = alloca [4 x ptr], align 4
10617 // CHECK17-NEXT: [[DOTOFFLOAD_PTRS20:%.*]] = alloca [4 x ptr], align 4
10618 // CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS21:%.*]] = alloca [4 x ptr], align 4
10619 // CHECK17-NEXT: [[DOTOFFLOAD_SIZES22:%.*]] = alloca [4 x i64], align 4
10620 // CHECK17-NEXT: [[_TMP23:%.*]] = alloca i32, align 4
10621 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4
10622 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4
10623 // CHECK17-NEXT: [[KERNEL_ARGS30:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
10624 // CHECK17-NEXT: [[N_CASTED33:%.*]] = alloca i32, align 4
10625 // CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS34:%.*]] = alloca [3 x ptr], align 4
10626 // CHECK17-NEXT: [[DOTOFFLOAD_PTRS35:%.*]] = alloca [3 x ptr], align 4
10627 // CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS36:%.*]] = alloca [3 x ptr], align 4
10628 // CHECK17-NEXT: [[DOTOFFLOAD_SIZES37:%.*]] = alloca [3 x i64], align 4
10629 // CHECK17-NEXT: [[_TMP38:%.*]] = alloca i32, align 4
10630 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4
10631 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_40:%.*]] = alloca i32, align 4
10632 // CHECK17-NEXT: [[KERNEL_ARGS45:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
10633 // CHECK17-NEXT: [[M_CASTED48:%.*]] = alloca i32, align 4
10634 // CHECK17-NEXT: [[N_CASTED49:%.*]] = alloca i32, align 4
10635 // CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS50:%.*]] = alloca [4 x ptr], align 4
10636 // CHECK17-NEXT: [[DOTOFFLOAD_PTRS51:%.*]] = alloca [4 x ptr], align 4
10637 // CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS52:%.*]] = alloca [4 x ptr], align 4
10638 // CHECK17-NEXT: [[DOTOFFLOAD_SIZES53:%.*]] = alloca [4 x i64], align 4
10639 // CHECK17-NEXT: [[_TMP54:%.*]] = alloca i32, align 4
10640 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_55:%.*]] = alloca i32, align 4
10641 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_56:%.*]] = alloca i32, align 4
10642 // CHECK17-NEXT: [[KERNEL_ARGS61:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
10643 // CHECK17-NEXT: store i32 0, ptr [[RETVAL]], align 4
10644 // CHECK17-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4
10645 // CHECK17-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 4
10646 // CHECK17-NEXT: store i32 100, ptr [[N]], align 4
10647 // CHECK17-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4
10648 // CHECK17-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0()
10649 // CHECK17-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4
10650 // CHECK17-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4
10651 // CHECK17-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4
10652 // CHECK17-NEXT: store i32 10, ptr [[M]], align 4
10653 // CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[N]], align 4
10654 // CHECK17-NEXT: store i32 [[TMP2]], ptr [[N_CASTED]], align 4
10655 // CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_CASTED]], align 4
10656 // CHECK17-NEXT: [[TMP4:%.*]] = mul nuw i32 [[TMP0]], 4
10657 // CHECK17-NEXT: [[TMP5:%.*]] = sext i32 [[TMP4]] to i64
10658 // CHECK17-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[DOTOFFLOAD_SIZES]], ptr align 4 @.offload_sizes, i32 24, i1 false)
10659 // CHECK17-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
10660 // CHECK17-NEXT: store i32 [[TMP3]], ptr [[TMP6]], align 4
10661 // CHECK17-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
10662 // CHECK17-NEXT: store i32 [[TMP3]], ptr [[TMP7]], align 4
10663 // CHECK17-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
10664 // CHECK17-NEXT: store ptr null, ptr [[TMP8]], align 4
10665 // CHECK17-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
10666 // CHECK17-NEXT: store i32 [[TMP0]], ptr [[TMP9]], align 4
10667 // CHECK17-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1
10668 // CHECK17-NEXT: store i32 [[TMP0]], ptr [[TMP10]], align 4
10669 // CHECK17-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
10670 // CHECK17-NEXT: store ptr null, ptr [[TMP11]], align 4
10671 // CHECK17-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
10672 // CHECK17-NEXT: store ptr [[VLA]], ptr [[TMP12]], align 4
10673 // CHECK17-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2
10674 // CHECK17-NEXT: store ptr [[VLA]], ptr [[TMP13]], align 4
10675 // CHECK17-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 2
10676 // CHECK17-NEXT: store i64 [[TMP5]], ptr [[TMP14]], align 4
10677 // CHECK17-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
10678 // CHECK17-NEXT: store ptr null, ptr [[TMP15]], align 4
10679 // CHECK17-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
10680 // CHECK17-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
10681 // CHECK17-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 0
10682 // CHECK17-NEXT: [[TMP19:%.*]] = load i32, ptr [[N]], align 4
10683 // CHECK17-NEXT: store i32 [[TMP19]], ptr [[DOTCAPTURE_EXPR_]], align 4
10684 // CHECK17-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
10685 // CHECK17-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP20]], 0
10686 // CHECK17-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
10687 // CHECK17-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
10688 // CHECK17-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
10689 // CHECK17-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
10690 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP21]], 1
10691 // CHECK17-NEXT: [[TMP22:%.*]] = zext i32 [[ADD]] to i64
10692 // CHECK17-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
10693 // CHECK17-NEXT: store i32 3, ptr [[TMP23]], align 4
10694 // CHECK17-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
10695 // CHECK17-NEXT: store i32 3, ptr [[TMP24]], align 4
10696 // CHECK17-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
10697 // CHECK17-NEXT: store ptr [[TMP16]], ptr [[TMP25]], align 4
10698 // CHECK17-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
10699 // CHECK17-NEXT: store ptr [[TMP17]], ptr [[TMP26]], align 4
10700 // CHECK17-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
10701 // CHECK17-NEXT: store ptr [[TMP18]], ptr [[TMP27]], align 4
10702 // CHECK17-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
10703 // CHECK17-NEXT: store ptr @.offload_maptypes, ptr [[TMP28]], align 4
10704 // CHECK17-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
10705 // CHECK17-NEXT: store ptr null, ptr [[TMP29]], align 4
10706 // CHECK17-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
10707 // CHECK17-NEXT: store ptr null, ptr [[TMP30]], align 4
10708 // CHECK17-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
10709 // CHECK17-NEXT: store i64 [[TMP22]], ptr [[TMP31]], align 8
10710 // CHECK17-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
10711 // CHECK17-NEXT: store i64 0, ptr [[TMP32]], align 8
10712 // CHECK17-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
10713 // CHECK17-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP33]], align 4
10714 // CHECK17-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
10715 // CHECK17-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP34]], align 4
10716 // CHECK17-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
10717 // CHECK17-NEXT: store i32 0, ptr [[TMP35]], align 4
10718 // CHECK17-NEXT: [[TMP36:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.region_id, ptr [[KERNEL_ARGS]])
10719 // CHECK17-NEXT: [[TMP37:%.*]] = icmp ne i32 [[TMP36]], 0
10720 // CHECK17-NEXT: br i1 [[TMP37]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
10721 // CHECK17: omp_offload.failed:
10722 // CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154(i32 [[TMP3]], i32 [[TMP0]], ptr [[VLA]]) #[[ATTR3:[0-9]+]]
10723 // CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT]]
10724 // CHECK17: omp_offload.cont:
10725 // CHECK17-NEXT: [[TMP38:%.*]] = load i32, ptr [[N]], align 4
10726 // CHECK17-NEXT: store i32 [[TMP38]], ptr [[N_CASTED3]], align 4
10727 // CHECK17-NEXT: [[TMP39:%.*]] = load i32, ptr [[N_CASTED3]], align 4
10728 // CHECK17-NEXT: [[TMP40:%.*]] = mul nuw i32 [[TMP0]], 4
10729 // CHECK17-NEXT: [[TMP41:%.*]] = sext i32 [[TMP40]] to i64
10730 // CHECK17-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[DOTOFFLOAD_SIZES7]], ptr align 4 @.offload_sizes.1, i32 24, i1 false)
10731 // CHECK17-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
10732 // CHECK17-NEXT: store i32 [[TMP39]], ptr [[TMP42]], align 4
10733 // CHECK17-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
10734 // CHECK17-NEXT: store i32 [[TMP39]], ptr [[TMP43]], align 4
10735 // CHECK17-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0
10736 // CHECK17-NEXT: store ptr null, ptr [[TMP44]], align 4
10737 // CHECK17-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1
10738 // CHECK17-NEXT: store i32 [[TMP0]], ptr [[TMP45]], align 4
10739 // CHECK17-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS5]], i32 0, i32 1
10740 // CHECK17-NEXT: store i32 [[TMP0]], ptr [[TMP46]], align 4
10741 // CHECK17-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1
10742 // CHECK17-NEXT: store ptr null, ptr [[TMP47]], align 4
10743 // CHECK17-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2
10744 // CHECK17-NEXT: store ptr [[VLA]], ptr [[TMP48]], align 4
10745 // CHECK17-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS5]], i32 0, i32 2
10746 // CHECK17-NEXT: store ptr [[VLA]], ptr [[TMP49]], align 4
10747 // CHECK17-NEXT: [[TMP50:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES7]], i32 0, i32 2
10748 // CHECK17-NEXT: store i64 [[TMP41]], ptr [[TMP50]], align 4
10749 // CHECK17-NEXT: [[TMP51:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2
10750 // CHECK17-NEXT: store ptr null, ptr [[TMP51]], align 4
10751 // CHECK17-NEXT: [[TMP52:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
10752 // CHECK17-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
10753 // CHECK17-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES7]], i32 0, i32 0
10754 // CHECK17-NEXT: [[TMP55:%.*]] = load i32, ptr [[N]], align 4
10755 // CHECK17-NEXT: store i32 [[TMP55]], ptr [[DOTCAPTURE_EXPR_9]], align 4
10756 // CHECK17-NEXT: [[TMP56:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_9]], align 4
10757 // CHECK17-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP56]], 0
10758 // CHECK17-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1
10759 // CHECK17-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1
10760 // CHECK17-NEXT: store i32 [[SUB13]], ptr [[DOTCAPTURE_EXPR_10]], align 4
10761 // CHECK17-NEXT: [[TMP57:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_10]], align 4
10762 // CHECK17-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP57]], 1
10763 // CHECK17-NEXT: [[TMP58:%.*]] = zext i32 [[ADD14]] to i64
10764 // CHECK17-NEXT: [[TMP59:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 0
10765 // CHECK17-NEXT: store i32 3, ptr [[TMP59]], align 4
10766 // CHECK17-NEXT: [[TMP60:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 1
10767 // CHECK17-NEXT: store i32 3, ptr [[TMP60]], align 4
10768 // CHECK17-NEXT: [[TMP61:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 2
10769 // CHECK17-NEXT: store ptr [[TMP52]], ptr [[TMP61]], align 4
10770 // CHECK17-NEXT: [[TMP62:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 3
10771 // CHECK17-NEXT: store ptr [[TMP53]], ptr [[TMP62]], align 4
10772 // CHECK17-NEXT: [[TMP63:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 4
10773 // CHECK17-NEXT: store ptr [[TMP54]], ptr [[TMP63]], align 4
10774 // CHECK17-NEXT: [[TMP64:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 5
10775 // CHECK17-NEXT: store ptr @.offload_maptypes.2, ptr [[TMP64]], align 4
10776 // CHECK17-NEXT: [[TMP65:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 6
10777 // CHECK17-NEXT: store ptr null, ptr [[TMP65]], align 4
10778 // CHECK17-NEXT: [[TMP66:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 7
10779 // CHECK17-NEXT: store ptr null, ptr [[TMP66]], align 4
10780 // CHECK17-NEXT: [[TMP67:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 8
10781 // CHECK17-NEXT: store i64 [[TMP58]], ptr [[TMP67]], align 8
10782 // CHECK17-NEXT: [[TMP68:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 9
10783 // CHECK17-NEXT: store i64 0, ptr [[TMP68]], align 8
10784 // CHECK17-NEXT: [[TMP69:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 10
10785 // CHECK17-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP69]], align 4
10786 // CHECK17-NEXT: [[TMP70:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 11
10787 // CHECK17-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP70]], align 4
10788 // CHECK17-NEXT: [[TMP71:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 12
10789 // CHECK17-NEXT: store i32 0, ptr [[TMP71]], align 4
10790 // CHECK17-NEXT: [[TMP72:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.region_id, ptr [[KERNEL_ARGS15]])
10791 // CHECK17-NEXT: [[TMP73:%.*]] = icmp ne i32 [[TMP72]], 0
10792 // CHECK17-NEXT: br i1 [[TMP73]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]]
10793 // CHECK17: omp_offload.failed16:
10794 // CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159(i32 [[TMP39]], i32 [[TMP0]], ptr [[VLA]]) #[[ATTR3]]
10795 // CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT17]]
10796 // CHECK17: omp_offload.cont17:
10797 // CHECK17-NEXT: [[TMP74:%.*]] = load i32, ptr [[M]], align 4
10798 // CHECK17-NEXT: store i32 [[TMP74]], ptr [[M_CASTED]], align 4
10799 // CHECK17-NEXT: [[TMP75:%.*]] = load i32, ptr [[M_CASTED]], align 4
10800 // CHECK17-NEXT: [[TMP76:%.*]] = load i32, ptr [[N]], align 4
10801 // CHECK17-NEXT: store i32 [[TMP76]], ptr [[N_CASTED18]], align 4
10802 // CHECK17-NEXT: [[TMP77:%.*]] = load i32, ptr [[N_CASTED18]], align 4
10803 // CHECK17-NEXT: [[TMP78:%.*]] = mul nuw i32 [[TMP0]], 4
10804 // CHECK17-NEXT: [[TMP79:%.*]] = sext i32 [[TMP78]] to i64
10805 // CHECK17-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[DOTOFFLOAD_SIZES22]], ptr align 4 @.offload_sizes.3, i32 32, i1 false)
10806 // CHECK17-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0
10807 // CHECK17-NEXT: store i32 [[TMP75]], ptr [[TMP80]], align 4
10808 // CHECK17-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 0
10809 // CHECK17-NEXT: store i32 [[TMP75]], ptr [[TMP81]], align 4
10810 // CHECK17-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 0
10811 // CHECK17-NEXT: store ptr null, ptr [[TMP82]], align 4
10812 // CHECK17-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 1
10813 // CHECK17-NEXT: store i32 [[TMP77]], ptr [[TMP83]], align 4
10814 // CHECK17-NEXT: [[TMP84:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 1
10815 // CHECK17-NEXT: store i32 [[TMP77]], ptr [[TMP84]], align 4
10816 // CHECK17-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 1
10817 // CHECK17-NEXT: store ptr null, ptr [[TMP85]], align 4
10818 // CHECK17-NEXT: [[TMP86:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 2
10819 // CHECK17-NEXT: store i32 [[TMP0]], ptr [[TMP86]], align 4
10820 // CHECK17-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 2
10821 // CHECK17-NEXT: store i32 [[TMP0]], ptr [[TMP87]], align 4
10822 // CHECK17-NEXT: [[TMP88:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 2
10823 // CHECK17-NEXT: store ptr null, ptr [[TMP88]], align 4
10824 // CHECK17-NEXT: [[TMP89:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 3
10825 // CHECK17-NEXT: store ptr [[VLA]], ptr [[TMP89]], align 4
10826 // CHECK17-NEXT: [[TMP90:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 3
10827 // CHECK17-NEXT: store ptr [[VLA]], ptr [[TMP90]], align 4
10828 // CHECK17-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i64], ptr [[DOTOFFLOAD_SIZES22]], i32 0, i32 3
10829 // CHECK17-NEXT: store i64 [[TMP79]], ptr [[TMP91]], align 4
10830 // CHECK17-NEXT: [[TMP92:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 3
10831 // CHECK17-NEXT: store ptr null, ptr [[TMP92]], align 4
10832 // CHECK17-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0
10833 // CHECK17-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 0
10834 // CHECK17-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i64], ptr [[DOTOFFLOAD_SIZES22]], i32 0, i32 0
10835 // CHECK17-NEXT: [[TMP96:%.*]] = load i32, ptr [[N]], align 4
10836 // CHECK17-NEXT: store i32 [[TMP96]], ptr [[DOTCAPTURE_EXPR_24]], align 4
10837 // CHECK17-NEXT: [[TMP97:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_24]], align 4
10838 // CHECK17-NEXT: [[SUB26:%.*]] = sub nsw i32 [[TMP97]], 0
10839 // CHECK17-NEXT: [[DIV27:%.*]] = sdiv i32 [[SUB26]], 1
10840 // CHECK17-NEXT: [[SUB28:%.*]] = sub nsw i32 [[DIV27]], 1
10841 // CHECK17-NEXT: store i32 [[SUB28]], ptr [[DOTCAPTURE_EXPR_25]], align 4
10842 // CHECK17-NEXT: [[TMP98:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_25]], align 4
10843 // CHECK17-NEXT: [[ADD29:%.*]] = add nsw i32 [[TMP98]], 1
10844 // CHECK17-NEXT: [[TMP99:%.*]] = zext i32 [[ADD29]] to i64
10845 // CHECK17-NEXT: [[TMP100:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 0
10846 // CHECK17-NEXT: store i32 3, ptr [[TMP100]], align 4
10847 // CHECK17-NEXT: [[TMP101:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 1
10848 // CHECK17-NEXT: store i32 4, ptr [[TMP101]], align 4
10849 // CHECK17-NEXT: [[TMP102:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 2
10850 // CHECK17-NEXT: store ptr [[TMP93]], ptr [[TMP102]], align 4
10851 // CHECK17-NEXT: [[TMP103:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 3
10852 // CHECK17-NEXT: store ptr [[TMP94]], ptr [[TMP103]], align 4
10853 // CHECK17-NEXT: [[TMP104:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 4
10854 // CHECK17-NEXT: store ptr [[TMP95]], ptr [[TMP104]], align 4
10855 // CHECK17-NEXT: [[TMP105:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 5
10856 // CHECK17-NEXT: store ptr @.offload_maptypes.4, ptr [[TMP105]], align 4
10857 // CHECK17-NEXT: [[TMP106:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 6
10858 // CHECK17-NEXT: store ptr null, ptr [[TMP106]], align 4
10859 // CHECK17-NEXT: [[TMP107:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 7
10860 // CHECK17-NEXT: store ptr null, ptr [[TMP107]], align 4
10861 // CHECK17-NEXT: [[TMP108:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 8
10862 // CHECK17-NEXT: store i64 [[TMP99]], ptr [[TMP108]], align 8
10863 // CHECK17-NEXT: [[TMP109:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 9
10864 // CHECK17-NEXT: store i64 0, ptr [[TMP109]], align 8
10865 // CHECK17-NEXT: [[TMP110:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 10
10866 // CHECK17-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP110]], align 4
10867 // CHECK17-NEXT: [[TMP111:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 11
10868 // CHECK17-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP111]], align 4
10869 // CHECK17-NEXT: [[TMP112:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 12
10870 // CHECK17-NEXT: store i32 0, ptr [[TMP112]], align 4
10871 // CHECK17-NEXT: [[TMP113:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.region_id, ptr [[KERNEL_ARGS30]])
10872 // CHECK17-NEXT: [[TMP114:%.*]] = icmp ne i32 [[TMP113]], 0
10873 // CHECK17-NEXT: br i1 [[TMP114]], label [[OMP_OFFLOAD_FAILED31:%.*]], label [[OMP_OFFLOAD_CONT32:%.*]]
10874 // CHECK17: omp_offload.failed31:
10875 // CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164(i32 [[TMP75]], i32 [[TMP77]], i32 [[TMP0]], ptr [[VLA]]) #[[ATTR3]]
10876 // CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT32]]
10877 // CHECK17: omp_offload.cont32:
10878 // CHECK17-NEXT: [[TMP115:%.*]] = load i32, ptr [[N]], align 4
10879 // CHECK17-NEXT: store i32 [[TMP115]], ptr [[N_CASTED33]], align 4
10880 // CHECK17-NEXT: [[TMP116:%.*]] = load i32, ptr [[N_CASTED33]], align 4
10881 // CHECK17-NEXT: [[TMP117:%.*]] = mul nuw i32 [[TMP0]], 4
10882 // CHECK17-NEXT: [[TMP118:%.*]] = sext i32 [[TMP117]] to i64
10883 // CHECK17-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[DOTOFFLOAD_SIZES37]], ptr align 4 @.offload_sizes.5, i32 24, i1 false)
10884 // CHECK17-NEXT: [[TMP119:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 0
10885 // CHECK17-NEXT: store i32 [[TMP116]], ptr [[TMP119]], align 4
10886 // CHECK17-NEXT: [[TMP120:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS35]], i32 0, i32 0
10887 // CHECK17-NEXT: store i32 [[TMP116]], ptr [[TMP120]], align 4
10888 // CHECK17-NEXT: [[TMP121:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS36]], i32 0, i32 0
10889 // CHECK17-NEXT: store ptr null, ptr [[TMP121]], align 4
10890 // CHECK17-NEXT: [[TMP122:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 1
10891 // CHECK17-NEXT: store i32 [[TMP0]], ptr [[TMP122]], align 4
10892 // CHECK17-NEXT: [[TMP123:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS35]], i32 0, i32 1
10893 // CHECK17-NEXT: store i32 [[TMP0]], ptr [[TMP123]], align 4
10894 // CHECK17-NEXT: [[TMP124:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS36]], i32 0, i32 1
10895 // CHECK17-NEXT: store ptr null, ptr [[TMP124]], align 4
10896 // CHECK17-NEXT: [[TMP125:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 2
10897 // CHECK17-NEXT: store ptr [[VLA]], ptr [[TMP125]], align 4
10898 // CHECK17-NEXT: [[TMP126:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS35]], i32 0, i32 2
10899 // CHECK17-NEXT: store ptr [[VLA]], ptr [[TMP126]], align 4
10900 // CHECK17-NEXT: [[TMP127:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES37]], i32 0, i32 2
10901 // CHECK17-NEXT: store i64 [[TMP118]], ptr [[TMP127]], align 4
10902 // CHECK17-NEXT: [[TMP128:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS36]], i32 0, i32 2
10903 // CHECK17-NEXT: store ptr null, ptr [[TMP128]], align 4
10904 // CHECK17-NEXT: [[TMP129:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 0
10905 // CHECK17-NEXT: [[TMP130:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS35]], i32 0, i32 0
10906 // CHECK17-NEXT: [[TMP131:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES37]], i32 0, i32 0
10907 // CHECK17-NEXT: [[TMP132:%.*]] = load i32, ptr [[N]], align 4
10908 // CHECK17-NEXT: store i32 [[TMP132]], ptr [[DOTCAPTURE_EXPR_39]], align 4
10909 // CHECK17-NEXT: [[TMP133:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_39]], align 4
10910 // CHECK17-NEXT: [[SUB41:%.*]] = sub nsw i32 [[TMP133]], 0
10911 // CHECK17-NEXT: [[DIV42:%.*]] = sdiv i32 [[SUB41]], 1
10912 // CHECK17-NEXT: [[SUB43:%.*]] = sub nsw i32 [[DIV42]], 1
10913 // CHECK17-NEXT: store i32 [[SUB43]], ptr [[DOTCAPTURE_EXPR_40]], align 4
10914 // CHECK17-NEXT: [[TMP134:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_40]], align 4
10915 // CHECK17-NEXT: [[ADD44:%.*]] = add nsw i32 [[TMP134]], 1
10916 // CHECK17-NEXT: [[TMP135:%.*]] = zext i32 [[ADD44]] to i64
10917 // CHECK17-NEXT: [[TMP136:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 0
10918 // CHECK17-NEXT: store i32 3, ptr [[TMP136]], align 4
10919 // CHECK17-NEXT: [[TMP137:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 1
10920 // CHECK17-NEXT: store i32 3, ptr [[TMP137]], align 4
10921 // CHECK17-NEXT: [[TMP138:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 2
10922 // CHECK17-NEXT: store ptr [[TMP129]], ptr [[TMP138]], align 4
10923 // CHECK17-NEXT: [[TMP139:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 3
10924 // CHECK17-NEXT: store ptr [[TMP130]], ptr [[TMP139]], align 4
10925 // CHECK17-NEXT: [[TMP140:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 4
10926 // CHECK17-NEXT: store ptr [[TMP131]], ptr [[TMP140]], align 4
10927 // CHECK17-NEXT: [[TMP141:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 5
10928 // CHECK17-NEXT: store ptr @.offload_maptypes.6, ptr [[TMP141]], align 4
10929 // CHECK17-NEXT: [[TMP142:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 6
10930 // CHECK17-NEXT: store ptr null, ptr [[TMP142]], align 4
10931 // CHECK17-NEXT: [[TMP143:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 7
10932 // CHECK17-NEXT: store ptr null, ptr [[TMP143]], align 4
10933 // CHECK17-NEXT: [[TMP144:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 8
10934 // CHECK17-NEXT: store i64 [[TMP135]], ptr [[TMP144]], align 8
10935 // CHECK17-NEXT: [[TMP145:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 9
10936 // CHECK17-NEXT: store i64 0, ptr [[TMP145]], align 8
10937 // CHECK17-NEXT: [[TMP146:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 10
10938 // CHECK17-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP146]], align 4
10939 // CHECK17-NEXT: [[TMP147:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 11
10940 // CHECK17-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP147]], align 4
10941 // CHECK17-NEXT: [[TMP148:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 12
10942 // CHECK17-NEXT: store i32 0, ptr [[TMP148]], align 4
10943 // CHECK17-NEXT: [[TMP149:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.region_id, ptr [[KERNEL_ARGS45]])
10944 // CHECK17-NEXT: [[TMP150:%.*]] = icmp ne i32 [[TMP149]], 0
10945 // CHECK17-NEXT: br i1 [[TMP150]], label [[OMP_OFFLOAD_FAILED46:%.*]], label [[OMP_OFFLOAD_CONT47:%.*]]
10946 // CHECK17: omp_offload.failed46:
10947 // CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169(i32 [[TMP116]], i32 [[TMP0]], ptr [[VLA]]) #[[ATTR3]]
10948 // CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT47]]
10949 // CHECK17: omp_offload.cont47:
10950 // CHECK17-NEXT: [[TMP151:%.*]] = load i32, ptr [[M]], align 4
10951 // CHECK17-NEXT: store i32 [[TMP151]], ptr [[M_CASTED48]], align 4
10952 // CHECK17-NEXT: [[TMP152:%.*]] = load i32, ptr [[M_CASTED48]], align 4
10953 // CHECK17-NEXT: [[TMP153:%.*]] = load i32, ptr [[N]], align 4
10954 // CHECK17-NEXT: store i32 [[TMP153]], ptr [[N_CASTED49]], align 4
10955 // CHECK17-NEXT: [[TMP154:%.*]] = load i32, ptr [[N_CASTED49]], align 4
10956 // CHECK17-NEXT: [[TMP155:%.*]] = mul nuw i32 [[TMP0]], 4
10957 // CHECK17-NEXT: [[TMP156:%.*]] = sext i32 [[TMP155]] to i64
10958 // CHECK17-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[DOTOFFLOAD_SIZES53]], ptr align 4 @.offload_sizes.7, i32 32, i1 false)
10959 // CHECK17-NEXT: [[TMP157:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0
10960 // CHECK17-NEXT: store i32 [[TMP152]], ptr [[TMP157]], align 4
10961 // CHECK17-NEXT: [[TMP158:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS51]], i32 0, i32 0
10962 // CHECK17-NEXT: store i32 [[TMP152]], ptr [[TMP158]], align 4
10963 // CHECK17-NEXT: [[TMP159:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS52]], i32 0, i32 0
10964 // CHECK17-NEXT: store ptr null, ptr [[TMP159]], align 4
10965 // CHECK17-NEXT: [[TMP160:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 1
10966 // CHECK17-NEXT: store i32 [[TMP154]], ptr [[TMP160]], align 4
10967 // CHECK17-NEXT: [[TMP161:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS51]], i32 0, i32 1
10968 // CHECK17-NEXT: store i32 [[TMP154]], ptr [[TMP161]], align 4
10969 // CHECK17-NEXT: [[TMP162:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS52]], i32 0, i32 1
10970 // CHECK17-NEXT: store ptr null, ptr [[TMP162]], align 4
10971 // CHECK17-NEXT: [[TMP163:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 2
10972 // CHECK17-NEXT: store i32 [[TMP0]], ptr [[TMP163]], align 4
10973 // CHECK17-NEXT: [[TMP164:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS51]], i32 0, i32 2
10974 // CHECK17-NEXT: store i32 [[TMP0]], ptr [[TMP164]], align 4
10975 // CHECK17-NEXT: [[TMP165:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS52]], i32 0, i32 2
10976 // CHECK17-NEXT: store ptr null, ptr [[TMP165]], align 4
10977 // CHECK17-NEXT: [[TMP166:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 3
10978 // CHECK17-NEXT: store ptr [[VLA]], ptr [[TMP166]], align 4
10979 // CHECK17-NEXT: [[TMP167:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS51]], i32 0, i32 3
10980 // CHECK17-NEXT: store ptr [[VLA]], ptr [[TMP167]], align 4
10981 // CHECK17-NEXT: [[TMP168:%.*]] = getelementptr inbounds [4 x i64], ptr [[DOTOFFLOAD_SIZES53]], i32 0, i32 3
10982 // CHECK17-NEXT: store i64 [[TMP156]], ptr [[TMP168]], align 4
10983 // CHECK17-NEXT: [[TMP169:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS52]], i32 0, i32 3
10984 // CHECK17-NEXT: store ptr null, ptr [[TMP169]], align 4
10985 // CHECK17-NEXT: [[TMP170:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0
10986 // CHECK17-NEXT: [[TMP171:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS51]], i32 0, i32 0
10987 // CHECK17-NEXT: [[TMP172:%.*]] = getelementptr inbounds [4 x i64], ptr [[DOTOFFLOAD_SIZES53]], i32 0, i32 0
10988 // CHECK17-NEXT: [[TMP173:%.*]] = load i32, ptr [[N]], align 4
10989 // CHECK17-NEXT: store i32 [[TMP173]], ptr [[DOTCAPTURE_EXPR_55]], align 4
10990 // CHECK17-NEXT: [[TMP174:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_55]], align 4
10991 // CHECK17-NEXT: [[SUB57:%.*]] = sub nsw i32 [[TMP174]], 0
10992 // CHECK17-NEXT: [[DIV58:%.*]] = sdiv i32 [[SUB57]], 1
10993 // CHECK17-NEXT: [[SUB59:%.*]] = sub nsw i32 [[DIV58]], 1
10994 // CHECK17-NEXT: store i32 [[SUB59]], ptr [[DOTCAPTURE_EXPR_56]], align 4
10995 // CHECK17-NEXT: [[TMP175:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_56]], align 4
10996 // CHECK17-NEXT: [[ADD60:%.*]] = add nsw i32 [[TMP175]], 1
10997 // CHECK17-NEXT: [[TMP176:%.*]] = zext i32 [[ADD60]] to i64
10998 // CHECK17-NEXT: [[TMP177:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 0
10999 // CHECK17-NEXT: store i32 3, ptr [[TMP177]], align 4
11000 // CHECK17-NEXT: [[TMP178:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 1
11001 // CHECK17-NEXT: store i32 4, ptr [[TMP178]], align 4
11002 // CHECK17-NEXT: [[TMP179:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 2
11003 // CHECK17-NEXT: store ptr [[TMP170]], ptr [[TMP179]], align 4
11004 // CHECK17-NEXT: [[TMP180:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 3
11005 // CHECK17-NEXT: store ptr [[TMP171]], ptr [[TMP180]], align 4
11006 // CHECK17-NEXT: [[TMP181:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 4
11007 // CHECK17-NEXT: store ptr [[TMP172]], ptr [[TMP181]], align 4
11008 // CHECK17-NEXT: [[TMP182:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 5
11009 // CHECK17-NEXT: store ptr @.offload_maptypes.8, ptr [[TMP182]], align 4
11010 // CHECK17-NEXT: [[TMP183:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 6
11011 // CHECK17-NEXT: store ptr null, ptr [[TMP183]], align 4
11012 // CHECK17-NEXT: [[TMP184:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 7
11013 // CHECK17-NEXT: store ptr null, ptr [[TMP184]], align 4
11014 // CHECK17-NEXT: [[TMP185:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 8
11015 // CHECK17-NEXT: store i64 [[TMP176]], ptr [[TMP185]], align 8
11016 // CHECK17-NEXT: [[TMP186:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 9
11017 // CHECK17-NEXT: store i64 0, ptr [[TMP186]], align 8
11018 // CHECK17-NEXT: [[TMP187:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 10
11019 // CHECK17-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP187]], align 4
11020 // CHECK17-NEXT: [[TMP188:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 11
11021 // CHECK17-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP188]], align 4
11022 // CHECK17-NEXT: [[TMP189:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 12
11023 // CHECK17-NEXT: store i32 0, ptr [[TMP189]], align 4
11024 // CHECK17-NEXT: [[TMP190:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.region_id, ptr [[KERNEL_ARGS61]])
11025 // CHECK17-NEXT: [[TMP191:%.*]] = icmp ne i32 [[TMP190]], 0
11026 // CHECK17-NEXT: br i1 [[TMP191]], label [[OMP_OFFLOAD_FAILED62:%.*]], label [[OMP_OFFLOAD_CONT63:%.*]]
11027 // CHECK17: omp_offload.failed62:
11028 // CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174(i32 [[TMP152]], i32 [[TMP154]], i32 [[TMP0]], ptr [[VLA]]) #[[ATTR3]]
11029 // CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT63]]
11030 // CHECK17: omp_offload.cont63:
11031 // CHECK17-NEXT: [[TMP192:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4
11032 // CHECK17-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP192]])
11033 // CHECK17-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4
11034 // CHECK17-NEXT: [[TMP193:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4
11035 // CHECK17-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP193]])
11036 // CHECK17-NEXT: [[TMP194:%.*]] = load i32, ptr [[RETVAL]], align 4
11037 // CHECK17-NEXT: ret i32 [[TMP194]]
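// The remaining CHECK17 functions are the offload entry points for the target regions at source lines
// 154-174. Each wrapper forwards its captured arguments to __kmpc_fork_teams; the teams-level outlined
// body runs the distribute loop and forks the parallel worksharing loop via __kmpc_fork_call.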
11040 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154
11041 // CHECK17-SAME: (i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2:[0-9]+]] {
11042 // CHECK17-NEXT: entry:
11043 // CHECK17-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
11044 // CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
11045 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
11046 // CHECK17-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
11047 // CHECK17-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
11048 // CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
11049 // CHECK17-NEXT: [[TMP0:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
11050 // CHECK17-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4
11051 // CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.omp_outlined, ptr [[N_ADDR]], i32 [[TMP0]], ptr [[TMP1]])
11052 // CHECK17-NEXT: ret void
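// Teams-level outlined body for the l154 region: it recomputes the trip count from n, runs the
// distribute loop with __kmpc_for_static_init_4 (schedule 92), and passes each chunk's bounds to the
// parallel outlined function through __kmpc_fork_call.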
11055 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.omp_outlined
11056 // CHECK17-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
11057 // CHECK17-NEXT: entry:
11058 // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
11059 // CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
11060 // CHECK17-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 4
11061 // CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
11062 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
11063 // CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
11064 // CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
11065 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
11066 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
11067 // CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
11068 // CHECK17-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
11069 // CHECK17-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
11070 // CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11071 // CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11072 // CHECK17-NEXT: [[I3:%.*]] = alloca i32, align 4
11073 // CHECK17-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
11074 // CHECK17-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
11075 // CHECK17-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 4
11076 // CHECK17-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
11077 // CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
11078 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 4
11079 // CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
11080 // CHECK17-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 4
11081 // CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
11082 // CHECK17-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4
11083 // CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
11084 // CHECK17-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
11085 // CHECK17-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
11086 // CHECK17-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
11087 // CHECK17-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
11088 // CHECK17-NEXT: store i32 0, ptr [[I]], align 4
11089 // CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
11090 // CHECK17-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
11091 // CHECK17-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
11092 // CHECK17: omp.precond.then:
11093 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
11094 // CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
11095 // CHECK17-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_COMB_UB]], align 4
11096 // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
11097 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
11098 // CHECK17-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
11099 // CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
11100 // CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
11101 // CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
11102 // CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
11103 // CHECK17-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
11104 // CHECK17-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
11105 // CHECK17: cond.true:
11106 // CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
11107 // CHECK17-NEXT: br label [[COND_END:%.*]]
11108 // CHECK17: cond.false:
11109 // CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
11110 // CHECK17-NEXT: br label [[COND_END]]
11111 // CHECK17: cond.end:
11112 // CHECK17-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
11113 // CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
11114 // CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
11115 // CHECK17-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4
11116 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
11117 // CHECK17: omp.inner.for.cond:
11118 // CHECK17-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14:![0-9]+]]
11119 // CHECK17-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP14]]
11120 // CHECK17-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
11121 // CHECK17-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11122 // CHECK17: omp.inner.for.body:
11123 // CHECK17-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP14]]
11124 // CHECK17-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP14]]
11125 // CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 5, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.omp_outlined.omp_outlined, i32 [[TMP16]], i32 [[TMP17]], ptr [[TMP0]], i32 [[TMP1]], ptr [[TMP2]]), !llvm.access.group [[ACC_GRP14]]
11126 // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
11127 // CHECK17: omp.inner.for.inc:
11128 // CHECK17-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]]
11129 // CHECK17-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP14]]
11130 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
11131 // CHECK17-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]]
11132 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP15:![0-9]+]]
11133 // CHECK17: omp.inner.for.end:
11134 // CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
11135 // CHECK17: omp.loop.exit:
11136 // CHECK17-NEXT: [[TMP20:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
11137 // CHECK17-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP20]], align 4
11138 // CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP21]])
11139 // CHECK17-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
11140 // CHECK17-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
11141 // CHECK17-NEXT: br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
11142 // CHECK17: .omp.final.then:
11143 // CHECK17-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
11144 // CHECK17-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP24]], 0
11145 // CHECK17-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
11146 // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
11147 // CHECK17-NEXT: [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
11148 // CHECK17-NEXT: store i32 [[ADD8]], ptr [[I3]], align 4
11149 // CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]]
11150 // CHECK17: .omp.final.done:
11151 // CHECK17-NEXT: br label [[OMP_PRECOND_END]]
11152 // CHECK17: omp.precond.end:
11153 // CHECK17-NEXT: ret void
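// Parallel-level outlined body for the l154 region: it receives the distribute chunk bounds, applies
// __kmpc_for_static_init_4 with schedule 34 (static), and stores 0 into a[i] for each iteration of the
// chunk before the lastprivate-style final assignment of i.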
11156 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.omp_outlined.omp_outlined
11157 // CHECK17-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
11158 // CHECK17-NEXT: entry:
11159 // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
11160 // CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
11161 // CHECK17-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
11162 // CHECK17-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
11163 // CHECK17-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 4
11164 // CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
11165 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
11166 // CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
11167 // CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
11168 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
11169 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
11170 // CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
11171 // CHECK17-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
11172 // CHECK17-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
11173 // CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11174 // CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11175 // CHECK17-NEXT: [[I3:%.*]] = alloca i32, align 4
11176 // CHECK17-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
11177 // CHECK17-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
11178 // CHECK17-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
11179 // CHECK17-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
11180 // CHECK17-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 4
11181 // CHECK17-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
11182 // CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
11183 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 4
11184 // CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
11185 // CHECK17-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 4
11186 // CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
11187 // CHECK17-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4
11188 // CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
11189 // CHECK17-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
11190 // CHECK17-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
11191 // CHECK17-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
11192 // CHECK17-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
11193 // CHECK17-NEXT: store i32 0, ptr [[I]], align 4
11194 // CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
11195 // CHECK17-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
11196 // CHECK17-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
11197 // CHECK17: omp.precond.then:
11198 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
11199 // CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
11200 // CHECK17-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4
11201 // CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
11202 // CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
11203 // CHECK17-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_LB]], align 4
11204 // CHECK17-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_UB]], align 4
11205 // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
11206 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
11207 // CHECK17-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
11208 // CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4
11209 // CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP10]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
11210 // CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
11211 // CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
11212 // CHECK17-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP11]], [[TMP12]]
11213 // CHECK17-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
11214 // CHECK17: cond.true:
11215 // CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
11216 // CHECK17-NEXT: br label [[COND_END:%.*]]
11217 // CHECK17: cond.false:
11218 // CHECK17-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
11219 // CHECK17-NEXT: br label [[COND_END]]
11220 // CHECK17: cond.end:
11221 // CHECK17-NEXT: [[COND:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ]
11222 // CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
11223 // CHECK17-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
11224 // CHECK17-NEXT: store i32 [[TMP15]], ptr [[DOTOMP_IV]], align 4
11225 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
11226 // CHECK17: omp.inner.for.cond:
11227 // CHECK17-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18:![0-9]+]]
11228 // CHECK17-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP18]]
11229 // CHECK17-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
11230 // CHECK17-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11231 // CHECK17: omp.inner.for.body:
11232 // CHECK17-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
11233 // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
11234 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
11235 // CHECK17-NEXT: store i32 [[ADD]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP18]]
11236 // CHECK17-NEXT: [[TMP19:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP18]]
11237 // CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 [[TMP19]]
11238 // CHECK17-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP18]]
11239 // CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
11240 // CHECK17: omp.body.continue:
11241 // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
11242 // CHECK17: omp.inner.for.inc:
11243 // CHECK17-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
11244 // CHECK17-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP20]], 1
11245 // CHECK17-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
11246 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
11247 // CHECK17: omp.inner.for.end:
11248 // CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
11249 // CHECK17: omp.loop.exit:
11250 // CHECK17-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
11251 // CHECK17-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
11252 // CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP22]])
11253 // CHECK17-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
11254 // CHECK17-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
11255 // CHECK17-NEXT: br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
11256 // CHECK17: .omp.final.then:
11257 // CHECK17-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
11258 // CHECK17-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP25]], 0
11259 // CHECK17-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
11260 // CHECK17-NEXT: [[MUL9:%.*]] = mul nsw i32 [[DIV8]], 1
11261 // CHECK17-NEXT: [[ADD10:%.*]] = add nsw i32 0, [[MUL9]]
11262 // CHECK17-NEXT: store i32 [[ADD10]], ptr [[I3]], align 4
11263 // CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]]
11264 // CHECK17: .omp.final.done:
11265 // CHECK17-NEXT: br label [[OMP_PRECOND_END]]
11266 // CHECK17: omp.precond.end:
11267 // CHECK17-NEXT: ret void
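// The l159 entry point and its outlined functions below follow the same wrapper -> teams distribute ->
// parallel-for structure as the l154 region above.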
11270 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159
11271 // CHECK17-SAME: (i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
11272 // CHECK17-NEXT: entry:
11273 // CHECK17-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
11274 // CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
11275 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
11276 // CHECK17-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
11277 // CHECK17-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
11278 // CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
11279 // CHECK17-NEXT: [[TMP0:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
11280 // CHECK17-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4
11281 // CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.omp_outlined, ptr [[N_ADDR]], i32 [[TMP0]], ptr [[TMP1]])
11282 // CHECK17-NEXT: ret void
11285 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.omp_outlined
11286 // CHECK17-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
11287 // CHECK17-NEXT: entry:
11288 // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
11289 // CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
11290 // CHECK17-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 4
11291 // CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
11292 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
11293 // CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
11294 // CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
11295 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
11296 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
11297 // CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
11298 // CHECK17-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
11299 // CHECK17-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
11300 // CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11301 // CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11302 // CHECK17-NEXT: [[I3:%.*]] = alloca i32, align 4
11303 // CHECK17-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
11304 // CHECK17-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
11305 // CHECK17-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 4
11306 // CHECK17-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
11307 // CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
11308 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 4
11309 // CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
11310 // CHECK17-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 4
11311 // CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
11312 // CHECK17-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4
11313 // CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
11314 // CHECK17-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
11315 // CHECK17-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
11316 // CHECK17-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
11317 // CHECK17-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
11318 // CHECK17-NEXT: store i32 0, ptr [[I]], align 4
11319 // CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
11320 // CHECK17-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
11321 // CHECK17-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
11322 // CHECK17: omp.precond.then:
11323 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
11324 // CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
11325 // CHECK17-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_COMB_UB]], align 4
11326 // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
11327 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
11328 // CHECK17-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
11329 // CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
11330 // CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
11331 // CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
11332 // CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
11333 // CHECK17-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
11334 // CHECK17-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
11335 // CHECK17: cond.true:
11336 // CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
11337 // CHECK17-NEXT: br label [[COND_END:%.*]]
11338 // CHECK17: cond.false:
11339 // CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
11340 // CHECK17-NEXT: br label [[COND_END]]
11341 // CHECK17: cond.end:
11342 // CHECK17-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
11343 // CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
11344 // CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
11345 // CHECK17-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4
11346 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
11347 // CHECK17: omp.inner.for.cond:
11348 // CHECK17-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23:![0-9]+]]
11349 // CHECK17-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP23]]
11350 // CHECK17-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
11351 // CHECK17-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11352 // CHECK17: omp.inner.for.body:
11353 // CHECK17-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP23]]
11354 // CHECK17-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP23]]
11355 // CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 5, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.omp_outlined.omp_outlined, i32 [[TMP16]], i32 [[TMP17]], ptr [[TMP0]], i32 [[TMP1]], ptr [[TMP2]]), !llvm.access.group [[ACC_GRP23]]
11356 // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
11357 // CHECK17: omp.inner.for.inc:
11358 // CHECK17-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]]
11359 // CHECK17-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP23]]
11360 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
11361 // CHECK17-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]]
11362 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]]
11363 // CHECK17: omp.inner.for.end:
11364 // CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
11365 // CHECK17: omp.loop.exit:
11366 // CHECK17-NEXT: [[TMP20:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
11367 // CHECK17-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP20]], align 4
11368 // CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP21]])
11369 // CHECK17-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
11370 // CHECK17-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
11371 // CHECK17-NEXT: br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
11372 // CHECK17: .omp.final.then:
11373 // CHECK17-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
11374 // CHECK17-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP24]], 0
11375 // CHECK17-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
11376 // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
11377 // CHECK17-NEXT: [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
11378 // CHECK17-NEXT: store i32 [[ADD8]], ptr [[I3]], align 4
11379 // CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]]
11380 // CHECK17: .omp.final.done:
11381 // CHECK17-NEXT: br label [[OMP_PRECOND_END]]
11382 // CHECK17: omp.precond.end:
11383 // CHECK17-NEXT: ret void
11386 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.omp_outlined.omp_outlined
11387 // CHECK17-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
11388 // CHECK17-NEXT: entry:
11389 // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
11390 // CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
11391 // CHECK17-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
11392 // CHECK17-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
11393 // CHECK17-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 4
11394 // CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
11395 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
11396 // CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
11397 // CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
11398 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
11399 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
11400 // CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
11401 // CHECK17-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
11402 // CHECK17-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
11403 // CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11404 // CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11405 // CHECK17-NEXT: [[I3:%.*]] = alloca i32, align 4
11406 // CHECK17-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
11407 // CHECK17-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
11408 // CHECK17-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
11409 // CHECK17-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
11410 // CHECK17-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 4
11411 // CHECK17-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
11412 // CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
11413 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 4
11414 // CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
11415 // CHECK17-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 4
11416 // CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
11417 // CHECK17-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4
11418 // CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
11419 // CHECK17-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
11420 // CHECK17-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
11421 // CHECK17-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
11422 // CHECK17-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
11423 // CHECK17-NEXT: store i32 0, ptr [[I]], align 4
11424 // CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
11425 // CHECK17-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
11426 // CHECK17-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
11427 // CHECK17: omp.precond.then:
11428 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
11429 // CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
11430 // CHECK17-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4
11431 // CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
11432 // CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
11433 // CHECK17-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_LB]], align 4
11434 // CHECK17-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_UB]], align 4
11435 // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
11436 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
11437 // CHECK17-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
11438 // CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4
11439 // CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP10]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
11440 // CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
11441 // CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
11442 // CHECK17-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP11]], [[TMP12]]
11443 // CHECK17-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
11444 // CHECK17: cond.true:
11445 // CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
11446 // CHECK17-NEXT: br label [[COND_END:%.*]]
11447 // CHECK17: cond.false:
11448 // CHECK17-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
11449 // CHECK17-NEXT: br label [[COND_END]]
11450 // CHECK17: cond.end:
11451 // CHECK17-NEXT: [[COND:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ]
11452 // CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
11453 // CHECK17-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
11454 // CHECK17-NEXT: store i32 [[TMP15]], ptr [[DOTOMP_IV]], align 4
11455 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
11456 // CHECK17: omp.inner.for.cond:
11457 // CHECK17-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26:![0-9]+]]
11458 // CHECK17-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP26]]
11459 // CHECK17-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
11460 // CHECK17-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11461 // CHECK17: omp.inner.for.body:
11462 // CHECK17-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]]
11463 // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
11464 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
11465 // CHECK17-NEXT: store i32 [[ADD]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP26]]
11466 // CHECK17-NEXT: [[TMP19:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP26]]
11467 // CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 [[TMP19]]
11468 // CHECK17-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP26]]
11469 // CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
11470 // CHECK17: omp.body.continue:
11471 // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
11472 // CHECK17: omp.inner.for.inc:
11473 // CHECK17-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]]
11474 // CHECK17-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP20]], 1
11475 // CHECK17-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]]
11476 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]]
11477 // CHECK17: omp.inner.for.end:
11478 // CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
11479 // CHECK17: omp.loop.exit:
11480 // CHECK17-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
11481 // CHECK17-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
11482 // CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP22]])
11483 // CHECK17-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
11484 // CHECK17-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
11485 // CHECK17-NEXT: br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
11486 // CHECK17: .omp.final.then:
11487 // CHECK17-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
11488 // CHECK17-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP25]], 0
11489 // CHECK17-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
11490 // CHECK17-NEXT: [[MUL9:%.*]] = mul nsw i32 [[DIV8]], 1
11491 // CHECK17-NEXT: [[ADD10:%.*]] = add nsw i32 0, [[MUL9]]
11492 // CHECK17-NEXT: store i32 [[ADD10]], ptr [[I3]], align 4
11493 // CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]]
11494 // CHECK17: .omp.final.done:
11495 // CHECK17-NEXT: br label [[OMP_PRECOND_END]]
11496 // CHECK17: omp.precond.end:
11497 // CHECK17-NEXT: ret void
11500 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164
11501 // CHECK17-SAME: (i32 noundef [[M:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
11502 // CHECK17-NEXT: entry:
11503 // CHECK17-NEXT: [[M_ADDR:%.*]] = alloca i32, align 4
11504 // CHECK17-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
11505 // CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
11506 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
11507 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
11508 // CHECK17-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
11509 // CHECK17-NEXT: store i32 [[M]], ptr [[M_ADDR]], align 4
11510 // CHECK17-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
11511 // CHECK17-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
11512 // CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
11513 // CHECK17-NEXT: [[TMP0:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
11514 // CHECK17-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4
11515 // CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[M_ADDR]], align 4
11516 // CHECK17-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 4
11517 // CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
11518 // CHECK17-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
11519 // CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
11520 // CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.omp_outlined, ptr [[N_ADDR]], i32 [[TMP0]], ptr [[TMP1]], i32 [[TMP4]])
11521 // CHECK17-NEXT: ret void
11524 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.omp_outlined
11525 // CHECK17-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
11526 // CHECK17-NEXT: entry:
11527 // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
11528 // CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
11529 // CHECK17-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 4
11530 // CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
11531 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
11532 // CHECK17-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
11533 // CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
11534 // CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
11535 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
11536 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
11537 // CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
11538 // CHECK17-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
11539 // CHECK17-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
11540 // CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11541 // CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11542 // CHECK17-NEXT: [[I4:%.*]] = alloca i32, align 4
11543 // CHECK17-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
11544 // CHECK17-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
11545 // CHECK17-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
11546 // CHECK17-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 4
11547 // CHECK17-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
11548 // CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
11549 // CHECK17-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
11550 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 4
11551 // CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
11552 // CHECK17-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 4
11553 // CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
11554 // CHECK17-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_1]], align 4
11555 // CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
11556 // CHECK17-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
11557 // CHECK17-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
11558 // CHECK17-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
11559 // CHECK17-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_2]], align 4
11560 // CHECK17-NEXT: store i32 0, ptr [[I]], align 4
11561 // CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
11562 // CHECK17-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
11563 // CHECK17-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
11564 // CHECK17: omp.precond.then:
11565 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
11566 // CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
11567 // CHECK17-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_COMB_UB]], align 4
11568 // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
11569 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
11570 // CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
11571 // CHECK17-NEXT: [[TMP8:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
11572 // CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4
11573 // CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP9]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[TMP7]])
11574 // CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
11575 // CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
11576 // CHECK17-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
11577 // CHECK17-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
11578 // CHECK17: cond.true:
11579 // CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
11580 // CHECK17-NEXT: br label [[COND_END:%.*]]
11581 // CHECK17: cond.false:
11582 // CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
11583 // CHECK17-NEXT: br label [[COND_END]]
11584 // CHECK17: cond.end:
11585 // CHECK17-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
11586 // CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
11587 // CHECK17-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
11588 // CHECK17-NEXT: store i32 [[TMP14]], ptr [[DOTOMP_IV]], align 4
11589 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
11590 // CHECK17: omp.inner.for.cond:
11591 // CHECK17-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29:![0-9]+]]
11592 // CHECK17-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP29]]
11593 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP16]], 1
11594 // CHECK17-NEXT: [[CMP6:%.*]] = icmp slt i32 [[TMP15]], [[ADD]]
11595 // CHECK17-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11596 // CHECK17: omp.inner.for.body:
11597 // CHECK17-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP29]]
11598 // CHECK17-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP29]]
11599 // CHECK17-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group [[ACC_GRP29]]
11600 // CHECK17-NEXT: store i32 [[TMP19]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group [[ACC_GRP29]]
11601 // CHECK17-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group [[ACC_GRP29]]
11602 // CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 6, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.omp_outlined.omp_outlined, i32 [[TMP17]], i32 [[TMP18]], ptr [[TMP0]], i32 [[TMP1]], ptr [[TMP2]], i32 [[TMP20]]), !llvm.access.group [[ACC_GRP29]]
11603 // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
11604 // CHECK17: omp.inner.for.inc:
11605 // CHECK17-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]]
11606 // CHECK17-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP29]]
11607 // CHECK17-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
11608 // CHECK17-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]]
11609 // CHECK17-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP29]]
11610 // CHECK17-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP29]]
11611 // CHECK17-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
11612 // CHECK17-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP29]]
11613 // CHECK17-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP29]]
11614 // CHECK17-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP29]]
11615 // CHECK17-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
11616 // CHECK17-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP29]]
11617 // CHECK17-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP29]]
11618 // CHECK17-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP29]]
11619 // CHECK17-NEXT: [[CMP10:%.*]] = icmp sgt i32 [[TMP27]], [[TMP28]]
11620 // CHECK17-NEXT: br i1 [[CMP10]], label [[COND_TRUE11:%.*]], label [[COND_FALSE12:%.*]]
11621 // CHECK17: cond.true11:
11622 // CHECK17-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP29]]
11623 // CHECK17-NEXT: br label [[COND_END13:%.*]]
11624 // CHECK17: cond.false12:
11625 // CHECK17-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP29]]
11626 // CHECK17-NEXT: br label [[COND_END13]]
11627 // CHECK17: cond.end13:
11628 // CHECK17-NEXT: [[COND14:%.*]] = phi i32 [ [[TMP29]], [[COND_TRUE11]] ], [ [[TMP30]], [[COND_FALSE12]] ]
11629 // CHECK17-NEXT: store i32 [[COND14]], ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP29]]
11630 // CHECK17-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP29]]
11631 // CHECK17-NEXT: store i32 [[TMP31]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]]
11632 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]]
11633 // CHECK17: omp.inner.for.end:
11634 // CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
11635 // CHECK17: omp.loop.exit:
11636 // CHECK17-NEXT: [[TMP32:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
11637 // CHECK17-NEXT: [[TMP33:%.*]] = load i32, ptr [[TMP32]], align 4
11638 // CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP33]])
11639 // CHECK17-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
11640 // CHECK17-NEXT: [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
11641 // CHECK17-NEXT: br i1 [[TMP35]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
11642 // CHECK17: .omp.final.then:
11643 // CHECK17-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
11644 // CHECK17-NEXT: [[SUB15:%.*]] = sub nsw i32 [[TMP36]], 0
11645 // CHECK17-NEXT: [[DIV16:%.*]] = sdiv i32 [[SUB15]], 1
11646 // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV16]], 1
11647 // CHECK17-NEXT: [[ADD17:%.*]] = add nsw i32 0, [[MUL]]
11648 // CHECK17-NEXT: store i32 [[ADD17]], ptr [[I4]], align 4
11649 // CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]]
11650 // CHECK17: .omp.final.done:
11651 // CHECK17-NEXT: br label [[OMP_PRECOND_END]]
11652 // CHECK17: omp.precond.end:
11653 // CHECK17-NEXT: ret void
11656 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.omp_outlined.omp_outlined
11657 // CHECK17-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
11658 // CHECK17-NEXT: entry:
11659 // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
11660 // CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
11661 // CHECK17-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
11662 // CHECK17-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
11663 // CHECK17-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 4
11664 // CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
11665 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
11666 // CHECK17-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
11667 // CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
11668 // CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
11669 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
11670 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
11671 // CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
11672 // CHECK17-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
11673 // CHECK17-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
11674 // CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11675 // CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11676 // CHECK17-NEXT: [[I4:%.*]] = alloca i32, align 4
11677 // CHECK17-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
11678 // CHECK17-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
11679 // CHECK17-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
11680 // CHECK17-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
11681 // CHECK17-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 4
11682 // CHECK17-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
11683 // CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
11684 // CHECK17-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
11685 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 4
11686 // CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
11687 // CHECK17-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 4
11688 // CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
11689 // CHECK17-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_1]], align 4
11690 // CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
11691 // CHECK17-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
11692 // CHECK17-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
11693 // CHECK17-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
11694 // CHECK17-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_2]], align 4
11695 // CHECK17-NEXT: store i32 0, ptr [[I]], align 4
11696 // CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
11697 // CHECK17-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
11698 // CHECK17-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
11699 // CHECK17: omp.precond.then:
11700 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
11701 // CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
11702 // CHECK17-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4
11703 // CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
11704 // CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
11705 // CHECK17-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_LB]], align 4
11706 // CHECK17-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_UB]], align 4
11707 // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
11708 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
11709 // CHECK17-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
11710 // CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4
11711 // CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP10]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
11712 // CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
11713 // CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
11714 // CHECK17-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP11]], [[TMP12]]
11715 // CHECK17-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
11716 // CHECK17: cond.true:
11717 // CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
11718 // CHECK17-NEXT: br label [[COND_END:%.*]]
11719 // CHECK17: cond.false:
11720 // CHECK17-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
11721 // CHECK17-NEXT: br label [[COND_END]]
11722 // CHECK17: cond.end:
11723 // CHECK17-NEXT: [[COND:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ]
11724 // CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
11725 // CHECK17-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
11726 // CHECK17-NEXT: store i32 [[TMP15]], ptr [[DOTOMP_IV]], align 4
11727 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
11728 // CHECK17: omp.inner.for.cond:
11729 // CHECK17-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32:![0-9]+]]
11730 // CHECK17-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP32]]
11731 // CHECK17-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
11732 // CHECK17-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11733 // CHECK17: omp.inner.for.body:
11734 // CHECK17-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]]
11735 // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
11736 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
11737 // CHECK17-NEXT: store i32 [[ADD]], ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP32]]
11738 // CHECK17-NEXT: [[TMP19:%.*]] = load i32, ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP32]]
11739 // CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 [[TMP19]]
11740 // CHECK17-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP32]]
11741 // CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
11742 // CHECK17: omp.body.continue:
11743 // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
11744 // CHECK17: omp.inner.for.inc:
11745 // CHECK17-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]]
11746 // CHECK17-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP20]], 1
11747 // CHECK17-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]]
11748 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]]
11749 // CHECK17: omp.inner.for.end:
11750 // CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
11751 // CHECK17: omp.loop.exit:
11752 // CHECK17-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
11753 // CHECK17-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
11754 // CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP22]])
11755 // CHECK17-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
11756 // CHECK17-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
11757 // CHECK17-NEXT: br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
11758 // CHECK17: .omp.final.then:
11759 // CHECK17-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
11760 // CHECK17-NEXT: [[SUB8:%.*]] = sub nsw i32 [[TMP25]], 0
11761 // CHECK17-NEXT: [[DIV9:%.*]] = sdiv i32 [[SUB8]], 1
11762 // CHECK17-NEXT: [[MUL10:%.*]] = mul nsw i32 [[DIV9]], 1
11763 // CHECK17-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]]
11764 // CHECK17-NEXT: store i32 [[ADD11]], ptr [[I4]], align 4
11765 // CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]]
11766 // CHECK17: .omp.final.done:
11767 // CHECK17-NEXT: br label [[OMP_PRECOND_END]]
11768 // CHECK17: omp.precond.end:
11769 // CHECK17-NEXT: ret void
11772 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169
11773 // CHECK17-SAME: (i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
11774 // CHECK17-NEXT: entry:
11775 // CHECK17-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
11776 // CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
11777 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
11778 // CHECK17-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
11779 // CHECK17-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
11780 // CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
11781 // CHECK17-NEXT: [[TMP0:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
11782 // CHECK17-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4
11783 // CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.omp_outlined, ptr [[N_ADDR]], i32 [[TMP0]], ptr [[TMP1]])
11784 // CHECK17-NEXT: ret void
11787 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.omp_outlined
11788 // CHECK17-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
11789 // CHECK17-NEXT: entry:
11790 // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
11791 // CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
11792 // CHECK17-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 4
11793 // CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
11794 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
11795 // CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
11796 // CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
11797 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
11798 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
11799 // CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
11800 // CHECK17-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
11801 // CHECK17-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
11802 // CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11803 // CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11804 // CHECK17-NEXT: [[I3:%.*]] = alloca i32, align 4
11805 // CHECK17-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
11806 // CHECK17-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
11807 // CHECK17-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 4
11808 // CHECK17-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
11809 // CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
11810 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 4
11811 // CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
11812 // CHECK17-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 4
11813 // CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
11814 // CHECK17-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4
11815 // CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
11816 // CHECK17-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
11817 // CHECK17-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
11818 // CHECK17-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
11819 // CHECK17-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
11820 // CHECK17-NEXT: store i32 0, ptr [[I]], align 4
11821 // CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
11822 // CHECK17-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
11823 // CHECK17-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
11824 // CHECK17: omp.precond.then:
11825 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
11826 // CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
11827 // CHECK17-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_COMB_UB]], align 4
11828 // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
11829 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
11830 // CHECK17-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
11831 // CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
11832 // CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
11833 // CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
11834 // CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
11835 // CHECK17-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
11836 // CHECK17-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
11837 // CHECK17: cond.true:
11838 // CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
11839 // CHECK17-NEXT: br label [[COND_END:%.*]]
11840 // CHECK17: cond.false:
11841 // CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
11842 // CHECK17-NEXT: br label [[COND_END]]
11843 // CHECK17: cond.end:
11844 // CHECK17-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
11845 // CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
11846 // CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
11847 // CHECK17-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4
11848 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
11849 // CHECK17: omp.inner.for.cond:
11850 // CHECK17-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35:![0-9]+]]
11851 // CHECK17-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP35]]
11852 // CHECK17-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
11853 // CHECK17-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11854 // CHECK17: omp.inner.for.body:
11855 // CHECK17-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP35]]
11856 // CHECK17-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP35]]
11857 // CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 5, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.omp_outlined.omp_outlined, i32 [[TMP16]], i32 [[TMP17]], ptr [[TMP0]], i32 [[TMP1]], ptr [[TMP2]]), !llvm.access.group [[ACC_GRP35]]
11858 // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
11859 // CHECK17: omp.inner.for.inc:
11860 // CHECK17-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]]
11861 // CHECK17-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP35]]
11862 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
11863 // CHECK17-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]]
11864 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]]
11865 // CHECK17: omp.inner.for.end:
11866 // CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
11867 // CHECK17: omp.loop.exit:
11868 // CHECK17-NEXT: [[TMP20:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
11869 // CHECK17-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP20]], align 4
11870 // CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP21]])
11871 // CHECK17-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
11872 // CHECK17-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
11873 // CHECK17-NEXT: br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
11874 // CHECK17: .omp.final.then:
11875 // CHECK17-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
11876 // CHECK17-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP24]], 0
11877 // CHECK17-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
11878 // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
11879 // CHECK17-NEXT: [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
11880 // CHECK17-NEXT: store i32 [[ADD8]], ptr [[I3]], align 4
11881 // CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]]
11882 // CHECK17: .omp.final.done:
11883 // CHECK17-NEXT: br label [[OMP_PRECOND_END]]
11884 // CHECK17: omp.precond.end:
11885 // CHECK17-NEXT: ret void
11888 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.omp_outlined.omp_outlined
11889 // CHECK17-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
11890 // CHECK17-NEXT: entry:
11891 // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
11892 // CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
11893 // CHECK17-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
11894 // CHECK17-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
11895 // CHECK17-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 4
11896 // CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
11897 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
11898 // CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
11899 // CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
11900 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
11901 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
11902 // CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
11903 // CHECK17-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
11904 // CHECK17-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
11905 // CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11906 // CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11907 // CHECK17-NEXT: [[I3:%.*]] = alloca i32, align 4
11908 // CHECK17-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
11909 // CHECK17-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
11910 // CHECK17-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
11911 // CHECK17-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
11912 // CHECK17-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 4
11913 // CHECK17-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
11914 // CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
11915 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 4
11916 // CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
11917 // CHECK17-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 4
11918 // CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
11919 // CHECK17-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4
11920 // CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
11921 // CHECK17-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
11922 // CHECK17-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
11923 // CHECK17-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
11924 // CHECK17-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
11925 // CHECK17-NEXT: store i32 0, ptr [[I]], align 4
11926 // CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
11927 // CHECK17-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
11928 // CHECK17-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
11929 // CHECK17: omp.precond.then:
11930 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
11931 // CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
11932 // CHECK17-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4
11933 // CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
11934 // CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
11935 // CHECK17-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_LB]], align 4
11936 // CHECK17-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_UB]], align 4
11937 // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
11938 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
11939 // CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
11940 // CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
11941 // CHECK17-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
11942 // CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP11]], align 4
11943 // CHECK17-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB3]], i32 [[TMP12]], i32 35, i32 [[TMP9]], i32 [[TMP10]], i32 1, i32 1)
11944 // CHECK17-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
11945 // CHECK17: omp.dispatch.cond:
11946 // CHECK17-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
11947 // CHECK17-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 4
11948 // CHECK17-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB3]], i32 [[TMP14]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
11949 // CHECK17-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP15]], 0
11950 // CHECK17-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
11951 // CHECK17: omp.dispatch.body:
11952 // CHECK17-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
11953 // CHECK17-NEXT: store i32 [[TMP16]], ptr [[DOTOMP_IV]], align 4
11954 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
11955 // CHECK17: omp.inner.for.cond:
11956 // CHECK17-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38:![0-9]+]]
11957 // CHECK17-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP38]]
11958 // CHECK17-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
11959 // CHECK17-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11960 // CHECK17: omp.inner.for.body:
11961 // CHECK17-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38]]
11962 // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
11963 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
11964 // CHECK17-NEXT: store i32 [[ADD]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP38]]
11965 // CHECK17-NEXT: [[TMP20:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP38]]
11966 // CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 [[TMP20]]
11967 // CHECK17-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP38]]
11968 // CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
11969 // CHECK17: omp.body.continue:
11970 // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
11971 // CHECK17: omp.inner.for.inc:
11972 // CHECK17-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38]]
11973 // CHECK17-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP21]], 1
11974 // CHECK17-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38]]
11975 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP39:![0-9]+]]
11976 // CHECK17: omp.inner.for.end:
11977 // CHECK17-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
11978 // CHECK17: omp.dispatch.inc:
11979 // CHECK17-NEXT: br label [[OMP_DISPATCH_COND]]
11980 // CHECK17: omp.dispatch.end:
11981 // CHECK17-NEXT: [[TMP22:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
11982 // CHECK17-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4
11983 // CHECK17-NEXT: call void @__kmpc_dispatch_deinit(ptr @[[GLOB3]], i32 [[TMP23]])
11984 // CHECK17-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
11985 // CHECK17-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
11986 // CHECK17-NEXT: br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
11987 // CHECK17: .omp.final.then:
11988 // CHECK17-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
11989 // CHECK17-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP26]], 0
11990 // CHECK17-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
11991 // CHECK17-NEXT: [[MUL8:%.*]] = mul nsw i32 [[DIV7]], 1
11992 // CHECK17-NEXT: [[ADD9:%.*]] = add nsw i32 0, [[MUL8]]
11993 // CHECK17-NEXT: store i32 [[ADD9]], ptr [[I3]], align 4
11994 // CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]]
11995 // CHECK17: .omp.final.done:
11996 // CHECK17-NEXT: br label [[OMP_PRECOND_END]]
11997 // CHECK17: omp.precond.end:
11998 // CHECK17-NEXT: ret void
12001 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174
12002 // CHECK17-SAME: (i32 noundef [[M:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
12003 // CHECK17-NEXT: entry:
12004 // CHECK17-NEXT: [[M_ADDR:%.*]] = alloca i32, align 4
12005 // CHECK17-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
12006 // CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
12007 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
12008 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
12009 // CHECK17-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK17-NEXT: store i32 [[M]], ptr [[M_ADDR]], align 4
// CHECK17-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
// CHECK17-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
// CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK17-NEXT: [[TMP0:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
// CHECK17-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4
// CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[M_ADDR]], align 4
// CHECK17-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK17-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.omp_outlined, ptr [[N_ADDR]], i32 [[TMP0]], ptr [[TMP1]], i32 [[TMP4]])
// CHECK17-NEXT: ret void
// CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.omp_outlined
// CHECK17-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK17-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 4
// CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK17-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[I4:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK17-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK17-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// CHECK17-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 4
// CHECK17-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
// CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK17-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 4
// CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
// CHECK17-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 4
// CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK17-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK17-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
// CHECK17-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK17-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK17-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_2]], align 4
// CHECK17-NEXT: store i32 0, ptr [[I]], align 4
// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK17-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
// CHECK17-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK17: omp.precond.then:
// CHECK17-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
// CHECK17-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
// CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
// CHECK17-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
// CHECK17-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK17: cond.true:
// CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
// CHECK17-NEXT: br label [[COND_END:%.*]]
// CHECK17: cond.false:
// CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: br label [[COND_END]]
// CHECK17: cond.end:
// CHECK17-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
// CHECK17-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK17: omp.inner.for.cond:
// CHECK17-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP41:![0-9]+]]
// CHECK17-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP41]]
// CHECK17-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
// CHECK17-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK17: omp.inner.for.body:
// CHECK17-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP41]]
// CHECK17-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP41]]
// CHECK17-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group [[ACC_GRP41]]
// CHECK17-NEXT: store i32 [[TMP18]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group [[ACC_GRP41]]
// CHECK17-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group [[ACC_GRP41]]
// CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 6, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.omp_outlined.omp_outlined, i32 [[TMP16]], i32 [[TMP17]], ptr [[TMP0]], i32 [[TMP1]], ptr [[TMP2]], i32 [[TMP19]]), !llvm.access.group [[ACC_GRP41]]
// CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK17: omp.inner.for.inc:
// CHECK17-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP41]]
// CHECK17-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP41]]
// CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
// CHECK17-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP41]]
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP42:![0-9]+]]
// CHECK17: omp.inner.for.end:
// CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK17: omp.loop.exit:
// CHECK17-NEXT: [[TMP22:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK17-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4
// CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP23]])
// CHECK17-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
// CHECK17-NEXT: br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK17: .omp.final.then:
// CHECK17-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK17-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP26]], 0
// CHECK17-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
// CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
// CHECK17-NEXT: [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
// CHECK17-NEXT: store i32 [[ADD9]], ptr [[I4]], align 4
// CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK17: .omp.final.done:
// CHECK17-NEXT: br label [[OMP_PRECOND_END]]
// CHECK17: omp.precond.end:
// CHECK17-NEXT: ret void
// CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.omp_outlined.omp_outlined
// CHECK17-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK17-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 4
// CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK17-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[I4:%.*]] = alloca i32, align 4
// CHECK17-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK17-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// CHECK17-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK17-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK17-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 4
// CHECK17-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
// CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK17-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 4
// CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
// CHECK17-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 4
// CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK17-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK17-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
// CHECK17-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK17-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK17-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_2]], align 4
// CHECK17-NEXT: store i32 0, ptr [[I]], align 4
// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK17-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
// CHECK17-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK17: omp.precond.then:
// CHECK17-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
// CHECK17-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK17-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_UB]], align 4
// CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4
// CHECK17-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB3]], i32 [[TMP13]], i32 35, i32 [[TMP10]], i32 [[TMP11]], i32 1, i32 [[TMP9]])
// CHECK17-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK17: omp.dispatch.cond:
// CHECK17-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK17-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 4
// CHECK17-NEXT: [[TMP16:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB3]], i32 [[TMP15]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
// CHECK17-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP16]], 0
// CHECK17-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK17: omp.dispatch.body:
// CHECK17-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 [[TMP17]], ptr [[DOTOMP_IV]], align 4
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK17: omp.inner.for.cond:
// CHECK17-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP44:![0-9]+]]
// CHECK17-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP44]]
// CHECK17-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
// CHECK17-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK17: omp.inner.for.body:
// CHECK17-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP44]]
// CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP20]], 1
// CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK17-NEXT: store i32 [[ADD]], ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP44]]
// CHECK17-NEXT: [[TMP21:%.*]] = load i32, ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP44]]
// CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 [[TMP21]]
// CHECK17-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP44]]
// CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK17: omp.body.continue:
// CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK17: omp.inner.for.inc:
// CHECK17-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP44]]
// CHECK17-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP22]], 1
// CHECK17-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP44]]
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP45:![0-9]+]]
// CHECK17: omp.inner.for.end:
// CHECK17-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK17: omp.dispatch.inc:
// CHECK17-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK17: omp.dispatch.end:
// CHECK17-NEXT: [[TMP23:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK17-NEXT: [[TMP24:%.*]] = load i32, ptr [[TMP23]], align 4
// CHECK17-NEXT: call void @__kmpc_dispatch_deinit(ptr @[[GLOB3]], i32 [[TMP24]])
// CHECK17-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
// CHECK17-NEXT: br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK17: .omp.final.then:
// CHECK17-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK17-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP27]], 0
// CHECK17-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
// CHECK17-NEXT: [[MUL9:%.*]] = mul nsw i32 [[DIV8]], 1
// CHECK17-NEXT: [[ADD10:%.*]] = add nsw i32 0, [[MUL9]]
// CHECK17-NEXT: store i32 [[ADD10]], ptr [[I4]], align 4
// CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK17: .omp.final.done:
// CHECK17-NEXT: br label [[OMP_PRECOND_END]]
// CHECK17: omp.precond.end:
// CHECK17-NEXT: ret void
// CHECK17-LABEL: define {{[^@]+}}@_Z5tmainIiLi10EEiT_
// CHECK17-SAME: (i32 noundef [[ARGC:%.*]]) #[[ATTR5:[0-9]+]] comdat {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[A:%.*]] = alloca [10 x i32], align 4
// CHECK17-NEXT: [[M:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 4
// CHECK17-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 4
// CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 4
// CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS1:%.*]] = alloca [1 x ptr], align 4
// CHECK17-NEXT: [[DOTOFFLOAD_PTRS2:%.*]] = alloca [1 x ptr], align 4
// CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS3:%.*]] = alloca [1 x ptr], align 4
// CHECK17-NEXT: [[_TMP4:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[KERNEL_ARGS5:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
// CHECK17-NEXT: [[M_CASTED:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS8:%.*]] = alloca [2 x ptr], align 4
// CHECK17-NEXT: [[DOTOFFLOAD_PTRS9:%.*]] = alloca [2 x ptr], align 4
// CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS10:%.*]] = alloca [2 x ptr], align 4
// CHECK17-NEXT: [[_TMP11:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[KERNEL_ARGS12:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
// CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS15:%.*]] = alloca [1 x ptr], align 4
// CHECK17-NEXT: [[DOTOFFLOAD_PTRS16:%.*]] = alloca [1 x ptr], align 4
// CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS17:%.*]] = alloca [1 x ptr], align 4
// CHECK17-NEXT: [[_TMP18:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[KERNEL_ARGS19:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
// CHECK17-NEXT: [[M_CASTED22:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS23:%.*]] = alloca [2 x ptr], align 4
// CHECK17-NEXT: [[DOTOFFLOAD_PTRS24:%.*]] = alloca [2 x ptr], align 4
// CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS25:%.*]] = alloca [2 x ptr], align 4
// CHECK17-NEXT: [[_TMP26:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[KERNEL_ARGS27:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
// CHECK17-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4
// CHECK17-NEXT: store i32 10, ptr [[M]], align 4
// CHECK17-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK17-NEXT: store ptr [[A]], ptr [[TMP0]], align 4
// CHECK17-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK17-NEXT: store ptr [[A]], ptr [[TMP1]], align 4
// CHECK17-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK17-NEXT: store ptr null, ptr [[TMP2]], align 4
// CHECK17-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK17-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK17-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
// CHECK17-NEXT: store i32 3, ptr [[TMP5]], align 4
// CHECK17-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
// CHECK17-NEXT: store i32 1, ptr [[TMP6]], align 4
// CHECK17-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
// CHECK17-NEXT: store ptr [[TMP3]], ptr [[TMP7]], align 4
// CHECK17-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
// CHECK17-NEXT: store ptr [[TMP4]], ptr [[TMP8]], align 4
// CHECK17-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
// CHECK17-NEXT: store ptr @.offload_sizes.9, ptr [[TMP9]], align 4
// CHECK17-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
// CHECK17-NEXT: store ptr @.offload_maptypes.10, ptr [[TMP10]], align 4
// CHECK17-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
// CHECK17-NEXT: store ptr null, ptr [[TMP11]], align 4
// CHECK17-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
// CHECK17-NEXT: store ptr null, ptr [[TMP12]], align 4
// CHECK17-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
// CHECK17-NEXT: store i64 10, ptr [[TMP13]], align 8
// CHECK17-NEXT: [[TMP14:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
// CHECK17-NEXT: store i64 0, ptr [[TMP14]], align 8
// CHECK17-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
// CHECK17-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP15]], align 4
// CHECK17-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
// CHECK17-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP16]], align 4
// CHECK17-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
// CHECK17-NEXT: store i32 0, ptr [[TMP17]], align 4
// CHECK17-NEXT: [[TMP18:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.region_id, ptr [[KERNEL_ARGS]])
// CHECK17-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
// CHECK17-NEXT: br i1 [[TMP19]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK17: omp_offload.failed:
// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122(ptr [[A]]) #[[ATTR3]]
// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK17: omp_offload.cont:
// CHECK17-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
// CHECK17-NEXT: store ptr [[A]], ptr [[TMP20]], align 4
// CHECK17-NEXT: [[TMP21:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
// CHECK17-NEXT: store ptr [[A]], ptr [[TMP21]], align 4
// CHECK17-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS3]], i32 0, i32 0
// CHECK17-NEXT: store ptr null, ptr [[TMP22]], align 4
// CHECK17-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
// CHECK17-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
// CHECK17-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 0
// CHECK17-NEXT: store i32 3, ptr [[TMP25]], align 4
// CHECK17-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 1
// CHECK17-NEXT: store i32 1, ptr [[TMP26]], align 4
// CHECK17-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 2
// CHECK17-NEXT: store ptr [[TMP23]], ptr [[TMP27]], align 4
// CHECK17-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 3
// CHECK17-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 4
// CHECK17-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 4
// CHECK17-NEXT: store ptr @.offload_sizes.11, ptr [[TMP29]], align 4
// CHECK17-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 5
// CHECK17-NEXT: store ptr @.offload_maptypes.12, ptr [[TMP30]], align 4
// CHECK17-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 6
// CHECK17-NEXT: store ptr null, ptr [[TMP31]], align 4
// CHECK17-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 7
// CHECK17-NEXT: store ptr null, ptr [[TMP32]], align 4
// CHECK17-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 8
// CHECK17-NEXT: store i64 10, ptr [[TMP33]], align 8
// CHECK17-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 9
// CHECK17-NEXT: store i64 0, ptr [[TMP34]], align 8
// CHECK17-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 10
// CHECK17-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP35]], align 4
// CHECK17-NEXT: [[TMP36:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 11
// CHECK17-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP36]], align 4
// CHECK17-NEXT: [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 12
// CHECK17-NEXT: store i32 0, ptr [[TMP37]], align 4
// CHECK17-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.region_id, ptr [[KERNEL_ARGS5]])
// CHECK17-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
// CHECK17-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED6:%.*]], label [[OMP_OFFLOAD_CONT7:%.*]]
// CHECK17: omp_offload.failed6:
// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127(ptr [[A]]) #[[ATTR3]]
// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT7]]
// CHECK17: omp_offload.cont7:
// CHECK17-NEXT: [[TMP40:%.*]] = load i32, ptr [[M]], align 4
// CHECK17-NEXT: store i32 [[TMP40]], ptr [[M_CASTED]], align 4
// CHECK17-NEXT: [[TMP41:%.*]] = load i32, ptr [[M_CASTED]], align 4
// CHECK17-NEXT: [[TMP42:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS8]], i32 0, i32 0
// CHECK17-NEXT: store i32 [[TMP41]], ptr [[TMP42]], align 4
// CHECK17-NEXT: [[TMP43:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS9]], i32 0, i32 0
// CHECK17-NEXT: store i32 [[TMP41]], ptr [[TMP43]], align 4
// CHECK17-NEXT: [[TMP44:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS10]], i32 0, i32 0
// CHECK17-NEXT: store ptr null, ptr [[TMP44]], align 4
// CHECK17-NEXT: [[TMP45:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS8]], i32 0, i32 1
// CHECK17-NEXT: store ptr [[A]], ptr [[TMP45]], align 4
// CHECK17-NEXT: [[TMP46:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS9]], i32 0, i32 1
// CHECK17-NEXT: store ptr [[A]], ptr [[TMP46]], align 4
// CHECK17-NEXT: [[TMP47:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS10]], i32 0, i32 1
// CHECK17-NEXT: store ptr null, ptr [[TMP47]], align 4
// CHECK17-NEXT: [[TMP48:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS8]], i32 0, i32 0
// CHECK17-NEXT: [[TMP49:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS9]], i32 0, i32 0
// CHECK17-NEXT: [[TMP50:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 0
// CHECK17-NEXT: store i32 3, ptr [[TMP50]], align 4
// CHECK17-NEXT: [[TMP51:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 1
// CHECK17-NEXT: store i32 2, ptr [[TMP51]], align 4
// CHECK17-NEXT: [[TMP52:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 2
// CHECK17-NEXT: store ptr [[TMP48]], ptr [[TMP52]], align 4
// CHECK17-NEXT: [[TMP53:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 3
// CHECK17-NEXT: store ptr [[TMP49]], ptr [[TMP53]], align 4
// CHECK17-NEXT: [[TMP54:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 4
// CHECK17-NEXT: store ptr @.offload_sizes.13, ptr [[TMP54]], align 4
// CHECK17-NEXT: [[TMP55:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 5
// CHECK17-NEXT: store ptr @.offload_maptypes.14, ptr [[TMP55]], align 4
// CHECK17-NEXT: [[TMP56:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 6
// CHECK17-NEXT: store ptr null, ptr [[TMP56]], align 4
// CHECK17-NEXT: [[TMP57:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 7
// CHECK17-NEXT: store ptr null, ptr [[TMP57]], align 4
// CHECK17-NEXT: [[TMP58:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 8
// CHECK17-NEXT: store i64 10, ptr [[TMP58]], align 8
// CHECK17-NEXT: [[TMP59:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 9
// CHECK17-NEXT: store i64 0, ptr [[TMP59]], align 8
// CHECK17-NEXT: [[TMP60:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 10
// CHECK17-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP60]], align 4
// CHECK17-NEXT: [[TMP61:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 11
// CHECK17-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP61]], align 4
// CHECK17-NEXT: [[TMP62:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 12
// CHECK17-NEXT: store i32 0, ptr [[TMP62]], align 4
// CHECK17-NEXT: [[TMP63:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.region_id, ptr [[KERNEL_ARGS12]])
// CHECK17-NEXT: [[TMP64:%.*]] = icmp ne i32 [[TMP63]], 0
// CHECK17-NEXT: br i1 [[TMP64]], label [[OMP_OFFLOAD_FAILED13:%.*]], label [[OMP_OFFLOAD_CONT14:%.*]]
// CHECK17: omp_offload.failed13:
// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132(i32 [[TMP41]], ptr [[A]]) #[[ATTR3]]
// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT14]]
// CHECK17: omp_offload.cont14:
// CHECK17-NEXT: [[TMP65:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS15]], i32 0, i32 0
// CHECK17-NEXT: store ptr [[A]], ptr [[TMP65]], align 4
// CHECK17-NEXT: [[TMP66:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS16]], i32 0, i32 0
// CHECK17-NEXT: store ptr [[A]], ptr [[TMP66]], align 4
// CHECK17-NEXT: [[TMP67:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS17]], i32 0, i32 0
// CHECK17-NEXT: store ptr null, ptr [[TMP67]], align 4
// CHECK17-NEXT: [[TMP68:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS15]], i32 0, i32 0
// CHECK17-NEXT: [[TMP69:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS16]], i32 0, i32 0
// CHECK17-NEXT: [[TMP70:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 0
// CHECK17-NEXT: store i32 3, ptr [[TMP70]], align 4
// CHECK17-NEXT: [[TMP71:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 1
// CHECK17-NEXT: store i32 1, ptr [[TMP71]], align 4
// CHECK17-NEXT: [[TMP72:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 2
// CHECK17-NEXT: store ptr [[TMP68]], ptr [[TMP72]], align 4
// CHECK17-NEXT: [[TMP73:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 3
// CHECK17-NEXT: store ptr [[TMP69]], ptr [[TMP73]], align 4
// CHECK17-NEXT: [[TMP74:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 4
// CHECK17-NEXT: store ptr @.offload_sizes.15, ptr [[TMP74]], align 4
// CHECK17-NEXT: [[TMP75:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 5
// CHECK17-NEXT: store ptr @.offload_maptypes.16, ptr [[TMP75]], align 4
// CHECK17-NEXT: [[TMP76:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 6
// CHECK17-NEXT: store ptr null, ptr [[TMP76]], align 4
// CHECK17-NEXT: [[TMP77:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 7
// CHECK17-NEXT: store ptr null, ptr [[TMP77]], align 4
// CHECK17-NEXT: [[TMP78:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 8
// CHECK17-NEXT: store i64 10, ptr [[TMP78]], align 8
// CHECK17-NEXT: [[TMP79:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 9
// CHECK17-NEXT: store i64 0, ptr [[TMP79]], align 8
// CHECK17-NEXT: [[TMP80:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 10
// CHECK17-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP80]], align 4
// CHECK17-NEXT: [[TMP81:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 11
// CHECK17-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP81]], align 4
// CHECK17-NEXT: [[TMP82:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 12
// CHECK17-NEXT: store i32 0, ptr [[TMP82]], align 4
// CHECK17-NEXT: [[TMP83:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.region_id, ptr [[KERNEL_ARGS19]])
// CHECK17-NEXT: [[TMP84:%.*]] = icmp ne i32 [[TMP83]], 0
// CHECK17-NEXT: br i1 [[TMP84]], label [[OMP_OFFLOAD_FAILED20:%.*]], label [[OMP_OFFLOAD_CONT21:%.*]]
// CHECK17: omp_offload.failed20:
// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137(ptr [[A]]) #[[ATTR3]]
// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT21]]
// CHECK17: omp_offload.cont21:
// CHECK17-NEXT: [[TMP85:%.*]] = load i32, ptr [[M]], align 4
// CHECK17-NEXT: store i32 [[TMP85]], ptr [[M_CASTED22]], align 4
// CHECK17-NEXT: [[TMP86:%.*]] = load i32, ptr [[M_CASTED22]], align 4
// CHECK17-NEXT: [[TMP87:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 0
// CHECK17-NEXT: store i32 [[TMP86]], ptr [[TMP87]], align 4
// CHECK17-NEXT: [[TMP88:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS24]], i32 0, i32 0
// CHECK17-NEXT: store i32 [[TMP86]], ptr [[TMP88]], align 4
// CHECK17-NEXT: [[TMP89:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 0
// CHECK17-NEXT: store ptr null, ptr [[TMP89]], align 4
// CHECK17-NEXT: [[TMP90:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 1
// CHECK17-NEXT: store ptr [[A]], ptr [[TMP90]], align 4
// CHECK17-NEXT: [[TMP91:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS24]], i32 0, i32 1
// CHECK17-NEXT: store ptr [[A]], ptr [[TMP91]], align 4
// CHECK17-NEXT: [[TMP92:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 1
// CHECK17-NEXT: store ptr null, ptr [[TMP92]], align 4
// CHECK17-NEXT: [[TMP93:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 0
// CHECK17-NEXT: [[TMP94:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS24]], i32 0, i32 0
// CHECK17-NEXT: [[TMP95:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 0
// CHECK17-NEXT: store i32 3, ptr [[TMP95]], align 4
// CHECK17-NEXT: [[TMP96:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 1
// CHECK17-NEXT: store i32 2, ptr [[TMP96]], align 4
// CHECK17-NEXT: [[TMP97:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 2
// CHECK17-NEXT: store ptr [[TMP93]], ptr [[TMP97]], align 4
// CHECK17-NEXT: [[TMP98:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 3
// CHECK17-NEXT: store ptr [[TMP94]], ptr [[TMP98]], align 4
// CHECK17-NEXT: [[TMP99:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 4
// CHECK17-NEXT: store ptr @.offload_sizes.17, ptr [[TMP99]], align 4
// CHECK17-NEXT: [[TMP100:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 5
// CHECK17-NEXT: store ptr @.offload_maptypes.18, ptr [[TMP100]], align 4
// CHECK17-NEXT: [[TMP101:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 6
// CHECK17-NEXT: store ptr null, ptr [[TMP101]], align 4
// CHECK17-NEXT: [[TMP102:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 7
// CHECK17-NEXT: store ptr null, ptr [[TMP102]], align 4
// CHECK17-NEXT: [[TMP103:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 8
// CHECK17-NEXT: store i64 10, ptr [[TMP103]], align 8
// CHECK17-NEXT: [[TMP104:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 9
// CHECK17-NEXT: store i64 0, ptr [[TMP104]], align 8
// CHECK17-NEXT: [[TMP105:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 10
// CHECK17-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP105]], align 4
// CHECK17-NEXT: [[TMP106:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 11
// CHECK17-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP106]], align 4
// CHECK17-NEXT: [[TMP107:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 12
// CHECK17-NEXT: store i32 0, ptr [[TMP107]], align 4
// CHECK17-NEXT: [[TMP108:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.region_id, ptr [[KERNEL_ARGS27]])
// CHECK17-NEXT: [[TMP109:%.*]] = icmp ne i32 [[TMP108]], 0
// CHECK17-NEXT: br i1 [[TMP109]], label [[OMP_OFFLOAD_FAILED28:%.*]], label [[OMP_OFFLOAD_CONT29:%.*]]
// CHECK17: omp_offload.failed28:
// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142(i32 [[TMP86]], ptr [[A]]) #[[ATTR3]]
// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT29]]
// CHECK17: omp_offload.cont29:
// CHECK17-NEXT: ret i32 0
// CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122
// CHECK17-SAME: (ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
// CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.omp_outlined, ptr [[TMP0]])
// CHECK17-NEXT: ret void
// CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.omp_outlined
// CHECK17-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK17-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK17-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
// CHECK17-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
// CHECK17-NEXT: store i32 9, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
// CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
// CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK17: cond.true:
// CHECK17-NEXT: br label [[COND_END:%.*]]
// CHECK17: cond.false:
// CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: br label [[COND_END]]
// CHECK17: cond.end:
// CHECK17-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
// CHECK17-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK17: omp.inner.for.cond:
// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP47:![0-9]+]]
// CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP47]]
// CHECK17-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK17-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK17: omp.inner.for.body:
// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP47]]
// CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP47]]
// CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.omp_outlined.omp_outlined, i32 [[TMP8]], i32 [[TMP9]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP47]]
// CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK17: omp.inner.for.inc:
// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP47]]
// CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP47]]
// CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
// CHECK17-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP47]]
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP48:![0-9]+]]
// CHECK17: omp.inner.for.end:
// CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK17: omp.loop.exit:
// CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
// CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
// CHECK17-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK17: .omp.final.then:
// CHECK17-NEXT: store i32 10, ptr [[I]], align 4
// CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK17: .omp.final.done:
// CHECK17-NEXT: ret void
// CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.omp_outlined.omp_outlined
// CHECK17-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK17-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK17-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK17-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// CHECK17-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK17-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
// CHECK17-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK17-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4
// CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
// CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 9
// CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK17: cond.true:
// CHECK17-NEXT: br label [[COND_END:%.*]]
// CHECK17: cond.false:
// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK17-NEXT: br label [[COND_END]]
// CHECK17: cond.end:
// CHECK17-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
// CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK17: omp.inner.for.cond:
// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP50:![0-9]+]]
// CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP50]]
// CHECK17-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
// CHECK17-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK17: omp.inner.for.body:
// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP50]]
// CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
// CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK17-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP50]]
// CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP50]]
// CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 [[TMP11]]
// CHECK17-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP50]]
// CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK17: omp.body.continue:
// CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK17: omp.inner.for.inc:
// CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP50]]
// CHECK17-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK17-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP50]]
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP51:![0-9]+]]
// CHECK17: omp.inner.for.end:
// CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK17: omp.loop.exit:
// CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP4]])
// CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
// CHECK17-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK17: .omp.final.then:
// CHECK17-NEXT: store i32 10, ptr [[I]], align 4
// CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK17: .omp.final.done:
// CHECK17-NEXT: ret void
// CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127
// CHECK17-SAME: (ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
// CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.omp_outlined, ptr [[TMP0]])
// CHECK17-NEXT: ret void
// CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.omp_outlined
// CHECK17-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK17-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK17-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
// CHECK17-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
// CHECK17-NEXT: store i32 9, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
// CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
// CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK17: cond.true:
// CHECK17-NEXT: br label [[COND_END:%.*]]
// CHECK17: cond.false:
// CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: br label [[COND_END]]
// CHECK17: cond.end:
// CHECK17-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
// CHECK17-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK17: omp.inner.for.cond:
// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP53:![0-9]+]]
// CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP53]]
// CHECK17-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK17-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK17: omp.inner.for.body:
// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP53]]
// CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP53]]
// CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.omp_outlined.omp_outlined, i32 [[TMP8]], i32 [[TMP9]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP53]]
// CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK17: omp.inner.for.inc:
// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP53]]
// CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP53]]
// CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
// CHECK17-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP53]]
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP54:![0-9]+]]
// CHECK17: omp.inner.for.end:
// CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK17: omp.loop.exit:
// CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
// CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
// CHECK17-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK17: .omp.final.then:
// CHECK17-NEXT: store i32 10, ptr [[I]], align 4
// CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK17: .omp.final.done:
// CHECK17-NEXT: ret void
// CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.omp_outlined.omp_outlined
// CHECK17-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK17-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK17-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK17-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// CHECK17-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK17-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
// CHECK17-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK17-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4
// CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
// CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 9
// CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK17: cond.true:
// CHECK17-NEXT: br label [[COND_END:%.*]]
// CHECK17: cond.false:
// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK17-NEXT: br label [[COND_END]]
// CHECK17: cond.end:
// CHECK17-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
// CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK17: omp.inner.for.cond:
// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP56:![0-9]+]]
// CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP56]]
// CHECK17-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
// CHECK17-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK17: omp.inner.for.body:
// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP56]]
// CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
// CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK17-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP56]]
// CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP56]]
// CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 [[TMP11]]
// CHECK17-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP56]]
// CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK17: omp.body.continue:
// CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK17: omp.inner.for.inc:
// CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP56]]
// CHECK17-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP12]], 1
12811 // CHECK17-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP56]]
12812 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP57:![0-9]+]]
12813 // CHECK17: omp.inner.for.end:
12814 // CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
12815 // CHECK17: omp.loop.exit:
12816 // CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP4]])
12817 // CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
12818 // CHECK17-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
12819 // CHECK17-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
12820 // CHECK17: .omp.final.then:
12821 // CHECK17-NEXT: store i32 10, ptr [[I]], align 4
12822 // CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]]
12823 // CHECK17: .omp.final.done:
12824 // CHECK17-NEXT: ret void
12827 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132
12828 // CHECK17-SAME: (i32 noundef [[M:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
12829 // CHECK17-NEXT: entry:
12830 // CHECK17-NEXT: [[M_ADDR:%.*]] = alloca i32, align 4
12831 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
12832 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
12833 // CHECK17-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
12834 // CHECK17-NEXT: store i32 [[M]], ptr [[M_ADDR]], align 4
12835 // CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
12836 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
12837 // CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[M_ADDR]], align 4
12838 // CHECK17-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
12839 // CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
12840 // CHECK17-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
12841 // CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
12842 // CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.omp_outlined, ptr [[TMP0]], i32 [[TMP3]])
12843 // CHECK17-NEXT: ret void
12846 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.omp_outlined
12847 // CHECK17-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
12848 // CHECK17-NEXT: entry:
12849 // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
12850 // CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
12851 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
12852 // CHECK17-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
12853 // CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
12854 // CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
12855 // CHECK17-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
12856 // CHECK17-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
12857 // CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
12858 // CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12859 // CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
12860 // CHECK17-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
12861 // CHECK17-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
12862 // CHECK17-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
12863 // CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
12864 // CHECK17-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
12865 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
12866 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
12867 // CHECK17-NEXT: store i32 9, ptr [[DOTOMP_COMB_UB]], align 4
12868 // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
12869 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
12870 // CHECK17-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
12871 // CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
12872 // CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
12873 // CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
12874 // CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
12875 // CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
12876 // CHECK17: cond.true:
12877 // CHECK17-NEXT: br label [[COND_END:%.*]]
12878 // CHECK17: cond.false:
12879 // CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
12880 // CHECK17-NEXT: br label [[COND_END]]
12881 // CHECK17: cond.end:
12882 // CHECK17-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
12883 // CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
12884 // CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
12885 // CHECK17-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
12886 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
12887 // CHECK17: omp.inner.for.cond:
12888 // CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP59:![0-9]+]]
12889 // CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP59]]
12890 // CHECK17-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
12891 // CHECK17-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12892 // CHECK17: omp.inner.for.body:
12893 // CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP59]]
12894 // CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP59]]
12895 // CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group [[ACC_GRP59]]
12896 // CHECK17-NEXT: store i32 [[TMP10]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group [[ACC_GRP59]]
12897 // CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group [[ACC_GRP59]]
12898 // CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.omp_outlined.omp_outlined, i32 [[TMP8]], i32 [[TMP9]], ptr [[TMP0]], i32 [[TMP11]]), !llvm.access.group [[ACC_GRP59]]
12899 // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
12900 // CHECK17: omp.inner.for.inc:
12901 // CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP59]]
12902 // CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP59]]
12903 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
12904 // CHECK17-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP59]]
12905 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP60:![0-9]+]]
12906 // CHECK17: omp.inner.for.end:
12907 // CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
12908 // CHECK17: omp.loop.exit:
12909 // CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
12910 // CHECK17-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
12911 // CHECK17-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
12912 // CHECK17-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
12913 // CHECK17: .omp.final.then:
12914 // CHECK17-NEXT: store i32 10, ptr [[I]], align 4
12915 // CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]]
12916 // CHECK17: .omp.final.done:
12917 // CHECK17-NEXT: ret void
12920 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.omp_outlined.omp_outlined
12921 // CHECK17-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
12922 // CHECK17-NEXT: entry:
12923 // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
12924 // CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
12925 // CHECK17-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
12926 // CHECK17-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
12927 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
12928 // CHECK17-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
12929 // CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
12930 // CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
12931 // CHECK17-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
12932 // CHECK17-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
12933 // CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
12934 // CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12935 // CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
12936 // CHECK17-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
12937 // CHECK17-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
12938 // CHECK17-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
12939 // CHECK17-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
12940 // CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
12941 // CHECK17-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
12942 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
12943 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
12944 // CHECK17-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
12945 // CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
12946 // CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
12947 // CHECK17-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_LB]], align 4
12948 // CHECK17-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4
12949 // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
12950 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
12951 // CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
12952 // CHECK17-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
12953 // CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4
12954 // CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP5]], i32 33, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[TMP3]])
12955 // CHECK17-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
12956 // CHECK17: omp.dispatch.cond:
12957 // CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
12958 // CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
12959 // CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], [[TMP7]]
12960 // CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
12961 // CHECK17: cond.true:
12962 // CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
12963 // CHECK17-NEXT: br label [[COND_END:%.*]]
12964 // CHECK17: cond.false:
12965 // CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
12966 // CHECK17-NEXT: br label [[COND_END]]
12967 // CHECK17: cond.end:
12968 // CHECK17-NEXT: [[COND:%.*]] = phi i32 [ [[TMP8]], [[COND_TRUE]] ], [ [[TMP9]], [[COND_FALSE]] ]
12969 // CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
12970 // CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
12971 // CHECK17-NEXT: store i32 [[TMP10]], ptr [[DOTOMP_IV]], align 4
12972 // CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
12973 // CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
12974 // CHECK17-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]]
12975 // CHECK17-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
12976 // CHECK17: omp.dispatch.body:
12977 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
12978 // CHECK17: omp.inner.for.cond:
12979 // CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP62:![0-9]+]]
12980 // CHECK17-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP62]]
12981 // CHECK17-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
12982 // CHECK17-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12983 // CHECK17: omp.inner.for.body:
12984 // CHECK17-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP62]]
12985 // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1
12986 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
12987 // CHECK17-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP62]]
12988 // CHECK17-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP62]]
12989 // CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 [[TMP16]]
12990 // CHECK17-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP62]]
12991 // CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
12992 // CHECK17: omp.body.continue:
12993 // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
12994 // CHECK17: omp.inner.for.inc:
12995 // CHECK17-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP62]]
12996 // CHECK17-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP17]], 1
12997 // CHECK17-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP62]]
12998 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP63:![0-9]+]]
12999 // CHECK17: omp.inner.for.end:
13000 // CHECK17-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
13001 // CHECK17: omp.dispatch.inc:
13002 // CHECK17-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
13003 // CHECK17-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
13004 // CHECK17-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
13005 // CHECK17-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_LB]], align 4
13006 // CHECK17-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
13007 // CHECK17-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
13008 // CHECK17-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
13009 // CHECK17-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_UB]], align 4
13010 // CHECK17-NEXT: br label [[OMP_DISPATCH_COND]]
13011 // CHECK17: omp.dispatch.end:
13012 // CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP5]])
13013 // CHECK17-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
13014 // CHECK17-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
13015 // CHECK17-NEXT: br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
13016 // CHECK17: .omp.final.then:
13017 // CHECK17-NEXT: store i32 10, ptr [[I]], align 4
13018 // CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]]
13019 // CHECK17: .omp.final.done:
13020 // CHECK17-NEXT: ret void
13023 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137
13024 // CHECK17-SAME: (ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
13025 // CHECK17-NEXT: entry:
13026 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
13027 // CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
13028 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
13029 // CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.omp_outlined, ptr [[TMP0]])
13030 // CHECK17-NEXT: ret void
13033 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.omp_outlined
13034 // CHECK17-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
13035 // CHECK17-NEXT: entry:
13036 // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
13037 // CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
13038 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
13039 // CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
13040 // CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
13041 // CHECK17-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
13042 // CHECK17-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
13043 // CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13044 // CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13045 // CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
13046 // CHECK17-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
13047 // CHECK17-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
13048 // CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
13049 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
13050 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
13051 // CHECK17-NEXT: store i32 9, ptr [[DOTOMP_COMB_UB]], align 4
13052 // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
13053 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
13054 // CHECK17-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
13055 // CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
13056 // CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
13057 // CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
13058 // CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
13059 // CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
13060 // CHECK17: cond.true:
13061 // CHECK17-NEXT: br label [[COND_END:%.*]]
13062 // CHECK17: cond.false:
13063 // CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
13064 // CHECK17-NEXT: br label [[COND_END]]
13065 // CHECK17: cond.end:
13066 // CHECK17-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
13067 // CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
13068 // CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
13069 // CHECK17-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
13070 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
13071 // CHECK17: omp.inner.for.cond:
13072 // CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP65:![0-9]+]]
13073 // CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP65]]
13074 // CHECK17-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
13075 // CHECK17-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13076 // CHECK17: omp.inner.for.body:
13077 // CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP65]]
13078 // CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP65]]
13079 // CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.omp_outlined.omp_outlined, i32 [[TMP8]], i32 [[TMP9]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP65]]
13080 // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
13081 // CHECK17: omp.inner.for.inc:
13082 // CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP65]]
13083 // CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP65]]
13084 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
13085 // CHECK17-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP65]]
13086 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP66:![0-9]+]]
13087 // CHECK17: omp.inner.for.end:
13088 // CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
13089 // CHECK17: omp.loop.exit:
13090 // CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
13091 // CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
13092 // CHECK17-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
13093 // CHECK17-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
13094 // CHECK17: .omp.final.then:
13095 // CHECK17-NEXT: store i32 10, ptr [[I]], align 4
13096 // CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]]
13097 // CHECK17: .omp.final.done:
13098 // CHECK17-NEXT: ret void
13101 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.omp_outlined.omp_outlined
13102 // CHECK17-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
13103 // CHECK17-NEXT: entry:
13104 // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
13105 // CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
13106 // CHECK17-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
13107 // CHECK17-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
13108 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
13109 // CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
13110 // CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
13111 // CHECK17-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
13112 // CHECK17-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
13113 // CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13114 // CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13115 // CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
13116 // CHECK17-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
13117 // CHECK17-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
13118 // CHECK17-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
13119 // CHECK17-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
13120 // CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
13121 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
13122 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
13123 // CHECK17-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
13124 // CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
13125 // CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
13126 // CHECK17-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_LB]], align 4
13127 // CHECK17-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4
13128 // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
13129 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
13130 // CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
13131 // CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
13132 // CHECK17-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
13133 // CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
13134 // CHECK17-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB3]], i32 [[TMP6]], i32 35, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 1)
13135 // CHECK17-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
13136 // CHECK17: omp.dispatch.cond:
13137 // CHECK17-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB3]], i32 [[TMP6]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
13138 // CHECK17-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
13139 // CHECK17-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
13140 // CHECK17: omp.dispatch.body:
13141 // CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
13142 // CHECK17-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4
13143 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
13144 // CHECK17: omp.inner.for.cond:
13145 // CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP68:![0-9]+]]
13146 // CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP68]]
13147 // CHECK17-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
13148 // CHECK17-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13149 // CHECK17: omp.inner.for.body:
13150 // CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP68]]
13151 // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
13152 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
13153 // CHECK17-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP68]]
13154 // CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP68]]
13155 // CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 [[TMP12]]
13156 // CHECK17-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP68]]
13157 // CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
13158 // CHECK17: omp.body.continue:
13159 // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
13160 // CHECK17: omp.inner.for.inc:
13161 // CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP68]]
13162 // CHECK17-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP13]], 1
13163 // CHECK17-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP68]]
13164 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP69:![0-9]+]]
13165 // CHECK17: omp.inner.for.end:
13166 // CHECK17-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
13167 // CHECK17: omp.dispatch.inc:
13168 // CHECK17-NEXT: br label [[OMP_DISPATCH_COND]]
13169 // CHECK17: omp.dispatch.end:
13170 // CHECK17-NEXT: call void @__kmpc_dispatch_deinit(ptr @[[GLOB3]], i32 [[TMP6]])
13171 // CHECK17-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
13172 // CHECK17-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
13173 // CHECK17-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
13174 // CHECK17: .omp.final.then:
13175 // CHECK17-NEXT: store i32 10, ptr [[I]], align 4
13176 // CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]]
13177 // CHECK17: .omp.final.done:
13178 // CHECK17-NEXT: ret void
13181 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142
13182 // CHECK17-SAME: (i32 noundef [[M:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
13183 // CHECK17-NEXT: entry:
13184 // CHECK17-NEXT: [[M_ADDR:%.*]] = alloca i32, align 4
13185 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
13186 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
13187 // CHECK17-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
13188 // CHECK17-NEXT: store i32 [[M]], ptr [[M_ADDR]], align 4
13189 // CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
13190 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
13191 // CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[M_ADDR]], align 4
13192 // CHECK17-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
13193 // CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
13194 // CHECK17-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
13195 // CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
13196 // CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.omp_outlined, ptr [[TMP0]], i32 [[TMP3]])
13197 // CHECK17-NEXT: ret void
13200 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.omp_outlined
13201 // CHECK17-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
13202 // CHECK17-NEXT: entry:
13203 // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
13204 // CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
13205 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
13206 // CHECK17-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
13207 // CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
13208 // CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
13209 // CHECK17-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
13210 // CHECK17-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
13211 // CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13212 // CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13213 // CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
13214 // CHECK17-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
13215 // CHECK17-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
13216 // CHECK17-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
13217 // CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
13218 // CHECK17-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
13219 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
13220 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
13221 // CHECK17-NEXT: store i32 9, ptr [[DOTOMP_COMB_UB]], align 4
13222 // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
13223 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
13224 // CHECK17-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
13225 // CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
13226 // CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
13227 // CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
13228 // CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
13229 // CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
13230 // CHECK17: cond.true:
13231 // CHECK17-NEXT: br label [[COND_END:%.*]]
13232 // CHECK17: cond.false:
13233 // CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
13234 // CHECK17-NEXT: br label [[COND_END]]
13235 // CHECK17: cond.end:
13236 // CHECK17-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
13237 // CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
13238 // CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
13239 // CHECK17-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
13240 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
13241 // CHECK17: omp.inner.for.cond:
13242 // CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP71:![0-9]+]]
13243 // CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP71]]
13244 // CHECK17-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
13245 // CHECK17-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13246 // CHECK17: omp.inner.for.body:
13247 // CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP71]]
13248 // CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP71]]
13249 // CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group [[ACC_GRP71]]
13250 // CHECK17-NEXT: store i32 [[TMP10]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group [[ACC_GRP71]]
13251 // CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group [[ACC_GRP71]]
13252 // CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.omp_outlined.omp_outlined, i32 [[TMP8]], i32 [[TMP9]], ptr [[TMP0]], i32 [[TMP11]]), !llvm.access.group [[ACC_GRP71]]
13253 // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
13254 // CHECK17: omp.inner.for.inc:
13255 // CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP71]]
13256 // CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP71]]
13257 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
13258 // CHECK17-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP71]]
13259 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP72:![0-9]+]]
13260 // CHECK17: omp.inner.for.end:
13261 // CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
13262 // CHECK17: omp.loop.exit:
13263 // CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
13264 // CHECK17-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
13265 // CHECK17-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
13266 // CHECK17-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
13267 // CHECK17: .omp.final.then:
13268 // CHECK17-NEXT: store i32 10, ptr [[I]], align 4
13269 // CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]]
13270 // CHECK17: .omp.final.done:
13271 // CHECK17-NEXT: ret void
13274 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.omp_outlined.omp_outlined
13275 // CHECK17-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
13276 // CHECK17-NEXT: entry:
13277 // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
13278 // CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
13279 // CHECK17-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
13280 // CHECK17-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
13281 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
13282 // CHECK17-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
13283 // CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
13284 // CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
13285 // CHECK17-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
13286 // CHECK17-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
13287 // CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13288 // CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13289 // CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
13290 // CHECK17-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
13291 // CHECK17-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
13292 // CHECK17-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
13293 // CHECK17-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
13294 // CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
13295 // CHECK17-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
13296 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
13297 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
13298 // CHECK17-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
13299 // CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
13300 // CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
13301 // CHECK17-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_LB]], align 4
13302 // CHECK17-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4
13303 // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
13304 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
13305 // CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
13306 // CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
13307 // CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
13308 // CHECK17-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
13309 // CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 4
13310 // CHECK17-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB3]], i32 [[TMP7]], i32 35, i32 [[TMP4]], i32 [[TMP5]], i32 1, i32 [[TMP3]])
13311 // CHECK17-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
13312 // CHECK17: omp.dispatch.cond:
13313 // CHECK17-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB3]], i32 [[TMP7]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
13314 // CHECK17-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP8]], 0
13315 // CHECK17-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
13316 // CHECK17: omp.dispatch.body:
13317 // CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
13318 // CHECK17-NEXT: store i32 [[TMP9]], ptr [[DOTOMP_IV]], align 4
13319 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
13320 // CHECK17: omp.inner.for.cond:
13321 // CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP74:![0-9]+]]
13322 // CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP74]]
13323 // CHECK17-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
13324 // CHECK17-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13325 // CHECK17: omp.inner.for.body:
13326 // CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP74]]
13327 // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
13328 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
13329 // CHECK17-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP74]]
13330 // CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP74]]
13331 // CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 [[TMP13]]
13332 // CHECK17-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP74]]
13333 // CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
13334 // CHECK17: omp.body.continue:
13335 // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
13336 // CHECK17: omp.inner.for.inc:
13337 // CHECK17-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP74]]
13338 // CHECK17-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP14]], 1
13339 // CHECK17-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP74]]
13340 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP75:![0-9]+]]
13341 // CHECK17: omp.inner.for.end:
13342 // CHECK17-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
13343 // CHECK17: omp.dispatch.inc:
13344 // CHECK17-NEXT: br label [[OMP_DISPATCH_COND]]
13345 // CHECK17: omp.dispatch.end:
13346 // CHECK17-NEXT: call void @__kmpc_dispatch_deinit(ptr @[[GLOB3]], i32 [[TMP7]])
13347 // CHECK17-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
13348 // CHECK17-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0
13349 // CHECK17-NEXT: br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
13350 // CHECK17: .omp.final.then:
13351 // CHECK17-NEXT: store i32 10, ptr [[I]], align 4
13352 // CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]]
13353 // CHECK17: .omp.final.done:
13354 // CHECK17-NEXT: ret void
13357 // CHECK19-LABEL: define {{[^@]+}}@main
13358 // CHECK19-SAME: (i32 noundef [[ARGC:%.*]], ptr noundef [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] {
13359 // CHECK19-NEXT: entry:
13360 // CHECK19-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
13361 // CHECK19-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
13362 // CHECK19-NEXT: [[ARGV_ADDR:%.*]] = alloca ptr, align 4
13363 // CHECK19-NEXT: [[N:%.*]] = alloca i32, align 4
13364 // CHECK19-NEXT: [[SAVED_STACK:%.*]] = alloca ptr, align 4
13365 // CHECK19-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4
13366 // CHECK19-NEXT: [[M:%.*]] = alloca i32, align 4
13367 // CHECK19-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
13368 // CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x ptr], align 4
13369 // CHECK19-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x ptr], align 4
13370 // CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x ptr], align 4
13371 // CHECK19-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4
13372 // CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
13373 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
13374 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
13375 // CHECK19-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
13376 // CHECK19-NEXT: [[N_CASTED3:%.*]] = alloca i32, align 4
13377 // CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [3 x ptr], align 4
13378 // CHECK19-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [3 x ptr], align 4
13379 // CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [3 x ptr], align 4
13380 // CHECK19-NEXT: [[DOTOFFLOAD_SIZES7:%.*]] = alloca [3 x i64], align 4
13381 // CHECK19-NEXT: [[_TMP8:%.*]] = alloca i32, align 4
13382 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4
13383 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4
13384 // CHECK19-NEXT: [[KERNEL_ARGS15:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
13385 // CHECK19-NEXT: [[M_CASTED:%.*]] = alloca i32, align 4
13386 // CHECK19-NEXT: [[N_CASTED18:%.*]] = alloca i32, align 4
13387 // CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS19:%.*]] = alloca [4 x ptr], align 4
13388 // CHECK19-NEXT: [[DOTOFFLOAD_PTRS20:%.*]] = alloca [4 x ptr], align 4
13389 // CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS21:%.*]] = alloca [4 x ptr], align 4
13390 // CHECK19-NEXT: [[DOTOFFLOAD_SIZES22:%.*]] = alloca [4 x i64], align 4
13391 // CHECK19-NEXT: [[_TMP23:%.*]] = alloca i32, align 4
13392 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4
13393 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4
13394 // CHECK19-NEXT: [[KERNEL_ARGS30:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
13395 // CHECK19-NEXT: [[N_CASTED33:%.*]] = alloca i32, align 4
13396 // CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS34:%.*]] = alloca [3 x ptr], align 4
13397 // CHECK19-NEXT: [[DOTOFFLOAD_PTRS35:%.*]] = alloca [3 x ptr], align 4
13398 // CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS36:%.*]] = alloca [3 x ptr], align 4
13399 // CHECK19-NEXT: [[DOTOFFLOAD_SIZES37:%.*]] = alloca [3 x i64], align 4
13400 // CHECK19-NEXT: [[_TMP38:%.*]] = alloca i32, align 4
13401 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4
13402 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_40:%.*]] = alloca i32, align 4
13403 // CHECK19-NEXT: [[KERNEL_ARGS45:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
13404 // CHECK19-NEXT: [[M_CASTED48:%.*]] = alloca i32, align 4
13405 // CHECK19-NEXT: [[N_CASTED49:%.*]] = alloca i32, align 4
13406 // CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS50:%.*]] = alloca [4 x ptr], align 4
13407 // CHECK19-NEXT: [[DOTOFFLOAD_PTRS51:%.*]] = alloca [4 x ptr], align 4
13408 // CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS52:%.*]] = alloca [4 x ptr], align 4
13409 // CHECK19-NEXT: [[DOTOFFLOAD_SIZES53:%.*]] = alloca [4 x i64], align 4
13410 // CHECK19-NEXT: [[_TMP54:%.*]] = alloca i32, align 4
13411 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_55:%.*]] = alloca i32, align 4
13412 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_56:%.*]] = alloca i32, align 4
13413 // CHECK19-NEXT: [[KERNEL_ARGS61:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
13414 // CHECK19-NEXT: store i32 0, ptr [[RETVAL]], align 4
13415 // CHECK19-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4
13416 // CHECK19-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 4
13417 // CHECK19-NEXT: store i32 100, ptr [[N]], align 4
13418 // CHECK19-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4
13419 // CHECK19-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0()
13420 // CHECK19-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4
13421 // CHECK19-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4
13422 // CHECK19-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4
13423 // CHECK19-NEXT: store i32 10, ptr [[M]], align 4
13424 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[N]], align 4
13425 // CHECK19-NEXT: store i32 [[TMP2]], ptr [[N_CASTED]], align 4
13426 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_CASTED]], align 4
13427 // CHECK19-NEXT: [[TMP4:%.*]] = mul nuw i32 [[TMP0]], 4
13428 // CHECK19-NEXT: [[TMP5:%.*]] = sext i32 [[TMP4]] to i64
13429 // CHECK19-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[DOTOFFLOAD_SIZES]], ptr align 4 @.offload_sizes, i32 24, i1 false)
13430 // CHECK19-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
13431 // CHECK19-NEXT: store i32 [[TMP3]], ptr [[TMP6]], align 4
13432 // CHECK19-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
13433 // CHECK19-NEXT: store i32 [[TMP3]], ptr [[TMP7]], align 4
13434 // CHECK19-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
13435 // CHECK19-NEXT: store ptr null, ptr [[TMP8]], align 4
13436 // CHECK19-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
13437 // CHECK19-NEXT: store i32 [[TMP0]], ptr [[TMP9]], align 4
13438 // CHECK19-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1
13439 // CHECK19-NEXT: store i32 [[TMP0]], ptr [[TMP10]], align 4
13440 // CHECK19-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
13441 // CHECK19-NEXT: store ptr null, ptr [[TMP11]], align 4
13442 // CHECK19-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
13443 // CHECK19-NEXT: store ptr [[VLA]], ptr [[TMP12]], align 4
13444 // CHECK19-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2
13445 // CHECK19-NEXT: store ptr [[VLA]], ptr [[TMP13]], align 4
13446 // CHECK19-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 2
13447 // CHECK19-NEXT: store i64 [[TMP5]], ptr [[TMP14]], align 4
13448 // CHECK19-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
13449 // CHECK19-NEXT: store ptr null, ptr [[TMP15]], align 4
13450 // CHECK19-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
13451 // CHECK19-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
13452 // CHECK19-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 0
13453 // CHECK19-NEXT: [[TMP19:%.*]] = load i32, ptr [[N]], align 4
13454 // CHECK19-NEXT: store i32 [[TMP19]], ptr [[DOTCAPTURE_EXPR_]], align 4
13455 // CHECK19-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
13456 // CHECK19-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP20]], 0
13457 // CHECK19-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
13458 // CHECK19-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
13459 // CHECK19-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
13460 // CHECK19-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
13461 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP21]], 1
13462 // CHECK19-NEXT: [[TMP22:%.*]] = zext i32 [[ADD]] to i64
13463 // CHECK19-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
13464 // CHECK19-NEXT: store i32 3, ptr [[TMP23]], align 4
13465 // CHECK19-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
13466 // CHECK19-NEXT: store i32 3, ptr [[TMP24]], align 4
13467 // CHECK19-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
13468 // CHECK19-NEXT: store ptr [[TMP16]], ptr [[TMP25]], align 4
13469 // CHECK19-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
13470 // CHECK19-NEXT: store ptr [[TMP17]], ptr [[TMP26]], align 4
13471 // CHECK19-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
13472 // CHECK19-NEXT: store ptr [[TMP18]], ptr [[TMP27]], align 4
13473 // CHECK19-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
13474 // CHECK19-NEXT: store ptr @.offload_maptypes, ptr [[TMP28]], align 4
13475 // CHECK19-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
13476 // CHECK19-NEXT: store ptr null, ptr [[TMP29]], align 4
13477 // CHECK19-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
13478 // CHECK19-NEXT: store ptr null, ptr [[TMP30]], align 4
13479 // CHECK19-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
13480 // CHECK19-NEXT: store i64 [[TMP22]], ptr [[TMP31]], align 8
13481 // CHECK19-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
13482 // CHECK19-NEXT: store i64 0, ptr [[TMP32]], align 8
13483 // CHECK19-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
13484 // CHECK19-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP33]], align 4
13485 // CHECK19-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
13486 // CHECK19-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP34]], align 4
13487 // CHECK19-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
13488 // CHECK19-NEXT: store i32 0, ptr [[TMP35]], align 4
13489 // CHECK19-NEXT: [[TMP36:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.region_id, ptr [[KERNEL_ARGS]])
13490 // CHECK19-NEXT: [[TMP37:%.*]] = icmp ne i32 [[TMP36]], 0
13491 // CHECK19-NEXT: br i1 [[TMP37]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
13492 // CHECK19: omp_offload.failed:
13493 // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154(i32 [[TMP3]], i32 [[TMP0]], ptr [[VLA]]) #[[ATTR3:[0-9]+]]
13494 // CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT]]
13495 // CHECK19: omp_offload.cont:
13496 // CHECK19-NEXT: [[TMP38:%.*]] = load i32, ptr [[N]], align 4
13497 // CHECK19-NEXT: store i32 [[TMP38]], ptr [[N_CASTED3]], align 4
13498 // CHECK19-NEXT: [[TMP39:%.*]] = load i32, ptr [[N_CASTED3]], align 4
13499 // CHECK19-NEXT: [[TMP40:%.*]] = mul nuw i32 [[TMP0]], 4
13500 // CHECK19-NEXT: [[TMP41:%.*]] = sext i32 [[TMP40]] to i64
13501 // CHECK19-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[DOTOFFLOAD_SIZES7]], ptr align 4 @.offload_sizes.1, i32 24, i1 false)
13502 // CHECK19-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
13503 // CHECK19-NEXT: store i32 [[TMP39]], ptr [[TMP42]], align 4
13504 // CHECK19-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
13505 // CHECK19-NEXT: store i32 [[TMP39]], ptr [[TMP43]], align 4
13506 // CHECK19-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0
13507 // CHECK19-NEXT: store ptr null, ptr [[TMP44]], align 4
13508 // CHECK19-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1
13509 // CHECK19-NEXT: store i32 [[TMP0]], ptr [[TMP45]], align 4
13510 // CHECK19-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS5]], i32 0, i32 1
13511 // CHECK19-NEXT: store i32 [[TMP0]], ptr [[TMP46]], align 4
13512 // CHECK19-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1
13513 // CHECK19-NEXT: store ptr null, ptr [[TMP47]], align 4
13514 // CHECK19-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2
13515 // CHECK19-NEXT: store ptr [[VLA]], ptr [[TMP48]], align 4
13516 // CHECK19-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS5]], i32 0, i32 2
13517 // CHECK19-NEXT: store ptr [[VLA]], ptr [[TMP49]], align 4
13518 // CHECK19-NEXT: [[TMP50:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES7]], i32 0, i32 2
13519 // CHECK19-NEXT: store i64 [[TMP41]], ptr [[TMP50]], align 4
13520 // CHECK19-NEXT: [[TMP51:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2
13521 // CHECK19-NEXT: store ptr null, ptr [[TMP51]], align 4
13522 // CHECK19-NEXT: [[TMP52:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
13523 // CHECK19-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
13524 // CHECK19-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES7]], i32 0, i32 0
13525 // CHECK19-NEXT: [[TMP55:%.*]] = load i32, ptr [[N]], align 4
13526 // CHECK19-NEXT: store i32 [[TMP55]], ptr [[DOTCAPTURE_EXPR_9]], align 4
13527 // CHECK19-NEXT: [[TMP56:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_9]], align 4
13528 // CHECK19-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP56]], 0
13529 // CHECK19-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1
13530 // CHECK19-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1
13531 // CHECK19-NEXT: store i32 [[SUB13]], ptr [[DOTCAPTURE_EXPR_10]], align 4
13532 // CHECK19-NEXT: [[TMP57:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_10]], align 4
13533 // CHECK19-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP57]], 1
13534 // CHECK19-NEXT: [[TMP58:%.*]] = zext i32 [[ADD14]] to i64
13535 // CHECK19-NEXT: [[TMP59:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 0
13536 // CHECK19-NEXT: store i32 3, ptr [[TMP59]], align 4
13537 // CHECK19-NEXT: [[TMP60:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 1
13538 // CHECK19-NEXT: store i32 3, ptr [[TMP60]], align 4
13539 // CHECK19-NEXT: [[TMP61:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 2
13540 // CHECK19-NEXT: store ptr [[TMP52]], ptr [[TMP61]], align 4
13541 // CHECK19-NEXT: [[TMP62:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 3
13542 // CHECK19-NEXT: store ptr [[TMP53]], ptr [[TMP62]], align 4
13543 // CHECK19-NEXT: [[TMP63:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 4
13544 // CHECK19-NEXT: store ptr [[TMP54]], ptr [[TMP63]], align 4
13545 // CHECK19-NEXT: [[TMP64:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 5
13546 // CHECK19-NEXT: store ptr @.offload_maptypes.2, ptr [[TMP64]], align 4
13547 // CHECK19-NEXT: [[TMP65:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 6
13548 // CHECK19-NEXT: store ptr null, ptr [[TMP65]], align 4
13549 // CHECK19-NEXT: [[TMP66:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 7
13550 // CHECK19-NEXT: store ptr null, ptr [[TMP66]], align 4
13551 // CHECK19-NEXT: [[TMP67:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 8
13552 // CHECK19-NEXT: store i64 [[TMP58]], ptr [[TMP67]], align 8
13553 // CHECK19-NEXT: [[TMP68:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 9
13554 // CHECK19-NEXT: store i64 0, ptr [[TMP68]], align 8
13555 // CHECK19-NEXT: [[TMP69:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 10
13556 // CHECK19-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP69]], align 4
13557 // CHECK19-NEXT: [[TMP70:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 11
13558 // CHECK19-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP70]], align 4
13559 // CHECK19-NEXT: [[TMP71:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 12
13560 // CHECK19-NEXT: store i32 0, ptr [[TMP71]], align 4
13561 // CHECK19-NEXT: [[TMP72:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.region_id, ptr [[KERNEL_ARGS15]])
13562 // CHECK19-NEXT: [[TMP73:%.*]] = icmp ne i32 [[TMP72]], 0
13563 // CHECK19-NEXT: br i1 [[TMP73]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]]
13564 // CHECK19: omp_offload.failed16:
13565 // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159(i32 [[TMP39]], i32 [[TMP0]], ptr [[VLA]]) #[[ATTR3]]
13566 // CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT17]]
13567 // CHECK19: omp_offload.cont17:
13568 // CHECK19-NEXT: [[TMP74:%.*]] = load i32, ptr [[M]], align 4
13569 // CHECK19-NEXT: store i32 [[TMP74]], ptr [[M_CASTED]], align 4
13570 // CHECK19-NEXT: [[TMP75:%.*]] = load i32, ptr [[M_CASTED]], align 4
13571 // CHECK19-NEXT: [[TMP76:%.*]] = load i32, ptr [[N]], align 4
13572 // CHECK19-NEXT: store i32 [[TMP76]], ptr [[N_CASTED18]], align 4
13573 // CHECK19-NEXT: [[TMP77:%.*]] = load i32, ptr [[N_CASTED18]], align 4
13574 // CHECK19-NEXT: [[TMP78:%.*]] = mul nuw i32 [[TMP0]], 4
13575 // CHECK19-NEXT: [[TMP79:%.*]] = sext i32 [[TMP78]] to i64
13576 // CHECK19-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[DOTOFFLOAD_SIZES22]], ptr align 4 @.offload_sizes.3, i32 32, i1 false)
13577 // CHECK19-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0
13578 // CHECK19-NEXT: store i32 [[TMP75]], ptr [[TMP80]], align 4
13579 // CHECK19-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 0
13580 // CHECK19-NEXT: store i32 [[TMP75]], ptr [[TMP81]], align 4
13581 // CHECK19-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 0
13582 // CHECK19-NEXT: store ptr null, ptr [[TMP82]], align 4
13583 // CHECK19-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 1
13584 // CHECK19-NEXT: store i32 [[TMP77]], ptr [[TMP83]], align 4
13585 // CHECK19-NEXT: [[TMP84:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 1
13586 // CHECK19-NEXT: store i32 [[TMP77]], ptr [[TMP84]], align 4
13587 // CHECK19-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 1
13588 // CHECK19-NEXT: store ptr null, ptr [[TMP85]], align 4
13589 // CHECK19-NEXT: [[TMP86:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 2
13590 // CHECK19-NEXT: store i32 [[TMP0]], ptr [[TMP86]], align 4
13591 // CHECK19-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 2
13592 // CHECK19-NEXT: store i32 [[TMP0]], ptr [[TMP87]], align 4
13593 // CHECK19-NEXT: [[TMP88:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 2
13594 // CHECK19-NEXT: store ptr null, ptr [[TMP88]], align 4
13595 // CHECK19-NEXT: [[TMP89:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 3
13596 // CHECK19-NEXT: store ptr [[VLA]], ptr [[TMP89]], align 4
13597 // CHECK19-NEXT: [[TMP90:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 3
13598 // CHECK19-NEXT: store ptr [[VLA]], ptr [[TMP90]], align 4
13599 // CHECK19-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i64], ptr [[DOTOFFLOAD_SIZES22]], i32 0, i32 3
13600 // CHECK19-NEXT: store i64 [[TMP79]], ptr [[TMP91]], align 4
13601 // CHECK19-NEXT: [[TMP92:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 3
13602 // CHECK19-NEXT: store ptr null, ptr [[TMP92]], align 4
13603 // CHECK19-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0
13604 // CHECK19-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 0
13605 // CHECK19-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i64], ptr [[DOTOFFLOAD_SIZES22]], i32 0, i32 0
13606 // CHECK19-NEXT: [[TMP96:%.*]] = load i32, ptr [[N]], align 4
13607 // CHECK19-NEXT: store i32 [[TMP96]], ptr [[DOTCAPTURE_EXPR_24]], align 4
13608 // CHECK19-NEXT: [[TMP97:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_24]], align 4
13609 // CHECK19-NEXT: [[SUB26:%.*]] = sub nsw i32 [[TMP97]], 0
13610 // CHECK19-NEXT: [[DIV27:%.*]] = sdiv i32 [[SUB26]], 1
13611 // CHECK19-NEXT: [[SUB28:%.*]] = sub nsw i32 [[DIV27]], 1
13612 // CHECK19-NEXT: store i32 [[SUB28]], ptr [[DOTCAPTURE_EXPR_25]], align 4
13613 // CHECK19-NEXT: [[TMP98:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_25]], align 4
13614 // CHECK19-NEXT: [[ADD29:%.*]] = add nsw i32 [[TMP98]], 1
13615 // CHECK19-NEXT: [[TMP99:%.*]] = zext i32 [[ADD29]] to i64
13616 // CHECK19-NEXT: [[TMP100:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 0
13617 // CHECK19-NEXT: store i32 3, ptr [[TMP100]], align 4
13618 // CHECK19-NEXT: [[TMP101:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 1
13619 // CHECK19-NEXT: store i32 4, ptr [[TMP101]], align 4
13620 // CHECK19-NEXT: [[TMP102:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 2
13621 // CHECK19-NEXT: store ptr [[TMP93]], ptr [[TMP102]], align 4
13622 // CHECK19-NEXT: [[TMP103:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 3
13623 // CHECK19-NEXT: store ptr [[TMP94]], ptr [[TMP103]], align 4
13624 // CHECK19-NEXT: [[TMP104:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 4
13625 // CHECK19-NEXT: store ptr [[TMP95]], ptr [[TMP104]], align 4
13626 // CHECK19-NEXT: [[TMP105:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 5
13627 // CHECK19-NEXT: store ptr @.offload_maptypes.4, ptr [[TMP105]], align 4
13628 // CHECK19-NEXT: [[TMP106:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 6
13629 // CHECK19-NEXT: store ptr null, ptr [[TMP106]], align 4
13630 // CHECK19-NEXT: [[TMP107:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 7
13631 // CHECK19-NEXT: store ptr null, ptr [[TMP107]], align 4
13632 // CHECK19-NEXT: [[TMP108:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 8
13633 // CHECK19-NEXT: store i64 [[TMP99]], ptr [[TMP108]], align 8
13634 // CHECK19-NEXT: [[TMP109:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 9
13635 // CHECK19-NEXT: store i64 0, ptr [[TMP109]], align 8
13636 // CHECK19-NEXT: [[TMP110:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 10
13637 // CHECK19-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP110]], align 4
13638 // CHECK19-NEXT: [[TMP111:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 11
13639 // CHECK19-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP111]], align 4
13640 // CHECK19-NEXT: [[TMP112:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 12
13641 // CHECK19-NEXT: store i32 0, ptr [[TMP112]], align 4
13642 // CHECK19-NEXT: [[TMP113:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.region_id, ptr [[KERNEL_ARGS30]])
13643 // CHECK19-NEXT: [[TMP114:%.*]] = icmp ne i32 [[TMP113]], 0
13644 // CHECK19-NEXT: br i1 [[TMP114]], label [[OMP_OFFLOAD_FAILED31:%.*]], label [[OMP_OFFLOAD_CONT32:%.*]]
13645 // CHECK19: omp_offload.failed31:
13646 // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164(i32 [[TMP75]], i32 [[TMP77]], i32 [[TMP0]], ptr [[VLA]]) #[[ATTR3]]
13647 // CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT32]]
13648 // CHECK19: omp_offload.cont32:
13649 // CHECK19-NEXT: [[TMP115:%.*]] = load i32, ptr [[N]], align 4
13650 // CHECK19-NEXT: store i32 [[TMP115]], ptr [[N_CASTED33]], align 4
13651 // CHECK19-NEXT: [[TMP116:%.*]] = load i32, ptr [[N_CASTED33]], align 4
13652 // CHECK19-NEXT: [[TMP117:%.*]] = mul nuw i32 [[TMP0]], 4
13653 // CHECK19-NEXT: [[TMP118:%.*]] = sext i32 [[TMP117]] to i64
13654 // CHECK19-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[DOTOFFLOAD_SIZES37]], ptr align 4 @.offload_sizes.5, i32 24, i1 false)
13655 // CHECK19-NEXT: [[TMP119:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 0
13656 // CHECK19-NEXT: store i32 [[TMP116]], ptr [[TMP119]], align 4
13657 // CHECK19-NEXT: [[TMP120:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS35]], i32 0, i32 0
13658 // CHECK19-NEXT: store i32 [[TMP116]], ptr [[TMP120]], align 4
13659 // CHECK19-NEXT: [[TMP121:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS36]], i32 0, i32 0
13660 // CHECK19-NEXT: store ptr null, ptr [[TMP121]], align 4
13661 // CHECK19-NEXT: [[TMP122:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 1
13662 // CHECK19-NEXT: store i32 [[TMP0]], ptr [[TMP122]], align 4
13663 // CHECK19-NEXT: [[TMP123:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS35]], i32 0, i32 1
13664 // CHECK19-NEXT: store i32 [[TMP0]], ptr [[TMP123]], align 4
13665 // CHECK19-NEXT: [[TMP124:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS36]], i32 0, i32 1
13666 // CHECK19-NEXT: store ptr null, ptr [[TMP124]], align 4
13667 // CHECK19-NEXT: [[TMP125:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 2
13668 // CHECK19-NEXT: store ptr [[VLA]], ptr [[TMP125]], align 4
13669 // CHECK19-NEXT: [[TMP126:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS35]], i32 0, i32 2
13670 // CHECK19-NEXT: store ptr [[VLA]], ptr [[TMP126]], align 4
13671 // CHECK19-NEXT: [[TMP127:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES37]], i32 0, i32 2
13672 // CHECK19-NEXT: store i64 [[TMP118]], ptr [[TMP127]], align 4
13673 // CHECK19-NEXT: [[TMP128:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS36]], i32 0, i32 2
13674 // CHECK19-NEXT: store ptr null, ptr [[TMP128]], align 4
13675 // CHECK19-NEXT: [[TMP129:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 0
13676 // CHECK19-NEXT: [[TMP130:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS35]], i32 0, i32 0
13677 // CHECK19-NEXT: [[TMP131:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES37]], i32 0, i32 0
13678 // CHECK19-NEXT: [[TMP132:%.*]] = load i32, ptr [[N]], align 4
13679 // CHECK19-NEXT: store i32 [[TMP132]], ptr [[DOTCAPTURE_EXPR_39]], align 4
13680 // CHECK19-NEXT: [[TMP133:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_39]], align 4
13681 // CHECK19-NEXT: [[SUB41:%.*]] = sub nsw i32 [[TMP133]], 0
13682 // CHECK19-NEXT: [[DIV42:%.*]] = sdiv i32 [[SUB41]], 1
13683 // CHECK19-NEXT: [[SUB43:%.*]] = sub nsw i32 [[DIV42]], 1
13684 // CHECK19-NEXT: store i32 [[SUB43]], ptr [[DOTCAPTURE_EXPR_40]], align 4
13685 // CHECK19-NEXT: [[TMP134:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_40]], align 4
13686 // CHECK19-NEXT: [[ADD44:%.*]] = add nsw i32 [[TMP134]], 1
13687 // CHECK19-NEXT: [[TMP135:%.*]] = zext i32 [[ADD44]] to i64
13688 // CHECK19-NEXT: [[TMP136:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 0
13689 // CHECK19-NEXT: store i32 3, ptr [[TMP136]], align 4
13690 // CHECK19-NEXT: [[TMP137:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 1
13691 // CHECK19-NEXT: store i32 3, ptr [[TMP137]], align 4
13692 // CHECK19-NEXT: [[TMP138:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 2
13693 // CHECK19-NEXT: store ptr [[TMP129]], ptr [[TMP138]], align 4
13694 // CHECK19-NEXT: [[TMP139:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 3
13695 // CHECK19-NEXT: store ptr [[TMP130]], ptr [[TMP139]], align 4
13696 // CHECK19-NEXT: [[TMP140:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 4
13697 // CHECK19-NEXT: store ptr [[TMP131]], ptr [[TMP140]], align 4
13698 // CHECK19-NEXT: [[TMP141:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 5
13699 // CHECK19-NEXT: store ptr @.offload_maptypes.6, ptr [[TMP141]], align 4
13700 // CHECK19-NEXT: [[TMP142:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 6
13701 // CHECK19-NEXT: store ptr null, ptr [[TMP142]], align 4
13702 // CHECK19-NEXT: [[TMP143:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 7
13703 // CHECK19-NEXT: store ptr null, ptr [[TMP143]], align 4
13704 // CHECK19-NEXT: [[TMP144:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 8
13705 // CHECK19-NEXT: store i64 [[TMP135]], ptr [[TMP144]], align 8
13706 // CHECK19-NEXT: [[TMP145:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 9
13707 // CHECK19-NEXT: store i64 0, ptr [[TMP145]], align 8
13708 // CHECK19-NEXT: [[TMP146:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 10
13709 // CHECK19-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP146]], align 4
13710 // CHECK19-NEXT: [[TMP147:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 11
13711 // CHECK19-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP147]], align 4
13712 // CHECK19-NEXT: [[TMP148:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 12
13713 // CHECK19-NEXT: store i32 0, ptr [[TMP148]], align 4
13714 // CHECK19-NEXT: [[TMP149:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.region_id, ptr [[KERNEL_ARGS45]])
13715 // CHECK19-NEXT: [[TMP150:%.*]] = icmp ne i32 [[TMP149]], 0
13716 // CHECK19-NEXT: br i1 [[TMP150]], label [[OMP_OFFLOAD_FAILED46:%.*]], label [[OMP_OFFLOAD_CONT47:%.*]]
13717 // CHECK19: omp_offload.failed46:
13718 // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169(i32 [[TMP116]], i32 [[TMP0]], ptr [[VLA]]) #[[ATTR3]]
13719 // CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT47]]
13720 // CHECK19: omp_offload.cont47:
13721 // CHECK19-NEXT: [[TMP151:%.*]] = load i32, ptr [[M]], align 4
13722 // CHECK19-NEXT: store i32 [[TMP151]], ptr [[M_CASTED48]], align 4
13723 // CHECK19-NEXT: [[TMP152:%.*]] = load i32, ptr [[M_CASTED48]], align 4
13724 // CHECK19-NEXT: [[TMP153:%.*]] = load i32, ptr [[N]], align 4
13725 // CHECK19-NEXT: store i32 [[TMP153]], ptr [[N_CASTED49]], align 4
13726 // CHECK19-NEXT: [[TMP154:%.*]] = load i32, ptr [[N_CASTED49]], align 4
13727 // CHECK19-NEXT: [[TMP155:%.*]] = mul nuw i32 [[TMP0]], 4
13728 // CHECK19-NEXT: [[TMP156:%.*]] = sext i32 [[TMP155]] to i64
13729 // CHECK19-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[DOTOFFLOAD_SIZES53]], ptr align 4 @.offload_sizes.7, i32 32, i1 false)
13730 // CHECK19-NEXT: [[TMP157:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0
13731 // CHECK19-NEXT: store i32 [[TMP152]], ptr [[TMP157]], align 4
13732 // CHECK19-NEXT: [[TMP158:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS51]], i32 0, i32 0
13733 // CHECK19-NEXT: store i32 [[TMP152]], ptr [[TMP158]], align 4
13734 // CHECK19-NEXT: [[TMP159:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS52]], i32 0, i32 0
13735 // CHECK19-NEXT: store ptr null, ptr [[TMP159]], align 4
13736 // CHECK19-NEXT: [[TMP160:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 1
13737 // CHECK19-NEXT: store i32 [[TMP154]], ptr [[TMP160]], align 4
13738 // CHECK19-NEXT: [[TMP161:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS51]], i32 0, i32 1
13739 // CHECK19-NEXT: store i32 [[TMP154]], ptr [[TMP161]], align 4
13740 // CHECK19-NEXT: [[TMP162:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS52]], i32 0, i32 1
13741 // CHECK19-NEXT: store ptr null, ptr [[TMP162]], align 4
13742 // CHECK19-NEXT: [[TMP163:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 2
13743 // CHECK19-NEXT: store i32 [[TMP0]], ptr [[TMP163]], align 4
13744 // CHECK19-NEXT: [[TMP164:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS51]], i32 0, i32 2
13745 // CHECK19-NEXT: store i32 [[TMP0]], ptr [[TMP164]], align 4
13746 // CHECK19-NEXT: [[TMP165:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS52]], i32 0, i32 2
13747 // CHECK19-NEXT: store ptr null, ptr [[TMP165]], align 4
13748 // CHECK19-NEXT: [[TMP166:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 3
13749 // CHECK19-NEXT: store ptr [[VLA]], ptr [[TMP166]], align 4
13750 // CHECK19-NEXT: [[TMP167:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS51]], i32 0, i32 3
13751 // CHECK19-NEXT: store ptr [[VLA]], ptr [[TMP167]], align 4
13752 // CHECK19-NEXT: [[TMP168:%.*]] = getelementptr inbounds [4 x i64], ptr [[DOTOFFLOAD_SIZES53]], i32 0, i32 3
13753 // CHECK19-NEXT: store i64 [[TMP156]], ptr [[TMP168]], align 4
13754 // CHECK19-NEXT: [[TMP169:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS52]], i32 0, i32 3
13755 // CHECK19-NEXT: store ptr null, ptr [[TMP169]], align 4
13756 // CHECK19-NEXT: [[TMP170:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0
13757 // CHECK19-NEXT: [[TMP171:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS51]], i32 0, i32 0
13758 // CHECK19-NEXT: [[TMP172:%.*]] = getelementptr inbounds [4 x i64], ptr [[DOTOFFLOAD_SIZES53]], i32 0, i32 0
13759 // CHECK19-NEXT: [[TMP173:%.*]] = load i32, ptr [[N]], align 4
13760 // CHECK19-NEXT: store i32 [[TMP173]], ptr [[DOTCAPTURE_EXPR_55]], align 4
13761 // CHECK19-NEXT: [[TMP174:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_55]], align 4
13762 // CHECK19-NEXT: [[SUB57:%.*]] = sub nsw i32 [[TMP174]], 0
13763 // CHECK19-NEXT: [[DIV58:%.*]] = sdiv i32 [[SUB57]], 1
13764 // CHECK19-NEXT: [[SUB59:%.*]] = sub nsw i32 [[DIV58]], 1
13765 // CHECK19-NEXT: store i32 [[SUB59]], ptr [[DOTCAPTURE_EXPR_56]], align 4
13766 // CHECK19-NEXT: [[TMP175:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_56]], align 4
13767 // CHECK19-NEXT: [[ADD60:%.*]] = add nsw i32 [[TMP175]], 1
13768 // CHECK19-NEXT: [[TMP176:%.*]] = zext i32 [[ADD60]] to i64
13769 // CHECK19-NEXT: [[TMP177:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 0
13770 // CHECK19-NEXT: store i32 3, ptr [[TMP177]], align 4
13771 // CHECK19-NEXT: [[TMP178:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 1
13772 // CHECK19-NEXT: store i32 4, ptr [[TMP178]], align 4
13773 // CHECK19-NEXT: [[TMP179:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 2
13774 // CHECK19-NEXT: store ptr [[TMP170]], ptr [[TMP179]], align 4
13775 // CHECK19-NEXT: [[TMP180:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 3
13776 // CHECK19-NEXT: store ptr [[TMP171]], ptr [[TMP180]], align 4
13777 // CHECK19-NEXT: [[TMP181:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 4
13778 // CHECK19-NEXT: store ptr [[TMP172]], ptr [[TMP181]], align 4
13779 // CHECK19-NEXT: [[TMP182:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 5
13780 // CHECK19-NEXT: store ptr @.offload_maptypes.8, ptr [[TMP182]], align 4
13781 // CHECK19-NEXT: [[TMP183:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 6
13782 // CHECK19-NEXT: store ptr null, ptr [[TMP183]], align 4
13783 // CHECK19-NEXT: [[TMP184:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 7
13784 // CHECK19-NEXT: store ptr null, ptr [[TMP184]], align 4
13785 // CHECK19-NEXT: [[TMP185:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 8
13786 // CHECK19-NEXT: store i64 [[TMP176]], ptr [[TMP185]], align 8
13787 // CHECK19-NEXT: [[TMP186:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 9
13788 // CHECK19-NEXT: store i64 0, ptr [[TMP186]], align 8
13789 // CHECK19-NEXT: [[TMP187:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 10
13790 // CHECK19-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP187]], align 4
13791 // CHECK19-NEXT: [[TMP188:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 11
13792 // CHECK19-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP188]], align 4
13793 // CHECK19-NEXT: [[TMP189:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 12
13794 // CHECK19-NEXT: store i32 0, ptr [[TMP189]], align 4
13795 // CHECK19-NEXT: [[TMP190:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.region_id, ptr [[KERNEL_ARGS61]])
13796 // CHECK19-NEXT: [[TMP191:%.*]] = icmp ne i32 [[TMP190]], 0
13797 // CHECK19-NEXT: br i1 [[TMP191]], label [[OMP_OFFLOAD_FAILED62:%.*]], label [[OMP_OFFLOAD_CONT63:%.*]]
13798 // CHECK19: omp_offload.failed62:
13799 // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174(i32 [[TMP152]], i32 [[TMP154]], i32 [[TMP0]], ptr [[VLA]]) #[[ATTR3]]
13800 // CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT63]]
13801 // CHECK19: omp_offload.cont63:
13802 // CHECK19-NEXT: [[TMP192:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4
13803 // CHECK19-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP192]])
13804 // CHECK19-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4
13805 // CHECK19-NEXT: [[TMP193:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4
13806 // CHECK19-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP193]])
13807 // CHECK19-NEXT: [[TMP194:%.*]] = load i32, ptr [[RETVAL]], align 4
13808 // CHECK19-NEXT: ret i32 [[TMP194]]
13811 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154
13812 // CHECK19-SAME: (i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2:[0-9]+]] {
13813 // CHECK19-NEXT: entry:
13814 // CHECK19-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
13815 // CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
13816 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
13817 // CHECK19-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
13818 // CHECK19-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
13819 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
13820 // CHECK19-NEXT: [[TMP0:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
13821 // CHECK19-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4
13822 // CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.omp_outlined, ptr [[N_ADDR]], i32 [[TMP0]], ptr [[TMP1]])
13823 // CHECK19-NEXT: ret void
13826 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.omp_outlined
13827 // CHECK19-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
13828 // CHECK19-NEXT: entry:
13829 // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
13830 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
13831 // CHECK19-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 4
13832 // CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
13833 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
13834 // CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
13835 // CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
13836 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
13837 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
13838 // CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
13839 // CHECK19-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
13840 // CHECK19-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
13841 // CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13842 // CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13843 // CHECK19-NEXT: [[I3:%.*]] = alloca i32, align 4
13844 // CHECK19-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
13845 // CHECK19-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
13846 // CHECK19-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 4
13847 // CHECK19-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
13848 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
13849 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 4
13850 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
13851 // CHECK19-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 4
13852 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
13853 // CHECK19-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4
13854 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
13855 // CHECK19-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
13856 // CHECK19-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
13857 // CHECK19-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
13858 // CHECK19-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
13859 // CHECK19-NEXT: store i32 0, ptr [[I]], align 4
13860 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
13861 // CHECK19-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
13862 // CHECK19-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
13863 // CHECK19: omp.precond.then:
13864 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
13865 // CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
13866 // CHECK19-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_COMB_UB]], align 4
13867 // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
13868 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
13869 // CHECK19-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
13870 // CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
13871 // CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
13872 // CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
13873 // CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
13874 // CHECK19-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
13875 // CHECK19-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
13876 // CHECK19: cond.true:
13877 // CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
13878 // CHECK19-NEXT: br label [[COND_END:%.*]]
13879 // CHECK19: cond.false:
13880 // CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
13881 // CHECK19-NEXT: br label [[COND_END]]
13882 // CHECK19: cond.end:
13883 // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
13884 // CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
13885 // CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
13886 // CHECK19-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4
13887 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
13888 // CHECK19: omp.inner.for.cond:
13889 // CHECK19-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14:![0-9]+]]
13890 // CHECK19-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP14]]
13891 // CHECK19-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
13892 // CHECK19-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13893 // CHECK19: omp.inner.for.body:
13894 // CHECK19-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP14]]
13895 // CHECK19-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP14]]
13896 // CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 5, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.omp_outlined.omp_outlined, i32 [[TMP16]], i32 [[TMP17]], ptr [[TMP0]], i32 [[TMP1]], ptr [[TMP2]]), !llvm.access.group [[ACC_GRP14]]
13897 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
13898 // CHECK19: omp.inner.for.inc:
13899 // CHECK19-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]]
13900 // CHECK19-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP14]]
13901 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
13902 // CHECK19-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]]
13903 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP15:![0-9]+]]
13904 // CHECK19: omp.inner.for.end:
13905 // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
13906 // CHECK19: omp.loop.exit:
13907 // CHECK19-NEXT: [[TMP20:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
13908 // CHECK19-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP20]], align 4
13909 // CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP21]])
13910 // CHECK19-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
13911 // CHECK19-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
13912 // CHECK19-NEXT: br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
13913 // CHECK19: .omp.final.then:
13914 // CHECK19-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
13915 // CHECK19-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP24]], 0
13916 // CHECK19-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
13917 // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
13918 // CHECK19-NEXT: [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
13919 // CHECK19-NEXT: store i32 [[ADD8]], ptr [[I3]], align 4
13920 // CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]]
13921 // CHECK19: .omp.final.done:
13922 // CHECK19-NEXT: br label [[OMP_PRECOND_END]]
13923 // CHECK19: omp.precond.end:
13924 // CHECK19-NEXT: ret void
13927 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.omp_outlined.omp_outlined
13928 // CHECK19-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
13929 // CHECK19-NEXT: entry:
13930 // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
13931 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
13932 // CHECK19-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
13933 // CHECK19-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
13934 // CHECK19-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 4
13935 // CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
13936 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
13937 // CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
13938 // CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
13939 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
13940 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
13941 // CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
13942 // CHECK19-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
13943 // CHECK19-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
13944 // CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13945 // CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13946 // CHECK19-NEXT: [[I3:%.*]] = alloca i32, align 4
13947 // CHECK19-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
13948 // CHECK19-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
13949 // CHECK19-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
13950 // CHECK19-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
13951 // CHECK19-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 4
13952 // CHECK19-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
13953 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
13954 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 4
13955 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
13956 // CHECK19-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 4
13957 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
13958 // CHECK19-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4
13959 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
13960 // CHECK19-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
13961 // CHECK19-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
13962 // CHECK19-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
13963 // CHECK19-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
13964 // CHECK19-NEXT: store i32 0, ptr [[I]], align 4
13965 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
13966 // CHECK19-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
13967 // CHECK19-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
13968 // CHECK19: omp.precond.then:
13969 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
13970 // CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
13971 // CHECK19-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4
13972 // CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
13973 // CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
13974 // CHECK19-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_LB]], align 4
13975 // CHECK19-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_UB]], align 4
13976 // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
13977 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
13978 // CHECK19-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
13979 // CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4
13980 // CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP10]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
13981 // CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
13982 // CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
13983 // CHECK19-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP11]], [[TMP12]]
13984 // CHECK19-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
13985 // CHECK19: cond.true:
13986 // CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
13987 // CHECK19-NEXT: br label [[COND_END:%.*]]
13988 // CHECK19: cond.false:
13989 // CHECK19-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
13990 // CHECK19-NEXT: br label [[COND_END]]
13991 // CHECK19: cond.end:
13992 // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ]
13993 // CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
13994 // CHECK19-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
13995 // CHECK19-NEXT: store i32 [[TMP15]], ptr [[DOTOMP_IV]], align 4
13996 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
13997 // CHECK19: omp.inner.for.cond:
13998 // CHECK19-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18:![0-9]+]]
13999 // CHECK19-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP18]]
14000 // CHECK19-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
14001 // CHECK19-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14002 // CHECK19: omp.inner.for.body:
14003 // CHECK19-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
14004 // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
14005 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
14006 // CHECK19-NEXT: store i32 [[ADD]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP18]]
14007 // CHECK19-NEXT: [[TMP19:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP18]]
14008 // CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 [[TMP19]]
14009 // CHECK19-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP18]]
14010 // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
14011 // CHECK19: omp.body.continue:
14012 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
14013 // CHECK19: omp.inner.for.inc:
14014 // CHECK19-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
14015 // CHECK19-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP20]], 1
14016 // CHECK19-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
14017 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
14018 // CHECK19: omp.inner.for.end:
14019 // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
14020 // CHECK19: omp.loop.exit:
14021 // CHECK19-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
14022 // CHECK19-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
14023 // CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP22]])
14024 // CHECK19-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
14025 // CHECK19-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
14026 // CHECK19-NEXT: br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
14027 // CHECK19: .omp.final.then:
14028 // CHECK19-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
14029 // CHECK19-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP25]], 0
14030 // CHECK19-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
14031 // CHECK19-NEXT: [[MUL9:%.*]] = mul nsw i32 [[DIV8]], 1
14032 // CHECK19-NEXT: [[ADD10:%.*]] = add nsw i32 0, [[MUL9]]
14033 // CHECK19-NEXT: store i32 [[ADD10]], ptr [[I3]], align 4
14034 // CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]]
14035 // CHECK19: .omp.final.done:
14036 // CHECK19-NEXT: br label [[OMP_PRECOND_END]]
14037 // CHECK19: omp.precond.end:
14038 // CHECK19-NEXT: ret void
14041 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159
14042 // CHECK19-SAME: (i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
14043 // CHECK19-NEXT: entry:
14044 // CHECK19-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
14045 // CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
14046 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
14047 // CHECK19-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
14048 // CHECK19-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
14049 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
14050 // CHECK19-NEXT: [[TMP0:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
14051 // CHECK19-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4
14052 // CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.omp_outlined, ptr [[N_ADDR]], i32 [[TMP0]], ptr [[TMP1]])
14053 // CHECK19-NEXT: ret void
14056 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.omp_outlined
14057 // CHECK19-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
14058 // CHECK19-NEXT: entry:
14059 // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
14060 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
14061 // CHECK19-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 4
14062 // CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
14063 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
14064 // CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
14065 // CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
14066 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
14067 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
14068 // CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
14069 // CHECK19-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
14070 // CHECK19-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
14071 // CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
14072 // CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
14073 // CHECK19-NEXT: [[I3:%.*]] = alloca i32, align 4
14074 // CHECK19-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
14075 // CHECK19-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
14076 // CHECK19-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 4
14077 // CHECK19-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
14078 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
14079 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 4
14080 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
14081 // CHECK19-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 4
14082 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
14083 // CHECK19-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4
14084 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
14085 // CHECK19-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
14086 // CHECK19-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
14087 // CHECK19-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
14088 // CHECK19-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
14089 // CHECK19-NEXT: store i32 0, ptr [[I]], align 4
14090 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
14091 // CHECK19-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
14092 // CHECK19-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
14093 // CHECK19: omp.precond.then:
14094 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
14095 // CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
14096 // CHECK19-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_COMB_UB]], align 4
14097 // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
14098 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
14099 // CHECK19-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
14100 // CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
14101 // CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
14102 // CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
14103 // CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
14104 // CHECK19-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
14105 // CHECK19-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
14106 // CHECK19: cond.true:
14107 // CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
14108 // CHECK19-NEXT: br label [[COND_END:%.*]]
14109 // CHECK19: cond.false:
14110 // CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
14111 // CHECK19-NEXT: br label [[COND_END]]
14112 // CHECK19: cond.end:
14113 // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
14114 // CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
14115 // CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
14116 // CHECK19-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4
14117 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
14118 // CHECK19: omp.inner.for.cond:
14119 // CHECK19-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23:![0-9]+]]
14120 // CHECK19-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP23]]
14121 // CHECK19-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
14122 // CHECK19-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14123 // CHECK19: omp.inner.for.body:
14124 // CHECK19-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP23]]
14125 // CHECK19-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP23]]
14126 // CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 5, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.omp_outlined.omp_outlined, i32 [[TMP16]], i32 [[TMP17]], ptr [[TMP0]], i32 [[TMP1]], ptr [[TMP2]]), !llvm.access.group [[ACC_GRP23]]
14127 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
14128 // CHECK19: omp.inner.for.inc:
14129 // CHECK19-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]]
14130 // CHECK19-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP23]]
14131 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
14132 // CHECK19-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]]
14133 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]]
14134 // CHECK19: omp.inner.for.end:
14135 // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
14136 // CHECK19: omp.loop.exit:
14137 // CHECK19-NEXT: [[TMP20:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
14138 // CHECK19-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP20]], align 4
14139 // CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP21]])
14140 // CHECK19-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
14141 // CHECK19-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
14142 // CHECK19-NEXT: br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
14143 // CHECK19: .omp.final.then:
14144 // CHECK19-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
14145 // CHECK19-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP24]], 0
14146 // CHECK19-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
14147 // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
14148 // CHECK19-NEXT: [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
14149 // CHECK19-NEXT: store i32 [[ADD8]], ptr [[I3]], align 4
14150 // CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]]
14151 // CHECK19: .omp.final.done:
14152 // CHECK19-NEXT: br label [[OMP_PRECOND_END]]
14153 // CHECK19: omp.precond.end:
14154 // CHECK19-NEXT: ret void
14157 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.omp_outlined.omp_outlined
14158 // CHECK19-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
14159 // CHECK19-NEXT: entry:
14160 // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
14161 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
14162 // CHECK19-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
14163 // CHECK19-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
14164 // CHECK19-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 4
14165 // CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
14166 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
14167 // CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
14168 // CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
14169 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
14170 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
14171 // CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
14172 // CHECK19-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
14173 // CHECK19-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
14174 // CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
14175 // CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
14176 // CHECK19-NEXT: [[I3:%.*]] = alloca i32, align 4
14177 // CHECK19-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
14178 // CHECK19-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
14179 // CHECK19-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
14180 // CHECK19-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
14181 // CHECK19-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 4
14182 // CHECK19-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
14183 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
14184 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 4
14185 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
14186 // CHECK19-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 4
14187 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
14188 // CHECK19-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4
14189 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
14190 // CHECK19-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
14191 // CHECK19-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
14192 // CHECK19-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
14193 // CHECK19-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
14194 // CHECK19-NEXT: store i32 0, ptr [[I]], align 4
14195 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
14196 // CHECK19-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
14197 // CHECK19-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
14198 // CHECK19: omp.precond.then:
14199 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
14200 // CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
14201 // CHECK19-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4
14202 // CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
14203 // CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
14204 // CHECK19-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_LB]], align 4
14205 // CHECK19-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_UB]], align 4
14206 // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
14207 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
14208 // CHECK19-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
14209 // CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4
14210 // CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP10]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
14211 // CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
14212 // CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
14213 // CHECK19-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP11]], [[TMP12]]
14214 // CHECK19-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
14215 // CHECK19: cond.true:
14216 // CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
14217 // CHECK19-NEXT: br label [[COND_END:%.*]]
14218 // CHECK19: cond.false:
14219 // CHECK19-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
14220 // CHECK19-NEXT: br label [[COND_END]]
14221 // CHECK19: cond.end:
14222 // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ]
14223 // CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
14224 // CHECK19-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
14225 // CHECK19-NEXT: store i32 [[TMP15]], ptr [[DOTOMP_IV]], align 4
14226 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
14227 // CHECK19: omp.inner.for.cond:
14228 // CHECK19-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26:![0-9]+]]
14229 // CHECK19-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP26]]
14230 // CHECK19-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
14231 // CHECK19-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14232 // CHECK19: omp.inner.for.body:
14233 // CHECK19-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]]
14234 // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
14235 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
14236 // CHECK19-NEXT: store i32 [[ADD]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP26]]
14237 // CHECK19-NEXT: [[TMP19:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP26]]
14238 // CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 [[TMP19]]
14239 // CHECK19-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP26]]
14240 // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
14241 // CHECK19: omp.body.continue:
14242 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
14243 // CHECK19: omp.inner.for.inc:
14244 // CHECK19-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]]
14245 // CHECK19-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP20]], 1
14246 // CHECK19-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]]
14247 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]]
14248 // CHECK19: omp.inner.for.end:
14249 // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
14250 // CHECK19: omp.loop.exit:
14251 // CHECK19-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
14252 // CHECK19-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
14253 // CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP22]])
14254 // CHECK19-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
14255 // CHECK19-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
14256 // CHECK19-NEXT: br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
14257 // CHECK19: .omp.final.then:
14258 // CHECK19-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
14259 // CHECK19-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP25]], 0
14260 // CHECK19-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
14261 // CHECK19-NEXT: [[MUL9:%.*]] = mul nsw i32 [[DIV8]], 1
14262 // CHECK19-NEXT: [[ADD10:%.*]] = add nsw i32 0, [[MUL9]]
14263 // CHECK19-NEXT: store i32 [[ADD10]], ptr [[I3]], align 4
14264 // CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]]
14265 // CHECK19: .omp.final.done:
14266 // CHECK19-NEXT: br label [[OMP_PRECOND_END]]
14267 // CHECK19: omp.precond.end:
14268 // CHECK19-NEXT: ret void
14271 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164
14272 // CHECK19-SAME: (i32 noundef [[M:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
14273 // CHECK19-NEXT: entry:
14274 // CHECK19-NEXT: [[M_ADDR:%.*]] = alloca i32, align 4
14275 // CHECK19-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
14276 // CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
14277 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
14278 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
14279 // CHECK19-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
14280 // CHECK19-NEXT: store i32 [[M]], ptr [[M_ADDR]], align 4
14281 // CHECK19-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
14282 // CHECK19-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
14283 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
14284 // CHECK19-NEXT: [[TMP0:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
14285 // CHECK19-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4
14286 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[M_ADDR]], align 4
14287 // CHECK19-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 4
14288 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
14289 // CHECK19-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
14290 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
14291 // CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.omp_outlined, ptr [[N_ADDR]], i32 [[TMP0]], ptr [[TMP1]], i32 [[TMP4]])
14292 // CHECK19-NEXT: ret void
14295 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.omp_outlined
14296 // CHECK19-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
14297 // CHECK19-NEXT: entry:
14298 // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
14299 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
14300 // CHECK19-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 4
14301 // CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
14302 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
14303 // CHECK19-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
14304 // CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
14305 // CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
14306 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
14307 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
14308 // CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
14309 // CHECK19-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
14310 // CHECK19-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
14311 // CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
14312 // CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
14313 // CHECK19-NEXT: [[I4:%.*]] = alloca i32, align 4
14314 // CHECK19-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
14315 // CHECK19-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
14316 // CHECK19-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
14317 // CHECK19-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 4
14318 // CHECK19-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
14319 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
14320 // CHECK19-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
14321 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 4
14322 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
14323 // CHECK19-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 4
14324 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
14325 // CHECK19-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_1]], align 4
14326 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
14327 // CHECK19-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
14328 // CHECK19-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
14329 // CHECK19-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
14330 // CHECK19-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_2]], align 4
14331 // CHECK19-NEXT: store i32 0, ptr [[I]], align 4
14332 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
14333 // CHECK19-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
14334 // CHECK19-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
14335 // CHECK19: omp.precond.then:
14336 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
14337 // CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
14338 // CHECK19-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_COMB_UB]], align 4
14339 // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
14340 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
14341 // CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
14342 // CHECK19-NEXT: [[TMP8:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
14343 // CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4
14344 // CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP9]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[TMP7]])
14345 // CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
14346 // CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
14347 // CHECK19-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
14348 // CHECK19-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
14349 // CHECK19: cond.true:
14350 // CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
14351 // CHECK19-NEXT: br label [[COND_END:%.*]]
14352 // CHECK19: cond.false:
14353 // CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
14354 // CHECK19-NEXT: br label [[COND_END]]
14355 // CHECK19: cond.end:
14356 // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
14357 // CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
14358 // CHECK19-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
14359 // CHECK19-NEXT: store i32 [[TMP14]], ptr [[DOTOMP_IV]], align 4
14360 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
14361 // CHECK19: omp.inner.for.cond:
14362 // CHECK19-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29:![0-9]+]]
14363 // CHECK19-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP29]]
14364 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP16]], 1
14365 // CHECK19-NEXT: [[CMP6:%.*]] = icmp slt i32 [[TMP15]], [[ADD]]
14366 // CHECK19-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14367 // CHECK19: omp.inner.for.body:
14368 // CHECK19-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP29]]
14369 // CHECK19-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP29]]
14370 // CHECK19-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group [[ACC_GRP29]]
14371 // CHECK19-NEXT: store i32 [[TMP19]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group [[ACC_GRP29]]
14372 // CHECK19-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group [[ACC_GRP29]]
14373 // CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 6, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.omp_outlined.omp_outlined, i32 [[TMP17]], i32 [[TMP18]], ptr [[TMP0]], i32 [[TMP1]], ptr [[TMP2]], i32 [[TMP20]]), !llvm.access.group [[ACC_GRP29]]
14374 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
14375 // CHECK19: omp.inner.for.inc:
14376 // CHECK19-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]]
14377 // CHECK19-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP29]]
14378 // CHECK19-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
14379 // CHECK19-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]]
14380 // CHECK19-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP29]]
14381 // CHECK19-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP29]]
14382 // CHECK19-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
14383 // CHECK19-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP29]]
14384 // CHECK19-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP29]]
14385 // CHECK19-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP29]]
14386 // CHECK19-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
14387 // CHECK19-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP29]]
14388 // CHECK19-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP29]]
14389 // CHECK19-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP29]]
14390 // CHECK19-NEXT: [[CMP10:%.*]] = icmp sgt i32 [[TMP27]], [[TMP28]]
14391 // CHECK19-NEXT: br i1 [[CMP10]], label [[COND_TRUE11:%.*]], label [[COND_FALSE12:%.*]]
14392 // CHECK19: cond.true11:
14393 // CHECK19-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP29]]
14394 // CHECK19-NEXT: br label [[COND_END13:%.*]]
14395 // CHECK19: cond.false12:
14396 // CHECK19-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP29]]
14397 // CHECK19-NEXT: br label [[COND_END13]]
14398 // CHECK19: cond.end13:
14399 // CHECK19-NEXT: [[COND14:%.*]] = phi i32 [ [[TMP29]], [[COND_TRUE11]] ], [ [[TMP30]], [[COND_FALSE12]] ]
14400 // CHECK19-NEXT: store i32 [[COND14]], ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP29]]
14401 // CHECK19-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP29]]
14402 // CHECK19-NEXT: store i32 [[TMP31]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]]
14403 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]]
14404 // CHECK19: omp.inner.for.end:
14405 // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
14406 // CHECK19: omp.loop.exit:
14407 // CHECK19-NEXT: [[TMP32:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
14408 // CHECK19-NEXT: [[TMP33:%.*]] = load i32, ptr [[TMP32]], align 4
14409 // CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP33]])
14410 // CHECK19-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
14411 // CHECK19-NEXT: [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
14412 // CHECK19-NEXT: br i1 [[TMP35]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
14413 // CHECK19: .omp.final.then:
14414 // CHECK19-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
14415 // CHECK19-NEXT: [[SUB15:%.*]] = sub nsw i32 [[TMP36]], 0
14416 // CHECK19-NEXT: [[DIV16:%.*]] = sdiv i32 [[SUB15]], 1
14417 // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV16]], 1
14418 // CHECK19-NEXT: [[ADD17:%.*]] = add nsw i32 0, [[MUL]]
14419 // CHECK19-NEXT: store i32 [[ADD17]], ptr [[I4]], align 4
14420 // CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]]
14421 // CHECK19: .omp.final.done:
14422 // CHECK19-NEXT: br label [[OMP_PRECOND_END]]
14423 // CHECK19: omp.precond.end:
14424 // CHECK19-NEXT: ret void
14427 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.omp_outlined.omp_outlined
14428 // CHECK19-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
14429 // CHECK19-NEXT: entry:
14430 // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
14431 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
14432 // CHECK19-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
14433 // CHECK19-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
14434 // CHECK19-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 4
14435 // CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
14436 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
14437 // CHECK19-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
14438 // CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
14439 // CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
14440 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
14441 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
14442 // CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
14443 // CHECK19-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
14444 // CHECK19-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
14445 // CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
14446 // CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
14447 // CHECK19-NEXT: [[I4:%.*]] = alloca i32, align 4
14448 // CHECK19-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
14449 // CHECK19-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
14450 // CHECK19-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
14451 // CHECK19-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
14452 // CHECK19-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 4
14453 // CHECK19-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
14454 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
14455 // CHECK19-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
14456 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 4
14457 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
14458 // CHECK19-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 4
14459 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
14460 // CHECK19-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_1]], align 4
14461 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
14462 // CHECK19-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
14463 // CHECK19-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
14464 // CHECK19-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
14465 // CHECK19-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_2]], align 4
14466 // CHECK19-NEXT: store i32 0, ptr [[I]], align 4
14467 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
14468 // CHECK19-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
14469 // CHECK19-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
14470 // CHECK19: omp.precond.then:
14471 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
14472 // CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
14473 // CHECK19-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4
14474 // CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
14475 // CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
14476 // CHECK19-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_LB]], align 4
14477 // CHECK19-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_UB]], align 4
14478 // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
14479 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
14480 // CHECK19-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
14481 // CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4
14482 // CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP10]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
14483 // CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
14484 // CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
14485 // CHECK19-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP11]], [[TMP12]]
14486 // CHECK19-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
14487 // CHECK19: cond.true:
14488 // CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
14489 // CHECK19-NEXT: br label [[COND_END:%.*]]
14490 // CHECK19: cond.false:
14491 // CHECK19-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
14492 // CHECK19-NEXT: br label [[COND_END]]
14493 // CHECK19: cond.end:
14494 // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ]
14495 // CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
14496 // CHECK19-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
14497 // CHECK19-NEXT: store i32 [[TMP15]], ptr [[DOTOMP_IV]], align 4
14498 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
14499 // CHECK19: omp.inner.for.cond:
14500 // CHECK19-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32:![0-9]+]]
14501 // CHECK19-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP32]]
14502 // CHECK19-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
14503 // CHECK19-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14504 // CHECK19: omp.inner.for.body:
14505 // CHECK19-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]]
14506 // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
14507 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
14508 // CHECK19-NEXT: store i32 [[ADD]], ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP32]]
14509 // CHECK19-NEXT: [[TMP19:%.*]] = load i32, ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP32]]
14510 // CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 [[TMP19]]
14511 // CHECK19-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP32]]
14512 // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
14513 // CHECK19: omp.body.continue:
14514 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
14515 // CHECK19: omp.inner.for.inc:
14516 // CHECK19-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]]
14517 // CHECK19-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP20]], 1
14518 // CHECK19-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]]
14519 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]]
14520 // CHECK19: omp.inner.for.end:
14521 // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
14522 // CHECK19: omp.loop.exit:
14523 // CHECK19-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
14524 // CHECK19-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
14525 // CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP22]])
14526 // CHECK19-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
14527 // CHECK19-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
14528 // CHECK19-NEXT: br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
14529 // CHECK19: .omp.final.then:
14530 // CHECK19-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
14531 // CHECK19-NEXT: [[SUB8:%.*]] = sub nsw i32 [[TMP25]], 0
14532 // CHECK19-NEXT: [[DIV9:%.*]] = sdiv i32 [[SUB8]], 1
14533 // CHECK19-NEXT: [[MUL10:%.*]] = mul nsw i32 [[DIV9]], 1
14534 // CHECK19-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]]
14535 // CHECK19-NEXT: store i32 [[ADD11]], ptr [[I4]], align 4
14536 // CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]]
14537 // CHECK19: .omp.final.done:
14538 // CHECK19-NEXT: br label [[OMP_PRECOND_END]]
14539 // CHECK19: omp.precond.end:
14540 // CHECK19-NEXT: ret void
14543 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169
14544 // CHECK19-SAME: (i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
14545 // CHECK19-NEXT: entry:
14546 // CHECK19-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
14547 // CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
14548 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
14549 // CHECK19-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
14550 // CHECK19-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
14551 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
14552 // CHECK19-NEXT: [[TMP0:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
14553 // CHECK19-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4
14554 // CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.omp_outlined, ptr [[N_ADDR]], i32 [[TMP0]], ptr [[TMP1]])
14555 // CHECK19-NEXT: ret void
14558 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.omp_outlined
14559 // CHECK19-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
14560 // CHECK19-NEXT: entry:
14561 // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
14562 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
14563 // CHECK19-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 4
14564 // CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
14565 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
14566 // CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
14567 // CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
14568 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
14569 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
14570 // CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
14571 // CHECK19-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
14572 // CHECK19-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
14573 // CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
14574 // CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
14575 // CHECK19-NEXT: [[I3:%.*]] = alloca i32, align 4
14576 // CHECK19-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
14577 // CHECK19-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
14578 // CHECK19-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 4
14579 // CHECK19-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
14580 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
14581 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 4
14582 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
14583 // CHECK19-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 4
14584 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
14585 // CHECK19-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4
14586 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
14587 // CHECK19-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
14588 // CHECK19-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
14589 // CHECK19-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
14590 // CHECK19-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
14591 // CHECK19-NEXT: store i32 0, ptr [[I]], align 4
14592 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
14593 // CHECK19-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
14594 // CHECK19-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
14595 // CHECK19: omp.precond.then:
14596 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
14597 // CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
14598 // CHECK19-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_COMB_UB]], align 4
14599 // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
14600 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
14601 // CHECK19-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
14602 // CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
14603 // CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
14604 // CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
14605 // CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
14606 // CHECK19-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
14607 // CHECK19-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
14608 // CHECK19: cond.true:
14609 // CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
14610 // CHECK19-NEXT: br label [[COND_END:%.*]]
14611 // CHECK19: cond.false:
14612 // CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
14613 // CHECK19-NEXT: br label [[COND_END]]
14614 // CHECK19: cond.end:
14615 // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
14616 // CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
14617 // CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
14618 // CHECK19-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4
14619 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
14620 // CHECK19: omp.inner.for.cond:
14621 // CHECK19-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35:![0-9]+]]
14622 // CHECK19-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP35]]
14623 // CHECK19-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
14624 // CHECK19-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14625 // CHECK19: omp.inner.for.body:
14626 // CHECK19-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP35]]
14627 // CHECK19-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP35]]
14628 // CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 5, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.omp_outlined.omp_outlined, i32 [[TMP16]], i32 [[TMP17]], ptr [[TMP0]], i32 [[TMP1]], ptr [[TMP2]]), !llvm.access.group [[ACC_GRP35]]
14629 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
14630 // CHECK19: omp.inner.for.inc:
14631 // CHECK19-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]]
14632 // CHECK19-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP35]]
14633 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
14634 // CHECK19-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]]
14635 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]]
14636 // CHECK19: omp.inner.for.end:
14637 // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
14638 // CHECK19: omp.loop.exit:
14639 // CHECK19-NEXT: [[TMP20:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
14640 // CHECK19-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP20]], align 4
14641 // CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP21]])
14642 // CHECK19-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
14643 // CHECK19-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
14644 // CHECK19-NEXT: br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
14645 // CHECK19: .omp.final.then:
14646 // CHECK19-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
14647 // CHECK19-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP24]], 0
14648 // CHECK19-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
14649 // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
14650 // CHECK19-NEXT: [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
14651 // CHECK19-NEXT: store i32 [[ADD8]], ptr [[I3]], align 4
14652 // CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]]
14653 // CHECK19: .omp.final.done:
14654 // CHECK19-NEXT: br label [[OMP_PRECOND_END]]
14655 // CHECK19: omp.precond.end:
14656 // CHECK19-NEXT: ret void
14659 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.omp_outlined.omp_outlined
14660 // CHECK19-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
14661 // CHECK19-NEXT: entry:
14662 // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
14663 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
14664 // CHECK19-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
14665 // CHECK19-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
14666 // CHECK19-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 4
14667 // CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
14668 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
14669 // CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
14670 // CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
14671 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
14672 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
14673 // CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
14674 // CHECK19-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
14675 // CHECK19-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
14676 // CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
14677 // CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
14678 // CHECK19-NEXT: [[I3:%.*]] = alloca i32, align 4
14679 // CHECK19-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
14680 // CHECK19-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
14681 // CHECK19-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
14682 // CHECK19-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
14683 // CHECK19-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 4
14684 // CHECK19-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
14685 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
14686 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 4
14687 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
14688 // CHECK19-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 4
14689 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
14690 // CHECK19-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4
14691 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
14692 // CHECK19-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
14693 // CHECK19-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
14694 // CHECK19-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
14695 // CHECK19-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
14696 // CHECK19-NEXT: store i32 0, ptr [[I]], align 4
14697 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
14698 // CHECK19-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
14699 // CHECK19-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
14700 // CHECK19: omp.precond.then:
14701 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
14702 // CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
14703 // CHECK19-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4
14704 // CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
14705 // CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
14706 // CHECK19-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_LB]], align 4
14707 // CHECK19-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_UB]], align 4
14708 // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
14709 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
14710 // CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
14711 // CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
14712 // CHECK19-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
14713 // CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP11]], align 4
14714 // CHECK19-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB3]], i32 [[TMP12]], i32 1073741859, i32 [[TMP9]], i32 [[TMP10]], i32 1, i32 1)
14715 // CHECK19-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
14716 // CHECK19: omp.dispatch.cond:
14717 // CHECK19-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
14718 // CHECK19-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 4
14719 // CHECK19-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB3]], i32 [[TMP14]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
14720 // CHECK19-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP15]], 0
14721 // CHECK19-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
14722 // CHECK19: omp.dispatch.body:
14723 // CHECK19-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
14724 // CHECK19-NEXT: store i32 [[TMP16]], ptr [[DOTOMP_IV]], align 4
14725 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
14726 // CHECK19: omp.inner.for.cond:
14727 // CHECK19-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38:![0-9]+]]
14728 // CHECK19-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP38]]
14729 // CHECK19-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
14730 // CHECK19-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14731 // CHECK19: omp.inner.for.body:
14732 // CHECK19-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38]]
14733 // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
14734 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
14735 // CHECK19-NEXT: store i32 [[ADD]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP38]]
14736 // CHECK19-NEXT: [[TMP20:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP38]]
14737 // CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 [[TMP20]]
14738 // CHECK19-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP38]]
14739 // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
14740 // CHECK19: omp.body.continue:
14741 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
14742 // CHECK19: omp.inner.for.inc:
14743 // CHECK19-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38]]
14744 // CHECK19-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP21]], 1
14745 // CHECK19-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38]]
14746 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP39:![0-9]+]]
14747 // CHECK19: omp.inner.for.end:
14748 // CHECK19-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
14749 // CHECK19: omp.dispatch.inc:
14750 // CHECK19-NEXT: br label [[OMP_DISPATCH_COND]]
14751 // CHECK19: omp.dispatch.end:
14752 // CHECK19-NEXT: [[TMP22:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
14753 // CHECK19-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4
14754 // CHECK19-NEXT: call void @__kmpc_dispatch_deinit(ptr @[[GLOB3]], i32 [[TMP23]])
14755 // CHECK19-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
14756 // CHECK19-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
14757 // CHECK19-NEXT: br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
14758 // CHECK19: .omp.final.then:
14759 // CHECK19-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
14760 // CHECK19-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP26]], 0
14761 // CHECK19-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
14762 // CHECK19-NEXT: [[MUL8:%.*]] = mul nsw i32 [[DIV7]], 1
14763 // CHECK19-NEXT: [[ADD9:%.*]] = add nsw i32 0, [[MUL8]]
14764 // CHECK19-NEXT: store i32 [[ADD9]], ptr [[I3]], align 4
14765 // CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]]
14766 // CHECK19: .omp.final.done:
14767 // CHECK19-NEXT: br label [[OMP_PRECOND_END]]
14768 // CHECK19: omp.precond.end:
14769 // CHECK19-NEXT: ret void
14772 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174
14773 // CHECK19-SAME: (i32 noundef [[M:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
14774 // CHECK19-NEXT: entry:
14775 // CHECK19-NEXT: [[M_ADDR:%.*]] = alloca i32, align 4
14776 // CHECK19-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
14777 // CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
14778 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
14779 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
14780 // CHECK19-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
14781 // CHECK19-NEXT: store i32 [[M]], ptr [[M_ADDR]], align 4
14782 // CHECK19-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
14783 // CHECK19-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
14784 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
14785 // CHECK19-NEXT: [[TMP0:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
14786 // CHECK19-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4
14787 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[M_ADDR]], align 4
14788 // CHECK19-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 4
14789 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
14790 // CHECK19-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
14791 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
14792 // CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.omp_outlined, ptr [[N_ADDR]], i32 [[TMP0]], ptr [[TMP1]], i32 [[TMP4]])
14793 // CHECK19-NEXT: ret void
14796 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.omp_outlined
14797 // CHECK19-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
14798 // CHECK19-NEXT: entry:
14799 // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
14800 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
14801 // CHECK19-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 4
14802 // CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
14803 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
14804 // CHECK19-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
14805 // CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
14806 // CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
14807 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
14808 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
14809 // CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
14810 // CHECK19-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
14811 // CHECK19-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
14812 // CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
14813 // CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
14814 // CHECK19-NEXT: [[I4:%.*]] = alloca i32, align 4
14815 // CHECK19-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
14816 // CHECK19-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
14817 // CHECK19-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
14818 // CHECK19-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 4
14819 // CHECK19-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
14820 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
14821 // CHECK19-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
14822 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 4
14823 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
14824 // CHECK19-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 4
14825 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
14826 // CHECK19-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_1]], align 4
14827 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
14828 // CHECK19-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
14829 // CHECK19-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
14830 // CHECK19-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
14831 // CHECK19-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_2]], align 4
14832 // CHECK19-NEXT: store i32 0, ptr [[I]], align 4
14833 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
14834 // CHECK19-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
14835 // CHECK19-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
14836 // CHECK19: omp.precond.then:
14837 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
14838 // CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
14839 // CHECK19-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_COMB_UB]], align 4
14840 // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
14841 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
14842 // CHECK19-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
14843 // CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
14844 // CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
14845 // CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
14846 // CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
14847 // CHECK19-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
14848 // CHECK19-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
14849 // CHECK19: cond.true:
14850 // CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
14851 // CHECK19-NEXT: br label [[COND_END:%.*]]
14852 // CHECK19: cond.false:
14853 // CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
14854 // CHECK19-NEXT: br label [[COND_END]]
14855 // CHECK19: cond.end:
14856 // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
14857 // CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
14858 // CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
14859 // CHECK19-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4
14860 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
14861 // CHECK19: omp.inner.for.cond:
14862 // CHECK19-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP41:![0-9]+]]
14863 // CHECK19-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP41]]
14864 // CHECK19-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
14865 // CHECK19-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14866 // CHECK19: omp.inner.for.body:
14867 // CHECK19-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP41]]
14868 // CHECK19-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP41]]
14869 // CHECK19-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group [[ACC_GRP41]]
14870 // CHECK19-NEXT: store i32 [[TMP18]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group [[ACC_GRP41]]
14871 // CHECK19-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group [[ACC_GRP41]]
14872 // CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 6, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.omp_outlined.omp_outlined, i32 [[TMP16]], i32 [[TMP17]], ptr [[TMP0]], i32 [[TMP1]], ptr [[TMP2]], i32 [[TMP19]]), !llvm.access.group [[ACC_GRP41]]
14873 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
14874 // CHECK19: omp.inner.for.inc:
14875 // CHECK19-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP41]]
14876 // CHECK19-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP41]]
14877 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
14878 // CHECK19-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP41]]
14879 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP42:![0-9]+]]
14880 // CHECK19: omp.inner.for.end:
14881 // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
14882 // CHECK19: omp.loop.exit:
14883 // CHECK19-NEXT: [[TMP22:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
14884 // CHECK19-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4
14885 // CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP23]])
14886 // CHECK19-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
14887 // CHECK19-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
14888 // CHECK19-NEXT: br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
14889 // CHECK19: .omp.final.then:
14890 // CHECK19-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
14891 // CHECK19-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP26]], 0
14892 // CHECK19-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
14893 // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
14894 // CHECK19-NEXT: [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
14895 // CHECK19-NEXT: store i32 [[ADD9]], ptr [[I4]], align 4
14896 // CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]]
14897 // CHECK19: .omp.final.done:
14898 // CHECK19-NEXT: br label [[OMP_PRECOND_END]]
14899 // CHECK19: omp.precond.end:
14900 // CHECK19-NEXT: ret void
14903 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.omp_outlined.omp_outlined
14904 // CHECK19-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
14905 // CHECK19-NEXT: entry:
14906 // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
14907 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
14908 // CHECK19-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
14909 // CHECK19-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
14910 // CHECK19-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 4
14911 // CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
14912 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
14913 // CHECK19-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
14914 // CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
14915 // CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
14916 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
14917 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
14918 // CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
14919 // CHECK19-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
14920 // CHECK19-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
14921 // CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
14922 // CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
14923 // CHECK19-NEXT: [[I4:%.*]] = alloca i32, align 4
14924 // CHECK19-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
14925 // CHECK19-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
14926 // CHECK19-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
14927 // CHECK19-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
14928 // CHECK19-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 4
14929 // CHECK19-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
14930 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
14931 // CHECK19-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
14932 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 4
14933 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
14934 // CHECK19-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 4
14935 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
14936 // CHECK19-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_1]], align 4
14937 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
14938 // CHECK19-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
14939 // CHECK19-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
14940 // CHECK19-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
14941 // CHECK19-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_2]], align 4
14942 // CHECK19-NEXT: store i32 0, ptr [[I]], align 4
14943 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
14944 // CHECK19-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
14945 // CHECK19-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
14946 // CHECK19: omp.precond.then:
14947 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
14948 // CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
14949 // CHECK19-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4
14950 // CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
14951 // CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
14952 // CHECK19-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_LB]], align 4
14953 // CHECK19-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_UB]], align 4
14954 // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
14955 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
14956 // CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
14957 // CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
14958 // CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
14959 // CHECK19-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
14960 // CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4
14961 // CHECK19-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB3]], i32 [[TMP13]], i32 1073741859, i32 [[TMP10]], i32 [[TMP11]], i32 1, i32 [[TMP9]])
14962 // CHECK19-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
14963 // CHECK19: omp.dispatch.cond:
14964 // CHECK19-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
14965 // CHECK19-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 4
14966 // CHECK19-NEXT: [[TMP16:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB3]], i32 [[TMP15]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
14967 // CHECK19-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP16]], 0
14968 // CHECK19-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
14969 // CHECK19: omp.dispatch.body:
14970 // CHECK19-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
14971 // CHECK19-NEXT: store i32 [[TMP17]], ptr [[DOTOMP_IV]], align 4
14972 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
14973 // CHECK19: omp.inner.for.cond:
14974 // CHECK19-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP44:![0-9]+]]
14975 // CHECK19-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP44]]
14976 // CHECK19-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
14977 // CHECK19-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14978 // CHECK19: omp.inner.for.body:
14979 // CHECK19-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP44]]
14980 // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP20]], 1
14981 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
14982 // CHECK19-NEXT: store i32 [[ADD]], ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP44]]
14983 // CHECK19-NEXT: [[TMP21:%.*]] = load i32, ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP44]]
14984 // CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 [[TMP21]]
14985 // CHECK19-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP44]]
14986 // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
14987 // CHECK19: omp.body.continue:
14988 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
14989 // CHECK19: omp.inner.for.inc:
14990 // CHECK19-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP44]]
14991 // CHECK19-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP22]], 1
14992 // CHECK19-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP44]]
14993 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP45:![0-9]+]]
14994 // CHECK19: omp.inner.for.end:
14995 // CHECK19-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
14996 // CHECK19: omp.dispatch.inc:
14997 // CHECK19-NEXT: br label [[OMP_DISPATCH_COND]]
14998 // CHECK19: omp.dispatch.end:
14999 // CHECK19-NEXT: [[TMP23:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
15000 // CHECK19-NEXT: [[TMP24:%.*]] = load i32, ptr [[TMP23]], align 4
15001 // CHECK19-NEXT: call void @__kmpc_dispatch_deinit(ptr @[[GLOB3]], i32 [[TMP24]])
15002 // CHECK19-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
15003 // CHECK19-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
15004 // CHECK19-NEXT: br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
15005 // CHECK19: .omp.final.then:
15006 // CHECK19-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
15007 // CHECK19-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP27]], 0
15008 // CHECK19-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
15009 // CHECK19-NEXT: [[MUL9:%.*]] = mul nsw i32 [[DIV8]], 1
15010 // CHECK19-NEXT: [[ADD10:%.*]] = add nsw i32 0, [[MUL9]]
15011 // CHECK19-NEXT: store i32 [[ADD10]], ptr [[I4]], align 4
15012 // CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]]
15013 // CHECK19: .omp.final.done:
15014 // CHECK19-NEXT: br label [[OMP_PRECOND_END]]
15015 // CHECK19: omp.precond.end:
15016 // CHECK19-NEXT: ret void
15019 // CHECK19-LABEL: define {{[^@]+}}@_Z5tmainIiLi10EEiT_
15020 // CHECK19-SAME: (i32 noundef [[ARGC:%.*]]) #[[ATTR5:[0-9]+]] comdat {
15021 // CHECK19-NEXT: entry:
15022 // CHECK19-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
15023 // CHECK19-NEXT: [[A:%.*]] = alloca [10 x i32], align 4
15024 // CHECK19-NEXT: [[M:%.*]] = alloca i32, align 4
15025 // CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 4
15026 // CHECK19-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 4
15027 // CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 4
15028 // CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
15029 // CHECK19-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
15030 // CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS1:%.*]] = alloca [1 x ptr], align 4
15031 // CHECK19-NEXT: [[DOTOFFLOAD_PTRS2:%.*]] = alloca [1 x ptr], align 4
15032 // CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS3:%.*]] = alloca [1 x ptr], align 4
15033 // CHECK19-NEXT: [[_TMP4:%.*]] = alloca i32, align 4
15034 // CHECK19-NEXT: [[KERNEL_ARGS5:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
15035 // CHECK19-NEXT: [[M_CASTED:%.*]] = alloca i32, align 4
15036 // CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS8:%.*]] = alloca [2 x ptr], align 4
15037 // CHECK19-NEXT: [[DOTOFFLOAD_PTRS9:%.*]] = alloca [2 x ptr], align 4
15038 // CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS10:%.*]] = alloca [2 x ptr], align 4
15039 // CHECK19-NEXT: [[_TMP11:%.*]] = alloca i32, align 4
15040 // CHECK19-NEXT: [[KERNEL_ARGS12:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
15041 // CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS15:%.*]] = alloca [1 x ptr], align 4
15042 // CHECK19-NEXT: [[DOTOFFLOAD_PTRS16:%.*]] = alloca [1 x ptr], align 4
15043 // CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS17:%.*]] = alloca [1 x ptr], align 4
15044 // CHECK19-NEXT: [[_TMP18:%.*]] = alloca i32, align 4
15045 // CHECK19-NEXT: [[KERNEL_ARGS19:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
15046 // CHECK19-NEXT: [[M_CASTED22:%.*]] = alloca i32, align 4
15047 // CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS23:%.*]] = alloca [2 x ptr], align 4
15048 // CHECK19-NEXT: [[DOTOFFLOAD_PTRS24:%.*]] = alloca [2 x ptr], align 4
15049 // CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS25:%.*]] = alloca [2 x ptr], align 4
15050 // CHECK19-NEXT: [[_TMP26:%.*]] = alloca i32, align 4
15051 // CHECK19-NEXT: [[KERNEL_ARGS27:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
15052 // CHECK19-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4
15053 // CHECK19-NEXT: store i32 10, ptr [[M]], align 4
15054 // CHECK19-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
15055 // CHECK19-NEXT: store ptr [[A]], ptr [[TMP0]], align 4
15056 // CHECK19-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
15057 // CHECK19-NEXT: store ptr [[A]], ptr [[TMP1]], align 4
15058 // CHECK19-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
15059 // CHECK19-NEXT: store ptr null, ptr [[TMP2]], align 4
15060 // CHECK19-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
15061 // CHECK19-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
15062 // CHECK19-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
15063 // CHECK19-NEXT: store i32 3, ptr [[TMP5]], align 4
15064 // CHECK19-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
15065 // CHECK19-NEXT: store i32 1, ptr [[TMP6]], align 4
15066 // CHECK19-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
15067 // CHECK19-NEXT: store ptr [[TMP3]], ptr [[TMP7]], align 4
15068 // CHECK19-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
15069 // CHECK19-NEXT: store ptr [[TMP4]], ptr [[TMP8]], align 4
15070 // CHECK19-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
15071 // CHECK19-NEXT: store ptr @.offload_sizes.9, ptr [[TMP9]], align 4
15072 // CHECK19-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
15073 // CHECK19-NEXT: store ptr @.offload_maptypes.10, ptr [[TMP10]], align 4
15074 // CHECK19-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
15075 // CHECK19-NEXT: store ptr null, ptr [[TMP11]], align 4
15076 // CHECK19-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
15077 // CHECK19-NEXT: store ptr null, ptr [[TMP12]], align 4
15078 // CHECK19-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
15079 // CHECK19-NEXT: store i64 10, ptr [[TMP13]], align 8
15080 // CHECK19-NEXT: [[TMP14:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
15081 // CHECK19-NEXT: store i64 0, ptr [[TMP14]], align 8
15082 // CHECK19-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
15083 // CHECK19-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP15]], align 4
15084 // CHECK19-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
15085 // CHECK19-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP16]], align 4
15086 // CHECK19-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
15087 // CHECK19-NEXT: store i32 0, ptr [[TMP17]], align 4
15088 // CHECK19-NEXT: [[TMP18:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.region_id, ptr [[KERNEL_ARGS]])
15089 // CHECK19-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
15090 // CHECK19-NEXT: br i1 [[TMP19]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
15091 // CHECK19: omp_offload.failed:
15092 // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122(ptr [[A]]) #[[ATTR3]]
15093 // CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT]]
15094 // CHECK19: omp_offload.cont:
15095 // CHECK19-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
15096 // CHECK19-NEXT: store ptr [[A]], ptr [[TMP20]], align 4
15097 // CHECK19-NEXT: [[TMP21:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
15098 // CHECK19-NEXT: store ptr [[A]], ptr [[TMP21]], align 4
15099 // CHECK19-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS3]], i32 0, i32 0
15100 // CHECK19-NEXT: store ptr null, ptr [[TMP22]], align 4
15101 // CHECK19-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
15102 // CHECK19-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
15103 // CHECK19-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 0
15104 // CHECK19-NEXT: store i32 3, ptr [[TMP25]], align 4
15105 // CHECK19-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 1
15106 // CHECK19-NEXT: store i32 1, ptr [[TMP26]], align 4
15107 // CHECK19-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 2
15108 // CHECK19-NEXT: store ptr [[TMP23]], ptr [[TMP27]], align 4
15109 // CHECK19-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 3
15110 // CHECK19-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 4
15111 // CHECK19-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 4
15112 // CHECK19-NEXT: store ptr @.offload_sizes.11, ptr [[TMP29]], align 4
15113 // CHECK19-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 5
15114 // CHECK19-NEXT: store ptr @.offload_maptypes.12, ptr [[TMP30]], align 4
15115 // CHECK19-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 6
15116 // CHECK19-NEXT: store ptr null, ptr [[TMP31]], align 4
15117 // CHECK19-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 7
15118 // CHECK19-NEXT: store ptr null, ptr [[TMP32]], align 4
15119 // CHECK19-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 8
15120 // CHECK19-NEXT: store i64 10, ptr [[TMP33]], align 8
15121 // CHECK19-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 9
15122 // CHECK19-NEXT: store i64 0, ptr [[TMP34]], align 8
15123 // CHECK19-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 10
15124 // CHECK19-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP35]], align 4
15125 // CHECK19-NEXT: [[TMP36:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 11
15126 // CHECK19-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP36]], align 4
15127 // CHECK19-NEXT: [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 12
15128 // CHECK19-NEXT: store i32 0, ptr [[TMP37]], align 4
15129 // CHECK19-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.region_id, ptr [[KERNEL_ARGS5]])
15130 // CHECK19-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
15131 // CHECK19-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED6:%.*]], label [[OMP_OFFLOAD_CONT7:%.*]]
15132 // CHECK19: omp_offload.failed6:
15133 // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127(ptr [[A]]) #[[ATTR3]]
15134 // CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT7]]
15135 // CHECK19: omp_offload.cont7:
15136 // CHECK19-NEXT: [[TMP40:%.*]] = load i32, ptr [[M]], align 4
15137 // CHECK19-NEXT: store i32 [[TMP40]], ptr [[M_CASTED]], align 4
15138 // CHECK19-NEXT: [[TMP41:%.*]] = load i32, ptr [[M_CASTED]], align 4
15139 // CHECK19-NEXT: [[TMP42:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS8]], i32 0, i32 0
15140 // CHECK19-NEXT: store i32 [[TMP41]], ptr [[TMP42]], align 4
15141 // CHECK19-NEXT: [[TMP43:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS9]], i32 0, i32 0
15142 // CHECK19-NEXT: store i32 [[TMP41]], ptr [[TMP43]], align 4
15143 // CHECK19-NEXT: [[TMP44:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS10]], i32 0, i32 0
15144 // CHECK19-NEXT: store ptr null, ptr [[TMP44]], align 4
15145 // CHECK19-NEXT: [[TMP45:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS8]], i32 0, i32 1
15146 // CHECK19-NEXT: store ptr [[A]], ptr [[TMP45]], align 4
15147 // CHECK19-NEXT: [[TMP46:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS9]], i32 0, i32 1
15148 // CHECK19-NEXT: store ptr [[A]], ptr [[TMP46]], align 4
15149 // CHECK19-NEXT: [[TMP47:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS10]], i32 0, i32 1
15150 // CHECK19-NEXT: store ptr null, ptr [[TMP47]], align 4
15151 // CHECK19-NEXT: [[TMP48:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS8]], i32 0, i32 0
15152 // CHECK19-NEXT: [[TMP49:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS9]], i32 0, i32 0
15153 // CHECK19-NEXT: [[TMP50:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 0
15154 // CHECK19-NEXT: store i32 3, ptr [[TMP50]], align 4
15155 // CHECK19-NEXT: [[TMP51:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 1
15156 // CHECK19-NEXT: store i32 2, ptr [[TMP51]], align 4
15157 // CHECK19-NEXT: [[TMP52:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 2
15158 // CHECK19-NEXT: store ptr [[TMP48]], ptr [[TMP52]], align 4
15159 // CHECK19-NEXT: [[TMP53:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 3
15160 // CHECK19-NEXT: store ptr [[TMP49]], ptr [[TMP53]], align 4
15161 // CHECK19-NEXT: [[TMP54:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 4
15162 // CHECK19-NEXT: store ptr @.offload_sizes.13, ptr [[TMP54]], align 4
15163 // CHECK19-NEXT: [[TMP55:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 5
15164 // CHECK19-NEXT: store ptr @.offload_maptypes.14, ptr [[TMP55]], align 4
15165 // CHECK19-NEXT: [[TMP56:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 6
15166 // CHECK19-NEXT: store ptr null, ptr [[TMP56]], align 4
15167 // CHECK19-NEXT: [[TMP57:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 7
15168 // CHECK19-NEXT: store ptr null, ptr [[TMP57]], align 4
15169 // CHECK19-NEXT: [[TMP58:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 8
15170 // CHECK19-NEXT: store i64 10, ptr [[TMP58]], align 8
15171 // CHECK19-NEXT: [[TMP59:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 9
15172 // CHECK19-NEXT: store i64 0, ptr [[TMP59]], align 8
15173 // CHECK19-NEXT: [[TMP60:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 10
15174 // CHECK19-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP60]], align 4
15175 // CHECK19-NEXT: [[TMP61:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 11
15176 // CHECK19-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP61]], align 4
15177 // CHECK19-NEXT: [[TMP62:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 12
15178 // CHECK19-NEXT: store i32 0, ptr [[TMP62]], align 4
15179 // CHECK19-NEXT: [[TMP63:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.region_id, ptr [[KERNEL_ARGS12]])
15180 // CHECK19-NEXT: [[TMP64:%.*]] = icmp ne i32 [[TMP63]], 0
15181 // CHECK19-NEXT: br i1 [[TMP64]], label [[OMP_OFFLOAD_FAILED13:%.*]], label [[OMP_OFFLOAD_CONT14:%.*]]
15182 // CHECK19: omp_offload.failed13:
15183 // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132(i32 [[TMP41]], ptr [[A]]) #[[ATTR3]]
15184 // CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT14]]
15185 // CHECK19: omp_offload.cont14:
15186 // CHECK19-NEXT: [[TMP65:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS15]], i32 0, i32 0
15187 // CHECK19-NEXT: store ptr [[A]], ptr [[TMP65]], align 4
15188 // CHECK19-NEXT: [[TMP66:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS16]], i32 0, i32 0
15189 // CHECK19-NEXT: store ptr [[A]], ptr [[TMP66]], align 4
15190 // CHECK19-NEXT: [[TMP67:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS17]], i32 0, i32 0
15191 // CHECK19-NEXT: store ptr null, ptr [[TMP67]], align 4
15192 // CHECK19-NEXT: [[TMP68:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS15]], i32 0, i32 0
15193 // CHECK19-NEXT: [[TMP69:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS16]], i32 0, i32 0
15194 // CHECK19-NEXT: [[TMP70:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 0
15195 // CHECK19-NEXT: store i32 3, ptr [[TMP70]], align 4
15196 // CHECK19-NEXT: [[TMP71:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 1
15197 // CHECK19-NEXT: store i32 1, ptr [[TMP71]], align 4
15198 // CHECK19-NEXT: [[TMP72:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 2
15199 // CHECK19-NEXT: store ptr [[TMP68]], ptr [[TMP72]], align 4
15200 // CHECK19-NEXT: [[TMP73:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 3
15201 // CHECK19-NEXT: store ptr [[TMP69]], ptr [[TMP73]], align 4
15202 // CHECK19-NEXT: [[TMP74:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 4
15203 // CHECK19-NEXT: store ptr @.offload_sizes.15, ptr [[TMP74]], align 4
15204 // CHECK19-NEXT: [[TMP75:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 5
15205 // CHECK19-NEXT: store ptr @.offload_maptypes.16, ptr [[TMP75]], align 4
15206 // CHECK19-NEXT: [[TMP76:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 6
15207 // CHECK19-NEXT: store ptr null, ptr [[TMP76]], align 4
15208 // CHECK19-NEXT: [[TMP77:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 7
15209 // CHECK19-NEXT: store ptr null, ptr [[TMP77]], align 4
15210 // CHECK19-NEXT: [[TMP78:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 8
15211 // CHECK19-NEXT: store i64 10, ptr [[TMP78]], align 8
15212 // CHECK19-NEXT: [[TMP79:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 9
15213 // CHECK19-NEXT: store i64 0, ptr [[TMP79]], align 8
15214 // CHECK19-NEXT: [[TMP80:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 10
15215 // CHECK19-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP80]], align 4
15216 // CHECK19-NEXT: [[TMP81:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 11
15217 // CHECK19-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP81]], align 4
15218 // CHECK19-NEXT: [[TMP82:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 12
15219 // CHECK19-NEXT: store i32 0, ptr [[TMP82]], align 4
15220 // CHECK19-NEXT: [[TMP83:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.region_id, ptr [[KERNEL_ARGS19]])
15221 // CHECK19-NEXT: [[TMP84:%.*]] = icmp ne i32 [[TMP83]], 0
15222 // CHECK19-NEXT: br i1 [[TMP84]], label [[OMP_OFFLOAD_FAILED20:%.*]], label [[OMP_OFFLOAD_CONT21:%.*]]
15223 // CHECK19: omp_offload.failed20:
15224 // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137(ptr [[A]]) #[[ATTR3]]
15225 // CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT21]]
15226 // CHECK19: omp_offload.cont21:
15227 // CHECK19-NEXT: [[TMP85:%.*]] = load i32, ptr [[M]], align 4
15228 // CHECK19-NEXT: store i32 [[TMP85]], ptr [[M_CASTED22]], align 4
15229 // CHECK19-NEXT: [[TMP86:%.*]] = load i32, ptr [[M_CASTED22]], align 4
15230 // CHECK19-NEXT: [[TMP87:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 0
15231 // CHECK19-NEXT: store i32 [[TMP86]], ptr [[TMP87]], align 4
15232 // CHECK19-NEXT: [[TMP88:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS24]], i32 0, i32 0
15233 // CHECK19-NEXT: store i32 [[TMP86]], ptr [[TMP88]], align 4
15234 // CHECK19-NEXT: [[TMP89:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 0
15235 // CHECK19-NEXT: store ptr null, ptr [[TMP89]], align 4
15236 // CHECK19-NEXT: [[TMP90:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 1
15237 // CHECK19-NEXT: store ptr [[A]], ptr [[TMP90]], align 4
15238 // CHECK19-NEXT: [[TMP91:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS24]], i32 0, i32 1
15239 // CHECK19-NEXT: store ptr [[A]], ptr [[TMP91]], align 4
15240 // CHECK19-NEXT: [[TMP92:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 1
15241 // CHECK19-NEXT: store ptr null, ptr [[TMP92]], align 4
15242 // CHECK19-NEXT: [[TMP93:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 0
15243 // CHECK19-NEXT: [[TMP94:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS24]], i32 0, i32 0
15244 // CHECK19-NEXT: [[TMP95:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 0
15245 // CHECK19-NEXT: store i32 3, ptr [[TMP95]], align 4
15246 // CHECK19-NEXT: [[TMP96:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 1
15247 // CHECK19-NEXT: store i32 2, ptr [[TMP96]], align 4
15248 // CHECK19-NEXT: [[TMP97:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 2
15249 // CHECK19-NEXT: store ptr [[TMP93]], ptr [[TMP97]], align 4
15250 // CHECK19-NEXT: [[TMP98:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 3
15251 // CHECK19-NEXT: store ptr [[TMP94]], ptr [[TMP98]], align 4
15252 // CHECK19-NEXT: [[TMP99:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 4
15253 // CHECK19-NEXT: store ptr @.offload_sizes.17, ptr [[TMP99]], align 4
15254 // CHECK19-NEXT: [[TMP100:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 5
15255 // CHECK19-NEXT: store ptr @.offload_maptypes.18, ptr [[TMP100]], align 4
15256 // CHECK19-NEXT: [[TMP101:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 6
15257 // CHECK19-NEXT: store ptr null, ptr [[TMP101]], align 4
15258 // CHECK19-NEXT: [[TMP102:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 7
15259 // CHECK19-NEXT: store ptr null, ptr [[TMP102]], align 4
15260 // CHECK19-NEXT: [[TMP103:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 8
15261 // CHECK19-NEXT: store i64 10, ptr [[TMP103]], align 8
15262 // CHECK19-NEXT: [[TMP104:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 9
15263 // CHECK19-NEXT: store i64 0, ptr [[TMP104]], align 8
15264 // CHECK19-NEXT: [[TMP105:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 10
15265 // CHECK19-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP105]], align 4
15266 // CHECK19-NEXT: [[TMP106:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 11
15267 // CHECK19-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP106]], align 4
15268 // CHECK19-NEXT: [[TMP107:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 12
15269 // CHECK19-NEXT: store i32 0, ptr [[TMP107]], align 4
15270 // CHECK19-NEXT: [[TMP108:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.region_id, ptr [[KERNEL_ARGS27]])
15271 // CHECK19-NEXT: [[TMP109:%.*]] = icmp ne i32 [[TMP108]], 0
15272 // CHECK19-NEXT: br i1 [[TMP109]], label [[OMP_OFFLOAD_FAILED28:%.*]], label [[OMP_OFFLOAD_CONT29:%.*]]
15273 // CHECK19: omp_offload.failed28:
15274 // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142(i32 [[TMP86]], ptr [[A]]) #[[ATTR3]]
15275 // CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT29]]
15276 // CHECK19: omp_offload.cont29:
15277 // CHECK19-NEXT: ret i32 0
15280 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122
15281 // CHECK19-SAME: (ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
15282 // CHECK19-NEXT: entry:
15283 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
15284 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
15285 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
15286 // CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.omp_outlined, ptr [[TMP0]])
15287 // CHECK19-NEXT: ret void
15290 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.omp_outlined
15291 // CHECK19-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
15292 // CHECK19-NEXT: entry:
15293 // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
15294 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
15295 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
15296 // CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
15297 // CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
15298 // CHECK19-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
15299 // CHECK19-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
15300 // CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
15301 // CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
15302 // CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
15303 // CHECK19-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
15304 // CHECK19-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
15305 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
15306 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
15307 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
15308 // CHECK19-NEXT: store i32 9, ptr [[DOTOMP_COMB_UB]], align 4
15309 // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
15310 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
15311 // CHECK19-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
15312 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
15313 // CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
15314 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
15315 // CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
15316 // CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
15317 // CHECK19: cond.true:
15318 // CHECK19-NEXT: br label [[COND_END:%.*]]
15319 // CHECK19: cond.false:
15320 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
15321 // CHECK19-NEXT: br label [[COND_END]]
15322 // CHECK19: cond.end:
15323 // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
15324 // CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
15325 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
15326 // CHECK19-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
15327 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
15328 // CHECK19: omp.inner.for.cond:
15329 // CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP47:![0-9]+]]
15330 // CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP47]]
15331 // CHECK19-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
15332 // CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
15333 // CHECK19: omp.inner.for.body:
15334 // CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP47]]
15335 // CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP47]]
15336 // CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.omp_outlined.omp_outlined, i32 [[TMP8]], i32 [[TMP9]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP47]]
15337 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
15338 // CHECK19: omp.inner.for.inc:
15339 // CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP47]]
15340 // CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP47]]
15341 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
15342 // CHECK19-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP47]]
15343 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP48:![0-9]+]]
15344 // CHECK19: omp.inner.for.end:
15345 // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
15346 // CHECK19: omp.loop.exit:
15347 // CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
15348 // CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
15349 // CHECK19-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
15350 // CHECK19-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
15351 // CHECK19: .omp.final.then:
15352 // CHECK19-NEXT: store i32 10, ptr [[I]], align 4
15353 // CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]]
15354 // CHECK19: .omp.final.done:
15355 // CHECK19-NEXT: ret void
15358 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.omp_outlined.omp_outlined
15359 // CHECK19-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
15360 // CHECK19-NEXT: entry:
15361 // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
15362 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
15363 // CHECK19-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
15364 // CHECK19-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
15365 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
15366 // CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
15367 // CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
15368 // CHECK19-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
15369 // CHECK19-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
15370 // CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
15371 // CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
15372 // CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
15373 // CHECK19-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
15374 // CHECK19-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
15375 // CHECK19-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
15376 // CHECK19-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
15377 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
15378 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
15379 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
15380 // CHECK19-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
15381 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
15382 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
15383 // CHECK19-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_LB]], align 4
15384 // CHECK19-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4
15385 // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
15386 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
15387 // CHECK19-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
15388 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
15389 // CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
15390 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
15391 // CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 9
15392 // CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
15393 // CHECK19: cond.true:
15394 // CHECK19-NEXT: br label [[COND_END:%.*]]
15395 // CHECK19: cond.false:
15396 // CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
15397 // CHECK19-NEXT: br label [[COND_END]]
15398 // CHECK19: cond.end:
15399 // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
15400 // CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
15401 // CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
15402 // CHECK19-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4
15403 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
15404 // CHECK19: omp.inner.for.cond:
15405 // CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP50:![0-9]+]]
15406 // CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP50]]
15407 // CHECK19-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
15408 // CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
15409 // CHECK19: omp.inner.for.body:
15410 // CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP50]]
15411 // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
15412 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
15413 // CHECK19-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP50]]
15414 // CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP50]]
15415 // CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 [[TMP11]]
15416 // CHECK19-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP50]]
15417 // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
15418 // CHECK19: omp.body.continue:
15419 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
15420 // CHECK19: omp.inner.for.inc:
15421 // CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP50]]
15422 // CHECK19-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP12]], 1
15423 // CHECK19-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP50]]
15424 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP51:![0-9]+]]
15425 // CHECK19: omp.inner.for.end:
15426 // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
15427 // CHECK19: omp.loop.exit:
15428 // CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP4]])
15429 // CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
15430 // CHECK19-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
15431 // CHECK19-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
15432 // CHECK19: .omp.final.then:
15433 // CHECK19-NEXT: store i32 10, ptr [[I]], align 4
15434 // CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]]
15435 // CHECK19: .omp.final.done:
15436 // CHECK19-NEXT: ret void
15439 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127
15440 // CHECK19-SAME: (ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
15441 // CHECK19-NEXT: entry:
15442 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
15443 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
15444 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
15445 // CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.omp_outlined, ptr [[TMP0]])
15446 // CHECK19-NEXT: ret void
15449 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.omp_outlined
15450 // CHECK19-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
15451 // CHECK19-NEXT: entry:
15452 // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
15453 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
15454 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
15455 // CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
15456 // CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
15457 // CHECK19-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
15458 // CHECK19-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
15459 // CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
15460 // CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
15461 // CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
15462 // CHECK19-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
15463 // CHECK19-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
15464 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
15465 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
15466 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
15467 // CHECK19-NEXT: store i32 9, ptr [[DOTOMP_COMB_UB]], align 4
15468 // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
15469 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
15470 // CHECK19-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
15471 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
15472 // CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
15473 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
15474 // CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
15475 // CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
15476 // CHECK19: cond.true:
15477 // CHECK19-NEXT: br label [[COND_END:%.*]]
15478 // CHECK19: cond.false:
15479 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
15480 // CHECK19-NEXT: br label [[COND_END]]
15481 // CHECK19: cond.end:
15482 // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
15483 // CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
15484 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
15485 // CHECK19-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
15486 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
15487 // CHECK19: omp.inner.for.cond:
15488 // CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP53:![0-9]+]]
15489 // CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP53]]
15490 // CHECK19-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
15491 // CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
15492 // CHECK19: omp.inner.for.body:
15493 // CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP53]]
15494 // CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP53]]
15495 // CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.omp_outlined.omp_outlined, i32 [[TMP8]], i32 [[TMP9]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP53]]
15496 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
15497 // CHECK19: omp.inner.for.inc:
15498 // CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP53]]
15499 // CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP53]]
15500 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
15501 // CHECK19-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP53]]
15502 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP54:![0-9]+]]
15503 // CHECK19: omp.inner.for.end:
15504 // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
15505 // CHECK19: omp.loop.exit:
15506 // CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
15507 // CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
15508 // CHECK19-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
15509 // CHECK19-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
15510 // CHECK19: .omp.final.then:
15511 // CHECK19-NEXT: store i32 10, ptr [[I]], align 4
15512 // CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]]
15513 // CHECK19: .omp.final.done:
15514 // CHECK19-NEXT: ret void
15517 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.omp_outlined.omp_outlined
15518 // CHECK19-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
15519 // CHECK19-NEXT: entry:
15520 // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
15521 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
15522 // CHECK19-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
15523 // CHECK19-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
15524 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
15525 // CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
15526 // CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
15527 // CHECK19-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
15528 // CHECK19-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
15529 // CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
15530 // CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
15531 // CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
15532 // CHECK19-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
15533 // CHECK19-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
15534 // CHECK19-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
15535 // CHECK19-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
15536 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
15537 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
15538 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
15539 // CHECK19-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
15540 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
15541 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
15542 // CHECK19-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_LB]], align 4
15543 // CHECK19-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4
15544 // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
15545 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
15546 // CHECK19-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
15547 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
15548 // CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
15549 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
15550 // CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 9
15551 // CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
15552 // CHECK19: cond.true:
15553 // CHECK19-NEXT: br label [[COND_END:%.*]]
15554 // CHECK19: cond.false:
15555 // CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
15556 // CHECK19-NEXT: br label [[COND_END]]
15557 // CHECK19: cond.end:
15558 // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
15559 // CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
15560 // CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
15561 // CHECK19-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4
15562 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
15563 // CHECK19: omp.inner.for.cond:
15564 // CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP56:![0-9]+]]
15565 // CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP56]]
15566 // CHECK19-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
15567 // CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
15568 // CHECK19: omp.inner.for.body:
15569 // CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP56]]
15570 // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
15571 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
15572 // CHECK19-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP56]]
15573 // CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP56]]
15574 // CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 [[TMP11]]
15575 // CHECK19-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP56]]
15576 // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
15577 // CHECK19: omp.body.continue:
15578 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
15579 // CHECK19: omp.inner.for.inc:
15580 // CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP56]]
15581 // CHECK19-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP12]], 1
15582 // CHECK19-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP56]]
15583 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP57:![0-9]+]]
15584 // CHECK19: omp.inner.for.end:
15585 // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
15586 // CHECK19: omp.loop.exit:
15587 // CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP4]])
15588 // CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
15589 // CHECK19-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
15590 // CHECK19-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
15591 // CHECK19: .omp.final.then:
15592 // CHECK19-NEXT: store i32 10, ptr [[I]], align 4
15593 // CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]]
15594 // CHECK19: .omp.final.done:
15595 // CHECK19-NEXT: ret void
15598 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132
15599 // CHECK19-SAME: (i32 noundef [[M:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
15600 // CHECK19-NEXT: entry:
15601 // CHECK19-NEXT: [[M_ADDR:%.*]] = alloca i32, align 4
15602 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
15603 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
15604 // CHECK19-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
15605 // CHECK19-NEXT: store i32 [[M]], ptr [[M_ADDR]], align 4
15606 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
15607 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
15608 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[M_ADDR]], align 4
15609 // CHECK19-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
15610 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
15611 // CHECK19-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
15612 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
15613 // CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.omp_outlined, ptr [[TMP0]], i32 [[TMP3]])
15614 // CHECK19-NEXT: ret void
15617 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.omp_outlined
15618 // CHECK19-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
15619 // CHECK19-NEXT: entry:
15620 // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
15621 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
15622 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
15623 // CHECK19-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
15624 // CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
15625 // CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
15626 // CHECK19-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
15627 // CHECK19-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
15628 // CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
15629 // CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
15630 // CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
15631 // CHECK19-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
15632 // CHECK19-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
15633 // CHECK19-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
15634 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
15635 // CHECK19-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
15636 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
15637 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
15638 // CHECK19-NEXT: store i32 9, ptr [[DOTOMP_COMB_UB]], align 4
15639 // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
15640 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
15641 // CHECK19-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
15642 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
15643 // CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
15644 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
15645 // CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
15646 // CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
15647 // CHECK19: cond.true:
15648 // CHECK19-NEXT: br label [[COND_END:%.*]]
15649 // CHECK19: cond.false:
15650 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
15651 // CHECK19-NEXT: br label [[COND_END]]
15652 // CHECK19: cond.end:
15653 // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
15654 // CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
15655 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
15656 // CHECK19-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
15657 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
15658 // CHECK19: omp.inner.for.cond:
15659 // CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP59:![0-9]+]]
15660 // CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP59]]
15661 // CHECK19-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
15662 // CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
15663 // CHECK19: omp.inner.for.body:
15664 // CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP59]]
15665 // CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP59]]
15666 // CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group [[ACC_GRP59]]
15667 // CHECK19-NEXT: store i32 [[TMP10]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group [[ACC_GRP59]]
15668 // CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group [[ACC_GRP59]]
15669 // CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.omp_outlined.omp_outlined, i32 [[TMP8]], i32 [[TMP9]], ptr [[TMP0]], i32 [[TMP11]]), !llvm.access.group [[ACC_GRP59]]
15670 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
15671 // CHECK19: omp.inner.for.inc:
15672 // CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP59]]
15673 // CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP59]]
15674 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
15675 // CHECK19-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP59]]
15676 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP60:![0-9]+]]
15677 // CHECK19: omp.inner.for.end:
15678 // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
15679 // CHECK19: omp.loop.exit:
15680 // CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
15681 // CHECK19-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
15682 // CHECK19-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
15683 // CHECK19-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
15684 // CHECK19: .omp.final.then:
15685 // CHECK19-NEXT: store i32 10, ptr [[I]], align 4
15686 // CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]]
15687 // CHECK19: .omp.final.done:
15688 // CHECK19-NEXT: ret void
15691 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.omp_outlined.omp_outlined
15692 // CHECK19-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
15693 // CHECK19-NEXT: entry:
15694 // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
15695 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
15696 // CHECK19-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
15697 // CHECK19-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
15698 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
15699 // CHECK19-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
15700 // CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
15701 // CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
15702 // CHECK19-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
15703 // CHECK19-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
15704 // CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
15705 // CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
15706 // CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
15707 // CHECK19-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
15708 // CHECK19-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
15709 // CHECK19-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
15710 // CHECK19-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
15711 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
15712 // CHECK19-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
15713 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
15714 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
15715 // CHECK19-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
15716 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
15717 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
15718 // CHECK19-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_LB]], align 4
15719 // CHECK19-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4
15720 // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
15721 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
15722 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
15723 // CHECK19-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
15724 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4
15725 // CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP5]], i32 33, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[TMP3]])
15726 // CHECK19-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
15727 // CHECK19: omp.dispatch.cond:
15728 // CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
15729 // CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
15730 // CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], [[TMP7]]
15731 // CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
15732 // CHECK19: cond.true:
15733 // CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
15734 // CHECK19-NEXT: br label [[COND_END:%.*]]
15735 // CHECK19: cond.false:
15736 // CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
15737 // CHECK19-NEXT: br label [[COND_END]]
15738 // CHECK19: cond.end:
15739 // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ [[TMP8]], [[COND_TRUE]] ], [ [[TMP9]], [[COND_FALSE]] ]
15740 // CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
15741 // CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
15742 // CHECK19-NEXT: store i32 [[TMP10]], ptr [[DOTOMP_IV]], align 4
15743 // CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
15744 // CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
15745 // CHECK19-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]]
15746 // CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
15747 // CHECK19: omp.dispatch.body:
15748 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
15749 // CHECK19: omp.inner.for.cond:
15750 // CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP62:![0-9]+]]
15751 // CHECK19-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP62]]
15752 // CHECK19-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
15753 // CHECK19-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
15754 // CHECK19: omp.inner.for.body:
15755 // CHECK19-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP62]]
15756 // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1
15757 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
15758 // CHECK19-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP62]]
15759 // CHECK19-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP62]]
15760 // CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 [[TMP16]]
15761 // CHECK19-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP62]]
15762 // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
15763 // CHECK19: omp.body.continue:
15764 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
15765 // CHECK19: omp.inner.for.inc:
15766 // CHECK19-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP62]]
15767 // CHECK19-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP17]], 1
15768 // CHECK19-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP62]]
15769 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP63:![0-9]+]]
15770 // CHECK19: omp.inner.for.end:
15771 // CHECK19-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
15772 // CHECK19: omp.dispatch.inc:
15773 // CHECK19-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
15774 // CHECK19-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
15775 // CHECK19-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
15776 // CHECK19-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_LB]], align 4
15777 // CHECK19-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
15778 // CHECK19-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
15779 // CHECK19-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
15780 // CHECK19-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_UB]], align 4
15781 // CHECK19-NEXT: br label [[OMP_DISPATCH_COND]]
15782 // CHECK19: omp.dispatch.end:
15783 // CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP5]])
15784 // CHECK19-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
15785 // CHECK19-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
15786 // CHECK19-NEXT: br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
15787 // CHECK19: .omp.final.then:
15788 // CHECK19-NEXT: store i32 10, ptr [[I]], align 4
15789 // CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]]
15790 // CHECK19: .omp.final.done:
15791 // CHECK19-NEXT: ret void
15794 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137
15795 // CHECK19-SAME: (ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
15796 // CHECK19-NEXT: entry:
15797 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
15798 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
15799 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
15800 // CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.omp_outlined, ptr [[TMP0]])
15801 // CHECK19-NEXT: ret void
15804 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.omp_outlined
15805 // CHECK19-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
15806 // CHECK19-NEXT: entry:
15807 // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
15808 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
15809 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
15810 // CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
15811 // CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
15812 // CHECK19-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
15813 // CHECK19-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
15814 // CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
15815 // CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
15816 // CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
15817 // CHECK19-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
15818 // CHECK19-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
15819 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
15820 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
15821 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
15822 // CHECK19-NEXT: store i32 9, ptr [[DOTOMP_COMB_UB]], align 4
15823 // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
15824 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
15825 // CHECK19-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
15826 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
15827 // CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
15828 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
15829 // CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
15830 // CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
15831 // CHECK19: cond.true:
15832 // CHECK19-NEXT: br label [[COND_END:%.*]]
15833 // CHECK19: cond.false:
15834 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
15835 // CHECK19-NEXT: br label [[COND_END]]
15836 // CHECK19: cond.end:
15837 // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
15838 // CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
15839 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
15840 // CHECK19-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
15841 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
15842 // CHECK19: omp.inner.for.cond:
15843 // CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP65:![0-9]+]]
15844 // CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP65]]
15845 // CHECK19-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
15846 // CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
15847 // CHECK19: omp.inner.for.body:
15848 // CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP65]]
15849 // CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP65]]
15850 // CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.omp_outlined.omp_outlined, i32 [[TMP8]], i32 [[TMP9]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP65]]
15851 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
15852 // CHECK19: omp.inner.for.inc:
15853 // CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP65]]
15854 // CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP65]]
15855 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
15856 // CHECK19-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP65]]
15857 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP66:![0-9]+]]
15858 // CHECK19: omp.inner.for.end:
15859 // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
15860 // CHECK19: omp.loop.exit:
15861 // CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
15862 // CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
15863 // CHECK19-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
15864 // CHECK19-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
15865 // CHECK19: .omp.final.then:
15866 // CHECK19-NEXT: store i32 10, ptr [[I]], align 4
15867 // CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]]
15868 // CHECK19: .omp.final.done:
15869 // CHECK19-NEXT: ret void
15872 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.omp_outlined.omp_outlined
15873 // CHECK19-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
15874 // CHECK19-NEXT: entry:
15875 // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
15876 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
15877 // CHECK19-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
15878 // CHECK19-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
15879 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
15880 // CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
15881 // CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
15882 // CHECK19-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
15883 // CHECK19-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
15884 // CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
15885 // CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
15886 // CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
15887 // CHECK19-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
15888 // CHECK19-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
15889 // CHECK19-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
15890 // CHECK19-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
15891 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
15892 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
15893 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
15894 // CHECK19-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
15895 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
15896 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
15897 // CHECK19-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_LB]], align 4
15898 // CHECK19-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4
15899 // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
15900 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
15901 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
15902 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
15903 // CHECK19-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
15904 // CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
15905 // CHECK19-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB3]], i32 [[TMP6]], i32 1073741859, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 1)
15906 // CHECK19-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
15907 // CHECK19: omp.dispatch.cond:
15908 // CHECK19-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB3]], i32 [[TMP6]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
15909 // CHECK19-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
15910 // CHECK19-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
15911 // CHECK19: omp.dispatch.body:
15912 // CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
15913 // CHECK19-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4
15914 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
15915 // CHECK19: omp.inner.for.cond:
15916 // CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP68:![0-9]+]]
15917 // CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP68]]
15918 // CHECK19-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
15919 // CHECK19-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
15920 // CHECK19: omp.inner.for.body:
15921 // CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP68]]
15922 // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
15923 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
15924 // CHECK19-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP68]]
15925 // CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP68]]
15926 // CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 [[TMP12]]
15927 // CHECK19-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP68]]
15928 // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
15929 // CHECK19: omp.body.continue:
15930 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
15931 // CHECK19: omp.inner.for.inc:
15932 // CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP68]]
15933 // CHECK19-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP13]], 1
15934 // CHECK19-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP68]]
15935 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP69:![0-9]+]]
15936 // CHECK19: omp.inner.for.end:
15937 // CHECK19-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
15938 // CHECK19: omp.dispatch.inc:
15939 // CHECK19-NEXT: br label [[OMP_DISPATCH_COND]]
15940 // CHECK19: omp.dispatch.end:
15941 // CHECK19-NEXT: call void @__kmpc_dispatch_deinit(ptr @[[GLOB3]], i32 [[TMP6]])
15942 // CHECK19-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
15943 // CHECK19-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
15944 // CHECK19-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
15945 // CHECK19: .omp.final.then:
15946 // CHECK19-NEXT: store i32 10, ptr [[I]], align 4
15947 // CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]]
15948 // CHECK19: .omp.final.done:
15949 // CHECK19-NEXT: ret void
15952 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142
15953 // CHECK19-SAME: (i32 noundef [[M:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
15954 // CHECK19-NEXT: entry:
15955 // CHECK19-NEXT: [[M_ADDR:%.*]] = alloca i32, align 4
15956 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
15957 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
15958 // CHECK19-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
15959 // CHECK19-NEXT: store i32 [[M]], ptr [[M_ADDR]], align 4
15960 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
15961 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
15962 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[M_ADDR]], align 4
15963 // CHECK19-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
15964 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
15965 // CHECK19-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
15966 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
15967 // CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.omp_outlined, ptr [[TMP0]], i32 [[TMP3]])
15968 // CHECK19-NEXT: ret void
15971 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.omp_outlined
15972 // CHECK19-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
15973 // CHECK19-NEXT: entry:
15974 // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
15975 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
15976 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
15977 // CHECK19-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
15978 // CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
15979 // CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
15980 // CHECK19-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
15981 // CHECK19-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
15982 // CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
15983 // CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
15984 // CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
15985 // CHECK19-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
15986 // CHECK19-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
15987 // CHECK19-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
15988 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
15989 // CHECK19-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
15990 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
15991 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
15992 // CHECK19-NEXT: store i32 9, ptr [[DOTOMP_COMB_UB]], align 4
15993 // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
15994 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
15995 // CHECK19-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
15996 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
15997 // CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
15998 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
15999 // CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
16000 // CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
16001 // CHECK19: cond.true:
16002 // CHECK19-NEXT: br label [[COND_END:%.*]]
16003 // CHECK19: cond.false:
16004 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
16005 // CHECK19-NEXT: br label [[COND_END]]
16006 // CHECK19: cond.end:
16007 // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
16008 // CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
16009 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
16010 // CHECK19-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
16011 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
16012 // CHECK19: omp.inner.for.cond:
16013 // CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP71:![0-9]+]]
16014 // CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP71]]
16015 // CHECK19-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
16016 // CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
16017 // CHECK19: omp.inner.for.body:
16018 // CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP71]]
16019 // CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP71]]
16020 // CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group [[ACC_GRP71]]
16021 // CHECK19-NEXT: store i32 [[TMP10]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group [[ACC_GRP71]]
16022 // CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group [[ACC_GRP71]]
16023 // CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.omp_outlined.omp_outlined, i32 [[TMP8]], i32 [[TMP9]], ptr [[TMP0]], i32 [[TMP11]]), !llvm.access.group [[ACC_GRP71]]
16024 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
16025 // CHECK19: omp.inner.for.inc:
16026 // CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP71]]
16027 // CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP71]]
16028 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
16029 // CHECK19-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP71]]
16030 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP72:![0-9]+]]
16031 // CHECK19: omp.inner.for.end:
16032 // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
16033 // CHECK19: omp.loop.exit:
16034 // CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
16035 // CHECK19-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
16036 // CHECK19-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
16037 // CHECK19-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
16038 // CHECK19: .omp.final.then:
16039 // CHECK19-NEXT: store i32 10, ptr [[I]], align 4
16040 // CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]]
16041 // CHECK19: .omp.final.done:
16042 // CHECK19-NEXT: ret void
16045 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.omp_outlined.omp_outlined
16046 // CHECK19-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
16047 // CHECK19-NEXT: entry:
16048 // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
16049 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
16050 // CHECK19-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
16051 // CHECK19-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
16052 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
16053 // CHECK19-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
16054 // CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
16055 // CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
16056 // CHECK19-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
16057 // CHECK19-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
16058 // CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
16059 // CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
16060 // CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
16061 // CHECK19-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
16062 // CHECK19-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
16063 // CHECK19-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
16064 // CHECK19-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
16065 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
16066 // CHECK19-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
16067 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
16068 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
16069 // CHECK19-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
16070 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
16071 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
16072 // CHECK19-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_LB]], align 4
16073 // CHECK19-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4
16074 // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
16075 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
16076 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
16077 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
16078 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
16079 // CHECK19-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
16080 // CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 4
16081 // CHECK19-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB3]], i32 [[TMP7]], i32 1073741859, i32 [[TMP4]], i32 [[TMP5]], i32 1, i32 [[TMP3]])
16082 // CHECK19-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
16083 // CHECK19: omp.dispatch.cond:
16084 // CHECK19-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB3]], i32 [[TMP7]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
16085 // CHECK19-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP8]], 0
16086 // CHECK19-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
16087 // CHECK19: omp.dispatch.body:
16088 // CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
16089 // CHECK19-NEXT: store i32 [[TMP9]], ptr [[DOTOMP_IV]], align 4
16090 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
16091 // CHECK19: omp.inner.for.cond:
16092 // CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP74:![0-9]+]]
16093 // CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP74]]
16094 // CHECK19-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
16095 // CHECK19-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
16096 // CHECK19: omp.inner.for.body:
16097 // CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP74]]
16098 // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
16099 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
16100 // CHECK19-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP74]]
16101 // CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP74]]
16102 // CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 [[TMP13]]
16103 // CHECK19-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP74]]
16104 // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
16105 // CHECK19: omp.body.continue:
16106 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
16107 // CHECK19: omp.inner.for.inc:
16108 // CHECK19-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP74]]
16109 // CHECK19-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP14]], 1
16110 // CHECK19-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP74]]
16111 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP75:![0-9]+]]
16112 // CHECK19: omp.inner.for.end:
16113 // CHECK19-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
16114 // CHECK19: omp.dispatch.inc:
16115 // CHECK19-NEXT: br label [[OMP_DISPATCH_COND]]
16116 // CHECK19: omp.dispatch.end:
16117 // CHECK19-NEXT: call void @__kmpc_dispatch_deinit(ptr @[[GLOB3]], i32 [[TMP7]])
16118 // CHECK19-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
16119 // CHECK19-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0
16120 // CHECK19-NEXT: br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
16121 // CHECK19: .omp.final.then:
16122 // CHECK19-NEXT: store i32 10, ptr [[I]], align 4
16123 // CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]]
16124 // CHECK19: .omp.final.done:
16125 // CHECK19-NEXT: ret void
16128 // CHECK21-LABEL: define {{[^@]+}}@main
16129 // CHECK21-SAME: (i32 noundef signext [[ARGC:%.*]], ptr noundef [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] {
16130 // CHECK21-NEXT: entry:
16131 // CHECK21-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
16132 // CHECK21-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
16133 // CHECK21-NEXT: [[ARGV_ADDR:%.*]] = alloca ptr, align 8
16134 // CHECK21-NEXT: [[N:%.*]] = alloca i32, align 4
16135 // CHECK21-NEXT: [[SAVED_STACK:%.*]] = alloca ptr, align 8
16136 // CHECK21-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
16137 // CHECK21-NEXT: [[M:%.*]] = alloca i32, align 4
16138 // CHECK21-NEXT: [[TMP:%.*]] = alloca i32, align 4
16139 // CHECK21-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
16140 // CHECK21-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
16141 // CHECK21-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
16142 // CHECK21-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
16143 // CHECK21-NEXT: [[I:%.*]] = alloca i32, align 4
16144 // CHECK21-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
16145 // CHECK21-NEXT: [[I3:%.*]] = alloca i32, align 4
16146 // CHECK21-NEXT: [[_TMP10:%.*]] = alloca i32, align 4
16147 // CHECK21-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4
16148 // CHECK21-NEXT: [[DOTCAPTURE_EXPR_12:%.*]] = alloca i32, align 4
16149 // CHECK21-NEXT: [[DOTOMP_LB16:%.*]] = alloca i32, align 4
16150 // CHECK21-NEXT: [[DOTOMP_UB17:%.*]] = alloca i32, align 4
16151 // CHECK21-NEXT: [[I18:%.*]] = alloca i32, align 4
16152 // CHECK21-NEXT: [[DOTOMP_IV21:%.*]] = alloca i32, align 4
16153 // CHECK21-NEXT: [[I22:%.*]] = alloca i32, align 4
16154 // CHECK21-NEXT: [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4
16155 // CHECK21-NEXT: [[_TMP40:%.*]] = alloca i32, align 4
16156 // CHECK21-NEXT: [[DOTCAPTURE_EXPR_41:%.*]] = alloca i32, align 4
16157 // CHECK21-NEXT: [[DOTCAPTURE_EXPR_42:%.*]] = alloca i32, align 4
16158 // CHECK21-NEXT: [[DOTOMP_LB46:%.*]] = alloca i32, align 4
16159 // CHECK21-NEXT: [[DOTOMP_UB47:%.*]] = alloca i32, align 4
16160 // CHECK21-NEXT: [[I48:%.*]] = alloca i32, align 4
16161 // CHECK21-NEXT: [[DOTOMP_IV51:%.*]] = alloca i32, align 4
16162 // CHECK21-NEXT: [[I52:%.*]] = alloca i32, align 4
16163 // CHECK21-NEXT: [[_TMP69:%.*]] = alloca i32, align 4
16164 // CHECK21-NEXT: [[DOTCAPTURE_EXPR_70:%.*]] = alloca i32, align 4
16165 // CHECK21-NEXT: [[DOTCAPTURE_EXPR_71:%.*]] = alloca i32, align 4
16166 // CHECK21-NEXT: [[DOTOMP_LB75:%.*]] = alloca i32, align 4
16167 // CHECK21-NEXT: [[DOTOMP_UB76:%.*]] = alloca i32, align 4
16168 // CHECK21-NEXT: [[I77:%.*]] = alloca i32, align 4
16169 // CHECK21-NEXT: [[DOTOMP_IV80:%.*]] = alloca i32, align 4
16170 // CHECK21-NEXT: [[I81:%.*]] = alloca i32, align 4
16171 // CHECK21-NEXT: [[DOTCAPTURE_EXPR_98:%.*]] = alloca i32, align 4
16172 // CHECK21-NEXT: [[_TMP99:%.*]] = alloca i32, align 4
16173 // CHECK21-NEXT: [[DOTCAPTURE_EXPR_100:%.*]] = alloca i32, align 4
16174 // CHECK21-NEXT: [[DOTCAPTURE_EXPR_101:%.*]] = alloca i32, align 4
16175 // CHECK21-NEXT: [[DOTOMP_LB105:%.*]] = alloca i32, align 4
16176 // CHECK21-NEXT: [[DOTOMP_UB106:%.*]] = alloca i32, align 4
16177 // CHECK21-NEXT: [[I107:%.*]] = alloca i32, align 4
16178 // CHECK21-NEXT: [[DOTOMP_IV110:%.*]] = alloca i32, align 4
16179 // CHECK21-NEXT: [[I111:%.*]] = alloca i32, align 4
16180 // CHECK21-NEXT: store i32 0, ptr [[RETVAL]], align 4
16181 // CHECK21-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4
16182 // CHECK21-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8
16183 // CHECK21-NEXT: store i32 100, ptr [[N]], align 4
16184 // CHECK21-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4
16185 // CHECK21-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
16186 // CHECK21-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0()
16187 // CHECK21-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8
16188 // CHECK21-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4
16189 // CHECK21-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8
16190 // CHECK21-NEXT: store i32 10, ptr [[M]], align 4
16191 // CHECK21-NEXT: [[TMP3:%.*]] = load i32, ptr [[N]], align 4
16192 // CHECK21-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4
16193 // CHECK21-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
16194 // CHECK21-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
16195 // CHECK21-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
16196 // CHECK21-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
16197 // CHECK21-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
16198 // CHECK21-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
16199 // CHECK21-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
16200 // CHECK21-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_UB]], align 4
16201 // CHECK21-NEXT: store i32 0, ptr [[I]], align 4
16202 // CHECK21-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
16203 // CHECK21-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
16204 // CHECK21-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
16205 // CHECK21: simd.if.then:
16206 // CHECK21-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
16207 // CHECK21-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4
16208 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
16209 // CHECK21: omp.inner.for.cond:
16210 // CHECK21-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2:![0-9]+]]
16211 // CHECK21-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP2]]
16212 // CHECK21-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
16213 // CHECK21-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
16214 // CHECK21: omp.inner.for.body:
16215 // CHECK21-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]]
16216 // CHECK21-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
16217 // CHECK21-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
16218 // CHECK21-NEXT: store i32 [[ADD]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP2]]
16219 // CHECK21-NEXT: [[TMP11:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP2]]
16220 // CHECK21-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
16221 // CHECK21-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 [[IDXPROM]]
16222 // CHECK21-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP2]]
16223 // CHECK21-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
16224 // CHECK21: omp.body.continue:
16225 // CHECK21-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
16226 // CHECK21: omp.inner.for.inc:
16227 // CHECK21-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]]
16228 // CHECK21-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP12]], 1
16229 // CHECK21-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]]
16230 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
16231 // CHECK21: omp.inner.for.end:
16232 // CHECK21-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
16233 // CHECK21-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP13]], 0
16234 // CHECK21-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
16235 // CHECK21-NEXT: [[MUL8:%.*]] = mul nsw i32 [[DIV7]], 1
16236 // CHECK21-NEXT: [[ADD9:%.*]] = add nsw i32 0, [[MUL8]]
16237 // CHECK21-NEXT: store i32 [[ADD9]], ptr [[I3]], align 4
16238 // CHECK21-NEXT: br label [[SIMD_IF_END]]
16239 // CHECK21: simd.if.end:
16240 // CHECK21-NEXT: [[TMP14:%.*]] = load i32, ptr [[N]], align 4
16241 // CHECK21-NEXT: store i32 [[TMP14]], ptr [[DOTCAPTURE_EXPR_11]], align 4
16242 // CHECK21-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_11]], align 4
16243 // CHECK21-NEXT: [[SUB13:%.*]] = sub nsw i32 [[TMP15]], 0
16244 // CHECK21-NEXT: [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
16245 // CHECK21-NEXT: [[SUB15:%.*]] = sub nsw i32 [[DIV14]], 1
16246 // CHECK21-NEXT: store i32 [[SUB15]], ptr [[DOTCAPTURE_EXPR_12]], align 4
16247 // CHECK21-NEXT: store i32 0, ptr [[DOTOMP_LB16]], align 4
16248 // CHECK21-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_12]], align 4
16249 // CHECK21-NEXT: store i32 [[TMP16]], ptr [[DOTOMP_UB17]], align 4
16250 // CHECK21-NEXT: store i32 0, ptr [[I18]], align 4
16251 // CHECK21-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_11]], align 4
16252 // CHECK21-NEXT: [[CMP19:%.*]] = icmp slt i32 0, [[TMP17]]
16253 // CHECK21-NEXT: br i1 [[CMP19]], label [[SIMD_IF_THEN20:%.*]], label [[SIMD_IF_END38:%.*]]
16254 // CHECK21: simd.if.then20:
16255 // CHECK21-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_LB16]], align 4
16256 // CHECK21-NEXT: store i32 [[TMP18]], ptr [[DOTOMP_IV21]], align 4
16257 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND23:%.*]]
16258 // CHECK21: omp.inner.for.cond23:
16259 // CHECK21-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV21]], align 4, !llvm.access.group [[ACC_GRP6:![0-9]+]]
16260 // CHECK21-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_UB17]], align 4, !llvm.access.group [[ACC_GRP6]]
16261 // CHECK21-NEXT: [[CMP24:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
16262 // CHECK21-NEXT: br i1 [[CMP24]], label [[OMP_INNER_FOR_BODY25:%.*]], label [[OMP_INNER_FOR_END33:%.*]]
16263 // CHECK21: omp.inner.for.body25:
16264 // CHECK21-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IV21]], align 4, !llvm.access.group [[ACC_GRP6]]
16265 // CHECK21-NEXT: [[MUL26:%.*]] = mul nsw i32 [[TMP21]], 1
16266 // CHECK21-NEXT: [[ADD27:%.*]] = add nsw i32 0, [[MUL26]]
16267 // CHECK21-NEXT: store i32 [[ADD27]], ptr [[I22]], align 4, !llvm.access.group [[ACC_GRP6]]
16268 // CHECK21-NEXT: [[TMP22:%.*]] = load i32, ptr [[I22]], align 4, !llvm.access.group [[ACC_GRP6]]
16269 // CHECK21-NEXT: [[IDXPROM28:%.*]] = sext i32 [[TMP22]] to i64
16270 // CHECK21-NEXT: [[ARRAYIDX29:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 [[IDXPROM28]]
16271 // CHECK21-NEXT: store i32 0, ptr [[ARRAYIDX29]], align 4, !llvm.access.group [[ACC_GRP6]]
16272 // CHECK21-NEXT: br label [[OMP_BODY_CONTINUE30:%.*]]
16273 // CHECK21: omp.body.continue30:
16274 // CHECK21-NEXT: br label [[OMP_INNER_FOR_INC31:%.*]]
16275 // CHECK21: omp.inner.for.inc31:
16276 // CHECK21-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV21]], align 4, !llvm.access.group [[ACC_GRP6]]
16277 // CHECK21-NEXT: [[ADD32:%.*]] = add nsw i32 [[TMP23]], 1
16278 // CHECK21-NEXT: store i32 [[ADD32]], ptr [[DOTOMP_IV21]], align 4, !llvm.access.group [[ACC_GRP6]]
16279 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND23]], !llvm.loop [[LOOP7:![0-9]+]]
16280 // CHECK21: omp.inner.for.end33:
16281 // CHECK21-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_11]], align 4
16282 // CHECK21-NEXT: [[SUB34:%.*]] = sub nsw i32 [[TMP24]], 0
16283 // CHECK21-NEXT: [[DIV35:%.*]] = sdiv i32 [[SUB34]], 1
16284 // CHECK21-NEXT: [[MUL36:%.*]] = mul nsw i32 [[DIV35]], 1
16285 // CHECK21-NEXT: [[ADD37:%.*]] = add nsw i32 0, [[MUL36]]
16286 // CHECK21-NEXT: store i32 [[ADD37]], ptr [[I22]], align 4
16287 // CHECK21-NEXT: br label [[SIMD_IF_END38]]
16288 // CHECK21: simd.if.end38:
16289 // CHECK21-NEXT: [[TMP25:%.*]] = load i32, ptr [[M]], align 4
16290 // CHECK21-NEXT: store i32 [[TMP25]], ptr [[DOTCAPTURE_EXPR_39]], align 4
16291 // CHECK21-NEXT: [[TMP26:%.*]] = load i32, ptr [[N]], align 4
16292 // CHECK21-NEXT: store i32 [[TMP26]], ptr [[DOTCAPTURE_EXPR_41]], align 4
16293 // CHECK21-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_41]], align 4
16294 // CHECK21-NEXT: [[SUB43:%.*]] = sub nsw i32 [[TMP27]], 0
16295 // CHECK21-NEXT: [[DIV44:%.*]] = sdiv i32 [[SUB43]], 1
16296 // CHECK21-NEXT: [[SUB45:%.*]] = sub nsw i32 [[DIV44]], 1
16297 // CHECK21-NEXT: store i32 [[SUB45]], ptr [[DOTCAPTURE_EXPR_42]], align 4
16298 // CHECK21-NEXT: store i32 0, ptr [[DOTOMP_LB46]], align 4
16299 // CHECK21-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_42]], align 4
16300 // CHECK21-NEXT: store i32 [[TMP28]], ptr [[DOTOMP_UB47]], align 4
16301 // CHECK21-NEXT: store i32 0, ptr [[I48]], align 4
16302 // CHECK21-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_41]], align 4
16303 // CHECK21-NEXT: [[CMP49:%.*]] = icmp slt i32 0, [[TMP29]]
16304 // CHECK21-NEXT: br i1 [[CMP49]], label [[SIMD_IF_THEN50:%.*]], label [[SIMD_IF_END68:%.*]]
16305 // CHECK21: simd.if.then50:
16306 // CHECK21-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_LB46]], align 4
16307 // CHECK21-NEXT: store i32 [[TMP30]], ptr [[DOTOMP_IV51]], align 4
16308 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND53:%.*]]
16309 // CHECK21: omp.inner.for.cond53:
16310 // CHECK21-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_IV51]], align 4, !llvm.access.group [[ACC_GRP9:![0-9]+]]
16311 // CHECK21-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_UB47]], align 4, !llvm.access.group [[ACC_GRP9]]
16312 // CHECK21-NEXT: [[CMP54:%.*]] = icmp sle i32 [[TMP31]], [[TMP32]]
16313 // CHECK21-NEXT: br i1 [[CMP54]], label [[OMP_INNER_FOR_BODY55:%.*]], label [[OMP_INNER_FOR_END63:%.*]]
16314 // CHECK21: omp.inner.for.body55:
16315 // CHECK21-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_IV51]], align 4, !llvm.access.group [[ACC_GRP9]]
16316 // CHECK21-NEXT: [[MUL56:%.*]] = mul nsw i32 [[TMP33]], 1
16317 // CHECK21-NEXT: [[ADD57:%.*]] = add nsw i32 0, [[MUL56]]
16318 // CHECK21-NEXT: store i32 [[ADD57]], ptr [[I52]], align 4, !llvm.access.group [[ACC_GRP9]]
16319 // CHECK21-NEXT: [[TMP34:%.*]] = load i32, ptr [[I52]], align 4, !llvm.access.group [[ACC_GRP9]]
16320 // CHECK21-NEXT: [[IDXPROM58:%.*]] = sext i32 [[TMP34]] to i64
16321 // CHECK21-NEXT: [[ARRAYIDX59:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 [[IDXPROM58]]
16322 // CHECK21-NEXT: store i32 0, ptr [[ARRAYIDX59]], align 4, !llvm.access.group [[ACC_GRP9]]
16323 // CHECK21-NEXT: br label [[OMP_BODY_CONTINUE60:%.*]]
16324 // CHECK21: omp.body.continue60:
16325 // CHECK21-NEXT: br label [[OMP_INNER_FOR_INC61:%.*]]
16326 // CHECK21: omp.inner.for.inc61:
16327 // CHECK21-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTOMP_IV51]], align 4, !llvm.access.group [[ACC_GRP9]]
16328 // CHECK21-NEXT: [[ADD62:%.*]] = add nsw i32 [[TMP35]], 1
16329 // CHECK21-NEXT: store i32 [[ADD62]], ptr [[DOTOMP_IV51]], align 4, !llvm.access.group [[ACC_GRP9]]
16330 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND53]], !llvm.loop [[LOOP10:![0-9]+]]
16331 // CHECK21: omp.inner.for.end63:
16332 // CHECK21-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_41]], align 4
16333 // CHECK21-NEXT: [[SUB64:%.*]] = sub nsw i32 [[TMP36]], 0
16334 // CHECK21-NEXT: [[DIV65:%.*]] = sdiv i32 [[SUB64]], 1
16335 // CHECK21-NEXT: [[MUL66:%.*]] = mul nsw i32 [[DIV65]], 1
16336 // CHECK21-NEXT: [[ADD67:%.*]] = add nsw i32 0, [[MUL66]]
16337 // CHECK21-NEXT: store i32 [[ADD67]], ptr [[I52]], align 4
16338 // CHECK21-NEXT: br label [[SIMD_IF_END68]]
16339 // CHECK21: simd.if.end68:
16340 // CHECK21-NEXT: [[TMP37:%.*]] = load i32, ptr [[N]], align 4
16341 // CHECK21-NEXT: store i32 [[TMP37]], ptr [[DOTCAPTURE_EXPR_70]], align 4
16342 // CHECK21-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_70]], align 4
16343 // CHECK21-NEXT: [[SUB72:%.*]] = sub nsw i32 [[TMP38]], 0
16344 // CHECK21-NEXT: [[DIV73:%.*]] = sdiv i32 [[SUB72]], 1
16345 // CHECK21-NEXT: [[SUB74:%.*]] = sub nsw i32 [[DIV73]], 1
16346 // CHECK21-NEXT: store i32 [[SUB74]], ptr [[DOTCAPTURE_EXPR_71]], align 4
16347 // CHECK21-NEXT: store i32 0, ptr [[DOTOMP_LB75]], align 4
16348 // CHECK21-NEXT: [[TMP39:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_71]], align 4
16349 // CHECK21-NEXT: store i32 [[TMP39]], ptr [[DOTOMP_UB76]], align 4
16350 // CHECK21-NEXT: store i32 0, ptr [[I77]], align 4
16351 // CHECK21-NEXT: [[TMP40:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_70]], align 4
16352 // CHECK21-NEXT: [[CMP78:%.*]] = icmp slt i32 0, [[TMP40]]
16353 // CHECK21-NEXT: br i1 [[CMP78]], label [[SIMD_IF_THEN79:%.*]], label [[SIMD_IF_END97:%.*]]
16354 // CHECK21: simd.if.then79:
16355 // CHECK21-NEXT: [[TMP41:%.*]] = load i32, ptr [[DOTOMP_LB75]], align 4
16356 // CHECK21-NEXT: store i32 [[TMP41]], ptr [[DOTOMP_IV80]], align 4
16357 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND82:%.*]]
16358 // CHECK21: omp.inner.for.cond82:
16359 // CHECK21-NEXT: [[TMP42:%.*]] = load i32, ptr [[DOTOMP_IV80]], align 4, !llvm.access.group [[ACC_GRP12:![0-9]+]]
16360 // CHECK21-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTOMP_UB76]], align 4, !llvm.access.group [[ACC_GRP12]]
16361 // CHECK21-NEXT: [[CMP83:%.*]] = icmp sle i32 [[TMP42]], [[TMP43]]
16362 // CHECK21-NEXT: br i1 [[CMP83]], label [[OMP_INNER_FOR_BODY84:%.*]], label [[OMP_INNER_FOR_END92:%.*]]
16363 // CHECK21: omp.inner.for.body84:
16364 // CHECK21-NEXT: [[TMP44:%.*]] = load i32, ptr [[DOTOMP_IV80]], align 4, !llvm.access.group [[ACC_GRP12]]
16365 // CHECK21-NEXT: [[MUL85:%.*]] = mul nsw i32 [[TMP44]], 1
16366 // CHECK21-NEXT: [[ADD86:%.*]] = add nsw i32 0, [[MUL85]]
16367 // CHECK21-NEXT: store i32 [[ADD86]], ptr [[I81]], align 4, !llvm.access.group [[ACC_GRP12]]
16368 // CHECK21-NEXT: [[TMP45:%.*]] = load i32, ptr [[I81]], align 4, !llvm.access.group [[ACC_GRP12]]
16369 // CHECK21-NEXT: [[IDXPROM87:%.*]] = sext i32 [[TMP45]] to i64
16370 // CHECK21-NEXT: [[ARRAYIDX88:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 [[IDXPROM87]]
16371 // CHECK21-NEXT: store i32 0, ptr [[ARRAYIDX88]], align 4, !llvm.access.group [[ACC_GRP12]]
16372 // CHECK21-NEXT: br label [[OMP_BODY_CONTINUE89:%.*]]
16373 // CHECK21: omp.body.continue89:
16374 // CHECK21-NEXT: br label [[OMP_INNER_FOR_INC90:%.*]]
16375 // CHECK21: omp.inner.for.inc90:
16376 // CHECK21-NEXT: [[TMP46:%.*]] = load i32, ptr [[DOTOMP_IV80]], align 4, !llvm.access.group [[ACC_GRP12]]
16377 // CHECK21-NEXT: [[ADD91:%.*]] = add nsw i32 [[TMP46]], 1
16378 // CHECK21-NEXT: store i32 [[ADD91]], ptr [[DOTOMP_IV80]], align 4, !llvm.access.group [[ACC_GRP12]]
16379 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND82]], !llvm.loop [[LOOP13:![0-9]+]]
16380 // CHECK21: omp.inner.for.end92:
16381 // CHECK21-NEXT: [[TMP47:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_70]], align 4
16382 // CHECK21-NEXT: [[SUB93:%.*]] = sub nsw i32 [[TMP47]], 0
16383 // CHECK21-NEXT: [[DIV94:%.*]] = sdiv i32 [[SUB93]], 1
16384 // CHECK21-NEXT: [[MUL95:%.*]] = mul nsw i32 [[DIV94]], 1
16385 // CHECK21-NEXT: [[ADD96:%.*]] = add nsw i32 0, [[MUL95]]
16386 // CHECK21-NEXT: store i32 [[ADD96]], ptr [[I81]], align 4
16387 // CHECK21-NEXT: br label [[SIMD_IF_END97]]
16388 // CHECK21: simd.if.end97:
16389 // CHECK21-NEXT: [[TMP48:%.*]] = load i32, ptr [[M]], align 4
16390 // CHECK21-NEXT: store i32 [[TMP48]], ptr [[DOTCAPTURE_EXPR_98]], align 4
16391 // CHECK21-NEXT: [[TMP49:%.*]] = load i32, ptr [[N]], align 4
16392 // CHECK21-NEXT: store i32 [[TMP49]], ptr [[DOTCAPTURE_EXPR_100]], align 4
16393 // CHECK21-NEXT: [[TMP50:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_100]], align 4
16394 // CHECK21-NEXT: [[SUB102:%.*]] = sub nsw i32 [[TMP50]], 0
16395 // CHECK21-NEXT: [[DIV103:%.*]] = sdiv i32 [[SUB102]], 1
16396 // CHECK21-NEXT: [[SUB104:%.*]] = sub nsw i32 [[DIV103]], 1
16397 // CHECK21-NEXT: store i32 [[SUB104]], ptr [[DOTCAPTURE_EXPR_101]], align 4
16398 // CHECK21-NEXT: store i32 0, ptr [[DOTOMP_LB105]], align 4
16399 // CHECK21-NEXT: [[TMP51:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_101]], align 4
16400 // CHECK21-NEXT: store i32 [[TMP51]], ptr [[DOTOMP_UB106]], align 4
16401 // CHECK21-NEXT: store i32 0, ptr [[I107]], align 4
16402 // CHECK21-NEXT: [[TMP52:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_100]], align 4
16403 // CHECK21-NEXT: [[CMP108:%.*]] = icmp slt i32 0, [[TMP52]]
16404 // CHECK21-NEXT: br i1 [[CMP108]], label [[SIMD_IF_THEN109:%.*]], label [[SIMD_IF_END127:%.*]]
16405 // CHECK21: simd.if.then109:
16406 // CHECK21-NEXT: [[TMP53:%.*]] = load i32, ptr [[DOTOMP_LB105]], align 4
16407 // CHECK21-NEXT: store i32 [[TMP53]], ptr [[DOTOMP_IV110]], align 4
16408 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND112:%.*]]
16409 // CHECK21: omp.inner.for.cond112:
16410 // CHECK21-NEXT: [[TMP54:%.*]] = load i32, ptr [[DOTOMP_IV110]], align 4, !llvm.access.group [[ACC_GRP15:![0-9]+]]
16411 // CHECK21-NEXT: [[TMP55:%.*]] = load i32, ptr [[DOTOMP_UB106]], align 4, !llvm.access.group [[ACC_GRP15]]
16412 // CHECK21-NEXT: [[CMP113:%.*]] = icmp sle i32 [[TMP54]], [[TMP55]]
16413 // CHECK21-NEXT: br i1 [[CMP113]], label [[OMP_INNER_FOR_BODY114:%.*]], label [[OMP_INNER_FOR_END122:%.*]]
16414 // CHECK21: omp.inner.for.body114:
16415 // CHECK21-NEXT: [[TMP56:%.*]] = load i32, ptr [[DOTOMP_IV110]], align 4, !llvm.access.group [[ACC_GRP15]]
16416 // CHECK21-NEXT: [[MUL115:%.*]] = mul nsw i32 [[TMP56]], 1
16417 // CHECK21-NEXT: [[ADD116:%.*]] = add nsw i32 0, [[MUL115]]
16418 // CHECK21-NEXT: store i32 [[ADD116]], ptr [[I111]], align 4, !llvm.access.group [[ACC_GRP15]]
16419 // CHECK21-NEXT: [[TMP57:%.*]] = load i32, ptr [[I111]], align 4, !llvm.access.group [[ACC_GRP15]]
16420 // CHECK21-NEXT: [[IDXPROM117:%.*]] = sext i32 [[TMP57]] to i64
16421 // CHECK21-NEXT: [[ARRAYIDX118:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 [[IDXPROM117]]
16422 // CHECK21-NEXT: store i32 0, ptr [[ARRAYIDX118]], align 4, !llvm.access.group [[ACC_GRP15]]
16423 // CHECK21-NEXT: br label [[OMP_BODY_CONTINUE119:%.*]]
16424 // CHECK21: omp.body.continue119:
16425 // CHECK21-NEXT: br label [[OMP_INNER_FOR_INC120:%.*]]
16426 // CHECK21: omp.inner.for.inc120:
16427 // CHECK21-NEXT: [[TMP58:%.*]] = load i32, ptr [[DOTOMP_IV110]], align 4, !llvm.access.group [[ACC_GRP15]]
16428 // CHECK21-NEXT: [[ADD121:%.*]] = add nsw i32 [[TMP58]], 1
16429 // CHECK21-NEXT: store i32 [[ADD121]], ptr [[DOTOMP_IV110]], align 4, !llvm.access.group [[ACC_GRP15]]
16430 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND112]], !llvm.loop [[LOOP16:![0-9]+]]
16431 // CHECK21: omp.inner.for.end122:
16432 // CHECK21-NEXT: [[TMP59:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_100]], align 4
16433 // CHECK21-NEXT: [[SUB123:%.*]] = sub nsw i32 [[TMP59]], 0
16434 // CHECK21-NEXT: [[DIV124:%.*]] = sdiv i32 [[SUB123]], 1
16435 // CHECK21-NEXT: [[MUL125:%.*]] = mul nsw i32 [[DIV124]], 1
16436 // CHECK21-NEXT: [[ADD126:%.*]] = add nsw i32 0, [[MUL125]]
16437 // CHECK21-NEXT: store i32 [[ADD126]], ptr [[I111]], align 4
16438 // CHECK21-NEXT: br label [[SIMD_IF_END127]]
16439 // CHECK21: simd.if.end127:
16440 // CHECK21-NEXT: [[TMP60:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4
16441 // CHECK21-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP60]])
16442 // CHECK21-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4
16443 // CHECK21-NEXT: [[TMP61:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8
16444 // CHECK21-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP61]])
16445 // CHECK21-NEXT: [[TMP62:%.*]] = load i32, ptr [[RETVAL]], align 4
16446 // CHECK21-NEXT: ret i32 [[TMP62]]
16449 // CHECK21-LABEL: define {{[^@]+}}@_Z5tmainIiLi10EEiT_
16450 // CHECK21-SAME: (i32 noundef signext [[ARGC:%.*]]) #[[ATTR2:[0-9]+]] comdat {
16451 // CHECK21-NEXT: entry:
16452 // CHECK21-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
16453 // CHECK21-NEXT: [[A:%.*]] = alloca [10 x i32], align 4
16454 // CHECK21-NEXT: [[M:%.*]] = alloca i32, align 4
16455 // CHECK21-NEXT: [[TMP:%.*]] = alloca i32, align 4
16456 // CHECK21-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
16457 // CHECK21-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
16458 // CHECK21-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
16459 // CHECK21-NEXT: [[I:%.*]] = alloca i32, align 4
16460 // CHECK21-NEXT: [[_TMP2:%.*]] = alloca i32, align 4
16461 // CHECK21-NEXT: [[DOTOMP_LB3:%.*]] = alloca i32, align 4
16462 // CHECK21-NEXT: [[DOTOMP_UB4:%.*]] = alloca i32, align 4
16463 // CHECK21-NEXT: [[DOTOMP_IV5:%.*]] = alloca i32, align 4
16464 // CHECK21-NEXT: [[I6:%.*]] = alloca i32, align 4
16465 // CHECK21-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
16466 // CHECK21-NEXT: [[_TMP18:%.*]] = alloca i32, align 4
16467 // CHECK21-NEXT: [[DOTOMP_LB19:%.*]] = alloca i32, align 4
16468 // CHECK21-NEXT: [[DOTOMP_UB20:%.*]] = alloca i32, align 4
16469 // CHECK21-NEXT: [[DOTOMP_IV21:%.*]] = alloca i32, align 4
16470 // CHECK21-NEXT: [[I22:%.*]] = alloca i32, align 4
16471 // CHECK21-NEXT: [[_TMP34:%.*]] = alloca i32, align 4
16472 // CHECK21-NEXT: [[DOTOMP_LB35:%.*]] = alloca i32, align 4
16473 // CHECK21-NEXT: [[DOTOMP_UB36:%.*]] = alloca i32, align 4
16474 // CHECK21-NEXT: [[DOTOMP_IV37:%.*]] = alloca i32, align 4
16475 // CHECK21-NEXT: [[I38:%.*]] = alloca i32, align 4
16476 // CHECK21-NEXT: [[DOTCAPTURE_EXPR_50:%.*]] = alloca i32, align 4
16477 // CHECK21-NEXT: [[_TMP51:%.*]] = alloca i32, align 4
16478 // CHECK21-NEXT: [[DOTOMP_LB52:%.*]] = alloca i32, align 4
16479 // CHECK21-NEXT: [[DOTOMP_UB53:%.*]] = alloca i32, align 4
16480 // CHECK21-NEXT: [[DOTOMP_IV54:%.*]] = alloca i32, align 4
16481 // CHECK21-NEXT: [[I55:%.*]] = alloca i32, align 4
16482 // CHECK21-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4
16483 // CHECK21-NEXT: store i32 10, ptr [[M]], align 4
16484 // CHECK21-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
16485 // CHECK21-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
16486 // CHECK21-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
16487 // CHECK21-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4
16488 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
16489 // CHECK21: omp.inner.for.cond:
16490 // CHECK21-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18:![0-9]+]]
16491 // CHECK21-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP18]]
16492 // CHECK21-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
16493 // CHECK21-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
16494 // CHECK21: omp.inner.for.body:
16495 // CHECK21-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
16496 // CHECK21-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
16497 // CHECK21-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
16498 // CHECK21-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP18]]
16499 // CHECK21-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP18]]
16500 // CHECK21-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP4]] to i64
16501 // CHECK21-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[A]], i64 0, i64 [[IDXPROM]]
16502 // CHECK21-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP18]]
16503 // CHECK21-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
16504 // CHECK21: omp.body.continue:
16505 // CHECK21-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
16506 // CHECK21: omp.inner.for.inc:
16507 // CHECK21-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
16508 // CHECK21-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP5]], 1
16509 // CHECK21-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
16510 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
16511 // CHECK21: omp.inner.for.end:
16512 // CHECK21-NEXT: store i32 10, ptr [[I]], align 4
16513 // CHECK21-NEXT: store i32 0, ptr [[DOTOMP_LB3]], align 4
16514 // CHECK21-NEXT: store i32 9, ptr [[DOTOMP_UB4]], align 4
16515 // CHECK21-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB3]], align 4
16516 // CHECK21-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV5]], align 4
16517 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]]
16518 // CHECK21: omp.inner.for.cond7:
16519 // CHECK21-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP21:![0-9]+]]
16520 // CHECK21-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB4]], align 4, !llvm.access.group [[ACC_GRP21]]
16521 // CHECK21-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
16522 // CHECK21-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END17:%.*]]
16523 // CHECK21: omp.inner.for.body9:
16524 // CHECK21-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP21]]
16525 // CHECK21-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP9]], 1
16526 // CHECK21-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]]
16527 // CHECK21-NEXT: store i32 [[ADD11]], ptr [[I6]], align 4, !llvm.access.group [[ACC_GRP21]]
16528 // CHECK21-NEXT: [[TMP10:%.*]] = load i32, ptr [[I6]], align 4, !llvm.access.group [[ACC_GRP21]]
16529 // CHECK21-NEXT: [[IDXPROM12:%.*]] = sext i32 [[TMP10]] to i64
16530 // CHECK21-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds [10 x i32], ptr [[A]], i64 0, i64 [[IDXPROM12]]
16531 // CHECK21-NEXT: store i32 0, ptr [[ARRAYIDX13]], align 4, !llvm.access.group [[ACC_GRP21]]
16532 // CHECK21-NEXT: br label [[OMP_BODY_CONTINUE14:%.*]]
16533 // CHECK21: omp.body.continue14:
16534 // CHECK21-NEXT: br label [[OMP_INNER_FOR_INC15:%.*]]
16535 // CHECK21: omp.inner.for.inc15:
16536 // CHECK21-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP21]]
16537 // CHECK21-NEXT: [[ADD16:%.*]] = add nsw i32 [[TMP11]], 1
16538 // CHECK21-NEXT: store i32 [[ADD16]], ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP21]]
16539 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP22:![0-9]+]]
16540 // CHECK21: omp.inner.for.end17:
16541 // CHECK21-NEXT: store i32 10, ptr [[I6]], align 4
16542 // CHECK21-NEXT: [[TMP12:%.*]] = load i32, ptr [[M]], align 4
16543 // CHECK21-NEXT: store i32 [[TMP12]], ptr [[DOTCAPTURE_EXPR_]], align 4
16544 // CHECK21-NEXT: store i32 0, ptr [[DOTOMP_LB19]], align 4
16545 // CHECK21-NEXT: store i32 9, ptr [[DOTOMP_UB20]], align 4
16546 // CHECK21-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB19]], align 4
16547 // CHECK21-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV21]], align 4
16548 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND23:%.*]]
16549 // CHECK21: omp.inner.for.cond23:
16550 // CHECK21-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV21]], align 4, !llvm.access.group [[ACC_GRP24:![0-9]+]]
16551 // CHECK21-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB20]], align 4, !llvm.access.group [[ACC_GRP24]]
16552 // CHECK21-NEXT: [[CMP24:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
16553 // CHECK21-NEXT: br i1 [[CMP24]], label [[OMP_INNER_FOR_BODY25:%.*]], label [[OMP_INNER_FOR_END33:%.*]]
16554 // CHECK21: omp.inner.for.body25:
16555 // CHECK21-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV21]], align 4, !llvm.access.group [[ACC_GRP24]]
16556 // CHECK21-NEXT: [[MUL26:%.*]] = mul nsw i32 [[TMP16]], 1
16557 // CHECK21-NEXT: [[ADD27:%.*]] = add nsw i32 0, [[MUL26]]
16558 // CHECK21-NEXT: store i32 [[ADD27]], ptr [[I22]], align 4, !llvm.access.group [[ACC_GRP24]]
16559 // CHECK21-NEXT: [[TMP17:%.*]] = load i32, ptr [[I22]], align 4, !llvm.access.group [[ACC_GRP24]]
16560 // CHECK21-NEXT: [[IDXPROM28:%.*]] = sext i32 [[TMP17]] to i64
16561 // CHECK21-NEXT: [[ARRAYIDX29:%.*]] = getelementptr inbounds [10 x i32], ptr [[A]], i64 0, i64 [[IDXPROM28]]
16562 // CHECK21-NEXT: store i32 0, ptr [[ARRAYIDX29]], align 4, !llvm.access.group [[ACC_GRP24]]
16563 // CHECK21-NEXT: br label [[OMP_BODY_CONTINUE30:%.*]]
16564 // CHECK21: omp.body.continue30:
16565 // CHECK21-NEXT: br label [[OMP_INNER_FOR_INC31:%.*]]
16566 // CHECK21: omp.inner.for.inc31:
16567 // CHECK21-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV21]], align 4, !llvm.access.group [[ACC_GRP24]]
16568 // CHECK21-NEXT: [[ADD32:%.*]] = add nsw i32 [[TMP18]], 1
16569 // CHECK21-NEXT: store i32 [[ADD32]], ptr [[DOTOMP_IV21]], align 4, !llvm.access.group [[ACC_GRP24]]
16570 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND23]], !llvm.loop [[LOOP25:![0-9]+]]
16571 // CHECK21: omp.inner.for.end33:
16572 // CHECK21-NEXT: store i32 10, ptr [[I22]], align 4
16573 // CHECK21-NEXT: store i32 0, ptr [[DOTOMP_LB35]], align 4
16574 // CHECK21-NEXT: store i32 9, ptr [[DOTOMP_UB36]], align 4
16575 // CHECK21-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_LB35]], align 4
16576 // CHECK21-NEXT: store i32 [[TMP19]], ptr [[DOTOMP_IV37]], align 4
16577 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND39:%.*]]
16578 // CHECK21: omp.inner.for.cond39:
16579 // CHECK21-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV37]], align 4, !llvm.access.group [[ACC_GRP27:![0-9]+]]
16580 // CHECK21-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_UB36]], align 4, !llvm.access.group [[ACC_GRP27]]
16581 // CHECK21-NEXT: [[CMP40:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]]
16582 // CHECK21-NEXT: br i1 [[CMP40]], label [[OMP_INNER_FOR_BODY41:%.*]], label [[OMP_INNER_FOR_END49:%.*]]
16583 // CHECK21: omp.inner.for.body41:
16584 // CHECK21-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IV37]], align 4, !llvm.access.group [[ACC_GRP27]]
16585 // CHECK21-NEXT: [[MUL42:%.*]] = mul nsw i32 [[TMP22]], 1
16586 // CHECK21-NEXT: [[ADD43:%.*]] = add nsw i32 0, [[MUL42]]
16587 // CHECK21-NEXT: store i32 [[ADD43]], ptr [[I38]], align 4, !llvm.access.group [[ACC_GRP27]]
16588 // CHECK21-NEXT: [[TMP23:%.*]] = load i32, ptr [[I38]], align 4, !llvm.access.group [[ACC_GRP27]]
16589 // CHECK21-NEXT: [[IDXPROM44:%.*]] = sext i32 [[TMP23]] to i64
16590 // CHECK21-NEXT: [[ARRAYIDX45:%.*]] = getelementptr inbounds [10 x i32], ptr [[A]], i64 0, i64 [[IDXPROM44]]
16591 // CHECK21-NEXT: store i32 0, ptr [[ARRAYIDX45]], align 4, !llvm.access.group [[ACC_GRP27]]
16592 // CHECK21-NEXT: br label [[OMP_BODY_CONTINUE46:%.*]]
16593 // CHECK21: omp.body.continue46:
16594 // CHECK21-NEXT: br label [[OMP_INNER_FOR_INC47:%.*]]
16595 // CHECK21: omp.inner.for.inc47:
16596 // CHECK21-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV37]], align 4, !llvm.access.group [[ACC_GRP27]]
16597 // CHECK21-NEXT: [[ADD48:%.*]] = add nsw i32 [[TMP24]], 1
16598 // CHECK21-NEXT: store i32 [[ADD48]], ptr [[DOTOMP_IV37]], align 4, !llvm.access.group [[ACC_GRP27]]
16599 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND39]], !llvm.loop [[LOOP28:![0-9]+]]
16600 // CHECK21: omp.inner.for.end49:
16601 // CHECK21-NEXT: store i32 10, ptr [[I38]], align 4
16602 // CHECK21-NEXT: [[TMP25:%.*]] = load i32, ptr [[M]], align 4
16603 // CHECK21-NEXT: store i32 [[TMP25]], ptr [[DOTCAPTURE_EXPR_50]], align 4
16604 // CHECK21-NEXT: store i32 0, ptr [[DOTOMP_LB52]], align 4
16605 // CHECK21-NEXT: store i32 9, ptr [[DOTOMP_UB53]], align 4
16606 // CHECK21-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_LB52]], align 4
16607 // CHECK21-NEXT: store i32 [[TMP26]], ptr [[DOTOMP_IV54]], align 4
16608 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND56:%.*]]
16609 // CHECK21: omp.inner.for.cond56:
16610 // CHECK21-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV54]], align 4, !llvm.access.group [[ACC_GRP30:![0-9]+]]
16611 // CHECK21-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_UB53]], align 4, !llvm.access.group [[ACC_GRP30]]
16612 // CHECK21-NEXT: [[CMP57:%.*]] = icmp sle i32 [[TMP27]], [[TMP28]]
16613 // CHECK21-NEXT: br i1 [[CMP57]], label [[OMP_INNER_FOR_BODY58:%.*]], label [[OMP_INNER_FOR_END66:%.*]]
16614 // CHECK21: omp.inner.for.body58:
16615 // CHECK21-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_IV54]], align 4, !llvm.access.group [[ACC_GRP30]]
16616 // CHECK21-NEXT: [[MUL59:%.*]] = mul nsw i32 [[TMP29]], 1
16617 // CHECK21-NEXT: [[ADD60:%.*]] = add nsw i32 0, [[MUL59]]
16618 // CHECK21-NEXT: store i32 [[ADD60]], ptr [[I55]], align 4, !llvm.access.group [[ACC_GRP30]]
16619 // CHECK21-NEXT: [[TMP30:%.*]] = load i32, ptr [[I55]], align 4, !llvm.access.group [[ACC_GRP30]]
16620 // CHECK21-NEXT: [[IDXPROM61:%.*]] = sext i32 [[TMP30]] to i64
16621 // CHECK21-NEXT: [[ARRAYIDX62:%.*]] = getelementptr inbounds [10 x i32], ptr [[A]], i64 0, i64 [[IDXPROM61]]
16622 // CHECK21-NEXT: store i32 0, ptr [[ARRAYIDX62]], align 4, !llvm.access.group [[ACC_GRP30]]
16623 // CHECK21-NEXT: br label [[OMP_BODY_CONTINUE63:%.*]]
16624 // CHECK21: omp.body.continue63:
16625 // CHECK21-NEXT: br label [[OMP_INNER_FOR_INC64:%.*]]
16626 // CHECK21: omp.inner.for.inc64:
16627 // CHECK21-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_IV54]], align 4, !llvm.access.group [[ACC_GRP30]]
16628 // CHECK21-NEXT: [[ADD65:%.*]] = add nsw i32 [[TMP31]], 1
16629 // CHECK21-NEXT: store i32 [[ADD65]], ptr [[DOTOMP_IV54]], align 4, !llvm.access.group [[ACC_GRP30]]
16630 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND56]], !llvm.loop [[LOOP31:![0-9]+]]
16631 // CHECK21: omp.inner.for.end66:
16632 // CHECK21-NEXT: store i32 10, ptr [[I55]], align 4
16633 // CHECK21-NEXT: ret i32 0
16636 // CHECK23-LABEL: define {{[^@]+}}@main
16637 // CHECK23-SAME: (i32 noundef [[ARGC:%.*]], ptr noundef [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] {
16638 // CHECK23-NEXT: entry:
16639 // CHECK23-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
16640 // CHECK23-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
16641 // CHECK23-NEXT: [[ARGV_ADDR:%.*]] = alloca ptr, align 4
16642 // CHECK23-NEXT: [[N:%.*]] = alloca i32, align 4
16643 // CHECK23-NEXT: [[SAVED_STACK:%.*]] = alloca ptr, align 4
16644 // CHECK23-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4
16645 // CHECK23-NEXT: [[M:%.*]] = alloca i32, align 4
16646 // CHECK23-NEXT: [[TMP:%.*]] = alloca i32, align 4
16647 // CHECK23-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
16648 // CHECK23-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
16649 // CHECK23-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
16650 // CHECK23-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
16651 // CHECK23-NEXT: [[I:%.*]] = alloca i32, align 4
16652 // CHECK23-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
16653 // CHECK23-NEXT: [[I3:%.*]] = alloca i32, align 4
16654 // CHECK23-NEXT: [[_TMP10:%.*]] = alloca i32, align 4
16655 // CHECK23-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4
16656 // CHECK23-NEXT: [[DOTCAPTURE_EXPR_12:%.*]] = alloca i32, align 4
16657 // CHECK23-NEXT: [[DOTOMP_LB16:%.*]] = alloca i32, align 4
16658 // CHECK23-NEXT: [[DOTOMP_UB17:%.*]] = alloca i32, align 4
16659 // CHECK23-NEXT: [[I18:%.*]] = alloca i32, align 4
16660 // CHECK23-NEXT: [[DOTOMP_IV21:%.*]] = alloca i32, align 4
16661 // CHECK23-NEXT: [[I22:%.*]] = alloca i32, align 4
16662 // CHECK23-NEXT: [[DOTCAPTURE_EXPR_38:%.*]] = alloca i32, align 4
16663 // CHECK23-NEXT: [[_TMP39:%.*]] = alloca i32, align 4
16664 // CHECK23-NEXT: [[DOTCAPTURE_EXPR_40:%.*]] = alloca i32, align 4
16665 // CHECK23-NEXT: [[DOTCAPTURE_EXPR_41:%.*]] = alloca i32, align 4
16666 // CHECK23-NEXT: [[DOTOMP_LB45:%.*]] = alloca i32, align 4
16667 // CHECK23-NEXT: [[DOTOMP_UB46:%.*]] = alloca i32, align 4
16668 // CHECK23-NEXT: [[I47:%.*]] = alloca i32, align 4
16669 // CHECK23-NEXT: [[DOTOMP_IV50:%.*]] = alloca i32, align 4
16670 // CHECK23-NEXT: [[I51:%.*]] = alloca i32, align 4
16671 // CHECK23-NEXT: [[_TMP67:%.*]] = alloca i32, align 4
16672 // CHECK23-NEXT: [[DOTCAPTURE_EXPR_68:%.*]] = alloca i32, align 4
16673 // CHECK23-NEXT: [[DOTCAPTURE_EXPR_69:%.*]] = alloca i32, align 4
16674 // CHECK23-NEXT: [[DOTOMP_LB73:%.*]] = alloca i32, align 4
16675 // CHECK23-NEXT: [[DOTOMP_UB74:%.*]] = alloca i32, align 4
16676 // CHECK23-NEXT: [[I75:%.*]] = alloca i32, align 4
16677 // CHECK23-NEXT: [[DOTOMP_IV78:%.*]] = alloca i32, align 4
16678 // CHECK23-NEXT: [[I79:%.*]] = alloca i32, align 4
16679 // CHECK23-NEXT: [[DOTCAPTURE_EXPR_95:%.*]] = alloca i32, align 4
16680 // CHECK23-NEXT: [[_TMP96:%.*]] = alloca i32, align 4
16681 // CHECK23-NEXT: [[DOTCAPTURE_EXPR_97:%.*]] = alloca i32, align 4
16682 // CHECK23-NEXT: [[DOTCAPTURE_EXPR_98:%.*]] = alloca i32, align 4
16683 // CHECK23-NEXT: [[DOTOMP_LB102:%.*]] = alloca i32, align 4
16684 // CHECK23-NEXT: [[DOTOMP_UB103:%.*]] = alloca i32, align 4
16685 // CHECK23-NEXT: [[I104:%.*]] = alloca i32, align 4
16686 // CHECK23-NEXT: [[DOTOMP_IV107:%.*]] = alloca i32, align 4
16687 // CHECK23-NEXT: [[I108:%.*]] = alloca i32, align 4
16688 // CHECK23-NEXT: store i32 0, ptr [[RETVAL]], align 4
16689 // CHECK23-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4
16690 // CHECK23-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 4
16691 // CHECK23-NEXT: store i32 100, ptr [[N]], align 4
16692 // CHECK23-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4
16693 // CHECK23-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0()
16694 // CHECK23-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4
16695 // CHECK23-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4
16696 // CHECK23-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4
16697 // CHECK23-NEXT: store i32 10, ptr [[M]], align 4
16698 // CHECK23-NEXT: [[TMP2:%.*]] = load i32, ptr [[N]], align 4
16699 // CHECK23-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 4
16700 // CHECK23-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
16701 // CHECK23-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
16702 // CHECK23-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
16703 // CHECK23-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
16704 // CHECK23-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
16705 // CHECK23-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
16706 // CHECK23-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
16707 // CHECK23-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_UB]], align 4
16708 // CHECK23-NEXT: store i32 0, ptr [[I]], align 4
16709 // CHECK23-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
16710 // CHECK23-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
16711 // CHECK23-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
16712 // CHECK23: simd.if.then:
16713 // CHECK23-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
16714 // CHECK23-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
16715 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
16716 // CHECK23: omp.inner.for.cond:
16717 // CHECK23-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]]
16718 // CHECK23-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP3]]
16719 // CHECK23-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
16720 // CHECK23-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
16721 // CHECK23: omp.inner.for.body:
16722 // CHECK23-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]]
16723 // CHECK23-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
16724 // CHECK23-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
16725 // CHECK23-NEXT: store i32 [[ADD]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP3]]
16726 // CHECK23-NEXT: [[TMP10:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP3]]
16727 // CHECK23-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i32 [[TMP10]]
16728 // CHECK23-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP3]]
16729 // CHECK23-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
16730 // CHECK23: omp.body.continue:
16731 // CHECK23-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
16732 // CHECK23: omp.inner.for.inc:
16733 // CHECK23-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]]
16734 // CHECK23-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP11]], 1
16735 // CHECK23-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]]
16736 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
16737 // CHECK23: omp.inner.for.end:
16738 // CHECK23-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
16739 // CHECK23-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP12]], 0
16740 // CHECK23-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
16741 // CHECK23-NEXT: [[MUL8:%.*]] = mul nsw i32 [[DIV7]], 1
16742 // CHECK23-NEXT: [[ADD9:%.*]] = add nsw i32 0, [[MUL8]]
16743 // CHECK23-NEXT: store i32 [[ADD9]], ptr [[I3]], align 4
16744 // CHECK23-NEXT: br label [[SIMD_IF_END]]
16745 // CHECK23: simd.if.end:
16746 // CHECK23-NEXT: [[TMP13:%.*]] = load i32, ptr [[N]], align 4
16747 // CHECK23-NEXT: store i32 [[TMP13]], ptr [[DOTCAPTURE_EXPR_11]], align 4
16748 // CHECK23-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_11]], align 4
16749 // CHECK23-NEXT: [[SUB13:%.*]] = sub nsw i32 [[TMP14]], 0
16750 // CHECK23-NEXT: [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
16751 // CHECK23-NEXT: [[SUB15:%.*]] = sub nsw i32 [[DIV14]], 1
16752 // CHECK23-NEXT: store i32 [[SUB15]], ptr [[DOTCAPTURE_EXPR_12]], align 4
16753 // CHECK23-NEXT: store i32 0, ptr [[DOTOMP_LB16]], align 4
16754 // CHECK23-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_12]], align 4
16755 // CHECK23-NEXT: store i32 [[TMP15]], ptr [[DOTOMP_UB17]], align 4
16756 // CHECK23-NEXT: store i32 0, ptr [[I18]], align 4
16757 // CHECK23-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_11]], align 4
16758 // CHECK23-NEXT: [[CMP19:%.*]] = icmp slt i32 0, [[TMP16]]
16759 // CHECK23-NEXT: br i1 [[CMP19]], label [[SIMD_IF_THEN20:%.*]], label [[SIMD_IF_END37:%.*]]
16760 // CHECK23: simd.if.then20:
16761 // CHECK23-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_LB16]], align 4
16762 // CHECK23-NEXT: store i32 [[TMP17]], ptr [[DOTOMP_IV21]], align 4
16763 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND23:%.*]]
16764 // CHECK23: omp.inner.for.cond23:
16765 // CHECK23-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV21]], align 4, !llvm.access.group [[ACC_GRP7:![0-9]+]]
16766 // CHECK23-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_UB17]], align 4, !llvm.access.group [[ACC_GRP7]]
16767 // CHECK23-NEXT: [[CMP24:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
16768 // CHECK23-NEXT: br i1 [[CMP24]], label [[OMP_INNER_FOR_BODY25:%.*]], label [[OMP_INNER_FOR_END32:%.*]]
16769 // CHECK23: omp.inner.for.body25:
16770 // CHECK23-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV21]], align 4, !llvm.access.group [[ACC_GRP7]]
16771 // CHECK23-NEXT: [[MUL26:%.*]] = mul nsw i32 [[TMP20]], 1
16772 // CHECK23-NEXT: [[ADD27:%.*]] = add nsw i32 0, [[MUL26]]
16773 // CHECK23-NEXT: store i32 [[ADD27]], ptr [[I22]], align 4, !llvm.access.group [[ACC_GRP7]]
16774 // CHECK23-NEXT: [[TMP21:%.*]] = load i32, ptr [[I22]], align 4, !llvm.access.group [[ACC_GRP7]]
16775 // CHECK23-NEXT: [[ARRAYIDX28:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i32 [[TMP21]]
16776 // CHECK23-NEXT: store i32 0, ptr [[ARRAYIDX28]], align 4, !llvm.access.group [[ACC_GRP7]]
16777 // CHECK23-NEXT: br label [[OMP_BODY_CONTINUE29:%.*]]
16778 // CHECK23: omp.body.continue29:
16779 // CHECK23-NEXT: br label [[OMP_INNER_FOR_INC30:%.*]]
16780 // CHECK23: omp.inner.for.inc30:
16781 // CHECK23-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IV21]], align 4, !llvm.access.group [[ACC_GRP7]]
16782 // CHECK23-NEXT: [[ADD31:%.*]] = add nsw i32 [[TMP22]], 1
16783 // CHECK23-NEXT: store i32 [[ADD31]], ptr [[DOTOMP_IV21]], align 4, !llvm.access.group [[ACC_GRP7]]
16784 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND23]], !llvm.loop [[LOOP8:![0-9]+]]
16785 // CHECK23: omp.inner.for.end32:
16786 // CHECK23-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_11]], align 4
16787 // CHECK23-NEXT: [[SUB33:%.*]] = sub nsw i32 [[TMP23]], 0
16788 // CHECK23-NEXT: [[DIV34:%.*]] = sdiv i32 [[SUB33]], 1
16789 // CHECK23-NEXT: [[MUL35:%.*]] = mul nsw i32 [[DIV34]], 1
16790 // CHECK23-NEXT: [[ADD36:%.*]] = add nsw i32 0, [[MUL35]]
16791 // CHECK23-NEXT: store i32 [[ADD36]], ptr [[I22]], align 4
16792 // CHECK23-NEXT: br label [[SIMD_IF_END37]]
16793 // CHECK23: simd.if.end37:
16794 // CHECK23-NEXT: [[TMP24:%.*]] = load i32, ptr [[M]], align 4
16795 // CHECK23-NEXT: store i32 [[TMP24]], ptr [[DOTCAPTURE_EXPR_38]], align 4
16796 // CHECK23-NEXT: [[TMP25:%.*]] = load i32, ptr [[N]], align 4
16797 // CHECK23-NEXT: store i32 [[TMP25]], ptr [[DOTCAPTURE_EXPR_40]], align 4
16798 // CHECK23-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_40]], align 4
16799 // CHECK23-NEXT: [[SUB42:%.*]] = sub nsw i32 [[TMP26]], 0
16800 // CHECK23-NEXT: [[DIV43:%.*]] = sdiv i32 [[SUB42]], 1
16801 // CHECK23-NEXT: [[SUB44:%.*]] = sub nsw i32 [[DIV43]], 1
16802 // CHECK23-NEXT: store i32 [[SUB44]], ptr [[DOTCAPTURE_EXPR_41]], align 4
16803 // CHECK23-NEXT: store i32 0, ptr [[DOTOMP_LB45]], align 4
16804 // CHECK23-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_41]], align 4
16805 // CHECK23-NEXT: store i32 [[TMP27]], ptr [[DOTOMP_UB46]], align 4
16806 // CHECK23-NEXT: store i32 0, ptr [[I47]], align 4
16807 // CHECK23-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_40]], align 4
16808 // CHECK23-NEXT: [[CMP48:%.*]] = icmp slt i32 0, [[TMP28]]
16809 // CHECK23-NEXT: br i1 [[CMP48]], label [[SIMD_IF_THEN49:%.*]], label [[SIMD_IF_END66:%.*]]
16810 // CHECK23: simd.if.then49:
16811 // CHECK23-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_LB45]], align 4
16812 // CHECK23-NEXT: store i32 [[TMP29]], ptr [[DOTOMP_IV50]], align 4
16813 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND52:%.*]]
16814 // CHECK23: omp.inner.for.cond52:
16815 // CHECK23-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IV50]], align 4, !llvm.access.group [[ACC_GRP10:![0-9]+]]
16816 // CHECK23-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_UB46]], align 4, !llvm.access.group [[ACC_GRP10]]
16817 // CHECK23-NEXT: [[CMP53:%.*]] = icmp sle i32 [[TMP30]], [[TMP31]]
16818 // CHECK23-NEXT: br i1 [[CMP53]], label [[OMP_INNER_FOR_BODY54:%.*]], label [[OMP_INNER_FOR_END61:%.*]]
16819 // CHECK23: omp.inner.for.body54:
16820 // CHECK23-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_IV50]], align 4, !llvm.access.group [[ACC_GRP10]]
16821 // CHECK23-NEXT: [[MUL55:%.*]] = mul nsw i32 [[TMP32]], 1
16822 // CHECK23-NEXT: [[ADD56:%.*]] = add nsw i32 0, [[MUL55]]
16823 // CHECK23-NEXT: store i32 [[ADD56]], ptr [[I51]], align 4, !llvm.access.group [[ACC_GRP10]]
16824 // CHECK23-NEXT: [[TMP33:%.*]] = load i32, ptr [[I51]], align 4, !llvm.access.group [[ACC_GRP10]]
16825 // CHECK23-NEXT: [[ARRAYIDX57:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i32 [[TMP33]]
16826 // CHECK23-NEXT: store i32 0, ptr [[ARRAYIDX57]], align 4, !llvm.access.group [[ACC_GRP10]]
16827 // CHECK23-NEXT: br label [[OMP_BODY_CONTINUE58:%.*]]
16828 // CHECK23: omp.body.continue58:
16829 // CHECK23-NEXT: br label [[OMP_INNER_FOR_INC59:%.*]]
16830 // CHECK23: omp.inner.for.inc59:
16831 // CHECK23-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTOMP_IV50]], align 4, !llvm.access.group [[ACC_GRP10]]
16832 // CHECK23-NEXT: [[ADD60:%.*]] = add nsw i32 [[TMP34]], 1
16833 // CHECK23-NEXT: store i32 [[ADD60]], ptr [[DOTOMP_IV50]], align 4, !llvm.access.group [[ACC_GRP10]]
16834 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND52]], !llvm.loop [[LOOP11:![0-9]+]]
16835 // CHECK23: omp.inner.for.end61:
16836 // CHECK23-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_40]], align 4
16837 // CHECK23-NEXT: [[SUB62:%.*]] = sub nsw i32 [[TMP35]], 0
16838 // CHECK23-NEXT: [[DIV63:%.*]] = sdiv i32 [[SUB62]], 1
16839 // CHECK23-NEXT: [[MUL64:%.*]] = mul nsw i32 [[DIV63]], 1
16840 // CHECK23-NEXT: [[ADD65:%.*]] = add nsw i32 0, [[MUL64]]
16841 // CHECK23-NEXT: store i32 [[ADD65]], ptr [[I51]], align 4
16842 // CHECK23-NEXT: br label [[SIMD_IF_END66]]
16843 // CHECK23: simd.if.end66:
16844 // CHECK23-NEXT: [[TMP36:%.*]] = load i32, ptr [[N]], align 4
16845 // CHECK23-NEXT: store i32 [[TMP36]], ptr [[DOTCAPTURE_EXPR_68]], align 4
16846 // CHECK23-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_68]], align 4
16847 // CHECK23-NEXT: [[SUB70:%.*]] = sub nsw i32 [[TMP37]], 0
16848 // CHECK23-NEXT: [[DIV71:%.*]] = sdiv i32 [[SUB70]], 1
16849 // CHECK23-NEXT: [[SUB72:%.*]] = sub nsw i32 [[DIV71]], 1
16850 // CHECK23-NEXT: store i32 [[SUB72]], ptr [[DOTCAPTURE_EXPR_69]], align 4
16851 // CHECK23-NEXT: store i32 0, ptr [[DOTOMP_LB73]], align 4
16852 // CHECK23-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_69]], align 4
16853 // CHECK23-NEXT: store i32 [[TMP38]], ptr [[DOTOMP_UB74]], align 4
16854 // CHECK23-NEXT: store i32 0, ptr [[I75]], align 4
16855 // CHECK23-NEXT: [[TMP39:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_68]], align 4
16856 // CHECK23-NEXT: [[CMP76:%.*]] = icmp slt i32 0, [[TMP39]]
16857 // CHECK23-NEXT: br i1 [[CMP76]], label [[SIMD_IF_THEN77:%.*]], label [[SIMD_IF_END94:%.*]]
16858 // CHECK23: simd.if.then77:
16859 // CHECK23-NEXT: [[TMP40:%.*]] = load i32, ptr [[DOTOMP_LB73]], align 4
16860 // CHECK23-NEXT: store i32 [[TMP40]], ptr [[DOTOMP_IV78]], align 4
16861 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND80:%.*]]
16862 // CHECK23: omp.inner.for.cond80:
16863 // CHECK23-NEXT: [[TMP41:%.*]] = load i32, ptr [[DOTOMP_IV78]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]]
16864 // CHECK23-NEXT: [[TMP42:%.*]] = load i32, ptr [[DOTOMP_UB74]], align 4, !llvm.access.group [[ACC_GRP13]]
16865 // CHECK23-NEXT: [[CMP81:%.*]] = icmp sle i32 [[TMP41]], [[TMP42]]
16866 // CHECK23-NEXT: br i1 [[CMP81]], label [[OMP_INNER_FOR_BODY82:%.*]], label [[OMP_INNER_FOR_END89:%.*]]
16867 // CHECK23: omp.inner.for.body82:
16868 // CHECK23-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTOMP_IV78]], align 4, !llvm.access.group [[ACC_GRP13]]
16869 // CHECK23-NEXT: [[MUL83:%.*]] = mul nsw i32 [[TMP43]], 1
16870 // CHECK23-NEXT: [[ADD84:%.*]] = add nsw i32 0, [[MUL83]]
16871 // CHECK23-NEXT: store i32 [[ADD84]], ptr [[I79]], align 4, !llvm.access.group [[ACC_GRP13]]
16872 // CHECK23-NEXT: [[TMP44:%.*]] = load i32, ptr [[I79]], align 4, !llvm.access.group [[ACC_GRP13]]
16873 // CHECK23-NEXT: [[ARRAYIDX85:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i32 [[TMP44]]
16874 // CHECK23-NEXT: store i32 0, ptr [[ARRAYIDX85]], align 4, !llvm.access.group [[ACC_GRP13]]
16875 // CHECK23-NEXT: br label [[OMP_BODY_CONTINUE86:%.*]]
16876 // CHECK23: omp.body.continue86:
16877 // CHECK23-NEXT: br label [[OMP_INNER_FOR_INC87:%.*]]
16878 // CHECK23: omp.inner.for.inc87:
16879 // CHECK23-NEXT: [[TMP45:%.*]] = load i32, ptr [[DOTOMP_IV78]], align 4, !llvm.access.group [[ACC_GRP13]]
16880 // CHECK23-NEXT: [[ADD88:%.*]] = add nsw i32 [[TMP45]], 1
16881 // CHECK23-NEXT: store i32 [[ADD88]], ptr [[DOTOMP_IV78]], align 4, !llvm.access.group [[ACC_GRP13]]
16882 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND80]], !llvm.loop [[LOOP14:![0-9]+]]
16883 // CHECK23: omp.inner.for.end89:
16884 // CHECK23-NEXT: [[TMP46:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_68]], align 4
16885 // CHECK23-NEXT: [[SUB90:%.*]] = sub nsw i32 [[TMP46]], 0
16886 // CHECK23-NEXT: [[DIV91:%.*]] = sdiv i32 [[SUB90]], 1
16887 // CHECK23-NEXT: [[MUL92:%.*]] = mul nsw i32 [[DIV91]], 1
16888 // CHECK23-NEXT: [[ADD93:%.*]] = add nsw i32 0, [[MUL92]]
16889 // CHECK23-NEXT: store i32 [[ADD93]], ptr [[I79]], align 4
16890 // CHECK23-NEXT: br label [[SIMD_IF_END94]]
16891 // CHECK23: simd.if.end94:
16892 // CHECK23-NEXT: [[TMP47:%.*]] = load i32, ptr [[M]], align 4
16893 // CHECK23-NEXT: store i32 [[TMP47]], ptr [[DOTCAPTURE_EXPR_95]], align 4
16894 // CHECK23-NEXT: [[TMP48:%.*]] = load i32, ptr [[N]], align 4
16895 // CHECK23-NEXT: store i32 [[TMP48]], ptr [[DOTCAPTURE_EXPR_97]], align 4
16896 // CHECK23-NEXT: [[TMP49:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_97]], align 4
16897 // CHECK23-NEXT: [[SUB99:%.*]] = sub nsw i32 [[TMP49]], 0
16898 // CHECK23-NEXT: [[DIV100:%.*]] = sdiv i32 [[SUB99]], 1
16899 // CHECK23-NEXT: [[SUB101:%.*]] = sub nsw i32 [[DIV100]], 1
16900 // CHECK23-NEXT: store i32 [[SUB101]], ptr [[DOTCAPTURE_EXPR_98]], align 4
16901 // CHECK23-NEXT: store i32 0, ptr [[DOTOMP_LB102]], align 4
16902 // CHECK23-NEXT: [[TMP50:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_98]], align 4
16903 // CHECK23-NEXT: store i32 [[TMP50]], ptr [[DOTOMP_UB103]], align 4
16904 // CHECK23-NEXT: store i32 0, ptr [[I104]], align 4
16905 // CHECK23-NEXT: [[TMP51:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_97]], align 4
16906 // CHECK23-NEXT: [[CMP105:%.*]] = icmp slt i32 0, [[TMP51]]
16907 // CHECK23-NEXT: br i1 [[CMP105]], label [[SIMD_IF_THEN106:%.*]], label [[SIMD_IF_END123:%.*]]
16908 // CHECK23: simd.if.then106:
16909 // CHECK23-NEXT: [[TMP52:%.*]] = load i32, ptr [[DOTOMP_LB102]], align 4
16910 // CHECK23-NEXT: store i32 [[TMP52]], ptr [[DOTOMP_IV107]], align 4
16911 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND109:%.*]]
16912 // CHECK23: omp.inner.for.cond109:
16913 // CHECK23-NEXT: [[TMP53:%.*]] = load i32, ptr [[DOTOMP_IV107]], align 4, !llvm.access.group [[ACC_GRP16:![0-9]+]]
16914 // CHECK23-NEXT: [[TMP54:%.*]] = load i32, ptr [[DOTOMP_UB103]], align 4, !llvm.access.group [[ACC_GRP16]]
16915 // CHECK23-NEXT: [[CMP110:%.*]] = icmp sle i32 [[TMP53]], [[TMP54]]
16916 // CHECK23-NEXT: br i1 [[CMP110]], label [[OMP_INNER_FOR_BODY111:%.*]], label [[OMP_INNER_FOR_END118:%.*]]
16917 // CHECK23: omp.inner.for.body111:
16918 // CHECK23-NEXT: [[TMP55:%.*]] = load i32, ptr [[DOTOMP_IV107]], align 4, !llvm.access.group [[ACC_GRP16]]
16919 // CHECK23-NEXT: [[MUL112:%.*]] = mul nsw i32 [[TMP55]], 1
16920 // CHECK23-NEXT: [[ADD113:%.*]] = add nsw i32 0, [[MUL112]]
16921 // CHECK23-NEXT: store i32 [[ADD113]], ptr [[I108]], align 4, !llvm.access.group [[ACC_GRP16]]
16922 // CHECK23-NEXT: [[TMP56:%.*]] = load i32, ptr [[I108]], align 4, !llvm.access.group [[ACC_GRP16]]
16923 // CHECK23-NEXT: [[ARRAYIDX114:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i32 [[TMP56]]
16924 // CHECK23-NEXT: store i32 0, ptr [[ARRAYIDX114]], align 4, !llvm.access.group [[ACC_GRP16]]
16925 // CHECK23-NEXT: br label [[OMP_BODY_CONTINUE115:%.*]]
16926 // CHECK23: omp.body.continue115:
16927 // CHECK23-NEXT: br label [[OMP_INNER_FOR_INC116:%.*]]
16928 // CHECK23: omp.inner.for.inc116:
16929 // CHECK23-NEXT: [[TMP57:%.*]] = load i32, ptr [[DOTOMP_IV107]], align 4, !llvm.access.group [[ACC_GRP16]]
16930 // CHECK23-NEXT: [[ADD117:%.*]] = add nsw i32 [[TMP57]], 1
16931 // CHECK23-NEXT: store i32 [[ADD117]], ptr [[DOTOMP_IV107]], align 4, !llvm.access.group [[ACC_GRP16]]
16932 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND109]], !llvm.loop [[LOOP17:![0-9]+]]
16933 // CHECK23: omp.inner.for.end118:
16934 // CHECK23-NEXT: [[TMP58:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_97]], align 4
16935 // CHECK23-NEXT: [[SUB119:%.*]] = sub nsw i32 [[TMP58]], 0
16936 // CHECK23-NEXT: [[DIV120:%.*]] = sdiv i32 [[SUB119]], 1
16937 // CHECK23-NEXT: [[MUL121:%.*]] = mul nsw i32 [[DIV120]], 1
16938 // CHECK23-NEXT: [[ADD122:%.*]] = add nsw i32 0, [[MUL121]]
16939 // CHECK23-NEXT: store i32 [[ADD122]], ptr [[I108]], align 4
16940 // CHECK23-NEXT: br label [[SIMD_IF_END123]]
16941 // CHECK23: simd.if.end123:
16942 // CHECK23-NEXT: [[TMP59:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4
16943 // CHECK23-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP59]])
16944 // CHECK23-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4
16945 // CHECK23-NEXT: [[TMP60:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4
16946 // CHECK23-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP60]])
16947 // CHECK23-NEXT: [[TMP61:%.*]] = load i32, ptr [[RETVAL]], align 4
16948 // CHECK23-NEXT: ret i32 [[TMP61]]
16951 // CHECK23-LABEL: define {{[^@]+}}@_Z5tmainIiLi10EEiT_
16952 // CHECK23-SAME: (i32 noundef [[ARGC:%.*]]) #[[ATTR2:[0-9]+]] comdat {
16953 // CHECK23-NEXT: entry:
16954 // CHECK23-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
16955 // CHECK23-NEXT: [[A:%.*]] = alloca [10 x i32], align 4
16956 // CHECK23-NEXT: [[M:%.*]] = alloca i32, align 4
16957 // CHECK23-NEXT: [[TMP:%.*]] = alloca i32, align 4
16958 // CHECK23-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
16959 // CHECK23-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
16960 // CHECK23-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
16961 // CHECK23-NEXT: [[I:%.*]] = alloca i32, align 4
16962 // CHECK23-NEXT: [[_TMP2:%.*]] = alloca i32, align 4
16963 // CHECK23-NEXT: [[DOTOMP_LB3:%.*]] = alloca i32, align 4
16964 // CHECK23-NEXT: [[DOTOMP_UB4:%.*]] = alloca i32, align 4
16965 // CHECK23-NEXT: [[DOTOMP_IV5:%.*]] = alloca i32, align 4
16966 // CHECK23-NEXT: [[I6:%.*]] = alloca i32, align 4
16967 // CHECK23-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
16968 // CHECK23-NEXT: [[_TMP17:%.*]] = alloca i32, align 4
16969 // CHECK23-NEXT: [[DOTOMP_LB18:%.*]] = alloca i32, align 4
16970 // CHECK23-NEXT: [[DOTOMP_UB19:%.*]] = alloca i32, align 4
16971 // CHECK23-NEXT: [[DOTOMP_IV20:%.*]] = alloca i32, align 4
16972 // CHECK23-NEXT: [[I21:%.*]] = alloca i32, align 4
16973 // CHECK23-NEXT: [[_TMP32:%.*]] = alloca i32, align 4
16974 // CHECK23-NEXT: [[DOTOMP_LB33:%.*]] = alloca i32, align 4
16975 // CHECK23-NEXT: [[DOTOMP_UB34:%.*]] = alloca i32, align 4
16976 // CHECK23-NEXT: [[DOTOMP_IV35:%.*]] = alloca i32, align 4
16977 // CHECK23-NEXT: [[I36:%.*]] = alloca i32, align 4
16978 // CHECK23-NEXT: [[DOTCAPTURE_EXPR_47:%.*]] = alloca i32, align 4
16979 // CHECK23-NEXT: [[_TMP48:%.*]] = alloca i32, align 4
16980 // CHECK23-NEXT: [[DOTOMP_LB49:%.*]] = alloca i32, align 4
16981 // CHECK23-NEXT: [[DOTOMP_UB50:%.*]] = alloca i32, align 4
16982 // CHECK23-NEXT: [[DOTOMP_IV51:%.*]] = alloca i32, align 4
16983 // CHECK23-NEXT: [[I52:%.*]] = alloca i32, align 4
16984 // CHECK23-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4
16985 // CHECK23-NEXT: store i32 10, ptr [[M]], align 4
16986 // CHECK23-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
16987 // CHECK23-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
16988 // CHECK23-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
16989 // CHECK23-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4
16990 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
16991 // CHECK23: omp.inner.for.cond:
16992 // CHECK23-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19:![0-9]+]]
16993 // CHECK23-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP19]]
16994 // CHECK23-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
16995 // CHECK23-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
16996 // CHECK23: omp.inner.for.body:
16997 // CHECK23-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]]
16998 // CHECK23-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
16999 // CHECK23-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
17000 // CHECK23-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]]
17001 // CHECK23-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]]
17002 // CHECK23-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[A]], i32 0, i32 [[TMP4]]
17003 // CHECK23-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP19]]
17004 // CHECK23-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
17005 // CHECK23: omp.body.continue:
17006 // CHECK23-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
17007 // CHECK23: omp.inner.for.inc:
17008 // CHECK23-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]]
17009 // CHECK23-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP5]], 1
17010 // CHECK23-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]]
17011 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]]
17012 // CHECK23: omp.inner.for.end:
17013 // CHECK23-NEXT: store i32 10, ptr [[I]], align 4
17014 // CHECK23-NEXT: store i32 0, ptr [[DOTOMP_LB3]], align 4
17015 // CHECK23-NEXT: store i32 9, ptr [[DOTOMP_UB4]], align 4
17016 // CHECK23-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB3]], align 4
17017 // CHECK23-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV5]], align 4
17018 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]]
17019 // CHECK23: omp.inner.for.cond7:
17020 // CHECK23-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP22:![0-9]+]]
17021 // CHECK23-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB4]], align 4, !llvm.access.group [[ACC_GRP22]]
17022 // CHECK23-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
17023 // CHECK23-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END16:%.*]]
17024 // CHECK23: omp.inner.for.body9:
17025 // CHECK23-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP22]]
17026 // CHECK23-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP9]], 1
17027 // CHECK23-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]]
17028 // CHECK23-NEXT: store i32 [[ADD11]], ptr [[I6]], align 4, !llvm.access.group [[ACC_GRP22]]
17029 // CHECK23-NEXT: [[TMP10:%.*]] = load i32, ptr [[I6]], align 4, !llvm.access.group [[ACC_GRP22]]
17030 // CHECK23-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds [10 x i32], ptr [[A]], i32 0, i32 [[TMP10]]
17031 // CHECK23-NEXT: store i32 0, ptr [[ARRAYIDX12]], align 4, !llvm.access.group [[ACC_GRP22]]
17032 // CHECK23-NEXT: br label [[OMP_BODY_CONTINUE13:%.*]]
17033 // CHECK23: omp.body.continue13:
17034 // CHECK23-NEXT: br label [[OMP_INNER_FOR_INC14:%.*]]
17035 // CHECK23: omp.inner.for.inc14:
17036 // CHECK23-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP22]]
17037 // CHECK23-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP11]], 1
17038 // CHECK23-NEXT: store i32 [[ADD15]], ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP22]]
17039 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP23:![0-9]+]]
17040 // CHECK23: omp.inner.for.end16:
17041 // CHECK23-NEXT: store i32 10, ptr [[I6]], align 4
17042 // CHECK23-NEXT: [[TMP12:%.*]] = load i32, ptr [[M]], align 4
17043 // CHECK23-NEXT: store i32 [[TMP12]], ptr [[DOTCAPTURE_EXPR_]], align 4
17044 // CHECK23-NEXT: store i32 0, ptr [[DOTOMP_LB18]], align 4
17045 // CHECK23-NEXT: store i32 9, ptr [[DOTOMP_UB19]], align 4
17046 // CHECK23-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB18]], align 4
17047 // CHECK23-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV20]], align 4
17048 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND22:%.*]]
17049 // CHECK23: omp.inner.for.cond22:
17050 // CHECK23-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV20]], align 4, !llvm.access.group [[ACC_GRP25:![0-9]+]]
17051 // CHECK23-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB19]], align 4, !llvm.access.group [[ACC_GRP25]]
17052 // CHECK23-NEXT: [[CMP23:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
17053 // CHECK23-NEXT: br i1 [[CMP23]], label [[OMP_INNER_FOR_BODY24:%.*]], label [[OMP_INNER_FOR_END31:%.*]]
17054 // CHECK23: omp.inner.for.body24:
17055 // CHECK23-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV20]], align 4, !llvm.access.group [[ACC_GRP25]]
17056 // CHECK23-NEXT: [[MUL25:%.*]] = mul nsw i32 [[TMP16]], 1
17057 // CHECK23-NEXT: [[ADD26:%.*]] = add nsw i32 0, [[MUL25]]
17058 // CHECK23-NEXT: store i32 [[ADD26]], ptr [[I21]], align 4, !llvm.access.group [[ACC_GRP25]]
17059 // CHECK23-NEXT: [[TMP17:%.*]] = load i32, ptr [[I21]], align 4, !llvm.access.group [[ACC_GRP25]]
17060 // CHECK23-NEXT: [[ARRAYIDX27:%.*]] = getelementptr inbounds [10 x i32], ptr [[A]], i32 0, i32 [[TMP17]]
17061 // CHECK23-NEXT: store i32 0, ptr [[ARRAYIDX27]], align 4, !llvm.access.group [[ACC_GRP25]]
17062 // CHECK23-NEXT: br label [[OMP_BODY_CONTINUE28:%.*]]
17063 // CHECK23: omp.body.continue28:
17064 // CHECK23-NEXT: br label [[OMP_INNER_FOR_INC29:%.*]]
17065 // CHECK23: omp.inner.for.inc29:
17066 // CHECK23-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV20]], align 4, !llvm.access.group [[ACC_GRP25]]
17067 // CHECK23-NEXT: [[ADD30:%.*]] = add nsw i32 [[TMP18]], 1
17068 // CHECK23-NEXT: store i32 [[ADD30]], ptr [[DOTOMP_IV20]], align 4, !llvm.access.group [[ACC_GRP25]]
17069 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND22]], !llvm.loop [[LOOP26:![0-9]+]]
17070 // CHECK23: omp.inner.for.end31:
17071 // CHECK23-NEXT: store i32 10, ptr [[I21]], align 4
17072 // CHECK23-NEXT: store i32 0, ptr [[DOTOMP_LB33]], align 4
17073 // CHECK23-NEXT: store i32 9, ptr [[DOTOMP_UB34]], align 4
17074 // CHECK23-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_LB33]], align 4
17075 // CHECK23-NEXT: store i32 [[TMP19]], ptr [[DOTOMP_IV35]], align 4
17076 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND37:%.*]]
17077 // CHECK23: omp.inner.for.cond37:
17078 // CHECK23-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV35]], align 4, !llvm.access.group [[ACC_GRP28:![0-9]+]]
17079 // CHECK23-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_UB34]], align 4, !llvm.access.group [[ACC_GRP28]]
17080 // CHECK23-NEXT: [[CMP38:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]]
17081 // CHECK23-NEXT: br i1 [[CMP38]], label [[OMP_INNER_FOR_BODY39:%.*]], label [[OMP_INNER_FOR_END46:%.*]]
17082 // CHECK23: omp.inner.for.body39:
17083 // CHECK23-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IV35]], align 4, !llvm.access.group [[ACC_GRP28]]
17084 // CHECK23-NEXT: [[MUL40:%.*]] = mul nsw i32 [[TMP22]], 1
17085 // CHECK23-NEXT: [[ADD41:%.*]] = add nsw i32 0, [[MUL40]]
17086 // CHECK23-NEXT: store i32 [[ADD41]], ptr [[I36]], align 4, !llvm.access.group [[ACC_GRP28]]
17087 // CHECK23-NEXT: [[TMP23:%.*]] = load i32, ptr [[I36]], align 4, !llvm.access.group [[ACC_GRP28]]
17088 // CHECK23-NEXT: [[ARRAYIDX42:%.*]] = getelementptr inbounds [10 x i32], ptr [[A]], i32 0, i32 [[TMP23]]
17089 // CHECK23-NEXT: store i32 0, ptr [[ARRAYIDX42]], align 4, !llvm.access.group [[ACC_GRP28]]
17090 // CHECK23-NEXT: br label [[OMP_BODY_CONTINUE43:%.*]]
17091 // CHECK23: omp.body.continue43:
17092 // CHECK23-NEXT: br label [[OMP_INNER_FOR_INC44:%.*]]
17093 // CHECK23: omp.inner.for.inc44:
17094 // CHECK23-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV35]], align 4, !llvm.access.group [[ACC_GRP28]]
17095 // CHECK23-NEXT: [[ADD45:%.*]] = add nsw i32 [[TMP24]], 1
17096 // CHECK23-NEXT: store i32 [[ADD45]], ptr [[DOTOMP_IV35]], align 4, !llvm.access.group [[ACC_GRP28]]
17097 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND37]], !llvm.loop [[LOOP29:![0-9]+]]
17098 // CHECK23: omp.inner.for.end46:
17099 // CHECK23-NEXT: store i32 10, ptr [[I36]], align 4
17100 // CHECK23-NEXT: [[TMP25:%.*]] = load i32, ptr [[M]], align 4
17101 // CHECK23-NEXT: store i32 [[TMP25]], ptr [[DOTCAPTURE_EXPR_47]], align 4
17102 // CHECK23-NEXT: store i32 0, ptr [[DOTOMP_LB49]], align 4
17103 // CHECK23-NEXT: store i32 9, ptr [[DOTOMP_UB50]], align 4
17104 // CHECK23-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_LB49]], align 4
17105 // CHECK23-NEXT: store i32 [[TMP26]], ptr [[DOTOMP_IV51]], align 4
17106 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND53:%.*]]
17107 // CHECK23: omp.inner.for.cond53:
17108 // CHECK23-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV51]], align 4, !llvm.access.group [[ACC_GRP31:![0-9]+]]
17109 // CHECK23-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_UB50]], align 4, !llvm.access.group [[ACC_GRP31]]
17110 // CHECK23-NEXT: [[CMP54:%.*]] = icmp sle i32 [[TMP27]], [[TMP28]]
17111 // CHECK23-NEXT: br i1 [[CMP54]], label [[OMP_INNER_FOR_BODY55:%.*]], label [[OMP_INNER_FOR_END62:%.*]]
17112 // CHECK23: omp.inner.for.body55:
17113 // CHECK23-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_IV51]], align 4, !llvm.access.group [[ACC_GRP31]]
17114 // CHECK23-NEXT: [[MUL56:%.*]] = mul nsw i32 [[TMP29]], 1
17115 // CHECK23-NEXT: [[ADD57:%.*]] = add nsw i32 0, [[MUL56]]
17116 // CHECK23-NEXT: store i32 [[ADD57]], ptr [[I52]], align 4, !llvm.access.group [[ACC_GRP31]]
17117 // CHECK23-NEXT: [[TMP30:%.*]] = load i32, ptr [[I52]], align 4, !llvm.access.group [[ACC_GRP31]]
17118 // CHECK23-NEXT: [[ARRAYIDX58:%.*]] = getelementptr inbounds [10 x i32], ptr [[A]], i32 0, i32 [[TMP30]]
17119 // CHECK23-NEXT: store i32 0, ptr [[ARRAYIDX58]], align 4, !llvm.access.group [[ACC_GRP31]]
17120 // CHECK23-NEXT: br label [[OMP_BODY_CONTINUE59:%.*]]
17121 // CHECK23: omp.body.continue59:
17122 // CHECK23-NEXT: br label [[OMP_INNER_FOR_INC60:%.*]]
17123 // CHECK23: omp.inner.for.inc60:
17124 // CHECK23-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_IV51]], align 4, !llvm.access.group [[ACC_GRP31]]
17125 // CHECK23-NEXT: [[ADD61:%.*]] = add nsw i32 [[TMP31]], 1
17126 // CHECK23-NEXT: store i32 [[ADD61]], ptr [[DOTOMP_IV51]], align 4, !llvm.access.group [[ACC_GRP31]]
17127 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND53]], !llvm.loop [[LOOP32:![0-9]+]]
17128 // CHECK23: omp.inner.for.end62:
17129 // CHECK23-NEXT: store i32 10, ptr [[I52]], align 4
17130 // CHECK23-NEXT: ret i32 0