1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
2 // expected-no-diagnostics
7 // RUN: %clang_cc1 -DCK1 -verify -Wno-vla -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1
8 // RUN: %clang_cc1 -DCK1 -verify -Wno-vla -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK2
9 // RUN: %clang_cc1 -DCK1 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
10 // RUN: %clang_cc1 -DCK1 -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify -Wno-vla %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK1
12 // RUN: %clang_cc1 -DCK1 -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
13 // RUN: %clang_cc1 -DCK1 -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify -Wno-vla %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK2
14 // RUN: %clang_cc1 -DCK1 -verify -Wno-vla -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK5
15 // RUN: %clang_cc1 -DCK1 -verify -Wno-vla -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK6
17 // RUN: %clang_cc1 -DCK1 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
18 // RUN: %clang_cc1 -DCK1 -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify -Wno-vla %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK5
19 // RUN: %clang_cc1 -DCK1 -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
20 // RUN: %clang_cc1 -DCK1 -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify -Wno-vla %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK6
22 // RUN: %clang_cc1 -DCK1 -verify -Wno-vla -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK9
23 // RUN: %clang_cc1 -DCK1 -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
24 // RUN: %clang_cc1 -DCK1 -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify -Wno-vla %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK9
25 // RUN: %clang_cc1 -DCK1 -verify -Wno-vla -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK11
26 // RUN: %clang_cc1 -DCK1 -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
27 // RUN: %clang_cc1 -DCK1 -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify -Wno-vla %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK11
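// CK1: SS<T, X, Y> runs 'teams distribute parallel for simd' over its member
// array in five target regions: with no schedule clause, schedule(static),
// schedule(static, X/2), schedule(dynamic), and schedule(dynamic, X/2).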
30 template <typename T, int X, long long Y>
37 #pragma omp teams distribute parallel for simd
38 for(int i = 0; i < X; i++) {
42 #pragma omp teams distribute parallel for simd schedule(static)
43 for(int i = 0; i < X; i++) {
47 #pragma omp teams distribute parallel for simd schedule(static, X/2)
48 for(int i = 0; i < X; i++) {
53 #pragma omp teams distribute parallel for simd schedule(dynamic)
54 for(int i = 0; i < X; i++) {
59 #pragma omp teams distribute parallel for simd schedule(dynamic, X/2)
60 for(int i = 0; i < X; i++) {
84 int teams_template_struct(void) {
92 // RUN: %clang_cc1 -DCK2 -verify -Wno-vla -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK13
94 // RUN: %clang_cc1 -DCK2 -verify -Wno-vla -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK14
96 // RUN: %clang_cc1 -DCK2 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
97 // RUN: %clang_cc1 -DCK2 -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify -Wno-vla %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK13
98 // RUN: %clang_cc1 -DCK2 -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
99 // RUN: %clang_cc1 -DCK2 -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify -Wno-vla %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK14
102 // RUN: %clang_cc1 -DCK2 -verify -Wno-vla -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK17
103 // RUN: %clang_cc1 -DCK2 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
104 // RUN: %clang_cc1 -DCK2 -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify -Wno-vla %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK17
106 // RUN: %clang_cc1 -DCK2 -verify -Wno-vla -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK19
107 // RUN: %clang_cc1 -DCK2 -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
108 // RUN: %clang_cc1 -DCK2 -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify -Wno-vla %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK19
110 // RUN: %clang_cc1 -DCK2 -verify -Wno-vla -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK21
111 // RUN: %clang_cc1 -DCK2 -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
112 // RUN: %clang_cc1 -DCK2 -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify -Wno-vla %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK21
113 // RUN: %clang_cc1 -DCK2 -verify -Wno-vla -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK23
114 // RUN: %clang_cc1 -DCK2 -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
115 // RUN: %clang_cc1 -DCK2 -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify -Wno-vla %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK23
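// CK2: the function template tmain<T, n> and main() exercise the same combined
// construct with schedule(static/dynamic) and dist_schedule(static) clauses;
// the chunked variants take their chunk size from the variable m.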
118 template <typename T, int n>
123 #pragma omp teams distribute parallel for simd
124 for(int i = 0; i < n; i++) {
128 #pragma omp teams distribute parallel for simd schedule(static)
129 for(int i = 0; i < n; i++) {
133 #pragma omp teams distribute parallel for simd schedule(static, m)
134 for(int i = 0; i < n; i++) {
138 #pragma omp teams distribute parallel for simd schedule(dynamic)
139 for(int i = 0; i < n; i++) {
143 #pragma omp teams distribute parallel for simd schedule(dynamic, m)
144 for(int i = 0; i < n; i++) {
150 int main (int argc, char **argv) {
155 #pragma omp teams distribute parallel for simd
156 for(int i = 0; i < n; i++) {
160 #pragma omp teams distribute parallel for simd dist_schedule(static)
161 for(int i = 0; i < n; i++) {
165 #pragma omp teams distribute parallel for simd dist_schedule(static, m)
166 for(int i = 0; i < n; i++) {
170 #pragma omp teams distribute parallel for simd schedule(dynamic)
171 for(int i = 0; i < n; i++) {
175 #pragma omp teams distribute parallel for simd schedule(dynamic, m)
176 for(int i = 0; i < n; i++) {
179 return tmain<int, 10>(argc);
220 #endif // #ifndef HEADER
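// The assertions below were generated by update_cc_test_checks.py. For each
// target region they verify the host-side kernel-argument setup and the
// __tgt_target_kernel call, the offloading entry that calls __kmpc_fork_teams,
// the teams-level outlined function that drives the distribute loop, and the
// nested outlined function that executes the parallel-for-simd body.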
221 // CHECK1-LABEL: define {{[^@]+}}@_Z21teams_template_structv
222 // CHECK1-SAME: () #[[ATTR0:[0-9]+]] {
223 // CHECK1-NEXT: entry:
224 // CHECK1-NEXT: [[V:%.*]] = alloca [[STRUCT_SS:%.*]], align 4
225 // CHECK1-NEXT: [[CALL:%.*]] = call noundef signext i32 @_ZN2SSIiLi123ELx456EE3fooEv(ptr noundef nonnull align 4 dereferenceable(496) [[V]])
226 // CHECK1-NEXT: ret i32 [[CALL]]
229 // CHECK1-LABEL: define {{[^@]+}}@_ZN2SSIiLi123ELx456EE3fooEv
230 // CHECK1-SAME: (ptr noundef nonnull align 4 dereferenceable(496) [[THIS:%.*]]) #[[ATTR0]] comdat {
231 // CHECK1-NEXT: entry:
232 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
233 // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8
234 // CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8
235 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8
236 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
237 // CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
238 // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS3:%.*]] = alloca [1 x ptr], align 8
239 // CHECK1-NEXT: [[DOTOFFLOAD_PTRS4:%.*]] = alloca [1 x ptr], align 8
240 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS5:%.*]] = alloca [1 x ptr], align 8
241 // CHECK1-NEXT: [[_TMP6:%.*]] = alloca i32, align 4
242 // CHECK1-NEXT: [[KERNEL_ARGS7:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
243 // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS11:%.*]] = alloca [1 x ptr], align 8
244 // CHECK1-NEXT: [[DOTOFFLOAD_PTRS12:%.*]] = alloca [1 x ptr], align 8
245 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS13:%.*]] = alloca [1 x ptr], align 8
246 // CHECK1-NEXT: [[_TMP14:%.*]] = alloca i32, align 4
247 // CHECK1-NEXT: [[KERNEL_ARGS15:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
248 // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS19:%.*]] = alloca [1 x ptr], align 8
249 // CHECK1-NEXT: [[DOTOFFLOAD_PTRS20:%.*]] = alloca [1 x ptr], align 8
250 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS21:%.*]] = alloca [1 x ptr], align 8
251 // CHECK1-NEXT: [[_TMP22:%.*]] = alloca i32, align 4
252 // CHECK1-NEXT: [[KERNEL_ARGS23:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
253 // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS27:%.*]] = alloca [1 x ptr], align 8
254 // CHECK1-NEXT: [[DOTOFFLOAD_PTRS28:%.*]] = alloca [1 x ptr], align 8
255 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS29:%.*]] = alloca [1 x ptr], align 8
256 // CHECK1-NEXT: [[_TMP30:%.*]] = alloca i32, align 4
257 // CHECK1-NEXT: [[KERNEL_ARGS31:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
258 // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
259 // CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
260 // CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[THIS1]], i32 0, i32 0
261 // CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
262 // CHECK1-NEXT: store ptr [[THIS1]], ptr [[TMP0]], align 8
263 // CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
264 // CHECK1-NEXT: store ptr [[A]], ptr [[TMP1]], align 8
265 // CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
266 // CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8
267 // CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
268 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
269 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
270 // CHECK1-NEXT: store i32 2, ptr [[TMP5]], align 4
271 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
272 // CHECK1-NEXT: store i32 1, ptr [[TMP6]], align 4
273 // CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
274 // CHECK1-NEXT: store ptr [[TMP3]], ptr [[TMP7]], align 8
275 // CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
276 // CHECK1-NEXT: store ptr [[TMP4]], ptr [[TMP8]], align 8
277 // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
278 // CHECK1-NEXT: store ptr @.offload_sizes, ptr [[TMP9]], align 8
279 // CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
280 // CHECK1-NEXT: store ptr @.offload_maptypes, ptr [[TMP10]], align 8
281 // CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
282 // CHECK1-NEXT: store ptr null, ptr [[TMP11]], align 8
283 // CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
284 // CHECK1-NEXT: store ptr null, ptr [[TMP12]], align 8
285 // CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
286 // CHECK1-NEXT: store i64 123, ptr [[TMP13]], align 8
287 // CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
288 // CHECK1-NEXT: store i64 0, ptr [[TMP14]], align 8
289 // CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
290 // CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP15]], align 4
291 // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
292 // CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP16]], align 4
293 // CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
294 // CHECK1-NEXT: store i32 0, ptr [[TMP17]], align 4
295 // CHECK1-NEXT: [[TMP18:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.region_id, ptr [[KERNEL_ARGS]])
296 // CHECK1-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
297 // CHECK1-NEXT: br i1 [[TMP19]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
298 // CHECK1: omp_offload.failed:
299 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36(ptr [[THIS1]]) #[[ATTR3:[0-9]+]]
300 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
301 // CHECK1: omp_offload.cont:
302 // CHECK1-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
303 // CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
304 // CHECK1-NEXT: store ptr [[THIS1]], ptr [[TMP20]], align 8
305 // CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
306 // CHECK1-NEXT: store ptr [[A2]], ptr [[TMP21]], align 8
307 // CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS5]], i64 0, i64 0
308 // CHECK1-NEXT: store ptr null, ptr [[TMP22]], align 8
309 // CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
310 // CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
311 // CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 0
312 // CHECK1-NEXT: store i32 2, ptr [[TMP25]], align 4
313 // CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 1
314 // CHECK1-NEXT: store i32 1, ptr [[TMP26]], align 4
315 // CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 2
316 // CHECK1-NEXT: store ptr [[TMP23]], ptr [[TMP27]], align 8
317 // CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 3
318 // CHECK1-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 8
319 // CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 4
320 // CHECK1-NEXT: store ptr @.offload_sizes.1, ptr [[TMP29]], align 8
321 // CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 5
322 // CHECK1-NEXT: store ptr @.offload_maptypes.2, ptr [[TMP30]], align 8
323 // CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 6
324 // CHECK1-NEXT: store ptr null, ptr [[TMP31]], align 8
325 // CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 7
326 // CHECK1-NEXT: store ptr null, ptr [[TMP32]], align 8
327 // CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 8
328 // CHECK1-NEXT: store i64 123, ptr [[TMP33]], align 8
329 // CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 9
330 // CHECK1-NEXT: store i64 0, ptr [[TMP34]], align 8
331 // CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 10
332 // CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP35]], align 4
333 // CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 11
334 // CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP36]], align 4
335 // CHECK1-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 12
336 // CHECK1-NEXT: store i32 0, ptr [[TMP37]], align 4
337 // CHECK1-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41.region_id, ptr [[KERNEL_ARGS7]])
338 // CHECK1-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
339 // CHECK1-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED8:%.*]], label [[OMP_OFFLOAD_CONT9:%.*]]
340 // CHECK1: omp_offload.failed8:
341 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41(ptr [[THIS1]]) #[[ATTR3]]
342 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT9]]
343 // CHECK1: omp_offload.cont9:
344 // CHECK1-NEXT: [[A10:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
345 // CHECK1-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS11]], i32 0, i32 0
346 // CHECK1-NEXT: store ptr [[THIS1]], ptr [[TMP40]], align 8
347 // CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS12]], i32 0, i32 0
348 // CHECK1-NEXT: store ptr [[A10]], ptr [[TMP41]], align 8
349 // CHECK1-NEXT: [[TMP42:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS13]], i64 0, i64 0
350 // CHECK1-NEXT: store ptr null, ptr [[TMP42]], align 8
351 // CHECK1-NEXT: [[TMP43:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS11]], i32 0, i32 0
352 // CHECK1-NEXT: [[TMP44:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS12]], i32 0, i32 0
353 // CHECK1-NEXT: [[TMP45:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 0
354 // CHECK1-NEXT: store i32 2, ptr [[TMP45]], align 4
355 // CHECK1-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 1
356 // CHECK1-NEXT: store i32 1, ptr [[TMP46]], align 4
357 // CHECK1-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 2
358 // CHECK1-NEXT: store ptr [[TMP43]], ptr [[TMP47]], align 8
359 // CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 3
360 // CHECK1-NEXT: store ptr [[TMP44]], ptr [[TMP48]], align 8
361 // CHECK1-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 4
362 // CHECK1-NEXT: store ptr @.offload_sizes.3, ptr [[TMP49]], align 8
363 // CHECK1-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 5
364 // CHECK1-NEXT: store ptr @.offload_maptypes.4, ptr [[TMP50]], align 8
365 // CHECK1-NEXT: [[TMP51:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 6
366 // CHECK1-NEXT: store ptr null, ptr [[TMP51]], align 8
367 // CHECK1-NEXT: [[TMP52:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 7
368 // CHECK1-NEXT: store ptr null, ptr [[TMP52]], align 8
369 // CHECK1-NEXT: [[TMP53:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 8
370 // CHECK1-NEXT: store i64 123, ptr [[TMP53]], align 8
371 // CHECK1-NEXT: [[TMP54:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 9
372 // CHECK1-NEXT: store i64 0, ptr [[TMP54]], align 8
373 // CHECK1-NEXT: [[TMP55:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 10
374 // CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP55]], align 4
375 // CHECK1-NEXT: [[TMP56:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 11
376 // CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP56]], align 4
377 // CHECK1-NEXT: [[TMP57:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 12
378 // CHECK1-NEXT: store i32 0, ptr [[TMP57]], align 4
379 // CHECK1-NEXT: [[TMP58:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46.region_id, ptr [[KERNEL_ARGS15]])
380 // CHECK1-NEXT: [[TMP59:%.*]] = icmp ne i32 [[TMP58]], 0
381 // CHECK1-NEXT: br i1 [[TMP59]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]]
382 // CHECK1: omp_offload.failed16:
383 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46(ptr [[THIS1]]) #[[ATTR3]]
384 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT17]]
385 // CHECK1: omp_offload.cont17:
386 // CHECK1-NEXT: [[A18:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
387 // CHECK1-NEXT: [[TMP60:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0
388 // CHECK1-NEXT: store ptr [[THIS1]], ptr [[TMP60]], align 8
389 // CHECK1-NEXT: [[TMP61:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 0
390 // CHECK1-NEXT: store ptr [[A18]], ptr [[TMP61]], align 8
391 // CHECK1-NEXT: [[TMP62:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS21]], i64 0, i64 0
392 // CHECK1-NEXT: store ptr null, ptr [[TMP62]], align 8
393 // CHECK1-NEXT: [[TMP63:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0
394 // CHECK1-NEXT: [[TMP64:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 0
395 // CHECK1-NEXT: [[TMP65:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 0
396 // CHECK1-NEXT: store i32 2, ptr [[TMP65]], align 4
397 // CHECK1-NEXT: [[TMP66:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 1
398 // CHECK1-NEXT: store i32 1, ptr [[TMP66]], align 4
399 // CHECK1-NEXT: [[TMP67:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 2
400 // CHECK1-NEXT: store ptr [[TMP63]], ptr [[TMP67]], align 8
401 // CHECK1-NEXT: [[TMP68:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 3
402 // CHECK1-NEXT: store ptr [[TMP64]], ptr [[TMP68]], align 8
403 // CHECK1-NEXT: [[TMP69:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 4
404 // CHECK1-NEXT: store ptr @.offload_sizes.5, ptr [[TMP69]], align 8
405 // CHECK1-NEXT: [[TMP70:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 5
406 // CHECK1-NEXT: store ptr @.offload_maptypes.6, ptr [[TMP70]], align 8
407 // CHECK1-NEXT: [[TMP71:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 6
408 // CHECK1-NEXT: store ptr null, ptr [[TMP71]], align 8
409 // CHECK1-NEXT: [[TMP72:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 7
410 // CHECK1-NEXT: store ptr null, ptr [[TMP72]], align 8
411 // CHECK1-NEXT: [[TMP73:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 8
412 // CHECK1-NEXT: store i64 123, ptr [[TMP73]], align 8
413 // CHECK1-NEXT: [[TMP74:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 9
414 // CHECK1-NEXT: store i64 0, ptr [[TMP74]], align 8
415 // CHECK1-NEXT: [[TMP75:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 10
416 // CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP75]], align 4
417 // CHECK1-NEXT: [[TMP76:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 11
418 // CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP76]], align 4
419 // CHECK1-NEXT: [[TMP77:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 12
420 // CHECK1-NEXT: store i32 0, ptr [[TMP77]], align 4
421 // CHECK1-NEXT: [[TMP78:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52.region_id, ptr [[KERNEL_ARGS23]])
422 // CHECK1-NEXT: [[TMP79:%.*]] = icmp ne i32 [[TMP78]], 0
423 // CHECK1-NEXT: br i1 [[TMP79]], label [[OMP_OFFLOAD_FAILED24:%.*]], label [[OMP_OFFLOAD_CONT25:%.*]]
424 // CHECK1: omp_offload.failed24:
425 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52(ptr [[THIS1]]) #[[ATTR3]]
426 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT25]]
427 // CHECK1: omp_offload.cont25:
428 // CHECK1-NEXT: [[A26:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
429 // CHECK1-NEXT: [[TMP80:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS27]], i32 0, i32 0
430 // CHECK1-NEXT: store ptr [[THIS1]], ptr [[TMP80]], align 8
431 // CHECK1-NEXT: [[TMP81:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS28]], i32 0, i32 0
432 // CHECK1-NEXT: store ptr [[A26]], ptr [[TMP81]], align 8
433 // CHECK1-NEXT: [[TMP82:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS29]], i64 0, i64 0
434 // CHECK1-NEXT: store ptr null, ptr [[TMP82]], align 8
435 // CHECK1-NEXT: [[TMP83:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS27]], i32 0, i32 0
436 // CHECK1-NEXT: [[TMP84:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS28]], i32 0, i32 0
437 // CHECK1-NEXT: [[TMP85:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 0
438 // CHECK1-NEXT: store i32 2, ptr [[TMP85]], align 4
439 // CHECK1-NEXT: [[TMP86:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 1
440 // CHECK1-NEXT: store i32 1, ptr [[TMP86]], align 4
441 // CHECK1-NEXT: [[TMP87:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 2
442 // CHECK1-NEXT: store ptr [[TMP83]], ptr [[TMP87]], align 8
443 // CHECK1-NEXT: [[TMP88:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 3
444 // CHECK1-NEXT: store ptr [[TMP84]], ptr [[TMP88]], align 8
445 // CHECK1-NEXT: [[TMP89:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 4
446 // CHECK1-NEXT: store ptr @.offload_sizes.7, ptr [[TMP89]], align 8
447 // CHECK1-NEXT: [[TMP90:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 5
448 // CHECK1-NEXT: store ptr @.offload_maptypes.8, ptr [[TMP90]], align 8
449 // CHECK1-NEXT: [[TMP91:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 6
450 // CHECK1-NEXT: store ptr null, ptr [[TMP91]], align 8
451 // CHECK1-NEXT: [[TMP92:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 7
452 // CHECK1-NEXT: store ptr null, ptr [[TMP92]], align 8
453 // CHECK1-NEXT: [[TMP93:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 8
454 // CHECK1-NEXT: store i64 123, ptr [[TMP93]], align 8
455 // CHECK1-NEXT: [[TMP94:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 9
456 // CHECK1-NEXT: store i64 0, ptr [[TMP94]], align 8
457 // CHECK1-NEXT: [[TMP95:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 10
458 // CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP95]], align 4
459 // CHECK1-NEXT: [[TMP96:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 11
460 // CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP96]], align 4
461 // CHECK1-NEXT: [[TMP97:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 12
462 // CHECK1-NEXT: store i32 0, ptr [[TMP97]], align 4
463 // CHECK1-NEXT: [[TMP98:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58.region_id, ptr [[KERNEL_ARGS31]])
464 // CHECK1-NEXT: [[TMP99:%.*]] = icmp ne i32 [[TMP98]], 0
465 // CHECK1-NEXT: br i1 [[TMP99]], label [[OMP_OFFLOAD_FAILED32:%.*]], label [[OMP_OFFLOAD_CONT33:%.*]]
466 // CHECK1: omp_offload.failed32:
467 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58(ptr [[THIS1]]) #[[ATTR3]]
468 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT33]]
469 // CHECK1: omp_offload.cont33:
470 // CHECK1-NEXT: [[A34:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
471 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A34]], i64 0, i64 0
472 // CHECK1-NEXT: [[TMP100:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
473 // CHECK1-NEXT: ret i32 [[TMP100]]
476 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36
477 // CHECK1-SAME: (ptr noundef [[THIS:%.*]]) #[[ATTR1:[0-9]+]] {
478 // CHECK1-NEXT: entry:
479 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
480 // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
481 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
482 // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.omp_outlined, ptr [[TMP0]])
483 // CHECK1-NEXT: ret void
486 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.omp_outlined
487 // CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR2:[0-9]+]] {
488 // CHECK1-NEXT: entry:
489 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
490 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
491 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
492 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
493 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
494 // CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
495 // CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
496 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
497 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
498 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
499 // CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
500 // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
501 // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
502 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
503 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
504 // CHECK1-NEXT: store i32 122, ptr [[DOTOMP_COMB_UB]], align 4
505 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
506 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
507 // CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
508 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
509 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
510 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
511 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
512 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
513 // CHECK1: cond.true:
514 // CHECK1-NEXT: br label [[COND_END:%.*]]
515 // CHECK1: cond.false:
516 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
517 // CHECK1-NEXT: br label [[COND_END]]
519 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
520 // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
521 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
522 // CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
523 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
524 // CHECK1: omp.inner.for.cond:
525 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8:![0-9]+]]
526 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP8]]
527 // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
528 // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
529 // CHECK1: omp.inner.for.body:
530 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP8]]
531 // CHECK1-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
532 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP8]]
533 // CHECK1-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
534 // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP8]]
535 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
536 // CHECK1: omp.inner.for.inc:
537 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]]
538 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP8]]
539 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
540 // CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]]
541 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]]
542 // CHECK1: omp.inner.for.end:
543 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
544 // CHECK1: omp.loop.exit:
545 // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
546 // CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
547 // CHECK1-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
548 // CHECK1-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
549 // CHECK1: .omp.final.then:
550 // CHECK1-NEXT: store i32 123, ptr [[I]], align 4
551 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
552 // CHECK1: .omp.final.done:
553 // CHECK1-NEXT: ret void
556 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.omp_outlined.omp_outlined
557 // CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR2]] {
558 // CHECK1-NEXT: entry:
559 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
560 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
561 // CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
562 // CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
563 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
564 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
565 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
566 // CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
567 // CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
568 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
569 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
570 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
571 // CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
572 // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
573 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
574 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
575 // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
576 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
577 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
578 // CHECK1-NEXT: store i32 122, ptr [[DOTOMP_UB]], align 4
579 // CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
580 // CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
581 // CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
582 // CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
583 // CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
584 // CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
585 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
586 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
587 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
588 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
589 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
590 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
591 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 122
592 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
593 // CHECK1: cond.true:
594 // CHECK1-NEXT: br label [[COND_END:%.*]]
595 // CHECK1: cond.false:
596 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
597 // CHECK1-NEXT: br label [[COND_END]]
599 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
600 // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
601 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
602 // CHECK1-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4
603 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
604 // CHECK1: omp.inner.for.cond:
605 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12:![0-9]+]]
606 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP12]]
607 // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
608 // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
609 // CHECK1: omp.inner.for.body:
610 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]]
611 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
612 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
613 // CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP12]]
614 // CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, i32 0
615 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP12]]
616 // CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
617 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A]], i64 0, i64 [[IDXPROM]]
618 // CHECK1-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP12]]
619 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
620 // CHECK1: omp.body.continue:
621 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
622 // CHECK1: omp.inner.for.inc:
623 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]]
624 // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
625 // CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]]
626 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]]
627 // CHECK1: omp.inner.for.end:
628 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
629 // CHECK1: omp.loop.exit:
630 // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP4]])
631 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
632 // CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
633 // CHECK1-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
634 // CHECK1: .omp.final.then:
635 // CHECK1-NEXT: store i32 123, ptr [[I]], align 4
636 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
637 // CHECK1: .omp.final.done:
638 // CHECK1-NEXT: ret void
641 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41
642 // CHECK1-SAME: (ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
643 // CHECK1-NEXT: entry:
644 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
645 // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
646 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
647 // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41.omp_outlined, ptr [[TMP0]])
648 // CHECK1-NEXT: ret void
651 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41.omp_outlined
652 // CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR2]] {
653 // CHECK1-NEXT: entry:
654 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
655 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
656 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
657 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
658 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
659 // CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
660 // CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
661 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
662 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
663 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
664 // CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
665 // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
666 // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
667 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
668 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
669 // CHECK1-NEXT: store i32 122, ptr [[DOTOMP_COMB_UB]], align 4
670 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
671 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
672 // CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
673 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
674 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
675 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
676 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
677 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
678 // CHECK1: cond.true:
679 // CHECK1-NEXT: br label [[COND_END:%.*]]
680 // CHECK1: cond.false:
681 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
682 // CHECK1-NEXT: br label [[COND_END]]
684 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
685 // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
686 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
687 // CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
688 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
689 // CHECK1: omp.inner.for.cond:
690 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP17:![0-9]+]]
691 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP17]]
692 // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
693 // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
694 // CHECK1: omp.inner.for.body:
695 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP17]]
696 // CHECK1-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
697 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP17]]
698 // CHECK1-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
699 // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP17]]
700 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
701 // CHECK1: omp.inner.for.inc:
702 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP17]]
703 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP17]]
704 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
705 // CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP17]]
706 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP18:![0-9]+]]
707 // CHECK1: omp.inner.for.end:
708 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
709 // CHECK1: omp.loop.exit:
710 // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
711 // CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
712 // CHECK1-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
713 // CHECK1-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
714 // CHECK1: .omp.final.then:
715 // CHECK1-NEXT: store i32 123, ptr [[I]], align 4
716 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
717 // CHECK1: .omp.final.done:
718 // CHECK1-NEXT: ret void
721 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41.omp_outlined.omp_outlined
722 // CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR2]] {
723 // CHECK1-NEXT: entry:
724 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
725 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
726 // CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
727 // CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
728 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
729 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
730 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
731 // CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
732 // CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
733 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
734 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
735 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
736 // CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
737 // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
738 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
739 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
740 // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
741 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
742 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
743 // CHECK1-NEXT: store i32 122, ptr [[DOTOMP_UB]], align 4
744 // CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
745 // CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
746 // CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
747 // CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
748 // CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
749 // CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
750 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
751 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
752 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
753 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
754 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
755 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
756 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 122
757 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
758 // CHECK1: cond.true:
759 // CHECK1-NEXT: br label [[COND_END:%.*]]
760 // CHECK1: cond.false:
761 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
762 // CHECK1-NEXT: br label [[COND_END]]
764 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
765 // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
766 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
767 // CHECK1-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4
768 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
769 // CHECK1: omp.inner.for.cond:
770 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20:![0-9]+]]
771 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP20]]
772 // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
773 // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
774 // CHECK1: omp.inner.for.body:
775 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]]
776 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
777 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
778 // CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP20]]
779 // CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, i32 0
780 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP20]]
781 // CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
782 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A]], i64 0, i64 [[IDXPROM]]
783 // CHECK1-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP20]]
784 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
785 // CHECK1: omp.body.continue:
786 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
787 // CHECK1: omp.inner.for.inc:
788 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]]
789 // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
790 // CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]]
791 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]]
792 // CHECK1: omp.inner.for.end:
793 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
794 // CHECK1: omp.loop.exit:
795 // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP4]])
796 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
797 // CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
798 // CHECK1-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
799 // CHECK1: .omp.final.then:
800 // CHECK1-NEXT: store i32 123, ptr [[I]], align 4
801 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
802 // CHECK1: .omp.final.done:
803 // CHECK1-NEXT: ret void
806 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46
807 // CHECK1-SAME: (ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
808 // CHECK1-NEXT: entry:
809 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
810 // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
811 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
812 // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46.omp_outlined, ptr [[TMP0]])
813 // CHECK1-NEXT: ret void
816 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46.omp_outlined
817 // CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR2]] {
818 // CHECK1-NEXT: entry:
819 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
820 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
821 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
822 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
823 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
824 // CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
825 // CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
826 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
827 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
828 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
829 // CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
830 // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
831 // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
832 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
833 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
834 // CHECK1-NEXT: store i32 122, ptr [[DOTOMP_COMB_UB]], align 4
835 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
836 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
837 // CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
838 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
839 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
840 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
841 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
842 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
843 // CHECK1: cond.true:
844 // CHECK1-NEXT: br label [[COND_END:%.*]]
845 // CHECK1: cond.false:
846 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
847 // CHECK1-NEXT: br label [[COND_END]]
848 // CHECK1: cond.end:
849 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
850 // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
851 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
852 // CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
853 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
854 // CHECK1: omp.inner.for.cond:
855 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23:![0-9]+]]
856 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP23]]
857 // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
858 // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
859 // CHECK1: omp.inner.for.body:
860 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP23]]
861 // CHECK1-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
862 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP23]]
863 // CHECK1-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
864 // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP23]]
865 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
866 // CHECK1: omp.inner.for.inc:
867 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]]
868 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP23]]
869 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
870 // CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]]
871 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]]
872 // CHECK1: omp.inner.for.end:
873 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
874 // CHECK1: omp.loop.exit:
875 // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
876 // CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
877 // CHECK1-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
878 // CHECK1-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
879 // CHECK1: .omp.final.then:
880 // CHECK1-NEXT: store i32 123, ptr [[I]], align 4
881 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
882 // CHECK1: .omp.final.done:
883 // CHECK1-NEXT: ret void
886 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46.omp_outlined.omp_outlined
887 // CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR2]] {
888 // CHECK1-NEXT: entry:
889 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
890 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
891 // CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
892 // CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
893 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
894 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
895 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
896 // CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
897 // CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
898 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
899 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
900 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
901 // CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
902 // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
903 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
904 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
905 // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
906 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
907 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
908 // CHECK1-NEXT: store i32 122, ptr [[DOTOMP_UB]], align 4
909 // CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
910 // CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
911 // CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
912 // CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
913 // CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
914 // CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
915 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
916 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
917 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
918 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
919 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP4]], i32 33, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 61)
920 // CHECK1-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
921 // CHECK1: omp.dispatch.cond:
922 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
923 // CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
924 // CHECK1-NEXT: [[CONV2:%.*]] = trunc i64 [[TMP6]] to i32
925 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], [[CONV2]]
926 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
927 // CHECK1: cond.true:
928 // CHECK1-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
929 // CHECK1-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP7]] to i32
930 // CHECK1-NEXT: br label [[COND_END:%.*]]
931 // CHECK1: cond.false:
932 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
933 // CHECK1-NEXT: br label [[COND_END]]
934 // CHECK1: cond.end:
935 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[CONV3]], [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ]
936 // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
937 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
938 // CHECK1-NEXT: store i32 [[TMP9]], ptr [[DOTOMP_IV]], align 4
939 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
940 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
941 // CHECK1-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
942 // CHECK1-NEXT: br i1 [[CMP4]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
943 // CHECK1: omp.dispatch.body:
944 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
945 // CHECK1: omp.inner.for.cond:
946 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26:![0-9]+]]
947 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP26]]
948 // CHECK1-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
949 // CHECK1-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
950 // CHECK1: omp.inner.for.body:
951 // CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]]
952 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP14]], 1
953 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
954 // CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP26]]
955 // CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, i32 0
956 // CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP26]]
957 // CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP15]] to i64
958 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A]], i64 0, i64 [[IDXPROM]]
959 // CHECK1-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP26]]
960 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
961 // CHECK1: omp.body.continue:
962 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
963 // CHECK1: omp.inner.for.inc:
964 // CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]]
965 // CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP16]], 1
966 // CHECK1-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]]
967 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]]
968 // CHECK1: omp.inner.for.end:
969 // CHECK1-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
970 // CHECK1: omp.dispatch.inc:
971 // CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
972 // CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
973 // CHECK1-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
974 // CHECK1-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_LB]], align 4
975 // CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
976 // CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
977 // CHECK1-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
978 // CHECK1-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_UB]], align 4
979 // CHECK1-NEXT: br label [[OMP_DISPATCH_COND]]
980 // CHECK1: omp.dispatch.end:
981 // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP4]])
982 // CHECK1-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
983 // CHECK1-NEXT: [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0
984 // CHECK1-NEXT: br i1 [[TMP22]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
985 // CHECK1: .omp.final.then:
986 // CHECK1-NEXT: store i32 123, ptr [[I]], align 4
987 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
988 // CHECK1: .omp.final.done:
989 // CHECK1-NEXT: ret void
992 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52
993 // CHECK1-SAME: (ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
994 // CHECK1-NEXT: entry:
995 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
996 // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
997 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
998 // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52.omp_outlined, ptr [[TMP0]])
999 // CHECK1-NEXT: ret void
1002 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52.omp_outlined
1003 // CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR2]] {
1004 // CHECK1-NEXT: entry:
1005 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1006 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1007 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
1008 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
1009 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
1010 // CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
1011 // CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
1012 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1013 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1014 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
1015 // CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
1016 // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
1017 // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
1018 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
1019 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
1020 // CHECK1-NEXT: store i32 122, ptr [[DOTOMP_COMB_UB]], align 4
1021 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
1022 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
1023 // CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
1024 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
1025 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
1026 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
1027 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
1028 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1029 // CHECK1: cond.true:
1030 // CHECK1-NEXT: br label [[COND_END:%.*]]
1031 // CHECK1: cond.false:
1032 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
1033 // CHECK1-NEXT: br label [[COND_END]]
1034 // CHECK1: cond.end:
1035 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
1036 // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
1037 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
1038 // CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
1039 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
1040 // CHECK1: omp.inner.for.cond:
1041 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29:![0-9]+]]
1042 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP29]]
1043 // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
1044 // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1045 // CHECK1: omp.inner.for.body:
1046 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP29]]
1047 // CHECK1-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
1048 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP29]]
1049 // CHECK1-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
1050 // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP29]]
1051 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
1052 // CHECK1: omp.inner.for.inc:
1053 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]]
1054 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP29]]
1055 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
1056 // CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]]
1057 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]]
1058 // CHECK1: omp.inner.for.end:
1059 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
1060 // CHECK1: omp.loop.exit:
1061 // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
1062 // CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
1063 // CHECK1-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
1064 // CHECK1-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1065 // CHECK1: .omp.final.then:
1066 // CHECK1-NEXT: store i32 123, ptr [[I]], align 4
1067 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
1068 // CHECK1: .omp.final.done:
1069 // CHECK1-NEXT: ret void
1072 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52.omp_outlined.omp_outlined
1073 // CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR2]] {
1074 // CHECK1-NEXT: entry:
1075 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1076 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1077 // CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
1078 // CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
1079 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
1080 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
1081 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
1082 // CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
1083 // CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
1084 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1085 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1086 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
1087 // CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
1088 // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
1089 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
1090 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
1091 // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
1092 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
1093 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
1094 // CHECK1-NEXT: store i32 122, ptr [[DOTOMP_UB]], align 4
1095 // CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
1096 // CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
1097 // CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
1098 // CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
1099 // CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
1100 // CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
1101 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
1102 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
1103 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
1104 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
1105 // CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
1106 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
1107 // CHECK1-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB3]], i32 [[TMP6]], i32 35, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 1)
1108 // CHECK1-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
1109 // CHECK1: omp.dispatch.cond:
1110 // CHECK1-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB3]], i32 [[TMP6]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
1111 // CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
1112 // CHECK1-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
1113 // CHECK1: omp.dispatch.body:
1114 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
1115 // CHECK1-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4
1116 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
1117 // CHECK1: omp.inner.for.cond:
1118 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32:![0-9]+]]
1119 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP32]]
1120 // CHECK1-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
1121 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1122 // CHECK1: omp.inner.for.body:
1123 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]]
1124 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
1125 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1126 // CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP32]]
1127 // CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, i32 0
1128 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP32]]
1129 // CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP12]] to i64
1130 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A]], i64 0, i64 [[IDXPROM]]
1131 // CHECK1-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP32]]
1132 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
1133 // CHECK1: omp.body.continue:
1134 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
1135 // CHECK1: omp.inner.for.inc:
1136 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]]
1137 // CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP13]], 1
1138 // CHECK1-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]]
1139 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]]
1140 // CHECK1: omp.inner.for.end:
1141 // CHECK1-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
1142 // CHECK1: omp.dispatch.inc:
1143 // CHECK1-NEXT: br label [[OMP_DISPATCH_COND]]
1144 // CHECK1: omp.dispatch.end:
1145 // CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
1146 // CHECK1-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
1147 // CHECK1-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1148 // CHECK1: .omp.final.then:
1149 // CHECK1-NEXT: store i32 123, ptr [[I]], align 4
1150 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
1151 // CHECK1: .omp.final.done:
1152 // CHECK1-NEXT: ret void
1155 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58
1156 // CHECK1-SAME: (ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
1157 // CHECK1-NEXT: entry:
1158 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
1159 // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
1160 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
1161 // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58.omp_outlined, ptr [[TMP0]])
1162 // CHECK1-NEXT: ret void
1165 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58.omp_outlined
1166 // CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR2]] {
1167 // CHECK1-NEXT: entry:
1168 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1169 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1170 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
1171 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
1172 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
1173 // CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
1174 // CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
1175 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1176 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1177 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
1178 // CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
1179 // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
1180 // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
1181 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
1182 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
1183 // CHECK1-NEXT: store i32 122, ptr [[DOTOMP_COMB_UB]], align 4
1184 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
1185 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
1186 // CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
1187 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
1188 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
1189 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
1190 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
1191 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1192 // CHECK1: cond.true:
1193 // CHECK1-NEXT: br label [[COND_END:%.*]]
1194 // CHECK1: cond.false:
1195 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
1196 // CHECK1-NEXT: br label [[COND_END]]
1197 // CHECK1: cond.end:
1198 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
1199 // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
1200 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
1201 // CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
1202 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
1203 // CHECK1: omp.inner.for.cond:
1204 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35:![0-9]+]]
1205 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP35]]
1206 // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
1207 // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1208 // CHECK1: omp.inner.for.body:
1209 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP35]]
1210 // CHECK1-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
1211 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP35]]
1212 // CHECK1-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
1213 // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP35]]
1214 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
1215 // CHECK1: omp.inner.for.inc:
1216 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]]
1217 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP35]]
1218 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
1219 // CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]]
1220 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]]
1221 // CHECK1: omp.inner.for.end:
1222 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
1223 // CHECK1: omp.loop.exit:
1224 // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
1225 // CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
1226 // CHECK1-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
1227 // CHECK1-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1228 // CHECK1: .omp.final.then:
1229 // CHECK1-NEXT: store i32 123, ptr [[I]], align 4
1230 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
1231 // CHECK1: .omp.final.done:
1232 // CHECK1-NEXT: ret void
1235 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58.omp_outlined.omp_outlined
1236 // CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR2]] {
1237 // CHECK1-NEXT: entry:
1238 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1239 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1240 // CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
1241 // CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
1242 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
1243 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
1244 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
1245 // CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
1246 // CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
1247 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1248 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1249 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
1250 // CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
1251 // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
1252 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
1253 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
1254 // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
1255 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
1256 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
1257 // CHECK1-NEXT: store i32 122, ptr [[DOTOMP_UB]], align 4
1258 // CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
1259 // CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
1260 // CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
1261 // CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
1262 // CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
1263 // CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
1264 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
1265 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
1266 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
1267 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
1268 // CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
1269 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
1270 // CHECK1-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB3]], i32 [[TMP6]], i32 35, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 61)
1271 // CHECK1-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
1272 // CHECK1: omp.dispatch.cond:
1273 // CHECK1-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB3]], i32 [[TMP6]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
1274 // CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
1275 // CHECK1-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
1276 // CHECK1: omp.dispatch.body:
1277 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
1278 // CHECK1-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4
1279 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
1280 // CHECK1: omp.inner.for.cond:
1281 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38:![0-9]+]]
1282 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP38]]
1283 // CHECK1-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
1284 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1285 // CHECK1: omp.inner.for.body:
1286 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38]]
1287 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
1288 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1289 // CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP38]]
1290 // CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, i32 0
1291 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP38]]
1292 // CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP12]] to i64
1293 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A]], i64 0, i64 [[IDXPROM]]
1294 // CHECK1-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP38]]
1295 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
1296 // CHECK1: omp.body.continue:
1297 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
1298 // CHECK1: omp.inner.for.inc:
1299 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38]]
1300 // CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP13]], 1
1301 // CHECK1-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38]]
1302 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP39:![0-9]+]]
1303 // CHECK1: omp.inner.for.end:
1304 // CHECK1-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
1305 // CHECK1: omp.dispatch.inc:
1306 // CHECK1-NEXT: br label [[OMP_DISPATCH_COND]]
1307 // CHECK1: omp.dispatch.end:
1308 // CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
1309 // CHECK1-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
1310 // CHECK1-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1311 // CHECK1: .omp.final.then:
1312 // CHECK1-NEXT: store i32 123, ptr [[I]], align 4
1313 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
1314 // CHECK1: .omp.final.done:
1315 // CHECK1-NEXT: ret void
1318 // CHECK1-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
1319 // CHECK1-SAME: () #[[ATTR4:[0-9]+]] {
1320 // CHECK1-NEXT: entry:
1321 // CHECK1-NEXT: call void @__tgt_register_requires(i64 1)
1322 // CHECK1-NEXT: ret void
1325 // CHECK2-LABEL: define {{[^@]+}}@_Z21teams_template_structv
1326 // CHECK2-SAME: () #[[ATTR0:[0-9]+]] {
1327 // CHECK2-NEXT: entry:
1328 // CHECK2-NEXT: [[V:%.*]] = alloca [[STRUCT_SS:%.*]], align 4
1329 // CHECK2-NEXT: [[CALL:%.*]] = call noundef signext i32 @_ZN2SSIiLi123ELx456EE3fooEv(ptr noundef nonnull align 4 dereferenceable(496) [[V]])
1330 // CHECK2-NEXT: ret i32 [[CALL]]
1333 // CHECK2-LABEL: define {{[^@]+}}@_ZN2SSIiLi123ELx456EE3fooEv
1334 // CHECK2-SAME: (ptr noundef nonnull align 4 dereferenceable(496) [[THIS:%.*]]) #[[ATTR0]] comdat {
1335 // CHECK2-NEXT: entry:
1336 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
1337 // CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8
1338 // CHECK2-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8
1339 // CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8
1340 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
1341 // CHECK2-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
1342 // CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS3:%.*]] = alloca [1 x ptr], align 8
1343 // CHECK2-NEXT: [[DOTOFFLOAD_PTRS4:%.*]] = alloca [1 x ptr], align 8
1344 // CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS5:%.*]] = alloca [1 x ptr], align 8
1345 // CHECK2-NEXT: [[_TMP6:%.*]] = alloca i32, align 4
1346 // CHECK2-NEXT: [[KERNEL_ARGS7:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
1347 // CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS11:%.*]] = alloca [1 x ptr], align 8
1348 // CHECK2-NEXT: [[DOTOFFLOAD_PTRS12:%.*]] = alloca [1 x ptr], align 8
1349 // CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS13:%.*]] = alloca [1 x ptr], align 8
1350 // CHECK2-NEXT: [[_TMP14:%.*]] = alloca i32, align 4
1351 // CHECK2-NEXT: [[KERNEL_ARGS15:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
1352 // CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS19:%.*]] = alloca [1 x ptr], align 8
1353 // CHECK2-NEXT: [[DOTOFFLOAD_PTRS20:%.*]] = alloca [1 x ptr], align 8
1354 // CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS21:%.*]] = alloca [1 x ptr], align 8
1355 // CHECK2-NEXT: [[_TMP22:%.*]] = alloca i32, align 4
1356 // CHECK2-NEXT: [[KERNEL_ARGS23:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
1357 // CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS27:%.*]] = alloca [1 x ptr], align 8
1358 // CHECK2-NEXT: [[DOTOFFLOAD_PTRS28:%.*]] = alloca [1 x ptr], align 8
1359 // CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS29:%.*]] = alloca [1 x ptr], align 8
1360 // CHECK2-NEXT: [[_TMP30:%.*]] = alloca i32, align 4
1361 // CHECK2-NEXT: [[KERNEL_ARGS31:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
1362 // CHECK2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
1363 // CHECK2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
1364 // CHECK2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[THIS1]], i32 0, i32 0
1365 // CHECK2-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1366 // CHECK2-NEXT: store ptr [[THIS1]], ptr [[TMP0]], align 8
1367 // CHECK2-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1368 // CHECK2-NEXT: store ptr [[A]], ptr [[TMP1]], align 8
1369 // CHECK2-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
1370 // CHECK2-NEXT: store ptr null, ptr [[TMP2]], align 8
1371 // CHECK2-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1372 // CHECK2-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1373 // CHECK2-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
1374 // CHECK2-NEXT: store i32 2, ptr [[TMP5]], align 4
1375 // CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
1376 // CHECK2-NEXT: store i32 1, ptr [[TMP6]], align 4
1377 // CHECK2-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
1378 // CHECK2-NEXT: store ptr [[TMP3]], ptr [[TMP7]], align 8
1379 // CHECK2-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
1380 // CHECK2-NEXT: store ptr [[TMP4]], ptr [[TMP8]], align 8
1381 // CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
1382 // CHECK2-NEXT: store ptr @.offload_sizes, ptr [[TMP9]], align 8
1383 // CHECK2-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
1384 // CHECK2-NEXT: store ptr @.offload_maptypes, ptr [[TMP10]], align 8
1385 // CHECK2-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
1386 // CHECK2-NEXT: store ptr null, ptr [[TMP11]], align 8
1387 // CHECK2-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
1388 // CHECK2-NEXT: store ptr null, ptr [[TMP12]], align 8
1389 // CHECK2-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
1390 // CHECK2-NEXT: store i64 123, ptr [[TMP13]], align 8
1391 // CHECK2-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
1392 // CHECK2-NEXT: store i64 0, ptr [[TMP14]], align 8
1393 // CHECK2-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
1394 // CHECK2-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP15]], align 4
1395 // CHECK2-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
1396 // CHECK2-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP16]], align 4
1397 // CHECK2-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
1398 // CHECK2-NEXT: store i32 0, ptr [[TMP17]], align 4
1399 // CHECK2-NEXT: [[TMP18:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.region_id, ptr [[KERNEL_ARGS]])
1400 // CHECK2-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
1401 // CHECK2-NEXT: br i1 [[TMP19]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
1402 // CHECK2: omp_offload.failed:
1403 // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36(ptr [[THIS1]]) #[[ATTR3:[0-9]+]]
1404 // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT]]
1405 // CHECK2: omp_offload.cont:
1406 // CHECK2-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
1407 // CHECK2-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
1408 // CHECK2-NEXT: store ptr [[THIS1]], ptr [[TMP20]], align 8
1409 // CHECK2-NEXT: [[TMP21:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
1410 // CHECK2-NEXT: store ptr [[A2]], ptr [[TMP21]], align 8
1411 // CHECK2-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS5]], i64 0, i64 0
1412 // CHECK2-NEXT: store ptr null, ptr [[TMP22]], align 8
1413 // CHECK2-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
1414 // CHECK2-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
1415 // CHECK2-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 0
1416 // CHECK2-NEXT: store i32 2, ptr [[TMP25]], align 4
1417 // CHECK2-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 1
1418 // CHECK2-NEXT: store i32 1, ptr [[TMP26]], align 4
1419 // CHECK2-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 2
1420 // CHECK2-NEXT: store ptr [[TMP23]], ptr [[TMP27]], align 8
1421 // CHECK2-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 3
1422 // CHECK2-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 8
1423 // CHECK2-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 4
1424 // CHECK2-NEXT: store ptr @.offload_sizes.1, ptr [[TMP29]], align 8
1425 // CHECK2-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 5
1426 // CHECK2-NEXT: store ptr @.offload_maptypes.2, ptr [[TMP30]], align 8
1427 // CHECK2-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 6
1428 // CHECK2-NEXT: store ptr null, ptr [[TMP31]], align 8
1429 // CHECK2-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 7
1430 // CHECK2-NEXT: store ptr null, ptr [[TMP32]], align 8
1431 // CHECK2-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 8
1432 // CHECK2-NEXT: store i64 123, ptr [[TMP33]], align 8
1433 // CHECK2-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 9
1434 // CHECK2-NEXT: store i64 0, ptr [[TMP34]], align 8
1435 // CHECK2-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 10
1436 // CHECK2-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP35]], align 4
1437 // CHECK2-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 11
1438 // CHECK2-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP36]], align 4
1439 // CHECK2-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 12
1440 // CHECK2-NEXT: store i32 0, ptr [[TMP37]], align 4
1441 // CHECK2-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41.region_id, ptr [[KERNEL_ARGS7]])
1442 // CHECK2-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
1443 // CHECK2-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED8:%.*]], label [[OMP_OFFLOAD_CONT9:%.*]]
1444 // CHECK2: omp_offload.failed8:
1445 // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41(ptr [[THIS1]]) #[[ATTR3]]
1446 // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT9]]
1447 // CHECK2: omp_offload.cont9:
1448 // CHECK2-NEXT: [[A10:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
1449 // CHECK2-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS11]], i32 0, i32 0
1450 // CHECK2-NEXT: store ptr [[THIS1]], ptr [[TMP40]], align 8
1451 // CHECK2-NEXT: [[TMP41:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS12]], i32 0, i32 0
1452 // CHECK2-NEXT: store ptr [[A10]], ptr [[TMP41]], align 8
1453 // CHECK2-NEXT: [[TMP42:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS13]], i64 0, i64 0
1454 // CHECK2-NEXT: store ptr null, ptr [[TMP42]], align 8
1455 // CHECK2-NEXT: [[TMP43:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS11]], i32 0, i32 0
1456 // CHECK2-NEXT: [[TMP44:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS12]], i32 0, i32 0
1457 // CHECK2-NEXT: [[TMP45:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 0
1458 // CHECK2-NEXT: store i32 2, ptr [[TMP45]], align 4
1459 // CHECK2-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 1
1460 // CHECK2-NEXT: store i32 1, ptr [[TMP46]], align 4
1461 // CHECK2-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 2
1462 // CHECK2-NEXT: store ptr [[TMP43]], ptr [[TMP47]], align 8
1463 // CHECK2-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 3
1464 // CHECK2-NEXT: store ptr [[TMP44]], ptr [[TMP48]], align 8
1465 // CHECK2-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 4
1466 // CHECK2-NEXT: store ptr @.offload_sizes.3, ptr [[TMP49]], align 8
1467 // CHECK2-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 5
1468 // CHECK2-NEXT: store ptr @.offload_maptypes.4, ptr [[TMP50]], align 8
1469 // CHECK2-NEXT: [[TMP51:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 6
1470 // CHECK2-NEXT: store ptr null, ptr [[TMP51]], align 8
1471 // CHECK2-NEXT: [[TMP52:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 7
1472 // CHECK2-NEXT: store ptr null, ptr [[TMP52]], align 8
1473 // CHECK2-NEXT: [[TMP53:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 8
1474 // CHECK2-NEXT: store i64 123, ptr [[TMP53]], align 8
1475 // CHECK2-NEXT: [[TMP54:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 9
1476 // CHECK2-NEXT: store i64 0, ptr [[TMP54]], align 8
1477 // CHECK2-NEXT: [[TMP55:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 10
1478 // CHECK2-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP55]], align 4
1479 // CHECK2-NEXT: [[TMP56:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 11
1480 // CHECK2-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP56]], align 4
1481 // CHECK2-NEXT: [[TMP57:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 12
1482 // CHECK2-NEXT: store i32 0, ptr [[TMP57]], align 4
1483 // CHECK2-NEXT: [[TMP58:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46.region_id, ptr [[KERNEL_ARGS15]])
1484 // CHECK2-NEXT: [[TMP59:%.*]] = icmp ne i32 [[TMP58]], 0
1485 // CHECK2-NEXT: br i1 [[TMP59]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]]
1486 // CHECK2: omp_offload.failed16:
1487 // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46(ptr [[THIS1]]) #[[ATTR3]]
1488 // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT17]]
1489 // CHECK2: omp_offload.cont17:
1490 // CHECK2-NEXT: [[A18:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
1491 // CHECK2-NEXT: [[TMP60:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0
1492 // CHECK2-NEXT: store ptr [[THIS1]], ptr [[TMP60]], align 8
1493 // CHECK2-NEXT: [[TMP61:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 0
1494 // CHECK2-NEXT: store ptr [[A18]], ptr [[TMP61]], align 8
1495 // CHECK2-NEXT: [[TMP62:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS21]], i64 0, i64 0
1496 // CHECK2-NEXT: store ptr null, ptr [[TMP62]], align 8
1497 // CHECK2-NEXT: [[TMP63:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0
1498 // CHECK2-NEXT: [[TMP64:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 0
1499 // CHECK2-NEXT: [[TMP65:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 0
1500 // CHECK2-NEXT: store i32 2, ptr [[TMP65]], align 4
1501 // CHECK2-NEXT: [[TMP66:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 1
1502 // CHECK2-NEXT: store i32 1, ptr [[TMP66]], align 4
1503 // CHECK2-NEXT: [[TMP67:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 2
1504 // CHECK2-NEXT: store ptr [[TMP63]], ptr [[TMP67]], align 8
1505 // CHECK2-NEXT: [[TMP68:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 3
1506 // CHECK2-NEXT: store ptr [[TMP64]], ptr [[TMP68]], align 8
1507 // CHECK2-NEXT: [[TMP69:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 4
1508 // CHECK2-NEXT: store ptr @.offload_sizes.5, ptr [[TMP69]], align 8
1509 // CHECK2-NEXT: [[TMP70:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 5
1510 // CHECK2-NEXT: store ptr @.offload_maptypes.6, ptr [[TMP70]], align 8
1511 // CHECK2-NEXT: [[TMP71:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 6
1512 // CHECK2-NEXT: store ptr null, ptr [[TMP71]], align 8
1513 // CHECK2-NEXT: [[TMP72:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 7
1514 // CHECK2-NEXT: store ptr null, ptr [[TMP72]], align 8
1515 // CHECK2-NEXT: [[TMP73:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 8
1516 // CHECK2-NEXT: store i64 123, ptr [[TMP73]], align 8
1517 // CHECK2-NEXT: [[TMP74:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 9
1518 // CHECK2-NEXT: store i64 0, ptr [[TMP74]], align 8
1519 // CHECK2-NEXT: [[TMP75:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 10
1520 // CHECK2-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP75]], align 4
1521 // CHECK2-NEXT: [[TMP76:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 11
1522 // CHECK2-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP76]], align 4
1523 // CHECK2-NEXT: [[TMP77:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 12
1524 // CHECK2-NEXT: store i32 0, ptr [[TMP77]], align 4
1525 // CHECK2-NEXT: [[TMP78:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52.region_id, ptr [[KERNEL_ARGS23]])
1526 // CHECK2-NEXT: [[TMP79:%.*]] = icmp ne i32 [[TMP78]], 0
1527 // CHECK2-NEXT: br i1 [[TMP79]], label [[OMP_OFFLOAD_FAILED24:%.*]], label [[OMP_OFFLOAD_CONT25:%.*]]
1528 // CHECK2: omp_offload.failed24:
1529 // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52(ptr [[THIS1]]) #[[ATTR3]]
1530 // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT25]]
1531 // CHECK2: omp_offload.cont25:
1532 // CHECK2-NEXT: [[A26:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
1533 // CHECK2-NEXT: [[TMP80:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS27]], i32 0, i32 0
1534 // CHECK2-NEXT: store ptr [[THIS1]], ptr [[TMP80]], align 8
1535 // CHECK2-NEXT: [[TMP81:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS28]], i32 0, i32 0
1536 // CHECK2-NEXT: store ptr [[A26]], ptr [[TMP81]], align 8
1537 // CHECK2-NEXT: [[TMP82:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS29]], i64 0, i64 0
1538 // CHECK2-NEXT: store ptr null, ptr [[TMP82]], align 8
1539 // CHECK2-NEXT: [[TMP83:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS27]], i32 0, i32 0
1540 // CHECK2-NEXT: [[TMP84:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS28]], i32 0, i32 0
1541 // CHECK2-NEXT: [[TMP85:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 0
1542 // CHECK2-NEXT: store i32 2, ptr [[TMP85]], align 4
1543 // CHECK2-NEXT: [[TMP86:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 1
1544 // CHECK2-NEXT: store i32 1, ptr [[TMP86]], align 4
1545 // CHECK2-NEXT: [[TMP87:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 2
1546 // CHECK2-NEXT: store ptr [[TMP83]], ptr [[TMP87]], align 8
1547 // CHECK2-NEXT: [[TMP88:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 3
1548 // CHECK2-NEXT: store ptr [[TMP84]], ptr [[TMP88]], align 8
1549 // CHECK2-NEXT: [[TMP89:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 4
1550 // CHECK2-NEXT: store ptr @.offload_sizes.7, ptr [[TMP89]], align 8
1551 // CHECK2-NEXT: [[TMP90:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 5
1552 // CHECK2-NEXT: store ptr @.offload_maptypes.8, ptr [[TMP90]], align 8
1553 // CHECK2-NEXT: [[TMP91:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 6
1554 // CHECK2-NEXT: store ptr null, ptr [[TMP91]], align 8
1555 // CHECK2-NEXT: [[TMP92:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 7
1556 // CHECK2-NEXT: store ptr null, ptr [[TMP92]], align 8
1557 // CHECK2-NEXT: [[TMP93:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 8
1558 // CHECK2-NEXT: store i64 123, ptr [[TMP93]], align 8
1559 // CHECK2-NEXT: [[TMP94:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 9
1560 // CHECK2-NEXT: store i64 0, ptr [[TMP94]], align 8
1561 // CHECK2-NEXT: [[TMP95:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 10
1562 // CHECK2-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP95]], align 4
1563 // CHECK2-NEXT: [[TMP96:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 11
1564 // CHECK2-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP96]], align 4
1565 // CHECK2-NEXT: [[TMP97:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 12
1566 // CHECK2-NEXT: store i32 0, ptr [[TMP97]], align 4
1567 // CHECK2-NEXT: [[TMP98:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58.region_id, ptr [[KERNEL_ARGS31]])
1568 // CHECK2-NEXT: [[TMP99:%.*]] = icmp ne i32 [[TMP98]], 0
1569 // CHECK2-NEXT: br i1 [[TMP99]], label [[OMP_OFFLOAD_FAILED32:%.*]], label [[OMP_OFFLOAD_CONT33:%.*]]
1570 // CHECK2: omp_offload.failed32:
1571 // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58(ptr [[THIS1]]) #[[ATTR3]]
1572 // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT33]]
1573 // CHECK2: omp_offload.cont33:
1574 // CHECK2-NEXT: [[A34:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
1575 // CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A34]], i64 0, i64 0
1576 // CHECK2-NEXT: [[TMP100:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
1577 // CHECK2-NEXT: ret i32 [[TMP100]]
1580 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36
1581 // CHECK2-SAME: (ptr noundef [[THIS:%.*]]) #[[ATTR1:[0-9]+]] {
1582 // CHECK2-NEXT: entry:
1583 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
1584 // CHECK2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
1585 // CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
1586 // CHECK2-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.omp_outlined, ptr [[TMP0]])
1587 // CHECK2-NEXT: ret void
1590 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.omp_outlined
1591 // CHECK2-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR2:[0-9]+]] {
1592 // CHECK2-NEXT: entry:
1593 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1594 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1595 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
1596 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
1597 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
1598 // CHECK2-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
1599 // CHECK2-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
1600 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1601 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1602 // CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
1603 // CHECK2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
1604 // CHECK2-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
1605 // CHECK2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
1606 // CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
1607 // CHECK2-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
1608 // CHECK2-NEXT: store i32 122, ptr [[DOTOMP_COMB_UB]], align 4
1609 // CHECK2-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
1610 // CHECK2-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
1611 // CHECK2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
1612 // CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
1613 // CHECK2-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
1614 // CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
1615 // CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
1616 // CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1617 // CHECK2: cond.true:
1618 // CHECK2-NEXT: br label [[COND_END:%.*]]
1619 // CHECK2: cond.false:
1620 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
1621 // CHECK2-NEXT: br label [[COND_END]]
1622 // CHECK2: cond.end:
1623 // CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
1624 // CHECK2-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
1625 // CHECK2-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
1626 // CHECK2-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
1627 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
1628 // CHECK2: omp.inner.for.cond:
1629 // CHECK2-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8:![0-9]+]]
1630 // CHECK2-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP8]]
1631 // CHECK2-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
1632 // CHECK2-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1633 // CHECK2: omp.inner.for.body:
1634 // CHECK2-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP8]]
1635 // CHECK2-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
1636 // CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP8]]
1637 // CHECK2-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
1638 // CHECK2-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP8]]
1639 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
1640 // CHECK2: omp.inner.for.inc:
1641 // CHECK2-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]]
1642 // CHECK2-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP8]]
1643 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
1644 // CHECK2-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]]
1645 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]]
1646 // CHECK2: omp.inner.for.end:
1647 // CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
1648 // CHECK2: omp.loop.exit:
1649 // CHECK2-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
1650 // CHECK2-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
1651 // CHECK2-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
1652 // CHECK2-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1653 // CHECK2: .omp.final.then:
1654 // CHECK2-NEXT: store i32 123, ptr [[I]], align 4
1655 // CHECK2-NEXT: br label [[DOTOMP_FINAL_DONE]]
1656 // CHECK2: .omp.final.done:
1657 // CHECK2-NEXT: ret void
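// NOTE (editorial): ..._l36.omp_outlined above is the teams/distribute level of the combined
// construct. __kmpc_for_static_init_4 is called with schedule type 92, which is
// kmp_distribute_static in the OpenMP runtime's schedule enum, so each team gets one
// contiguous slice of the 0..122 iteration space; the slice bounds are clamped to 122,
// zero-extended to i64 and handed to the parallel-for level through __kmpc_fork_call.
// Rough shape of the distribute loop being checked (sketch, not generated code):
//
//   for (iv = comb_lb; iv <= comb_ub; iv += stride)                    // distribute loop
//     __kmpc_fork_call(&loc, 3, inner_outlined, (i64)comb_lb, (i64)comb_ub, this);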
1660 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.omp_outlined.omp_outlined
1661 // CHECK2-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR2]] {
1662 // CHECK2-NEXT: entry:
1663 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1664 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1665 // CHECK2-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
1666 // CHECK2-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
1667 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
1668 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
1669 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
1670 // CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
1671 // CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
1672 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1673 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1674 // CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
1675 // CHECK2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
1676 // CHECK2-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
1677 // CHECK2-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
1678 // CHECK2-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
1679 // CHECK2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
1680 // CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
1681 // CHECK2-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
1682 // CHECK2-NEXT: store i32 122, ptr [[DOTOMP_UB]], align 4
1683 // CHECK2-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
1684 // CHECK2-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
1685 // CHECK2-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
1686 // CHECK2-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
1687 // CHECK2-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
1688 // CHECK2-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
1689 // CHECK2-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
1690 // CHECK2-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
1691 // CHECK2-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
1692 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
1693 // CHECK2-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
1694 // CHECK2-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
1695 // CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 122
1696 // CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1697 // CHECK2: cond.true:
1698 // CHECK2-NEXT: br label [[COND_END:%.*]]
1699 // CHECK2: cond.false:
1700 // CHECK2-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
1701 // CHECK2-NEXT: br label [[COND_END]]
1702 // CHECK2: cond.end:
1703 // CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
1704 // CHECK2-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
1705 // CHECK2-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
1706 // CHECK2-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4
1707 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
1708 // CHECK2: omp.inner.for.cond:
1709 // CHECK2-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12:![0-9]+]]
1710 // CHECK2-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP12]]
1711 // CHECK2-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
1712 // CHECK2-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1713 // CHECK2: omp.inner.for.body:
1714 // CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]]
1715 // CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
1716 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1717 // CHECK2-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP12]]
1718 // CHECK2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, i32 0
1719 // CHECK2-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP12]]
1720 // CHECK2-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
1721 // CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A]], i64 0, i64 [[IDXPROM]]
1722 // CHECK2-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP12]]
1723 // CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
1724 // CHECK2: omp.body.continue:
1725 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
1726 // CHECK2: omp.inner.for.inc:
1727 // CHECK2-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]]
1728 // CHECK2-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
1729 // CHECK2-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]]
1730 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]]
1731 // CHECK2: omp.inner.for.end:
1732 // CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
1733 // CHECK2: omp.loop.exit:
1734 // CHECK2-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP4]])
1735 // CHECK2-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
1736 // CHECK2-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
1737 // CHECK2-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1738 // CHECK2: .omp.final.then:
1739 // CHECK2-NEXT: store i32 123, ptr [[I]], align 4
1740 // CHECK2-NEXT: br label [[DOTOMP_FINAL_DONE]]
1741 // CHECK2: .omp.final.done:
1742 // CHECK2-NEXT: ret void
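// NOTE (editorial, assumption): the ..._l36 pair of outlined functions above (distribute level
// with schedule 92, parallel-for level with schedule 34 = kmp_sch_static, plus the
// !llvm.access.group and !llvm.loop metadata contributed by the simd part) is the usual
// lowering of a combined "target teams distribute parallel for simd" style directive. The
// source for lines 36-58 is not visible in this excerpt, so the following is a hypothetical
// reconstruction only, kept consistent with the trip count of 123, the a[i] = 0 store and the
// final i = 123 store in .omp.final.then:
//
//   #pragma omp target teams distribute parallel for simd   // hypothetical
//   for (int i = 0; i < 123; ++i)
//     a[i] = 0;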
1745 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41
1746 // CHECK2-SAME: (ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
1747 // CHECK2-NEXT: entry:
1748 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
1749 // CHECK2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
1750 // CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
1751 // CHECK2-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41.omp_outlined, ptr [[TMP0]])
1752 // CHECK2-NEXT: ret void
1755 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41.omp_outlined
1756 // CHECK2-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR2]] {
1757 // CHECK2-NEXT: entry:
1758 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1759 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1760 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
1761 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
1762 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
1763 // CHECK2-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
1764 // CHECK2-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
1765 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1766 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1767 // CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
1768 // CHECK2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
1769 // CHECK2-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
1770 // CHECK2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
1771 // CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
1772 // CHECK2-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
1773 // CHECK2-NEXT: store i32 122, ptr [[DOTOMP_COMB_UB]], align 4
1774 // CHECK2-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
1775 // CHECK2-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
1776 // CHECK2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
1777 // CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
1778 // CHECK2-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
1779 // CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
1780 // CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
1781 // CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1782 // CHECK2: cond.true:
1783 // CHECK2-NEXT: br label [[COND_END:%.*]]
1784 // CHECK2: cond.false:
1785 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
1786 // CHECK2-NEXT: br label [[COND_END]]
1787 // CHECK2: cond.end:
1788 // CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
1789 // CHECK2-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
1790 // CHECK2-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
1791 // CHECK2-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
1792 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
1793 // CHECK2: omp.inner.for.cond:
1794 // CHECK2-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP17:![0-9]+]]
1795 // CHECK2-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP17]]
1796 // CHECK2-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
1797 // CHECK2-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1798 // CHECK2: omp.inner.for.body:
1799 // CHECK2-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP17]]
1800 // CHECK2-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
1801 // CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP17]]
1802 // CHECK2-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
1803 // CHECK2-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP17]]
1804 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
1805 // CHECK2: omp.inner.for.inc:
1806 // CHECK2-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP17]]
1807 // CHECK2-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP17]]
1808 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
1809 // CHECK2-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP17]]
1810 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP18:![0-9]+]]
1811 // CHECK2: omp.inner.for.end:
1812 // CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
1813 // CHECK2: omp.loop.exit:
1814 // CHECK2-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
1815 // CHECK2-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
1816 // CHECK2-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
1817 // CHECK2-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1818 // CHECK2: .omp.final.then:
1819 // CHECK2-NEXT: store i32 123, ptr [[I]], align 4
1820 // CHECK2-NEXT: br label [[DOTOMP_FINAL_DONE]]
1821 // CHECK2: .omp.final.done:
1822 // CHECK2-NEXT: ret void
1825 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41.omp_outlined.omp_outlined
1826 // CHECK2-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR2]] {
1827 // CHECK2-NEXT: entry:
1828 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1829 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1830 // CHECK2-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
1831 // CHECK2-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
1832 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
1833 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
1834 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
1835 // CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
1836 // CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
1837 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1838 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1839 // CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
1840 // CHECK2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
1841 // CHECK2-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
1842 // CHECK2-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
1843 // CHECK2-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
1844 // CHECK2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
1845 // CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
1846 // CHECK2-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
1847 // CHECK2-NEXT: store i32 122, ptr [[DOTOMP_UB]], align 4
1848 // CHECK2-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
1849 // CHECK2-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
1850 // CHECK2-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
1851 // CHECK2-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
1852 // CHECK2-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
1853 // CHECK2-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
1854 // CHECK2-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
1855 // CHECK2-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
1856 // CHECK2-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
1857 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
1858 // CHECK2-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
1859 // CHECK2-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
1860 // CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 122
1861 // CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1862 // CHECK2: cond.true:
1863 // CHECK2-NEXT: br label [[COND_END:%.*]]
1864 // CHECK2: cond.false:
1865 // CHECK2-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
1866 // CHECK2-NEXT: br label [[COND_END]]
1867 // CHECK2: cond.end:
1868 // CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
1869 // CHECK2-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
1870 // CHECK2-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
1871 // CHECK2-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4
1872 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
1873 // CHECK2: omp.inner.for.cond:
1874 // CHECK2-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20:![0-9]+]]
1875 // CHECK2-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP20]]
1876 // CHECK2-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
1877 // CHECK2-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1878 // CHECK2: omp.inner.for.body:
1879 // CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]]
1880 // CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
1881 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1882 // CHECK2-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP20]]
1883 // CHECK2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, i32 0
1884 // CHECK2-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP20]]
1885 // CHECK2-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
1886 // CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A]], i64 0, i64 [[IDXPROM]]
1887 // CHECK2-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP20]]
1888 // CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
1889 // CHECK2: omp.body.continue:
1890 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
1891 // CHECK2: omp.inner.for.inc:
1892 // CHECK2-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]]
1893 // CHECK2-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
1894 // CHECK2-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]]
1895 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]]
1896 // CHECK2: omp.inner.for.end:
1897 // CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
1898 // CHECK2: omp.loop.exit:
1899 // CHECK2-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP4]])
1900 // CHECK2-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
1901 // CHECK2-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
1902 // CHECK2-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1903 // CHECK2: .omp.final.then:
1904 // CHECK2-NEXT: store i32 123, ptr [[I]], align 4
1905 // CHECK2-NEXT: br label [[DOTOMP_FINAL_DONE]]
1906 // CHECK2: .omp.final.done:
1907 // CHECK2-NEXT: ret void
1910 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46
1911 // CHECK2-SAME: (ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
1912 // CHECK2-NEXT: entry:
1913 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
1914 // CHECK2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
1915 // CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
1916 // CHECK2-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46.omp_outlined, ptr [[TMP0]])
1917 // CHECK2-NEXT: ret void
1920 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46.omp_outlined
1921 // CHECK2-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR2]] {
1922 // CHECK2-NEXT: entry:
1923 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1924 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1925 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
1926 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
1927 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
1928 // CHECK2-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
1929 // CHECK2-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
1930 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1931 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1932 // CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
1933 // CHECK2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
1934 // CHECK2-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
1935 // CHECK2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
1936 // CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
1937 // CHECK2-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
1938 // CHECK2-NEXT: store i32 122, ptr [[DOTOMP_COMB_UB]], align 4
1939 // CHECK2-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
1940 // CHECK2-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
1941 // CHECK2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
1942 // CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
1943 // CHECK2-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
1944 // CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
1945 // CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
1946 // CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1947 // CHECK2: cond.true:
1948 // CHECK2-NEXT: br label [[COND_END:%.*]]
1949 // CHECK2: cond.false:
1950 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
1951 // CHECK2-NEXT: br label [[COND_END]]
1952 // CHECK2: cond.end:
1953 // CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
1954 // CHECK2-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
1955 // CHECK2-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
1956 // CHECK2-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
1957 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
1958 // CHECK2: omp.inner.for.cond:
1959 // CHECK2-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23:![0-9]+]]
1960 // CHECK2-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP23]]
1961 // CHECK2-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
1962 // CHECK2-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1963 // CHECK2: omp.inner.for.body:
1964 // CHECK2-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP23]]
1965 // CHECK2-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
1966 // CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP23]]
1967 // CHECK2-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
1968 // CHECK2-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP23]]
1969 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
1970 // CHECK2: omp.inner.for.inc:
1971 // CHECK2-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]]
1972 // CHECK2-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP23]]
1973 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
1974 // CHECK2-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]]
1975 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]]
1976 // CHECK2: omp.inner.for.end:
1977 // CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
1978 // CHECK2: omp.loop.exit:
1979 // CHECK2-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
1980 // CHECK2-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
1981 // CHECK2-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
1982 // CHECK2-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1983 // CHECK2: .omp.final.then:
1984 // CHECK2-NEXT: store i32 123, ptr [[I]], align 4
1985 // CHECK2-NEXT: br label [[DOTOMP_FINAL_DONE]]
1986 // CHECK2: .omp.final.done:
1987 // CHECK2-NEXT: ret void
1990 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46.omp_outlined.omp_outlined
1991 // CHECK2-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR2]] {
1992 // CHECK2-NEXT: entry:
1993 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1994 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1995 // CHECK2-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
1996 // CHECK2-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
1997 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
1998 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
1999 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
2000 // CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
2001 // CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
2002 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2003 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2004 // CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
2005 // CHECK2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
2006 // CHECK2-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
2007 // CHECK2-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
2008 // CHECK2-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
2009 // CHECK2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
2010 // CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
2011 // CHECK2-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
2012 // CHECK2-NEXT: store i32 122, ptr [[DOTOMP_UB]], align 4
2013 // CHECK2-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
2014 // CHECK2-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
2015 // CHECK2-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
2016 // CHECK2-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
2017 // CHECK2-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
2018 // CHECK2-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
2019 // CHECK2-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
2020 // CHECK2-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
2021 // CHECK2-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
2022 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
2023 // CHECK2-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP4]], i32 33, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 61)
2024 // CHECK2-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
2025 // CHECK2: omp.dispatch.cond:
2026 // CHECK2-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
2027 // CHECK2-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
2028 // CHECK2-NEXT: [[CONV2:%.*]] = trunc i64 [[TMP6]] to i32
2029 // CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], [[CONV2]]
2030 // CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2031 // CHECK2: cond.true:
2032 // CHECK2-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
2033 // CHECK2-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP7]] to i32
2034 // CHECK2-NEXT: br label [[COND_END:%.*]]
2035 // CHECK2: cond.false:
2036 // CHECK2-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
2037 // CHECK2-NEXT: br label [[COND_END]]
2038 // CHECK2: cond.end:
2039 // CHECK2-NEXT: [[COND:%.*]] = phi i32 [ [[CONV3]], [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ]
2040 // CHECK2-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
2041 // CHECK2-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
2042 // CHECK2-NEXT: store i32 [[TMP9]], ptr [[DOTOMP_IV]], align 4
2043 // CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
2044 // CHECK2-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
2045 // CHECK2-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
2046 // CHECK2-NEXT: br i1 [[CMP4]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
2047 // CHECK2: omp.dispatch.body:
2048 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
2049 // CHECK2: omp.inner.for.cond:
2050 // CHECK2-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26:![0-9]+]]
2051 // CHECK2-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP26]]
2052 // CHECK2-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
2053 // CHECK2-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2054 // CHECK2: omp.inner.for.body:
2055 // CHECK2-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]]
2056 // CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP14]], 1
2057 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2058 // CHECK2-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP26]]
2059 // CHECK2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, i32 0
2060 // CHECK2-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP26]]
2061 // CHECK2-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP15]] to i64
2062 // CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A]], i64 0, i64 [[IDXPROM]]
2063 // CHECK2-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP26]]
2064 // CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
2065 // CHECK2: omp.body.continue:
2066 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
2067 // CHECK2: omp.inner.for.inc:
2068 // CHECK2-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]]
2069 // CHECK2-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP16]], 1
2070 // CHECK2-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]]
2071 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]]
2072 // CHECK2: omp.inner.for.end:
2073 // CHECK2-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
2074 // CHECK2: omp.dispatch.inc:
2075 // CHECK2-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
2076 // CHECK2-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
2077 // CHECK2-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
2078 // CHECK2-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_LB]], align 4
2079 // CHECK2-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
2080 // CHECK2-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
2081 // CHECK2-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
2082 // CHECK2-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_UB]], align 4
2083 // CHECK2-NEXT: br label [[OMP_DISPATCH_COND]]
2084 // CHECK2: omp.dispatch.end:
2085 // CHECK2-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP4]])
2086 // CHECK2-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
2087 // CHECK2-NEXT: [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0
2088 // CHECK2-NEXT: br i1 [[TMP22]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2089 // CHECK2: .omp.final.then:
2090 // CHECK2-NEXT: store i32 123, ptr [[I]], align 4
2091 // CHECK2-NEXT: br label [[DOTOMP_FINAL_DONE]]
2092 // CHECK2: .omp.final.done:
2093 // CHECK2-NEXT: ret void
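// NOTE (editorial): in the ..._l46 inner outlined function above, __kmpc_for_static_init_4 is
// called with schedule type 33 (kmp_sch_static_chunked in the runtime enum) and a chunk of 61,
// so the body is wrapped in an omp.dispatch.cond / omp.dispatch.inc loop: after each
// 61-iteration chunk both bounds are advanced by the runtime-provided stride, and the upper
// bound is re-clamped against the bound inherited from the distribute level. In source terms
// this matches a schedule(static, 61) style clause; the clause text itself is an assumption,
// only the constants are visible here. Sketch of the checked control flow:
//
//   for (; lb <= prev_ub; lb += stride, ub += stride)        // chunk loop
//     for (iv = lb; iv <= min(ub, prev_ub); ++iv)            // 61 iterations per chunk
//       a[iv] = 0;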
2096 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52
2097 // CHECK2-SAME: (ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
2098 // CHECK2-NEXT: entry:
2099 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
2100 // CHECK2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
2101 // CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
2102 // CHECK2-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52.omp_outlined, ptr [[TMP0]])
2103 // CHECK2-NEXT: ret void
2106 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52.omp_outlined
2107 // CHECK2-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR2]] {
2108 // CHECK2-NEXT: entry:
2109 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2110 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2111 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
2112 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
2113 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
2114 // CHECK2-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
2115 // CHECK2-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
2116 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2117 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2118 // CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
2119 // CHECK2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
2120 // CHECK2-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
2121 // CHECK2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
2122 // CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
2123 // CHECK2-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
2124 // CHECK2-NEXT: store i32 122, ptr [[DOTOMP_COMB_UB]], align 4
2125 // CHECK2-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
2126 // CHECK2-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
2127 // CHECK2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
2128 // CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
2129 // CHECK2-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
2130 // CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
2131 // CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
2132 // CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2133 // CHECK2: cond.true:
2134 // CHECK2-NEXT: br label [[COND_END:%.*]]
2135 // CHECK2: cond.false:
2136 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
2137 // CHECK2-NEXT: br label [[COND_END]]
2138 // CHECK2: cond.end:
2139 // CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
2140 // CHECK2-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
2141 // CHECK2-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
2142 // CHECK2-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
2143 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
2144 // CHECK2: omp.inner.for.cond:
2145 // CHECK2-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29:![0-9]+]]
2146 // CHECK2-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP29]]
2147 // CHECK2-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
2148 // CHECK2-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2149 // CHECK2: omp.inner.for.body:
2150 // CHECK2-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP29]]
2151 // CHECK2-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
2152 // CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP29]]
2153 // CHECK2-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
2154 // CHECK2-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP29]]
2155 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
2156 // CHECK2: omp.inner.for.inc:
2157 // CHECK2-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]]
2158 // CHECK2-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP29]]
2159 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
2160 // CHECK2-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]]
2161 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]]
2162 // CHECK2: omp.inner.for.end:
2163 // CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
2164 // CHECK2: omp.loop.exit:
2165 // CHECK2-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
2166 // CHECK2-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
2167 // CHECK2-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
2168 // CHECK2-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2169 // CHECK2: .omp.final.then:
2170 // CHECK2-NEXT: store i32 123, ptr [[I]], align 4
2171 // CHECK2-NEXT: br label [[DOTOMP_FINAL_DONE]]
2172 // CHECK2: .omp.final.done:
2173 // CHECK2-NEXT: ret void
2176 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52.omp_outlined.omp_outlined
2177 // CHECK2-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR2]] {
2178 // CHECK2-NEXT: entry:
2179 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2180 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2181 // CHECK2-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
2182 // CHECK2-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
2183 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
2184 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
2185 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
2186 // CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
2187 // CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
2188 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2189 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2190 // CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
2191 // CHECK2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
2192 // CHECK2-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
2193 // CHECK2-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
2194 // CHECK2-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
2195 // CHECK2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
2196 // CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
2197 // CHECK2-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
2198 // CHECK2-NEXT: store i32 122, ptr [[DOTOMP_UB]], align 4
2199 // CHECK2-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
2200 // CHECK2-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
2201 // CHECK2-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
2202 // CHECK2-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
2203 // CHECK2-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
2204 // CHECK2-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
2205 // CHECK2-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
2206 // CHECK2-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
2207 // CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
2208 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
2209 // CHECK2-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
2210 // CHECK2-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
2211 // CHECK2-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB3]], i32 [[TMP6]], i32 1073741859, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 1)
2212 // CHECK2-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
2213 // CHECK2: omp.dispatch.cond:
2214 // CHECK2-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB3]], i32 [[TMP6]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
2215 // CHECK2-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
2216 // CHECK2-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
2217 // CHECK2: omp.dispatch.body:
2218 // CHECK2-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
2219 // CHECK2-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4
2220 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
2221 // CHECK2: omp.inner.for.cond:
2222 // CHECK2-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32:![0-9]+]]
2223 // CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP32]]
2224 // CHECK2-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
2225 // CHECK2-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2226 // CHECK2: omp.inner.for.body:
2227 // CHECK2-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]]
2228 // CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
2229 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2230 // CHECK2-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP32]]
2231 // CHECK2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, i32 0
2232 // CHECK2-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP32]]
2233 // CHECK2-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP12]] to i64
2234 // CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A]], i64 0, i64 [[IDXPROM]]
2235 // CHECK2-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP32]]
2236 // CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
2237 // CHECK2: omp.body.continue:
2238 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
2239 // CHECK2: omp.inner.for.inc:
2240 // CHECK2-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]]
2241 // CHECK2-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP13]], 1
2242 // CHECK2-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]]
2243 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]]
2244 // CHECK2: omp.inner.for.end:
2245 // CHECK2-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
2246 // CHECK2: omp.dispatch.inc:
2247 // CHECK2-NEXT: br label [[OMP_DISPATCH_COND]]
2248 // CHECK2: omp.dispatch.end:
2249 // CHECK2-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
2250 // CHECK2-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
2251 // CHECK2-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2252 // CHECK2: .omp.final.then:
2253 // CHECK2-NEXT: store i32 123, ptr [[I]], align 4
2254 // CHECK2-NEXT: br label [[DOTOMP_FINAL_DONE]]
2255 // CHECK2: .omp.final.done:
2256 // CHECK2-NEXT: ret void
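// NOTE (editorial): the ..._l52 inner outlined function above switches to the dynamic dispatch
// entry points: __kmpc_dispatch_init_4 receives the schedule constant 1073741859 (0x40000023,
// i.e. kmp_sch_dynamic_chunked with the non-monotonic modifier bit set in the runtime enum)
// and a chunk of 1, and the omp.dispatch.cond loop keeps calling __kmpc_dispatch_next_4,
// executing one [lb, ub] chunk per nonzero return and stopping when it returns 0. The ..._l58
// variant further below is identical except that the chunk argument is 61, which presumably
// reflects a schedule(dynamic, 61) style clause; that reading is inferred from the constants,
// not from visible source.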
2259 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58
2260 // CHECK2-SAME: (ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
2261 // CHECK2-NEXT: entry:
2262 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
2263 // CHECK2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
2264 // CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
2265 // CHECK2-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58.omp_outlined, ptr [[TMP0]])
2266 // CHECK2-NEXT: ret void
2269 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58.omp_outlined
2270 // CHECK2-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR2]] {
2271 // CHECK2-NEXT: entry:
2272 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2273 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2274 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
2275 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
2276 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
2277 // CHECK2-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
2278 // CHECK2-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
2279 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2280 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2281 // CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
2282 // CHECK2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
2283 // CHECK2-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
2284 // CHECK2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
2285 // CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
2286 // CHECK2-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
2287 // CHECK2-NEXT: store i32 122, ptr [[DOTOMP_COMB_UB]], align 4
2288 // CHECK2-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
2289 // CHECK2-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
2290 // CHECK2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
2291 // CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
2292 // CHECK2-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
2293 // CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
2294 // CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
2295 // CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2296 // CHECK2: cond.true:
2297 // CHECK2-NEXT: br label [[COND_END:%.*]]
2298 // CHECK2: cond.false:
2299 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
2300 // CHECK2-NEXT: br label [[COND_END]]
2301 // CHECK2: cond.end:
2302 // CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
2303 // CHECK2-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
2304 // CHECK2-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
2305 // CHECK2-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
2306 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
2307 // CHECK2: omp.inner.for.cond:
2308 // CHECK2-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35:![0-9]+]]
2309 // CHECK2-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP35]]
2310 // CHECK2-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
2311 // CHECK2-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2312 // CHECK2: omp.inner.for.body:
2313 // CHECK2-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP35]]
2314 // CHECK2-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
2315 // CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP35]]
2316 // CHECK2-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
2317 // CHECK2-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP35]]
2318 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
2319 // CHECK2: omp.inner.for.inc:
2320 // CHECK2-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]]
2321 // CHECK2-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP35]]
2322 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
2323 // CHECK2-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]]
2324 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]]
2325 // CHECK2: omp.inner.for.end:
2326 // CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
2327 // CHECK2: omp.loop.exit:
2328 // CHECK2-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
2329 // CHECK2-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
2330 // CHECK2-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
2331 // CHECK2-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2332 // CHECK2: .omp.final.then:
2333 // CHECK2-NEXT: store i32 123, ptr [[I]], align 4
2334 // CHECK2-NEXT: br label [[DOTOMP_FINAL_DONE]]
2335 // CHECK2: .omp.final.done:
2336 // CHECK2-NEXT: ret void
2339 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58.omp_outlined.omp_outlined
2340 // CHECK2-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR2]] {
2341 // CHECK2-NEXT: entry:
2342 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2343 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2344 // CHECK2-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
2345 // CHECK2-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
2346 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
2347 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
2348 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
2349 // CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
2350 // CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
2351 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2352 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2353 // CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
2354 // CHECK2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
2355 // CHECK2-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
2356 // CHECK2-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
2357 // CHECK2-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
2358 // CHECK2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
2359 // CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
2360 // CHECK2-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
2361 // CHECK2-NEXT: store i32 122, ptr [[DOTOMP_UB]], align 4
2362 // CHECK2-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
2363 // CHECK2-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
2364 // CHECK2-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
2365 // CHECK2-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
2366 // CHECK2-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
2367 // CHECK2-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
2368 // CHECK2-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
2369 // CHECK2-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
2370 // CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
2371 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
2372 // CHECK2-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
2373 // CHECK2-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
2374 // CHECK2-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB3]], i32 [[TMP6]], i32 1073741859, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 61)
2375 // CHECK2-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
2376 // CHECK2: omp.dispatch.cond:
2377 // CHECK2-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB3]], i32 [[TMP6]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
2378 // CHECK2-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
2379 // CHECK2-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
2380 // CHECK2: omp.dispatch.body:
2381 // CHECK2-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
2382 // CHECK2-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4
2383 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
2384 // CHECK2: omp.inner.for.cond:
2385 // CHECK2-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38:![0-9]+]]
2386 // CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP38]]
2387 // CHECK2-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
2388 // CHECK2-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2389 // CHECK2: omp.inner.for.body:
2390 // CHECK2-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38]]
2391 // CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
2392 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2393 // CHECK2-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP38]]
2394 // CHECK2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, i32 0
2395 // CHECK2-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP38]]
2396 // CHECK2-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP12]] to i64
2397 // CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A]], i64 0, i64 [[IDXPROM]]
2398 // CHECK2-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP38]]
2399 // CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
2400 // CHECK2: omp.body.continue:
2401 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
2402 // CHECK2: omp.inner.for.inc:
2403 // CHECK2-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38]]
2404 // CHECK2-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP13]], 1
2405 // CHECK2-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38]]
2406 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP39:![0-9]+]]
2407 // CHECK2: omp.inner.for.end:
2408 // CHECK2-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
2409 // CHECK2: omp.dispatch.inc:
2410 // CHECK2-NEXT: br label [[OMP_DISPATCH_COND]]
2411 // CHECK2: omp.dispatch.end:
2412 // CHECK2-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
2413 // CHECK2-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
2414 // CHECK2-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2415 // CHECK2: .omp.final.then:
2416 // CHECK2-NEXT: store i32 123, ptr [[I]], align 4
2417 // CHECK2-NEXT: br label [[DOTOMP_FINAL_DONE]]
2418 // CHECK2: .omp.final.done:
2419 // CHECK2-NEXT: ret void
2422 // CHECK2-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
2423 // CHECK2-SAME: () #[[ATTR4:[0-9]+]] {
2424 // CHECK2-NEXT: entry:
2425 // CHECK2-NEXT: call void @__tgt_register_requires(i64 1)
2426 // CHECK2-NEXT: ret void
2429 // CHECK5-LABEL: define {{[^@]+}}@_Z21teams_template_structv
2430 // CHECK5-SAME: () #[[ATTR0:[0-9]+]] {
2431 // CHECK5-NEXT: entry:
2432 // CHECK5-NEXT: [[V:%.*]] = alloca [[STRUCT_SS:%.*]], align 4
2433 // CHECK5-NEXT: [[CALL:%.*]] = call noundef i32 @_ZN2SSIiLi123ELx456EE3fooEv(ptr noundef nonnull align 4 dereferenceable(496) [[V]])
2434 // CHECK5-NEXT: ret i32 [[CALL]]
2437 // CHECK5-LABEL: define {{[^@]+}}@_ZN2SSIiLi123ELx456EE3fooEv
2438 // CHECK5-SAME: (ptr noundef nonnull align 4 dereferenceable(496) [[THIS:%.*]]) #[[ATTR0]] comdat align 2 {
2439 // CHECK5-NEXT: entry:
2440 // CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
2441 // CHECK5-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 4
2442 // CHECK5-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 4
2443 // CHECK5-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 4
2444 // CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
2445 // CHECK5-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
2446 // CHECK5-NEXT: [[DOTOFFLOAD_BASEPTRS3:%.*]] = alloca [1 x ptr], align 4
2447 // CHECK5-NEXT: [[DOTOFFLOAD_PTRS4:%.*]] = alloca [1 x ptr], align 4
2448 // CHECK5-NEXT: [[DOTOFFLOAD_MAPPERS5:%.*]] = alloca [1 x ptr], align 4
2449 // CHECK5-NEXT: [[_TMP6:%.*]] = alloca i32, align 4
2450 // CHECK5-NEXT: [[KERNEL_ARGS7:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
2451 // CHECK5-NEXT: [[DOTOFFLOAD_BASEPTRS11:%.*]] = alloca [1 x ptr], align 4
2452 // CHECK5-NEXT: [[DOTOFFLOAD_PTRS12:%.*]] = alloca [1 x ptr], align 4
2453 // CHECK5-NEXT: [[DOTOFFLOAD_MAPPERS13:%.*]] = alloca [1 x ptr], align 4
2454 // CHECK5-NEXT: [[_TMP14:%.*]] = alloca i32, align 4
2455 // CHECK5-NEXT: [[KERNEL_ARGS15:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
2456 // CHECK5-NEXT: [[DOTOFFLOAD_BASEPTRS19:%.*]] = alloca [1 x ptr], align 4
2457 // CHECK5-NEXT: [[DOTOFFLOAD_PTRS20:%.*]] = alloca [1 x ptr], align 4
2458 // CHECK5-NEXT: [[DOTOFFLOAD_MAPPERS21:%.*]] = alloca [1 x ptr], align 4
2459 // CHECK5-NEXT: [[_TMP22:%.*]] = alloca i32, align 4
2460 // CHECK5-NEXT: [[KERNEL_ARGS23:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
2461 // CHECK5-NEXT: [[DOTOFFLOAD_BASEPTRS27:%.*]] = alloca [1 x ptr], align 4
2462 // CHECK5-NEXT: [[DOTOFFLOAD_PTRS28:%.*]] = alloca [1 x ptr], align 4
2463 // CHECK5-NEXT: [[DOTOFFLOAD_MAPPERS29:%.*]] = alloca [1 x ptr], align 4
2464 // CHECK5-NEXT: [[_TMP30:%.*]] = alloca i32, align 4
2465 // CHECK5-NEXT: [[KERNEL_ARGS31:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
2466 // CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
2467 // CHECK5-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
2468 // CHECK5-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[THIS1]], i32 0, i32 0
2469 // CHECK5-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
2470 // CHECK5-NEXT: store ptr [[THIS1]], ptr [[TMP0]], align 4
2471 // CHECK5-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
2472 // CHECK5-NEXT: store ptr [[A]], ptr [[TMP1]], align 4
2473 // CHECK5-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
2474 // CHECK5-NEXT: store ptr null, ptr [[TMP2]], align 4
2475 // CHECK5-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
2476 // CHECK5-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
2477 // CHECK5-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
2478 // CHECK5-NEXT: store i32 2, ptr [[TMP5]], align 4
2479 // CHECK5-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
2480 // CHECK5-NEXT: store i32 1, ptr [[TMP6]], align 4
2481 // CHECK5-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
2482 // CHECK5-NEXT: store ptr [[TMP3]], ptr [[TMP7]], align 4
2483 // CHECK5-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
2484 // CHECK5-NEXT: store ptr [[TMP4]], ptr [[TMP8]], align 4
2485 // CHECK5-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
2486 // CHECK5-NEXT: store ptr @.offload_sizes, ptr [[TMP9]], align 4
2487 // CHECK5-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
2488 // CHECK5-NEXT: store ptr @.offload_maptypes, ptr [[TMP10]], align 4
2489 // CHECK5-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
2490 // CHECK5-NEXT: store ptr null, ptr [[TMP11]], align 4
2491 // CHECK5-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
2492 // CHECK5-NEXT: store ptr null, ptr [[TMP12]], align 4
2493 // CHECK5-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
2494 // CHECK5-NEXT: store i64 123, ptr [[TMP13]], align 8
2495 // CHECK5-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
2496 // CHECK5-NEXT: store i64 0, ptr [[TMP14]], align 8
2497 // CHECK5-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
2498 // CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP15]], align 4
2499 // CHECK5-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
2500 // CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP16]], align 4
2501 // CHECK5-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
2502 // CHECK5-NEXT: store i32 0, ptr [[TMP17]], align 4
2503 // CHECK5-NEXT: [[TMP18:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.region_id, ptr [[KERNEL_ARGS]])
2504 // CHECK5-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
2505 // CHECK5-NEXT: br i1 [[TMP19]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
2506 // CHECK5: omp_offload.failed:
2507 // CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36(ptr [[THIS1]]) #[[ATTR3:[0-9]+]]
2508 // CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT]]
2509 // CHECK5: omp_offload.cont:
2510 // CHECK5-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
2511 // CHECK5-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
2512 // CHECK5-NEXT: store ptr [[THIS1]], ptr [[TMP20]], align 4
2513 // CHECK5-NEXT: [[TMP21:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
2514 // CHECK5-NEXT: store ptr [[A2]], ptr [[TMP21]], align 4
2515 // CHECK5-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS5]], i32 0, i32 0
2516 // CHECK5-NEXT: store ptr null, ptr [[TMP22]], align 4
2517 // CHECK5-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
2518 // CHECK5-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
2519 // CHECK5-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 0
2520 // CHECK5-NEXT: store i32 2, ptr [[TMP25]], align 4
2521 // CHECK5-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 1
2522 // CHECK5-NEXT: store i32 1, ptr [[TMP26]], align 4
2523 // CHECK5-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 2
2524 // CHECK5-NEXT: store ptr [[TMP23]], ptr [[TMP27]], align 4
2525 // CHECK5-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 3
2526 // CHECK5-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 4
2527 // CHECK5-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 4
2528 // CHECK5-NEXT: store ptr @.offload_sizes.1, ptr [[TMP29]], align 4
2529 // CHECK5-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 5
2530 // CHECK5-NEXT: store ptr @.offload_maptypes.2, ptr [[TMP30]], align 4
2531 // CHECK5-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 6
2532 // CHECK5-NEXT: store ptr null, ptr [[TMP31]], align 4
2533 // CHECK5-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 7
2534 // CHECK5-NEXT: store ptr null, ptr [[TMP32]], align 4
2535 // CHECK5-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 8
2536 // CHECK5-NEXT: store i64 123, ptr [[TMP33]], align 8
2537 // CHECK5-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 9
2538 // CHECK5-NEXT: store i64 0, ptr [[TMP34]], align 8
2539 // CHECK5-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 10
2540 // CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP35]], align 4
2541 // CHECK5-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 11
2542 // CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP36]], align 4
2543 // CHECK5-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 12
2544 // CHECK5-NEXT: store i32 0, ptr [[TMP37]], align 4
2545 // CHECK5-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41.region_id, ptr [[KERNEL_ARGS7]])
2546 // CHECK5-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
2547 // CHECK5-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED8:%.*]], label [[OMP_OFFLOAD_CONT9:%.*]]
2548 // CHECK5: omp_offload.failed8:
2549 // CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41(ptr [[THIS1]]) #[[ATTR3]]
2550 // CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT9]]
2551 // CHECK5: omp_offload.cont9:
2552 // CHECK5-NEXT: [[A10:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
2553 // CHECK5-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS11]], i32 0, i32 0
2554 // CHECK5-NEXT: store ptr [[THIS1]], ptr [[TMP40]], align 4
2555 // CHECK5-NEXT: [[TMP41:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS12]], i32 0, i32 0
2556 // CHECK5-NEXT: store ptr [[A10]], ptr [[TMP41]], align 4
2557 // CHECK5-NEXT: [[TMP42:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS13]], i32 0, i32 0
2558 // CHECK5-NEXT: store ptr null, ptr [[TMP42]], align 4
2559 // CHECK5-NEXT: [[TMP43:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS11]], i32 0, i32 0
2560 // CHECK5-NEXT: [[TMP44:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS12]], i32 0, i32 0
2561 // CHECK5-NEXT: [[TMP45:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 0
2562 // CHECK5-NEXT: store i32 2, ptr [[TMP45]], align 4
2563 // CHECK5-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 1
2564 // CHECK5-NEXT: store i32 1, ptr [[TMP46]], align 4
2565 // CHECK5-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 2
2566 // CHECK5-NEXT: store ptr [[TMP43]], ptr [[TMP47]], align 4
2567 // CHECK5-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 3
2568 // CHECK5-NEXT: store ptr [[TMP44]], ptr [[TMP48]], align 4
2569 // CHECK5-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 4
2570 // CHECK5-NEXT: store ptr @.offload_sizes.3, ptr [[TMP49]], align 4
2571 // CHECK5-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 5
2572 // CHECK5-NEXT: store ptr @.offload_maptypes.4, ptr [[TMP50]], align 4
2573 // CHECK5-NEXT: [[TMP51:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 6
2574 // CHECK5-NEXT: store ptr null, ptr [[TMP51]], align 4
2575 // CHECK5-NEXT: [[TMP52:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 7
2576 // CHECK5-NEXT: store ptr null, ptr [[TMP52]], align 4
2577 // CHECK5-NEXT: [[TMP53:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 8
2578 // CHECK5-NEXT: store i64 123, ptr [[TMP53]], align 8
2579 // CHECK5-NEXT: [[TMP54:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 9
2580 // CHECK5-NEXT: store i64 0, ptr [[TMP54]], align 8
2581 // CHECK5-NEXT: [[TMP55:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 10
2582 // CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP55]], align 4
2583 // CHECK5-NEXT: [[TMP56:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 11
2584 // CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP56]], align 4
2585 // CHECK5-NEXT: [[TMP57:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 12
2586 // CHECK5-NEXT: store i32 0, ptr [[TMP57]], align 4
2587 // CHECK5-NEXT: [[TMP58:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46.region_id, ptr [[KERNEL_ARGS15]])
2588 // CHECK5-NEXT: [[TMP59:%.*]] = icmp ne i32 [[TMP58]], 0
2589 // CHECK5-NEXT: br i1 [[TMP59]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]]
2590 // CHECK5: omp_offload.failed16:
2591 // CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46(ptr [[THIS1]]) #[[ATTR3]]
2592 // CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT17]]
2593 // CHECK5: omp_offload.cont17:
2594 // CHECK5-NEXT: [[A18:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
2595 // CHECK5-NEXT: [[TMP60:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0
2596 // CHECK5-NEXT: store ptr [[THIS1]], ptr [[TMP60]], align 4
2597 // CHECK5-NEXT: [[TMP61:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 0
2598 // CHECK5-NEXT: store ptr [[A18]], ptr [[TMP61]], align 4
2599 // CHECK5-NEXT: [[TMP62:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 0
2600 // CHECK5-NEXT: store ptr null, ptr [[TMP62]], align 4
2601 // CHECK5-NEXT: [[TMP63:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0
2602 // CHECK5-NEXT: [[TMP64:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 0
2603 // CHECK5-NEXT: [[TMP65:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 0
2604 // CHECK5-NEXT: store i32 2, ptr [[TMP65]], align 4
2605 // CHECK5-NEXT: [[TMP66:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 1
2606 // CHECK5-NEXT: store i32 1, ptr [[TMP66]], align 4
2607 // CHECK5-NEXT: [[TMP67:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 2
2608 // CHECK5-NEXT: store ptr [[TMP63]], ptr [[TMP67]], align 4
2609 // CHECK5-NEXT: [[TMP68:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 3
2610 // CHECK5-NEXT: store ptr [[TMP64]], ptr [[TMP68]], align 4
2611 // CHECK5-NEXT: [[TMP69:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 4
2612 // CHECK5-NEXT: store ptr @.offload_sizes.5, ptr [[TMP69]], align 4
2613 // CHECK5-NEXT: [[TMP70:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 5
2614 // CHECK5-NEXT: store ptr @.offload_maptypes.6, ptr [[TMP70]], align 4
2615 // CHECK5-NEXT: [[TMP71:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 6
2616 // CHECK5-NEXT: store ptr null, ptr [[TMP71]], align 4
2617 // CHECK5-NEXT: [[TMP72:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 7
2618 // CHECK5-NEXT: store ptr null, ptr [[TMP72]], align 4
2619 // CHECK5-NEXT: [[TMP73:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 8
2620 // CHECK5-NEXT: store i64 123, ptr [[TMP73]], align 8
2621 // CHECK5-NEXT: [[TMP74:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 9
2622 // CHECK5-NEXT: store i64 0, ptr [[TMP74]], align 8
2623 // CHECK5-NEXT: [[TMP75:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 10
2624 // CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP75]], align 4
2625 // CHECK5-NEXT: [[TMP76:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 11
2626 // CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP76]], align 4
2627 // CHECK5-NEXT: [[TMP77:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 12
2628 // CHECK5-NEXT: store i32 0, ptr [[TMP77]], align 4
2629 // CHECK5-NEXT: [[TMP78:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52.region_id, ptr [[KERNEL_ARGS23]])
2630 // CHECK5-NEXT: [[TMP79:%.*]] = icmp ne i32 [[TMP78]], 0
2631 // CHECK5-NEXT: br i1 [[TMP79]], label [[OMP_OFFLOAD_FAILED24:%.*]], label [[OMP_OFFLOAD_CONT25:%.*]]
2632 // CHECK5: omp_offload.failed24:
2633 // CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52(ptr [[THIS1]]) #[[ATTR3]]
2634 // CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT25]]
2635 // CHECK5: omp_offload.cont25:
2636 // CHECK5-NEXT: [[A26:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
2637 // CHECK5-NEXT: [[TMP80:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS27]], i32 0, i32 0
2638 // CHECK5-NEXT: store ptr [[THIS1]], ptr [[TMP80]], align 4
2639 // CHECK5-NEXT: [[TMP81:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS28]], i32 0, i32 0
2640 // CHECK5-NEXT: store ptr [[A26]], ptr [[TMP81]], align 4
2641 // CHECK5-NEXT: [[TMP82:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS29]], i32 0, i32 0
2642 // CHECK5-NEXT: store ptr null, ptr [[TMP82]], align 4
2643 // CHECK5-NEXT: [[TMP83:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS27]], i32 0, i32 0
2644 // CHECK5-NEXT: [[TMP84:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS28]], i32 0, i32 0
2645 // CHECK5-NEXT: [[TMP85:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 0
2646 // CHECK5-NEXT: store i32 2, ptr [[TMP85]], align 4
2647 // CHECK5-NEXT: [[TMP86:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 1
2648 // CHECK5-NEXT: store i32 1, ptr [[TMP86]], align 4
2649 // CHECK5-NEXT: [[TMP87:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 2
2650 // CHECK5-NEXT: store ptr [[TMP83]], ptr [[TMP87]], align 4
2651 // CHECK5-NEXT: [[TMP88:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 3
2652 // CHECK5-NEXT: store ptr [[TMP84]], ptr [[TMP88]], align 4
2653 // CHECK5-NEXT: [[TMP89:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 4
2654 // CHECK5-NEXT: store ptr @.offload_sizes.7, ptr [[TMP89]], align 4
2655 // CHECK5-NEXT: [[TMP90:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 5
2656 // CHECK5-NEXT: store ptr @.offload_maptypes.8, ptr [[TMP90]], align 4
2657 // CHECK5-NEXT: [[TMP91:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 6
2658 // CHECK5-NEXT: store ptr null, ptr [[TMP91]], align 4
2659 // CHECK5-NEXT: [[TMP92:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 7
2660 // CHECK5-NEXT: store ptr null, ptr [[TMP92]], align 4
2661 // CHECK5-NEXT: [[TMP93:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 8
2662 // CHECK5-NEXT: store i64 123, ptr [[TMP93]], align 8
2663 // CHECK5-NEXT: [[TMP94:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 9
2664 // CHECK5-NEXT: store i64 0, ptr [[TMP94]], align 8
2665 // CHECK5-NEXT: [[TMP95:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 10
2666 // CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP95]], align 4
2667 // CHECK5-NEXT: [[TMP96:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 11
2668 // CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP96]], align 4
2669 // CHECK5-NEXT: [[TMP97:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 12
2670 // CHECK5-NEXT: store i32 0, ptr [[TMP97]], align 4
2671 // CHECK5-NEXT: [[TMP98:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58.region_id, ptr [[KERNEL_ARGS31]])
2672 // CHECK5-NEXT: [[TMP99:%.*]] = icmp ne i32 [[TMP98]], 0
2673 // CHECK5-NEXT: br i1 [[TMP99]], label [[OMP_OFFLOAD_FAILED32:%.*]], label [[OMP_OFFLOAD_CONT33:%.*]]
2674 // CHECK5: omp_offload.failed32:
2675 // CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58(ptr [[THIS1]]) #[[ATTR3]]
2676 // CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT33]]
2677 // CHECK5: omp_offload.cont33:
2678 // CHECK5-NEXT: [[A34:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
2679 // CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A34]], i32 0, i32 0
2680 // CHECK5-NEXT: [[TMP100:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
2681 // CHECK5-NEXT: ret i32 [[TMP100]]
2684 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36
2685 // CHECK5-SAME: (ptr noundef [[THIS:%.*]]) #[[ATTR1:[0-9]+]] {
2686 // CHECK5-NEXT: entry:
2687 // CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
2688 // CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
2689 // CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
2690 // CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.omp_outlined, ptr [[TMP0]])
2691 // CHECK5-NEXT: ret void
2694 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.omp_outlined
2695 // CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR2:[0-9]+]] {
2696 // CHECK5-NEXT: entry:
2697 // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
2698 // CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
2699 // CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
2700 // CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
2701 // CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
2702 // CHECK5-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
2703 // CHECK5-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
2704 // CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2705 // CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2706 // CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
2707 // CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
2708 // CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
2709 // CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
2710 // CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
2711 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
2712 // CHECK5-NEXT: store i32 122, ptr [[DOTOMP_COMB_UB]], align 4
2713 // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
2714 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
2715 // CHECK5-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
2716 // CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
2717 // CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
2718 // CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
2719 // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
2720 // CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2721 // CHECK5: cond.true:
2722 // CHECK5-NEXT: br label [[COND_END:%.*]]
2723 // CHECK5: cond.false:
2724 // CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
2725 // CHECK5-NEXT: br label [[COND_END]]
2726 // CHECK5: cond.end:
2727 // CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
2728 // CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
2729 // CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
2730 // CHECK5-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
2731 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
2732 // CHECK5: omp.inner.for.cond:
2733 // CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9:![0-9]+]]
2734 // CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP9]]
2735 // CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
2736 // CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2737 // CHECK5: omp.inner.for.body:
2738 // CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP9]]
2739 // CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP9]]
2740 // CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.omp_outlined.omp_outlined, i32 [[TMP8]], i32 [[TMP9]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP9]]
2741 // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
2742 // CHECK5: omp.inner.for.inc:
2743 // CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]]
2744 // CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP9]]
2745 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
2746 // CHECK5-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]]
2747 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]]
2748 // CHECK5: omp.inner.for.end:
2749 // CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
2750 // CHECK5: omp.loop.exit:
2751 // CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
2752 // CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
2753 // CHECK5-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
2754 // CHECK5-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2755 // CHECK5: .omp.final.then:
2756 // CHECK5-NEXT: store i32 123, ptr [[I]], align 4
2757 // CHECK5-NEXT: br label [[DOTOMP_FINAL_DONE]]
2758 // CHECK5: .omp.final.done:
2759 // CHECK5-NEXT: ret void
2762 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.omp_outlined.omp_outlined
2763 // CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR2]] {
2764 // CHECK5-NEXT: entry:
2765 // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
2766 // CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
2767 // CHECK5-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
2768 // CHECK5-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
2769 // CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
2770 // CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
2771 // CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
2772 // CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
2773 // CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
2774 // CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2775 // CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2776 // CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
2777 // CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
2778 // CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
2779 // CHECK5-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
2780 // CHECK5-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
2781 // CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
2782 // CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
2783 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
2784 // CHECK5-NEXT: store i32 122, ptr [[DOTOMP_UB]], align 4
2785 // CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
2786 // CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
2787 // CHECK5-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_LB]], align 4
2788 // CHECK5-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4
2789 // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
2790 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
2791 // CHECK5-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
2792 // CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
2793 // CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
2794 // CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
2795 // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 122
2796 // CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2797 // CHECK5: cond.true:
2798 // CHECK5-NEXT: br label [[COND_END:%.*]]
2799 // CHECK5: cond.false:
2800 // CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
2801 // CHECK5-NEXT: br label [[COND_END]]
2802 // CHECK5: cond.end:
2803 // CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
2804 // CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
2805 // CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
2806 // CHECK5-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4
2807 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
2808 // CHECK5: omp.inner.for.cond:
2809 // CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]]
2810 // CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP13]]
2811 // CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
2812 // CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2813 // CHECK5: omp.inner.for.body:
2814 // CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]]
2815 // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
2816 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2817 // CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP13]]
2818 // CHECK5-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, i32 0
2819 // CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP13]]
2820 // CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A]], i32 0, i32 [[TMP11]]
2821 // CHECK5-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP13]]
2822 // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
2823 // CHECK5: omp.body.continue:
2824 // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
2825 // CHECK5: omp.inner.for.inc:
2826 // CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]]
2827 // CHECK5-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP12]], 1
2828 // CHECK5-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]]
2829 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]]
2830 // CHECK5: omp.inner.for.end:
2831 // CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
2832 // CHECK5: omp.loop.exit:
2833 // CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP4]])
2834 // CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
2835 // CHECK5-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
2836 // CHECK5-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2837 // CHECK5: .omp.final.then:
2838 // CHECK5-NEXT: store i32 123, ptr [[I]], align 4
2839 // CHECK5-NEXT: br label [[DOTOMP_FINAL_DONE]]
2840 // CHECK5: .omp.final.done:
2841 // CHECK5-NEXT: ret void
2844 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41
2845 // CHECK5-SAME: (ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
2846 // CHECK5-NEXT: entry:
2847 // CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
2848 // CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
2849 // CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
2850 // CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41.omp_outlined, ptr [[TMP0]])
2851 // CHECK5-NEXT: ret void
2854 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41.omp_outlined
2855 // CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR2]] {
2856 // CHECK5-NEXT: entry:
2857 // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
2858 // CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
2859 // CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
2860 // CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
2861 // CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
2862 // CHECK5-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
2863 // CHECK5-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
2864 // CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2865 // CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2866 // CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
2867 // CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
2868 // CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
2869 // CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
2870 // CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
2871 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
2872 // CHECK5-NEXT: store i32 122, ptr [[DOTOMP_COMB_UB]], align 4
2873 // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
2874 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
2875 // CHECK5-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
2876 // CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
2877 // CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
2878 // CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
2879 // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
2880 // CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2881 // CHECK5: cond.true:
2882 // CHECK5-NEXT: br label [[COND_END:%.*]]
2883 // CHECK5: cond.false:
2884 // CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
2885 // CHECK5-NEXT: br label [[COND_END]]
2886 // CHECK5: cond.end:
2887 // CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
2888 // CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
2889 // CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
2890 // CHECK5-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
2891 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
2892 // CHECK5: omp.inner.for.cond:
2893 // CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18:![0-9]+]]
2894 // CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP18]]
2895 // CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
2896 // CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2897 // CHECK5: omp.inner.for.body:
2898 // CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP18]]
2899 // CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP18]]
2900 // CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41.omp_outlined.omp_outlined, i32 [[TMP8]], i32 [[TMP9]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP18]]
2901 // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
2902 // CHECK5: omp.inner.for.inc:
2903 // CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
2904 // CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP18]]
2905 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
2906 // CHECK5-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
2907 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
2908 // CHECK5: omp.inner.for.end:
2909 // CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
2910 // CHECK5: omp.loop.exit:
2911 // CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
2912 // CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
2913 // CHECK5-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
2914 // CHECK5-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2915 // CHECK5: .omp.final.then:
2916 // CHECK5-NEXT: store i32 123, ptr [[I]], align 4
2917 // CHECK5-NEXT: br label [[DOTOMP_FINAL_DONE]]
2918 // CHECK5: .omp.final.done:
2919 // CHECK5-NEXT: ret void
2922 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41.omp_outlined.omp_outlined
2923 // CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR2]] {
2924 // CHECK5-NEXT: entry:
2925 // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
2926 // CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
2927 // CHECK5-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
2928 // CHECK5-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
2929 // CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
2930 // CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
2931 // CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
2932 // CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
2933 // CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
2934 // CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2935 // CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2936 // CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
2937 // CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
2938 // CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
2939 // CHECK5-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
2940 // CHECK5-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
2941 // CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
2942 // CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
2943 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
2944 // CHECK5-NEXT: store i32 122, ptr [[DOTOMP_UB]], align 4
2945 // CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
2946 // CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
2947 // CHECK5-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_LB]], align 4
2948 // CHECK5-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4
2949 // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
2950 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
2951 // CHECK5-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
2952 // CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
2953 // CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
2954 // CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
2955 // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 122
2956 // CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2957 // CHECK5: cond.true:
2958 // CHECK5-NEXT: br label [[COND_END:%.*]]
2959 // CHECK5: cond.false:
2960 // CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
2961 // CHECK5-NEXT: br label [[COND_END]]
2962 // CHECK5: cond.end:
2963 // CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
2964 // CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
2965 // CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
2966 // CHECK5-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4
2967 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
2968 // CHECK5: omp.inner.for.cond:
2969 // CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21:![0-9]+]]
2970 // CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP21]]
2971 // CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
2972 // CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2973 // CHECK5: omp.inner.for.body:
2974 // CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]]
2975 // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
2976 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2977 // CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP21]]
2978 // CHECK5-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, i32 0
2979 // CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP21]]
2980 // CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A]], i32 0, i32 [[TMP11]]
2981 // CHECK5-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP21]]
2982 // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
2983 // CHECK5: omp.body.continue:
2984 // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
2985 // CHECK5: omp.inner.for.inc:
2986 // CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]]
2987 // CHECK5-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP12]], 1
2988 // CHECK5-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]]
2989 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]]
2990 // CHECK5: omp.inner.for.end:
2991 // CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
2992 // CHECK5: omp.loop.exit:
2993 // CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP4]])
2994 // CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
2995 // CHECK5-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
2996 // CHECK5-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2997 // CHECK5: .omp.final.then:
2998 // CHECK5-NEXT: store i32 123, ptr [[I]], align 4
2999 // CHECK5-NEXT: br label [[DOTOMP_FINAL_DONE]]
3000 // CHECK5: .omp.final.done:
3001 // CHECK5-NEXT: ret void
3004 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46
3005 // CHECK5-SAME: (ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
3006 // CHECK5-NEXT: entry:
3007 // CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
3008 // CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
3009 // CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
3010 // CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46.omp_outlined, ptr [[TMP0]])
3011 // CHECK5-NEXT: ret void
3014 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46.omp_outlined
3015 // CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR2]] {
3016 // CHECK5-NEXT: entry:
3017 // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
3018 // CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
3019 // CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
3020 // CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
3021 // CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
3022 // CHECK5-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
3023 // CHECK5-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
3024 // CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3025 // CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3026 // CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
3027 // CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
3028 // CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
3029 // CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
3030 // CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
3031 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
3032 // CHECK5-NEXT: store i32 122, ptr [[DOTOMP_COMB_UB]], align 4
3033 // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
3034 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
3035 // CHECK5-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
3036 // CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
3037 // CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
3038 // CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
3039 // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
3040 // CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3041 // CHECK5: cond.true:
3042 // CHECK5-NEXT: br label [[COND_END:%.*]]
3043 // CHECK5: cond.false:
3044 // CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
3045 // CHECK5-NEXT: br label [[COND_END]]
3046 // CHECK5: cond.end:
3047 // CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
3048 // CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
3049 // CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
3050 // CHECK5-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
3051 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
3052 // CHECK5: omp.inner.for.cond:
3053 // CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24:![0-9]+]]
3054 // CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP24]]
3055 // CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
3056 // CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3057 // CHECK5: omp.inner.for.body:
3058 // CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP24]]
3059 // CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP24]]
3060 // CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46.omp_outlined.omp_outlined, i32 [[TMP8]], i32 [[TMP9]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP24]]
3061 // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
3062 // CHECK5: omp.inner.for.inc:
3063 // CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
3064 // CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP24]]
3065 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
3066 // CHECK5-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
3067 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
3068 // CHECK5: omp.inner.for.end:
3069 // CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
3070 // CHECK5: omp.loop.exit:
3071 // CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
3072 // CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
3073 // CHECK5-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
3074 // CHECK5-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3075 // CHECK5: .omp.final.then:
3076 // CHECK5-NEXT: store i32 123, ptr [[I]], align 4
3077 // CHECK5-NEXT: br label [[DOTOMP_FINAL_DONE]]
3078 // CHECK5: .omp.final.done:
3079 // CHECK5-NEXT: ret void
3082 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46.omp_outlined.omp_outlined
3083 // CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR2]] {
3084 // CHECK5-NEXT: entry:
3085 // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
3086 // CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
3087 // CHECK5-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
3088 // CHECK5-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
3089 // CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
3090 // CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
3091 // CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
3092 // CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
3093 // CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
3094 // CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3095 // CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3096 // CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
3097 // CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
3098 // CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
3099 // CHECK5-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
3100 // CHECK5-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
3101 // CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
3102 // CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
3103 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
3104 // CHECK5-NEXT: store i32 122, ptr [[DOTOMP_UB]], align 4
3105 // CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
3106 // CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
3107 // CHECK5-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_LB]], align 4
3108 // CHECK5-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4
3109 // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
3110 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
3111 // CHECK5-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
3112 // CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
3113 // CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP4]], i32 33, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 61)
3114 // CHECK5-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
3115 // CHECK5: omp.dispatch.cond:
3116 // CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
3117 // CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
3118 // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], [[TMP6]]
3119 // CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3120 // CHECK5: cond.true:
3121 // CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
3122 // CHECK5-NEXT: br label [[COND_END:%.*]]
3123 // CHECK5: cond.false:
3124 // CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
3125 // CHECK5-NEXT: br label [[COND_END]]
3126 // CHECK5: cond.end:
3127 // CHECK5-NEXT: [[COND:%.*]] = phi i32 [ [[TMP7]], [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ]
3128 // CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
3129 // CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
3130 // CHECK5-NEXT: store i32 [[TMP9]], ptr [[DOTOMP_IV]], align 4
3131 // CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
3132 // CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
3133 // CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
3134 // CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
3135 // CHECK5: omp.dispatch.body:
3136 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
3137 // CHECK5: omp.inner.for.cond:
3138 // CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27:![0-9]+]]
3139 // CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP27]]
3140 // CHECK5-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
3141 // CHECK5-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3142 // CHECK5: omp.inner.for.body:
3143 // CHECK5-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]]
3144 // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP14]], 1
3145 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
3146 // CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP27]]
3147 // CHECK5-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, i32 0
3148 // CHECK5-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP27]]
3149 // CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A]], i32 0, i32 [[TMP15]]
3150 // CHECK5-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP27]]
3151 // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
3152 // CHECK5: omp.body.continue:
3153 // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
3154 // CHECK5: omp.inner.for.inc:
3155 // CHECK5-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]]
3156 // CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP16]], 1
3157 // CHECK5-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]]
3158 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]]
3159 // CHECK5: omp.inner.for.end:
3160 // CHECK5-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
3161 // CHECK5: omp.dispatch.inc:
3162 // CHECK5-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
3163 // CHECK5-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
3164 // CHECK5-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
3165 // CHECK5-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_LB]], align 4
3166 // CHECK5-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
3167 // CHECK5-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
3168 // CHECK5-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
3169 // CHECK5-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_UB]], align 4
3170 // CHECK5-NEXT: br label [[OMP_DISPATCH_COND]]
3171 // CHECK5: omp.dispatch.end:
3172 // CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP4]])
3173 // CHECK5-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
3174 // CHECK5-NEXT: [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0
3175 // CHECK5-NEXT: br i1 [[TMP22]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3176 // CHECK5: .omp.final.then:
3177 // CHECK5-NEXT: store i32 123, ptr [[I]], align 4
3178 // CHECK5-NEXT: br label [[DOTOMP_FINAL_DONE]]
3179 // CHECK5: .omp.final.done:
3180 // CHECK5-NEXT: ret void
3183 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52
3184 // CHECK5-SAME: (ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
3185 // CHECK5-NEXT: entry:
3186 // CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
3187 // CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
3188 // CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
3189 // CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52.omp_outlined, ptr [[TMP0]])
3190 // CHECK5-NEXT: ret void
3193 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52.omp_outlined
3194 // CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR2]] {
3195 // CHECK5-NEXT: entry:
3196 // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
3197 // CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
3198 // CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
3199 // CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
3200 // CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
3201 // CHECK5-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
3202 // CHECK5-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
3203 // CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3204 // CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3205 // CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
3206 // CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
3207 // CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
3208 // CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
3209 // CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
3210 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
3211 // CHECK5-NEXT: store i32 122, ptr [[DOTOMP_COMB_UB]], align 4
3212 // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
3213 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
3214 // CHECK5-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
3215 // CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
3216 // CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
3217 // CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
3218 // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
3219 // CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3220 // CHECK5: cond.true:
3221 // CHECK5-NEXT: br label [[COND_END:%.*]]
3222 // CHECK5: cond.false:
3223 // CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
3224 // CHECK5-NEXT: br label [[COND_END]]
3225 // CHECK5: cond.end:
3226 // CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
3227 // CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
3228 // CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
3229 // CHECK5-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
3230 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
3231 // CHECK5: omp.inner.for.cond:
3232 // CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP30:![0-9]+]]
3233 // CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP30]]
3234 // CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
3235 // CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3236 // CHECK5: omp.inner.for.body:
3237 // CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP30]]
3238 // CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP30]]
3239 // CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52.omp_outlined.omp_outlined, i32 [[TMP8]], i32 [[TMP9]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP30]]
3240 // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
3241 // CHECK5: omp.inner.for.inc:
3242 // CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP30]]
3243 // CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP30]]
3244 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
3245 // CHECK5-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP30]]
3246 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]]
3247 // CHECK5: omp.inner.for.end:
3248 // CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
3249 // CHECK5: omp.loop.exit:
3250 // CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
3251 // CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
3252 // CHECK5-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
3253 // CHECK5-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3254 // CHECK5: .omp.final.then:
3255 // CHECK5-NEXT: store i32 123, ptr [[I]], align 4
3256 // CHECK5-NEXT: br label [[DOTOMP_FINAL_DONE]]
3257 // CHECK5: .omp.final.done:
3258 // CHECK5-NEXT: ret void
3261 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52.omp_outlined.omp_outlined
3262 // CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR2]] {
3263 // CHECK5-NEXT: entry:
3264 // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
3265 // CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
3266 // CHECK5-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
3267 // CHECK5-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
3268 // CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
3269 // CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
3270 // CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
3271 // CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
3272 // CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
3273 // CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3274 // CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3275 // CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
3276 // CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
3277 // CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
3278 // CHECK5-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
3279 // CHECK5-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
3280 // CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
3281 // CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
3282 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
3283 // CHECK5-NEXT: store i32 122, ptr [[DOTOMP_UB]], align 4
3284 // CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
3285 // CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
3286 // CHECK5-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_LB]], align 4
3287 // CHECK5-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4
3288 // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
3289 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
3290 // CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
3291 // CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
3292 // CHECK5-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
3293 // CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
3294 // CHECK5-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB3]], i32 [[TMP6]], i32 35, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 1)
3295 // CHECK5-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
3296 // CHECK5: omp.dispatch.cond:
3297 // CHECK5-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB3]], i32 [[TMP6]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
3298 // CHECK5-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
3299 // CHECK5-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
3300 // CHECK5: omp.dispatch.body:
3301 // CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
3302 // CHECK5-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4
3303 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
3304 // CHECK5: omp.inner.for.cond:
3305 // CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33:![0-9]+]]
3306 // CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP33]]
3307 // CHECK5-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
3308 // CHECK5-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3309 // CHECK5: omp.inner.for.body:
3310 // CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]]
3311 // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
3312 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
3313 // CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP33]]
3314 // CHECK5-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, i32 0
3315 // CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP33]]
3316 // CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A]], i32 0, i32 [[TMP12]]
3317 // CHECK5-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP33]]
3318 // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
3319 // CHECK5: omp.body.continue:
3320 // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
3321 // CHECK5: omp.inner.for.inc:
3322 // CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]]
3323 // CHECK5-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP13]], 1
3324 // CHECK5-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]]
3325 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]]
3326 // CHECK5: omp.inner.for.end:
3327 // CHECK5-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
3328 // CHECK5: omp.dispatch.inc:
3329 // CHECK5-NEXT: br label [[OMP_DISPATCH_COND]]
3330 // CHECK5: omp.dispatch.end:
3331 // CHECK5-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
3332 // CHECK5-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
3333 // CHECK5-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3334 // CHECK5: .omp.final.then:
3335 // CHECK5-NEXT: store i32 123, ptr [[I]], align 4
3336 // CHECK5-NEXT: br label [[DOTOMP_FINAL_DONE]]
3337 // CHECK5: .omp.final.done:
3338 // CHECK5-NEXT: ret void
3341 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58
3342 // CHECK5-SAME: (ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
3343 // CHECK5-NEXT: entry:
3344 // CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
3345 // CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
3346 // CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
3347 // CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58.omp_outlined, ptr [[TMP0]])
3348 // CHECK5-NEXT: ret void
3351 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58.omp_outlined
3352 // CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR2]] {
3353 // CHECK5-NEXT: entry:
3354 // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
3355 // CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
3356 // CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
3357 // CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
3358 // CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
3359 // CHECK5-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
3360 // CHECK5-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
3361 // CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3362 // CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3363 // CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
3364 // CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
3365 // CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
3366 // CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
3367 // CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
3368 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
3369 // CHECK5-NEXT: store i32 122, ptr [[DOTOMP_COMB_UB]], align 4
3370 // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
3371 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
3372 // CHECK5-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
3373 // CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
3374 // CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
3375 // CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
3376 // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
3377 // CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3378 // CHECK5: cond.true:
3379 // CHECK5-NEXT: br label [[COND_END:%.*]]
3380 // CHECK5: cond.false:
3381 // CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
3382 // CHECK5-NEXT: br label [[COND_END]]
3383 // CHECK5: cond.end:
3384 // CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
3385 // CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
3386 // CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
3387 // CHECK5-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
3388 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
3389 // CHECK5: omp.inner.for.cond:
3390 // CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36:![0-9]+]]
3391 // CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP36]]
3392 // CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
3393 // CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3394 // CHECK5: omp.inner.for.body:
3395 // CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP36]]
3396 // CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP36]]
3397 // CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58.omp_outlined.omp_outlined, i32 [[TMP8]], i32 [[TMP9]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP36]]
3398 // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
3399 // CHECK5: omp.inner.for.inc:
3400 // CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36]]
3401 // CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP36]]
3402 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
3403 // CHECK5-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36]]
3404 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP37:![0-9]+]]
3405 // CHECK5: omp.inner.for.end:
3406 // CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
3407 // CHECK5: omp.loop.exit:
3408 // CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
3409 // CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
3410 // CHECK5-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
3411 // CHECK5-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3412 // CHECK5: .omp.final.then:
3413 // CHECK5-NEXT: store i32 123, ptr [[I]], align 4
3414 // CHECK5-NEXT: br label [[DOTOMP_FINAL_DONE]]
3415 // CHECK5: .omp.final.done:
3416 // CHECK5-NEXT: ret void
3419 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58.omp_outlined.omp_outlined
3420 // CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR2]] {
3421 // CHECK5-NEXT: entry:
3422 // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
3423 // CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
3424 // CHECK5-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
3425 // CHECK5-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
3426 // CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
3427 // CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
3428 // CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
3429 // CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
3430 // CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
3431 // CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3432 // CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3433 // CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
3434 // CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
3435 // CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
3436 // CHECK5-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
3437 // CHECK5-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
3438 // CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
3439 // CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
3440 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
3441 // CHECK5-NEXT: store i32 122, ptr [[DOTOMP_UB]], align 4
3442 // CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
3443 // CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
3444 // CHECK5-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_LB]], align 4
3445 // CHECK5-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4
3446 // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
3447 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
3448 // CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
3449 // CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
3450 // CHECK5-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
3451 // CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
3452 // CHECK5-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB3]], i32 [[TMP6]], i32 35, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 61)
3453 // CHECK5-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
3454 // CHECK5: omp.dispatch.cond:
3455 // CHECK5-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB3]], i32 [[TMP6]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
3456 // CHECK5-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
3457 // CHECK5-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
3458 // CHECK5: omp.dispatch.body:
3459 // CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
3460 // CHECK5-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4
3461 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
3462 // CHECK5: omp.inner.for.cond:
3463 // CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39:![0-9]+]]
3464 // CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP39]]
3465 // CHECK5-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
3466 // CHECK5-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3467 // CHECK5: omp.inner.for.body:
3468 // CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39]]
3469 // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
3470 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
3471 // CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP39]]
3472 // CHECK5-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, i32 0
3473 // CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP39]]
3474 // CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A]], i32 0, i32 [[TMP12]]
3475 // CHECK5-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP39]]
3476 // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
3477 // CHECK5: omp.body.continue:
3478 // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
3479 // CHECK5: omp.inner.for.inc:
3480 // CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39]]
3481 // CHECK5-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP13]], 1
3482 // CHECK5-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39]]
3483 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP40:![0-9]+]]
3484 // CHECK5: omp.inner.for.end:
3485 // CHECK5-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
3486 // CHECK5: omp.dispatch.inc:
3487 // CHECK5-NEXT: br label [[OMP_DISPATCH_COND]]
3488 // CHECK5: omp.dispatch.end:
3489 // CHECK5-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
3490 // CHECK5-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
3491 // CHECK5-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3492 // CHECK5: .omp.final.then:
3493 // CHECK5-NEXT: store i32 123, ptr [[I]], align 4
3494 // CHECK5-NEXT: br label [[DOTOMP_FINAL_DONE]]
3495 // CHECK5: .omp.final.done:
3496 // CHECK5-NEXT: ret void
3499 // CHECK5-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
3500 // CHECK5-SAME: () #[[ATTR4:[0-9]+]] {
3501 // CHECK5-NEXT: entry:
3502 // CHECK5-NEXT: call void @__tgt_register_requires(i64 1)
3503 // CHECK5-NEXT: ret void
3506 // CHECK6-LABEL: define {{[^@]+}}@_Z21teams_template_structv
3507 // CHECK6-SAME: () #[[ATTR0:[0-9]+]] {
3508 // CHECK6-NEXT: entry:
3509 // CHECK6-NEXT: [[V:%.*]] = alloca [[STRUCT_SS:%.*]], align 4
3510 // CHECK6-NEXT: [[CALL:%.*]] = call noundef i32 @_ZN2SSIiLi123ELx456EE3fooEv(ptr noundef nonnull align 4 dereferenceable(496) [[V]])
3511 // CHECK6-NEXT: ret i32 [[CALL]]
3514 // CHECK6-LABEL: define {{[^@]+}}@_ZN2SSIiLi123ELx456EE3fooEv
3515 // CHECK6-SAME: (ptr noundef nonnull align 4 dereferenceable(496) [[THIS:%.*]]) #[[ATTR0]] comdat align 2 {
3516 // CHECK6-NEXT: entry:
3517 // CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
3518 // CHECK6-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 4
3519 // CHECK6-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 4
3520 // CHECK6-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 4
3521 // CHECK6-NEXT: [[TMP:%.*]] = alloca i32, align 4
3522 // CHECK6-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
3523 // CHECK6-NEXT: [[DOTOFFLOAD_BASEPTRS3:%.*]] = alloca [1 x ptr], align 4
3524 // CHECK6-NEXT: [[DOTOFFLOAD_PTRS4:%.*]] = alloca [1 x ptr], align 4
3525 // CHECK6-NEXT: [[DOTOFFLOAD_MAPPERS5:%.*]] = alloca [1 x ptr], align 4
3526 // CHECK6-NEXT: [[_TMP6:%.*]] = alloca i32, align 4
3527 // CHECK6-NEXT: [[KERNEL_ARGS7:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
3528 // CHECK6-NEXT: [[DOTOFFLOAD_BASEPTRS11:%.*]] = alloca [1 x ptr], align 4
3529 // CHECK6-NEXT: [[DOTOFFLOAD_PTRS12:%.*]] = alloca [1 x ptr], align 4
3530 // CHECK6-NEXT: [[DOTOFFLOAD_MAPPERS13:%.*]] = alloca [1 x ptr], align 4
3531 // CHECK6-NEXT: [[_TMP14:%.*]] = alloca i32, align 4
3532 // CHECK6-NEXT: [[KERNEL_ARGS15:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
3533 // CHECK6-NEXT: [[DOTOFFLOAD_BASEPTRS19:%.*]] = alloca [1 x ptr], align 4
3534 // CHECK6-NEXT: [[DOTOFFLOAD_PTRS20:%.*]] = alloca [1 x ptr], align 4
3535 // CHECK6-NEXT: [[DOTOFFLOAD_MAPPERS21:%.*]] = alloca [1 x ptr], align 4
3536 // CHECK6-NEXT: [[_TMP22:%.*]] = alloca i32, align 4
3537 // CHECK6-NEXT: [[KERNEL_ARGS23:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
3538 // CHECK6-NEXT: [[DOTOFFLOAD_BASEPTRS27:%.*]] = alloca [1 x ptr], align 4
3539 // CHECK6-NEXT: [[DOTOFFLOAD_PTRS28:%.*]] = alloca [1 x ptr], align 4
3540 // CHECK6-NEXT: [[DOTOFFLOAD_MAPPERS29:%.*]] = alloca [1 x ptr], align 4
3541 // CHECK6-NEXT: [[_TMP30:%.*]] = alloca i32, align 4
3542 // CHECK6-NEXT: [[KERNEL_ARGS31:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
3543 // CHECK6-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
3544 // CHECK6-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
3545 // CHECK6-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[THIS1]], i32 0, i32 0
3546 // CHECK6-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
3547 // CHECK6-NEXT: store ptr [[THIS1]], ptr [[TMP0]], align 4
3548 // CHECK6-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
3549 // CHECK6-NEXT: store ptr [[A]], ptr [[TMP1]], align 4
3550 // CHECK6-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
3551 // CHECK6-NEXT: store ptr null, ptr [[TMP2]], align 4
3552 // CHECK6-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
3553 // CHECK6-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
3554 // CHECK6-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
3555 // CHECK6-NEXT: store i32 2, ptr [[TMP5]], align 4
3556 // CHECK6-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
3557 // CHECK6-NEXT: store i32 1, ptr [[TMP6]], align 4
3558 // CHECK6-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
3559 // CHECK6-NEXT: store ptr [[TMP3]], ptr [[TMP7]], align 4
3560 // CHECK6-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
3561 // CHECK6-NEXT: store ptr [[TMP4]], ptr [[TMP8]], align 4
3562 // CHECK6-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
3563 // CHECK6-NEXT: store ptr @.offload_sizes, ptr [[TMP9]], align 4
3564 // CHECK6-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
3565 // CHECK6-NEXT: store ptr @.offload_maptypes, ptr [[TMP10]], align 4
3566 // CHECK6-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
3567 // CHECK6-NEXT: store ptr null, ptr [[TMP11]], align 4
3568 // CHECK6-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
3569 // CHECK6-NEXT: store ptr null, ptr [[TMP12]], align 4
3570 // CHECK6-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
3571 // CHECK6-NEXT: store i64 123, ptr [[TMP13]], align 8
3572 // CHECK6-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
3573 // CHECK6-NEXT: store i64 0, ptr [[TMP14]], align 8
3574 // CHECK6-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
3575 // CHECK6-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP15]], align 4
3576 // CHECK6-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
3577 // CHECK6-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP16]], align 4
3578 // CHECK6-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
3579 // CHECK6-NEXT: store i32 0, ptr [[TMP17]], align 4
3580 // CHECK6-NEXT: [[TMP18:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.region_id, ptr [[KERNEL_ARGS]])
3581 // CHECK6-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
3582 // CHECK6-NEXT: br i1 [[TMP19]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
3583 // CHECK6: omp_offload.failed:
3584 // CHECK6-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36(ptr [[THIS1]]) #[[ATTR3:[0-9]+]]
3585 // CHECK6-NEXT: br label [[OMP_OFFLOAD_CONT]]
3586 // CHECK6: omp_offload.cont:
3587 // CHECK6-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
3588 // CHECK6-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
3589 // CHECK6-NEXT: store ptr [[THIS1]], ptr [[TMP20]], align 4
3590 // CHECK6-NEXT: [[TMP21:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
3591 // CHECK6-NEXT: store ptr [[A2]], ptr [[TMP21]], align 4
3592 // CHECK6-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS5]], i32 0, i32 0
3593 // CHECK6-NEXT: store ptr null, ptr [[TMP22]], align 4
3594 // CHECK6-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0
3595 // CHECK6-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS4]], i32 0, i32 0
3596 // CHECK6-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 0
3597 // CHECK6-NEXT: store i32 2, ptr [[TMP25]], align 4
3598 // CHECK6-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 1
3599 // CHECK6-NEXT: store i32 1, ptr [[TMP26]], align 4
3600 // CHECK6-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 2
3601 // CHECK6-NEXT: store ptr [[TMP23]], ptr [[TMP27]], align 4
3602 // CHECK6-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 3
3603 // CHECK6-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 4
3604 // CHECK6-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 4
3605 // CHECK6-NEXT: store ptr @.offload_sizes.1, ptr [[TMP29]], align 4
3606 // CHECK6-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 5
3607 // CHECK6-NEXT: store ptr @.offload_maptypes.2, ptr [[TMP30]], align 4
3608 // CHECK6-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 6
3609 // CHECK6-NEXT: store ptr null, ptr [[TMP31]], align 4
3610 // CHECK6-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 7
3611 // CHECK6-NEXT: store ptr null, ptr [[TMP32]], align 4
3612 // CHECK6-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 8
3613 // CHECK6-NEXT: store i64 123, ptr [[TMP33]], align 8
3614 // CHECK6-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 9
3615 // CHECK6-NEXT: store i64 0, ptr [[TMP34]], align 8
3616 // CHECK6-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 10
3617 // CHECK6-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP35]], align 4
3618 // CHECK6-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 11
3619 // CHECK6-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP36]], align 4
3620 // CHECK6-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 12
3621 // CHECK6-NEXT: store i32 0, ptr [[TMP37]], align 4
3622 // CHECK6-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41.region_id, ptr [[KERNEL_ARGS7]])
3623 // CHECK6-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
3624 // CHECK6-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED8:%.*]], label [[OMP_OFFLOAD_CONT9:%.*]]
3625 // CHECK6: omp_offload.failed8:
3626 // CHECK6-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41(ptr [[THIS1]]) #[[ATTR3]]
3627 // CHECK6-NEXT: br label [[OMP_OFFLOAD_CONT9]]
3628 // CHECK6: omp_offload.cont9:
3629 // CHECK6-NEXT: [[A10:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
3630 // CHECK6-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS11]], i32 0, i32 0
3631 // CHECK6-NEXT: store ptr [[THIS1]], ptr [[TMP40]], align 4
3632 // CHECK6-NEXT: [[TMP41:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS12]], i32 0, i32 0
3633 // CHECK6-NEXT: store ptr [[A10]], ptr [[TMP41]], align 4
3634 // CHECK6-NEXT: [[TMP42:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS13]], i32 0, i32 0
3635 // CHECK6-NEXT: store ptr null, ptr [[TMP42]], align 4
3636 // CHECK6-NEXT: [[TMP43:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS11]], i32 0, i32 0
3637 // CHECK6-NEXT: [[TMP44:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS12]], i32 0, i32 0
3638 // CHECK6-NEXT: [[TMP45:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 0
3639 // CHECK6-NEXT: store i32 2, ptr [[TMP45]], align 4
3640 // CHECK6-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 1
3641 // CHECK6-NEXT: store i32 1, ptr [[TMP46]], align 4
3642 // CHECK6-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 2
3643 // CHECK6-NEXT: store ptr [[TMP43]], ptr [[TMP47]], align 4
3644 // CHECK6-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 3
3645 // CHECK6-NEXT: store ptr [[TMP44]], ptr [[TMP48]], align 4
3646 // CHECK6-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 4
3647 // CHECK6-NEXT: store ptr @.offload_sizes.3, ptr [[TMP49]], align 4
3648 // CHECK6-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 5
3649 // CHECK6-NEXT: store ptr @.offload_maptypes.4, ptr [[TMP50]], align 4
3650 // CHECK6-NEXT: [[TMP51:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 6
3651 // CHECK6-NEXT: store ptr null, ptr [[TMP51]], align 4
3652 // CHECK6-NEXT: [[TMP52:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 7
3653 // CHECK6-NEXT: store ptr null, ptr [[TMP52]], align 4
3654 // CHECK6-NEXT: [[TMP53:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 8
3655 // CHECK6-NEXT: store i64 123, ptr [[TMP53]], align 8
3656 // CHECK6-NEXT: [[TMP54:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 9
3657 // CHECK6-NEXT: store i64 0, ptr [[TMP54]], align 8
3658 // CHECK6-NEXT: [[TMP55:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 10
3659 // CHECK6-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP55]], align 4
3660 // CHECK6-NEXT: [[TMP56:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 11
3661 // CHECK6-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP56]], align 4
3662 // CHECK6-NEXT: [[TMP57:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 12
3663 // CHECK6-NEXT: store i32 0, ptr [[TMP57]], align 4
3664 // CHECK6-NEXT: [[TMP58:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46.region_id, ptr [[KERNEL_ARGS15]])
3665 // CHECK6-NEXT: [[TMP59:%.*]] = icmp ne i32 [[TMP58]], 0
3666 // CHECK6-NEXT: br i1 [[TMP59]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]]
3667 // CHECK6: omp_offload.failed16:
3668 // CHECK6-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46(ptr [[THIS1]]) #[[ATTR3]]
3669 // CHECK6-NEXT: br label [[OMP_OFFLOAD_CONT17]]
3670 // CHECK6: omp_offload.cont17:
3671 // CHECK6-NEXT: [[A18:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
3672 // CHECK6-NEXT: [[TMP60:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0
3673 // CHECK6-NEXT: store ptr [[THIS1]], ptr [[TMP60]], align 4
3674 // CHECK6-NEXT: [[TMP61:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 0
3675 // CHECK6-NEXT: store ptr [[A18]], ptr [[TMP61]], align 4
3676 // CHECK6-NEXT: [[TMP62:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 0
3677 // CHECK6-NEXT: store ptr null, ptr [[TMP62]], align 4
3678 // CHECK6-NEXT: [[TMP63:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0
3679 // CHECK6-NEXT: [[TMP64:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 0
3680 // CHECK6-NEXT: [[TMP65:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 0
3681 // CHECK6-NEXT: store i32 2, ptr [[TMP65]], align 4
3682 // CHECK6-NEXT: [[TMP66:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 1
3683 // CHECK6-NEXT: store i32 1, ptr [[TMP66]], align 4
3684 // CHECK6-NEXT: [[TMP67:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 2
3685 // CHECK6-NEXT: store ptr [[TMP63]], ptr [[TMP67]], align 4
3686 // CHECK6-NEXT: [[TMP68:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 3
3687 // CHECK6-NEXT: store ptr [[TMP64]], ptr [[TMP68]], align 4
3688 // CHECK6-NEXT: [[TMP69:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 4
3689 // CHECK6-NEXT: store ptr @.offload_sizes.5, ptr [[TMP69]], align 4
3690 // CHECK6-NEXT: [[TMP70:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 5
3691 // CHECK6-NEXT: store ptr @.offload_maptypes.6, ptr [[TMP70]], align 4
3692 // CHECK6-NEXT: [[TMP71:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 6
3693 // CHECK6-NEXT: store ptr null, ptr [[TMP71]], align 4
3694 // CHECK6-NEXT: [[TMP72:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 7
3695 // CHECK6-NEXT: store ptr null, ptr [[TMP72]], align 4
3696 // CHECK6-NEXT: [[TMP73:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 8
3697 // CHECK6-NEXT: store i64 123, ptr [[TMP73]], align 8
3698 // CHECK6-NEXT: [[TMP74:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 9
3699 // CHECK6-NEXT: store i64 0, ptr [[TMP74]], align 8
3700 // CHECK6-NEXT: [[TMP75:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 10
3701 // CHECK6-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP75]], align 4
3702 // CHECK6-NEXT: [[TMP76:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 11
3703 // CHECK6-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP76]], align 4
3704 // CHECK6-NEXT: [[TMP77:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS23]], i32 0, i32 12
3705 // CHECK6-NEXT: store i32 0, ptr [[TMP77]], align 4
3706 // CHECK6-NEXT: [[TMP78:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52.region_id, ptr [[KERNEL_ARGS23]])
3707 // CHECK6-NEXT: [[TMP79:%.*]] = icmp ne i32 [[TMP78]], 0
3708 // CHECK6-NEXT: br i1 [[TMP79]], label [[OMP_OFFLOAD_FAILED24:%.*]], label [[OMP_OFFLOAD_CONT25:%.*]]
3709 // CHECK6: omp_offload.failed24:
3710 // CHECK6-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52(ptr [[THIS1]]) #[[ATTR3]]
3711 // CHECK6-NEXT: br label [[OMP_OFFLOAD_CONT25]]
3712 // CHECK6: omp_offload.cont25:
3713 // CHECK6-NEXT: [[A26:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
3714 // CHECK6-NEXT: [[TMP80:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS27]], i32 0, i32 0
3715 // CHECK6-NEXT: store ptr [[THIS1]], ptr [[TMP80]], align 4
3716 // CHECK6-NEXT: [[TMP81:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS28]], i32 0, i32 0
3717 // CHECK6-NEXT: store ptr [[A26]], ptr [[TMP81]], align 4
3718 // CHECK6-NEXT: [[TMP82:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS29]], i32 0, i32 0
3719 // CHECK6-NEXT: store ptr null, ptr [[TMP82]], align 4
3720 // CHECK6-NEXT: [[TMP83:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS27]], i32 0, i32 0
3721 // CHECK6-NEXT: [[TMP84:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS28]], i32 0, i32 0
3722 // CHECK6-NEXT: [[TMP85:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 0
3723 // CHECK6-NEXT: store i32 2, ptr [[TMP85]], align 4
3724 // CHECK6-NEXT: [[TMP86:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 1
3725 // CHECK6-NEXT: store i32 1, ptr [[TMP86]], align 4
3726 // CHECK6-NEXT: [[TMP87:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 2
3727 // CHECK6-NEXT: store ptr [[TMP83]], ptr [[TMP87]], align 4
3728 // CHECK6-NEXT: [[TMP88:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 3
3729 // CHECK6-NEXT: store ptr [[TMP84]], ptr [[TMP88]], align 4
3730 // CHECK6-NEXT: [[TMP89:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 4
3731 // CHECK6-NEXT: store ptr @.offload_sizes.7, ptr [[TMP89]], align 4
3732 // CHECK6-NEXT: [[TMP90:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 5
3733 // CHECK6-NEXT: store ptr @.offload_maptypes.8, ptr [[TMP90]], align 4
3734 // CHECK6-NEXT: [[TMP91:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 6
3735 // CHECK6-NEXT: store ptr null, ptr [[TMP91]], align 4
3736 // CHECK6-NEXT: [[TMP92:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 7
3737 // CHECK6-NEXT: store ptr null, ptr [[TMP92]], align 4
3738 // CHECK6-NEXT: [[TMP93:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 8
3739 // CHECK6-NEXT: store i64 123, ptr [[TMP93]], align 8
3740 // CHECK6-NEXT: [[TMP94:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 9
3741 // CHECK6-NEXT: store i64 0, ptr [[TMP94]], align 8
3742 // CHECK6-NEXT: [[TMP95:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 10
3743 // CHECK6-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP95]], align 4
3744 // CHECK6-NEXT: [[TMP96:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 11
3745 // CHECK6-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP96]], align 4
3746 // CHECK6-NEXT: [[TMP97:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS31]], i32 0, i32 12
3747 // CHECK6-NEXT: store i32 0, ptr [[TMP97]], align 4
3748 // CHECK6-NEXT: [[TMP98:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58.region_id, ptr [[KERNEL_ARGS31]])
3749 // CHECK6-NEXT: [[TMP99:%.*]] = icmp ne i32 [[TMP98]], 0
3750 // CHECK6-NEXT: br i1 [[TMP99]], label [[OMP_OFFLOAD_FAILED32:%.*]], label [[OMP_OFFLOAD_CONT33:%.*]]
3751 // CHECK6: omp_offload.failed32:
3752 // CHECK6-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58(ptr [[THIS1]]) #[[ATTR3]]
3753 // CHECK6-NEXT: br label [[OMP_OFFLOAD_CONT33]]
3754 // CHECK6: omp_offload.cont33:
3755 // CHECK6-NEXT: [[A34:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
3756 // CHECK6-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A34]], i32 0, i32 0
3757 // CHECK6-NEXT: [[TMP100:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
3758 // CHECK6-NEXT: ret i32 [[TMP100]]
3761 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36
3762 // CHECK6-SAME: (ptr noundef [[THIS:%.*]]) #[[ATTR1:[0-9]+]] {
3763 // CHECK6-NEXT: entry:
3764 // CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
3765 // CHECK6-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
3766 // CHECK6-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
3767 // CHECK6-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.omp_outlined, ptr [[TMP0]])
3768 // CHECK6-NEXT: ret void
3771 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.omp_outlined
3772 // CHECK6-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR2:[0-9]+]] {
3773 // CHECK6-NEXT: entry:
3774 // CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
3775 // CHECK6-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
3776 // CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
3777 // CHECK6-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
3778 // CHECK6-NEXT: [[TMP:%.*]] = alloca i32, align 4
3779 // CHECK6-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
3780 // CHECK6-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
3781 // CHECK6-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3782 // CHECK6-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3783 // CHECK6-NEXT: [[I:%.*]] = alloca i32, align 4
3784 // CHECK6-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
3785 // CHECK6-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
3786 // CHECK6-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
3787 // CHECK6-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
3788 // CHECK6-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
3789 // CHECK6-NEXT: store i32 122, ptr [[DOTOMP_COMB_UB]], align 4
3790 // CHECK6-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
3791 // CHECK6-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
3792 // CHECK6-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
3793 // CHECK6-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
3794 // CHECK6-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
3795 // CHECK6-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
3796 // CHECK6-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
3797 // CHECK6-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3798 // CHECK6: cond.true:
3799 // CHECK6-NEXT: br label [[COND_END:%.*]]
3800 // CHECK6: cond.false:
3801 // CHECK6-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
3802 // CHECK6-NEXT: br label [[COND_END]]
3803 // CHECK6: cond.end:
3804 // CHECK6-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
3805 // CHECK6-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
3806 // CHECK6-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
3807 // CHECK6-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
3808 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
3809 // CHECK6: omp.inner.for.cond:
3810 // CHECK6-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9:![0-9]+]]
3811 // CHECK6-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP9]]
3812 // CHECK6-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
3813 // CHECK6-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3814 // CHECK6: omp.inner.for.body:
3815 // CHECK6-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP9]]
3816 // CHECK6-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP9]]
3817 // CHECK6-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.omp_outlined.omp_outlined, i32 [[TMP8]], i32 [[TMP9]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP9]]
3818 // CHECK6-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
3819 // CHECK6: omp.inner.for.inc:
3820 // CHECK6-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]]
3821 // CHECK6-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP9]]
3822 // CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
3823 // CHECK6-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]]
3824 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]]
3825 // CHECK6: omp.inner.for.end:
3826 // CHECK6-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
3827 // CHECK6: omp.loop.exit:
3828 // CHECK6-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
3829 // CHECK6-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
3830 // CHECK6-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
3831 // CHECK6-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3832 // CHECK6: .omp.final.then:
3833 // CHECK6-NEXT: store i32 123, ptr [[I]], align 4
3834 // CHECK6-NEXT: br label [[DOTOMP_FINAL_DONE]]
3835 // CHECK6: .omp.final.done:
3836 // CHECK6-NEXT: ret void
3839 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l36.omp_outlined.omp_outlined
3840 // CHECK6-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR2]] {
3841 // CHECK6-NEXT: entry:
3842 // CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
3843 // CHECK6-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
3844 // CHECK6-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
3845 // CHECK6-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
3846 // CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
3847 // CHECK6-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
3848 // CHECK6-NEXT: [[TMP:%.*]] = alloca i32, align 4
3849 // CHECK6-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
3850 // CHECK6-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
3851 // CHECK6-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3852 // CHECK6-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3853 // CHECK6-NEXT: [[I:%.*]] = alloca i32, align 4
3854 // CHECK6-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
3855 // CHECK6-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
3856 // CHECK6-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
3857 // CHECK6-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
3858 // CHECK6-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
3859 // CHECK6-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
3860 // CHECK6-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
3861 // CHECK6-NEXT: store i32 122, ptr [[DOTOMP_UB]], align 4
3862 // CHECK6-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
3863 // CHECK6-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
3864 // CHECK6-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_LB]], align 4
3865 // CHECK6-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4
3866 // CHECK6-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
3867 // CHECK6-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
3868 // CHECK6-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
3869 // CHECK6-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
3870 // CHECK6-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
3871 // CHECK6-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
3872 // CHECK6-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 122
3873 // CHECK6-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3874 // CHECK6: cond.true:
3875 // CHECK6-NEXT: br label [[COND_END:%.*]]
3876 // CHECK6: cond.false:
3877 // CHECK6-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
3878 // CHECK6-NEXT: br label [[COND_END]]
3879 // CHECK6: cond.end:
3880 // CHECK6-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
3881 // CHECK6-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
3882 // CHECK6-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
3883 // CHECK6-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4
3884 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
3885 // CHECK6: omp.inner.for.cond:
3886 // CHECK6-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]]
3887 // CHECK6-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP13]]
3888 // CHECK6-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
3889 // CHECK6-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3890 // CHECK6: omp.inner.for.body:
3891 // CHECK6-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]]
3892 // CHECK6-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
3893 // CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
3894 // CHECK6-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP13]]
3895 // CHECK6-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, i32 0
3896 // CHECK6-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP13]]
3897 // CHECK6-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A]], i32 0, i32 [[TMP11]]
3898 // CHECK6-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP13]]
3899 // CHECK6-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
3900 // CHECK6: omp.body.continue:
3901 // CHECK6-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
3902 // CHECK6: omp.inner.for.inc:
3903 // CHECK6-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]]
3904 // CHECK6-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP12]], 1
3905 // CHECK6-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]]
3906 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]]
3907 // CHECK6: omp.inner.for.end:
3908 // CHECK6-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
3909 // CHECK6: omp.loop.exit:
3910 // CHECK6-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP4]])
3911 // CHECK6-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
3912 // CHECK6-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
3913 // CHECK6-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3914 // CHECK6: .omp.final.then:
3915 // CHECK6-NEXT: store i32 123, ptr [[I]], align 4
3916 // CHECK6-NEXT: br label [[DOTOMP_FINAL_DONE]]
3917 // CHECK6: .omp.final.done:
3918 // CHECK6-NEXT: ret void
3921 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41
3922 // CHECK6-SAME: (ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
3923 // CHECK6-NEXT: entry:
3924 // CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
3925 // CHECK6-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
3926 // CHECK6-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
3927 // CHECK6-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41.omp_outlined, ptr [[TMP0]])
3928 // CHECK6-NEXT: ret void
3931 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41.omp_outlined
3932 // CHECK6-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR2]] {
3933 // CHECK6-NEXT: entry:
3934 // CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
3935 // CHECK6-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
3936 // CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
3937 // CHECK6-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
3938 // CHECK6-NEXT: [[TMP:%.*]] = alloca i32, align 4
3939 // CHECK6-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
3940 // CHECK6-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
3941 // CHECK6-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3942 // CHECK6-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3943 // CHECK6-NEXT: [[I:%.*]] = alloca i32, align 4
3944 // CHECK6-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
3945 // CHECK6-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
3946 // CHECK6-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
3947 // CHECK6-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
3948 // CHECK6-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
3949 // CHECK6-NEXT: store i32 122, ptr [[DOTOMP_COMB_UB]], align 4
3950 // CHECK6-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
3951 // CHECK6-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
3952 // CHECK6-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
3953 // CHECK6-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
3954 // CHECK6-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
3955 // CHECK6-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
3956 // CHECK6-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
3957 // CHECK6-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3958 // CHECK6: cond.true:
3959 // CHECK6-NEXT: br label [[COND_END:%.*]]
3960 // CHECK6: cond.false:
3961 // CHECK6-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
3962 // CHECK6-NEXT: br label [[COND_END]]
3963 // CHECK6: cond.end:
3964 // CHECK6-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
3965 // CHECK6-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
3966 // CHECK6-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
3967 // CHECK6-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
3968 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
3969 // CHECK6: omp.inner.for.cond:
3970 // CHECK6-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18:![0-9]+]]
3971 // CHECK6-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP18]]
3972 // CHECK6-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
3973 // CHECK6-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3974 // CHECK6: omp.inner.for.body:
3975 // CHECK6-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP18]]
3976 // CHECK6-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP18]]
3977 // CHECK6-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41.omp_outlined.omp_outlined, i32 [[TMP8]], i32 [[TMP9]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP18]]
3978 // CHECK6-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
3979 // CHECK6: omp.inner.for.inc:
3980 // CHECK6-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
3981 // CHECK6-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP18]]
3982 // CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
3983 // CHECK6-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
3984 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
3985 // CHECK6: omp.inner.for.end:
3986 // CHECK6-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
3987 // CHECK6: omp.loop.exit:
3988 // CHECK6-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
3989 // CHECK6-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
3990 // CHECK6-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
3991 // CHECK6-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3992 // CHECK6: .omp.final.then:
3993 // CHECK6-NEXT: store i32 123, ptr [[I]], align 4
3994 // CHECK6-NEXT: br label [[DOTOMP_FINAL_DONE]]
3995 // CHECK6: .omp.final.done:
3996 // CHECK6-NEXT: ret void
3999 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l41.omp_outlined.omp_outlined
4000 // CHECK6-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR2]] {
4001 // CHECK6-NEXT: entry:
4002 // CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
4003 // CHECK6-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
4004 // CHECK6-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
4005 // CHECK6-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
4006 // CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
4007 // CHECK6-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
4008 // CHECK6-NEXT: [[TMP:%.*]] = alloca i32, align 4
4009 // CHECK6-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
4010 // CHECK6-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
4011 // CHECK6-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4012 // CHECK6-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4013 // CHECK6-NEXT: [[I:%.*]] = alloca i32, align 4
4014 // CHECK6-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
4015 // CHECK6-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
4016 // CHECK6-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
4017 // CHECK6-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
4018 // CHECK6-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
4019 // CHECK6-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
4020 // CHECK6-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
4021 // CHECK6-NEXT: store i32 122, ptr [[DOTOMP_UB]], align 4
4022 // CHECK6-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
4023 // CHECK6-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
4024 // CHECK6-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_LB]], align 4
4025 // CHECK6-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4
4026 // CHECK6-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
4027 // CHECK6-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
4028 // CHECK6-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
4029 // CHECK6-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
4030 // CHECK6-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
4031 // CHECK6-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
4032 // CHECK6-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 122
4033 // CHECK6-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
4034 // CHECK6: cond.true:
4035 // CHECK6-NEXT: br label [[COND_END:%.*]]
4036 // CHECK6: cond.false:
4037 // CHECK6-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
4038 // CHECK6-NEXT: br label [[COND_END]]
4039 // CHECK6: cond.end:
4040 // CHECK6-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
4041 // CHECK6-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
4042 // CHECK6-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
4043 // CHECK6-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4
4044 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
4045 // CHECK6: omp.inner.for.cond:
4046 // CHECK6-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21:![0-9]+]]
4047 // CHECK6-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP21]]
4048 // CHECK6-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
4049 // CHECK6-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4050 // CHECK6: omp.inner.for.body:
4051 // CHECK6-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]]
4052 // CHECK6-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
4053 // CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
4054 // CHECK6-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP21]]
4055 // CHECK6-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, i32 0
4056 // CHECK6-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP21]]
4057 // CHECK6-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A]], i32 0, i32 [[TMP11]]
4058 // CHECK6-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP21]]
4059 // CHECK6-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
4060 // CHECK6: omp.body.continue:
4061 // CHECK6-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
4062 // CHECK6: omp.inner.for.inc:
4063 // CHECK6-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]]
4064 // CHECK6-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP12]], 1
4065 // CHECK6-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]]
4066 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]]
4067 // CHECK6: omp.inner.for.end:
4068 // CHECK6-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
4069 // CHECK6: omp.loop.exit:
4070 // CHECK6-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP4]])
4071 // CHECK6-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
4072 // CHECK6-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
4073 // CHECK6-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4074 // CHECK6: .omp.final.then:
4075 // CHECK6-NEXT: store i32 123, ptr [[I]], align 4
4076 // CHECK6-NEXT: br label [[DOTOMP_FINAL_DONE]]
4077 // CHECK6: .omp.final.done:
4078 // CHECK6-NEXT: ret void
4081 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46
4082 // CHECK6-SAME: (ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
4083 // CHECK6-NEXT: entry:
4084 // CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
4085 // CHECK6-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
4086 // CHECK6-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
4087 // CHECK6-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46.omp_outlined, ptr [[TMP0]])
4088 // CHECK6-NEXT: ret void
4091 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46.omp_outlined
4092 // CHECK6-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR2]] {
4093 // CHECK6-NEXT: entry:
4094 // CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
4095 // CHECK6-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
4096 // CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
4097 // CHECK6-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
4098 // CHECK6-NEXT: [[TMP:%.*]] = alloca i32, align 4
4099 // CHECK6-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
4100 // CHECK6-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
4101 // CHECK6-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4102 // CHECK6-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4103 // CHECK6-NEXT: [[I:%.*]] = alloca i32, align 4
4104 // CHECK6-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
4105 // CHECK6-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
4106 // CHECK6-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
4107 // CHECK6-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
4108 // CHECK6-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
4109 // CHECK6-NEXT: store i32 122, ptr [[DOTOMP_COMB_UB]], align 4
4110 // CHECK6-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
4111 // CHECK6-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
4112 // CHECK6-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
4113 // CHECK6-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
4114 // CHECK6-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
4115 // CHECK6-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
4116 // CHECK6-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
4117 // CHECK6-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
4118 // CHECK6: cond.true:
4119 // CHECK6-NEXT: br label [[COND_END:%.*]]
4120 // CHECK6: cond.false:
4121 // CHECK6-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
4122 // CHECK6-NEXT: br label [[COND_END]]
4123 // CHECK6: cond.end:
4124 // CHECK6-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
4125 // CHECK6-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
4126 // CHECK6-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
4127 // CHECK6-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
4128 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
4129 // CHECK6: omp.inner.for.cond:
4130 // CHECK6-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24:![0-9]+]]
4131 // CHECK6-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP24]]
4132 // CHECK6-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
4133 // CHECK6-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4134 // CHECK6: omp.inner.for.body:
4135 // CHECK6-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP24]]
4136 // CHECK6-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP24]]
4137 // CHECK6-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46.omp_outlined.omp_outlined, i32 [[TMP8]], i32 [[TMP9]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP24]]
4138 // CHECK6-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
4139 // CHECK6: omp.inner.for.inc:
4140 // CHECK6-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
4141 // CHECK6-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP24]]
4142 // CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
4143 // CHECK6-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
4144 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
4145 // CHECK6: omp.inner.for.end:
4146 // CHECK6-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
4147 // CHECK6: omp.loop.exit:
4148 // CHECK6-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
4149 // CHECK6-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
4150 // CHECK6-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
4151 // CHECK6-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4152 // CHECK6: .omp.final.then:
4153 // CHECK6-NEXT: store i32 123, ptr [[I]], align 4
4154 // CHECK6-NEXT: br label [[DOTOMP_FINAL_DONE]]
4155 // CHECK6: .omp.final.done:
4156 // CHECK6-NEXT: ret void
4159 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l46.omp_outlined.omp_outlined
4160 // CHECK6-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR2]] {
4161 // CHECK6-NEXT: entry:
4162 // CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
4163 // CHECK6-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
4164 // CHECK6-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
4165 // CHECK6-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
4166 // CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
4167 // CHECK6-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
4168 // CHECK6-NEXT: [[TMP:%.*]] = alloca i32, align 4
4169 // CHECK6-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
4170 // CHECK6-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
4171 // CHECK6-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4172 // CHECK6-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4173 // CHECK6-NEXT: [[I:%.*]] = alloca i32, align 4
4174 // CHECK6-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
4175 // CHECK6-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
4176 // CHECK6-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
4177 // CHECK6-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
4178 // CHECK6-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
4179 // CHECK6-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
4180 // CHECK6-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
4181 // CHECK6-NEXT: store i32 122, ptr [[DOTOMP_UB]], align 4
4182 // CHECK6-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
4183 // CHECK6-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
4184 // CHECK6-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_LB]], align 4
4185 // CHECK6-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4
4186 // CHECK6-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
4187 // CHECK6-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
4188 // CHECK6-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
4189 // CHECK6-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
4190 // CHECK6-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP4]], i32 33, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 61)
4191 // CHECK6-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
4192 // CHECK6: omp.dispatch.cond:
4193 // CHECK6-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
4194 // CHECK6-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
4195 // CHECK6-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], [[TMP6]]
4196 // CHECK6-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
4197 // CHECK6: cond.true:
4198 // CHECK6-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
4199 // CHECK6-NEXT: br label [[COND_END:%.*]]
4200 // CHECK6: cond.false:
4201 // CHECK6-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
4202 // CHECK6-NEXT: br label [[COND_END]]
4203 // CHECK6: cond.end:
4204 // CHECK6-NEXT: [[COND:%.*]] = phi i32 [ [[TMP7]], [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ]
4205 // CHECK6-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
4206 // CHECK6-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
4207 // CHECK6-NEXT: store i32 [[TMP9]], ptr [[DOTOMP_IV]], align 4
4208 // CHECK6-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
4209 // CHECK6-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
4210 // CHECK6-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
4211 // CHECK6-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
4212 // CHECK6: omp.dispatch.body:
4213 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
4214 // CHECK6: omp.inner.for.cond:
4215 // CHECK6-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27:![0-9]+]]
4216 // CHECK6-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP27]]
4217 // CHECK6-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
4218 // CHECK6-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4219 // CHECK6: omp.inner.for.body:
4220 // CHECK6-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]]
4221 // CHECK6-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP14]], 1
4222 // CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
4223 // CHECK6-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP27]]
4224 // CHECK6-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, i32 0
4225 // CHECK6-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP27]]
4226 // CHECK6-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A]], i32 0, i32 [[TMP15]]
4227 // CHECK6-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP27]]
4228 // CHECK6-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
4229 // CHECK6: omp.body.continue:
4230 // CHECK6-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
4231 // CHECK6: omp.inner.for.inc:
4232 // CHECK6-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]]
4233 // CHECK6-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP16]], 1
4234 // CHECK6-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]]
4235 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]]
4236 // CHECK6: omp.inner.for.end:
4237 // CHECK6-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
4238 // CHECK6: omp.dispatch.inc:
4239 // CHECK6-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
4240 // CHECK6-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
4241 // CHECK6-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
4242 // CHECK6-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_LB]], align 4
4243 // CHECK6-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
4244 // CHECK6-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
4245 // CHECK6-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
4246 // CHECK6-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_UB]], align 4
4247 // CHECK6-NEXT: br label [[OMP_DISPATCH_COND]]
4248 // CHECK6: omp.dispatch.end:
4249 // CHECK6-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP4]])
4250 // CHECK6-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
4251 // CHECK6-NEXT: [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0
4252 // CHECK6-NEXT: br i1 [[TMP22]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4253 // CHECK6: .omp.final.then:
4254 // CHECK6-NEXT: store i32 123, ptr [[I]], align 4
4255 // CHECK6-NEXT: br label [[DOTOMP_FINAL_DONE]]
4256 // CHECK6: .omp.final.done:
4257 // CHECK6-NEXT: ret void
4260 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52
4261 // CHECK6-SAME: (ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
4262 // CHECK6-NEXT: entry:
4263 // CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
4264 // CHECK6-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
4265 // CHECK6-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
4266 // CHECK6-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52.omp_outlined, ptr [[TMP0]])
4267 // CHECK6-NEXT: ret void
4270 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52.omp_outlined
4271 // CHECK6-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR2]] {
4272 // CHECK6-NEXT: entry:
4273 // CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
4274 // CHECK6-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
4275 // CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
4276 // CHECK6-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
4277 // CHECK6-NEXT: [[TMP:%.*]] = alloca i32, align 4
4278 // CHECK6-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
4279 // CHECK6-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
4280 // CHECK6-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4281 // CHECK6-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4282 // CHECK6-NEXT: [[I:%.*]] = alloca i32, align 4
4283 // CHECK6-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
4284 // CHECK6-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
4285 // CHECK6-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
4286 // CHECK6-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
4287 // CHECK6-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
4288 // CHECK6-NEXT: store i32 122, ptr [[DOTOMP_COMB_UB]], align 4
4289 // CHECK6-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
4290 // CHECK6-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
4291 // CHECK6-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
4292 // CHECK6-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
4293 // CHECK6-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
4294 // CHECK6-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
4295 // CHECK6-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
4296 // CHECK6-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
4297 // CHECK6: cond.true:
4298 // CHECK6-NEXT: br label [[COND_END:%.*]]
4299 // CHECK6: cond.false:
4300 // CHECK6-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
4301 // CHECK6-NEXT: br label [[COND_END]]
4302 // CHECK6: cond.end:
4303 // CHECK6-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
4304 // CHECK6-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
4305 // CHECK6-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
4306 // CHECK6-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
4307 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
4308 // CHECK6: omp.inner.for.cond:
4309 // CHECK6-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP30:![0-9]+]]
4310 // CHECK6-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP30]]
4311 // CHECK6-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
4312 // CHECK6-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4313 // CHECK6: omp.inner.for.body:
4314 // CHECK6-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP30]]
4315 // CHECK6-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP30]]
4316 // CHECK6-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52.omp_outlined.omp_outlined, i32 [[TMP8]], i32 [[TMP9]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP30]]
4317 // CHECK6-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
4318 // CHECK6: omp.inner.for.inc:
4319 // CHECK6-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP30]]
4320 // CHECK6-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP30]]
4321 // CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
4322 // CHECK6-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP30]]
4323 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]]
4324 // CHECK6: omp.inner.for.end:
4325 // CHECK6-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
4326 // CHECK6: omp.loop.exit:
4327 // CHECK6-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
4328 // CHECK6-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
4329 // CHECK6-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
4330 // CHECK6-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4331 // CHECK6: .omp.final.then:
4332 // CHECK6-NEXT: store i32 123, ptr [[I]], align 4
4333 // CHECK6-NEXT: br label [[DOTOMP_FINAL_DONE]]
4334 // CHECK6: .omp.final.done:
4335 // CHECK6-NEXT: ret void
4338 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l52.omp_outlined.omp_outlined
4339 // CHECK6-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR2]] {
4340 // CHECK6-NEXT: entry:
4341 // CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
4342 // CHECK6-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
4343 // CHECK6-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
4344 // CHECK6-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
4345 // CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
4346 // CHECK6-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
4347 // CHECK6-NEXT: [[TMP:%.*]] = alloca i32, align 4
4348 // CHECK6-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
4349 // CHECK6-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
4350 // CHECK6-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4351 // CHECK6-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4352 // CHECK6-NEXT: [[I:%.*]] = alloca i32, align 4
4353 // CHECK6-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
4354 // CHECK6-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
4355 // CHECK6-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
4356 // CHECK6-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
4357 // CHECK6-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
4358 // CHECK6-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
4359 // CHECK6-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
4360 // CHECK6-NEXT: store i32 122, ptr [[DOTOMP_UB]], align 4
4361 // CHECK6-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
4362 // CHECK6-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
4363 // CHECK6-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_LB]], align 4
4364 // CHECK6-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4
4365 // CHECK6-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
4366 // CHECK6-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
4367 // CHECK6-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
4368 // CHECK6-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
4369 // CHECK6-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
4370 // CHECK6-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
4371 // CHECK6-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB3]], i32 [[TMP6]], i32 1073741859, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 1)
4372 // CHECK6-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
4373 // CHECK6: omp.dispatch.cond:
4374 // CHECK6-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB3]], i32 [[TMP6]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
4375 // CHECK6-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
4376 // CHECK6-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
4377 // CHECK6: omp.dispatch.body:
4378 // CHECK6-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
4379 // CHECK6-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4
4380 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
4381 // CHECK6: omp.inner.for.cond:
4382 // CHECK6-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33:![0-9]+]]
4383 // CHECK6-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP33]]
4384 // CHECK6-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
4385 // CHECK6-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4386 // CHECK6: omp.inner.for.body:
4387 // CHECK6-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]]
4388 // CHECK6-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
4389 // CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
4390 // CHECK6-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP33]]
4391 // CHECK6-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, i32 0
4392 // CHECK6-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP33]]
4393 // CHECK6-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A]], i32 0, i32 [[TMP12]]
4394 // CHECK6-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP33]]
4395 // CHECK6-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
4396 // CHECK6: omp.body.continue:
4397 // CHECK6-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
4398 // CHECK6: omp.inner.for.inc:
4399 // CHECK6-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]]
4400 // CHECK6-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP13]], 1
4401 // CHECK6-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]]
4402 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]]
4403 // CHECK6: omp.inner.for.end:
4404 // CHECK6-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
4405 // CHECK6: omp.dispatch.inc:
4406 // CHECK6-NEXT: br label [[OMP_DISPATCH_COND]]
4407 // CHECK6: omp.dispatch.end:
4408 // CHECK6-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
4409 // CHECK6-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
4410 // CHECK6-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4411 // CHECK6: .omp.final.then:
4412 // CHECK6-NEXT: store i32 123, ptr [[I]], align 4
4413 // CHECK6-NEXT: br label [[DOTOMP_FINAL_DONE]]
4414 // CHECK6: .omp.final.done:
4415 // CHECK6-NEXT: ret void
4418 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58
4419 // CHECK6-SAME: (ptr noundef [[THIS:%.*]]) #[[ATTR1]] {
4420 // CHECK6-NEXT: entry:
4421 // CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
4422 // CHECK6-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
4423 // CHECK6-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
4424 // CHECK6-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58.omp_outlined, ptr [[TMP0]])
4425 // CHECK6-NEXT: ret void
4428 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58.omp_outlined
4429 // CHECK6-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR2]] {
4430 // CHECK6-NEXT: entry:
4431 // CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
4432 // CHECK6-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
4433 // CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
4434 // CHECK6-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
4435 // CHECK6-NEXT: [[TMP:%.*]] = alloca i32, align 4
4436 // CHECK6-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
4437 // CHECK6-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
4438 // CHECK6-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4439 // CHECK6-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4440 // CHECK6-NEXT: [[I:%.*]] = alloca i32, align 4
4441 // CHECK6-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
4442 // CHECK6-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
4443 // CHECK6-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
4444 // CHECK6-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
4445 // CHECK6-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
4446 // CHECK6-NEXT: store i32 122, ptr [[DOTOMP_COMB_UB]], align 4
4447 // CHECK6-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
4448 // CHECK6-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
4449 // CHECK6-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
4450 // CHECK6-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
4451 // CHECK6-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
4452 // CHECK6-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
4453 // CHECK6-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122
4454 // CHECK6-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
4455 // CHECK6: cond.true:
4456 // CHECK6-NEXT: br label [[COND_END:%.*]]
4457 // CHECK6: cond.false:
4458 // CHECK6-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
4459 // CHECK6-NEXT: br label [[COND_END]]
4460 // CHECK6: cond.end:
4461 // CHECK6-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
4462 // CHECK6-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
4463 // CHECK6-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
4464 // CHECK6-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
4465 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
4466 // CHECK6: omp.inner.for.cond:
4467 // CHECK6-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36:![0-9]+]]
4468 // CHECK6-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP36]]
4469 // CHECK6-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
4470 // CHECK6-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4471 // CHECK6: omp.inner.for.body:
4472 // CHECK6-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP36]]
4473 // CHECK6-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP36]]
4474 // CHECK6-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58.omp_outlined.omp_outlined, i32 [[TMP8]], i32 [[TMP9]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP36]]
4475 // CHECK6-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
4476 // CHECK6: omp.inner.for.inc:
4477 // CHECK6-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36]]
4478 // CHECK6-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP36]]
4479 // CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
4480 // CHECK6-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36]]
4481 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP37:![0-9]+]]
4482 // CHECK6: omp.inner.for.end:
4483 // CHECK6-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
4484 // CHECK6: omp.loop.exit:
4485 // CHECK6-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
4486 // CHECK6-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
4487 // CHECK6-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
4488 // CHECK6-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4489 // CHECK6: .omp.final.then:
4490 // CHECK6-NEXT: store i32 123, ptr [[I]], align 4
4491 // CHECK6-NEXT: br label [[DOTOMP_FINAL_DONE]]
4492 // CHECK6: .omp.final.done:
4493 // CHECK6-NEXT: ret void
4496 // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l58.omp_outlined.omp_outlined
4497 // CHECK6-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR2]] {
4498 // CHECK6-NEXT: entry:
4499 // CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
4500 // CHECK6-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
4501 // CHECK6-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
4502 // CHECK6-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
4503 // CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
4504 // CHECK6-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
4505 // CHECK6-NEXT: [[TMP:%.*]] = alloca i32, align 4
4506 // CHECK6-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
4507 // CHECK6-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
4508 // CHECK6-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4509 // CHECK6-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4510 // CHECK6-NEXT: [[I:%.*]] = alloca i32, align 4
4511 // CHECK6-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
4512 // CHECK6-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
4513 // CHECK6-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
4514 // CHECK6-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
4515 // CHECK6-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
4516 // CHECK6-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
4517 // CHECK6-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
4518 // CHECK6-NEXT: store i32 122, ptr [[DOTOMP_UB]], align 4
4519 // CHECK6-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
4520 // CHECK6-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
4521 // CHECK6-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_LB]], align 4
4522 // CHECK6-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4
4523 // CHECK6-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
4524 // CHECK6-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
4525 // CHECK6-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
4526 // CHECK6-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
4527 // CHECK6-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
4528 // CHECK6-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
4529 // CHECK6-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB3]], i32 [[TMP6]], i32 1073741859, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 61)
4530 // CHECK6-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
4531 // CHECK6: omp.dispatch.cond:
4532 // CHECK6-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB3]], i32 [[TMP6]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
4533 // CHECK6-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
4534 // CHECK6-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
4535 // CHECK6: omp.dispatch.body:
4536 // CHECK6-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
4537 // CHECK6-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4
4538 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
4539 // CHECK6: omp.inner.for.cond:
4540 // CHECK6-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39:![0-9]+]]
4541 // CHECK6-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP39]]
4542 // CHECK6-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
4543 // CHECK6-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4544 // CHECK6: omp.inner.for.body:
4545 // CHECK6-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39]]
4546 // CHECK6-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
4547 // CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
4548 // CHECK6-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP39]]
4549 // CHECK6-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, i32 0
4550 // CHECK6-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP39]]
4551 // CHECK6-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A]], i32 0, i32 [[TMP12]]
4552 // CHECK6-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP39]]
4553 // CHECK6-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
4554 // CHECK6: omp.body.continue:
4555 // CHECK6-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
4556 // CHECK6: omp.inner.for.inc:
4557 // CHECK6-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39]]
4558 // CHECK6-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP13]], 1
4559 // CHECK6-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39]]
4560 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP40:![0-9]+]]
4561 // CHECK6: omp.inner.for.end:
4562 // CHECK6-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
4563 // CHECK6: omp.dispatch.inc:
4564 // CHECK6-NEXT: br label [[OMP_DISPATCH_COND]]
4565 // CHECK6: omp.dispatch.end:
4566 // CHECK6-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
4567 // CHECK6-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
4568 // CHECK6-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4569 // CHECK6: .omp.final.then:
4570 // CHECK6-NEXT: store i32 123, ptr [[I]], align 4
4571 // CHECK6-NEXT: br label [[DOTOMP_FINAL_DONE]]
4572 // CHECK6: .omp.final.done:
4573 // CHECK6-NEXT: ret void
4576 // CHECK6-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
4577 // CHECK6-SAME: () #[[ATTR4:[0-9]+]] {
4578 // CHECK6-NEXT: entry:
4579 // CHECK6-NEXT: call void @__tgt_register_requires(i64 1)
4580 // CHECK6-NEXT: ret void
4583 // CHECK9-LABEL: define {{[^@]+}}@_Z21teams_template_structv
4584 // CHECK9-SAME: () #[[ATTR0:[0-9]+]] {
4585 // CHECK9-NEXT: entry:
4586 // CHECK9-NEXT: [[V:%.*]] = alloca [[STRUCT_SS:%.*]], align 4
4587 // CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_ZN2SSIiLi123ELx456EE3fooEv(ptr noundef nonnull align 4 dereferenceable(496) [[V]])
4588 // CHECK9-NEXT: ret i32 [[CALL]]
4591 // CHECK9-LABEL: define {{[^@]+}}@_ZN2SSIiLi123ELx456EE3fooEv
4592 // CHECK9-SAME: (ptr noundef nonnull align 4 dereferenceable(496) [[THIS:%.*]]) #[[ATTR0]] comdat {
4593 // CHECK9-NEXT: entry:
4594 // CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
4595 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
4596 // CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
4597 // CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
4598 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
4599 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
4600 // CHECK9-NEXT: [[_TMP3:%.*]] = alloca i32, align 4
4601 // CHECK9-NEXT: [[DOTOMP_LB4:%.*]] = alloca i32, align 4
4602 // CHECK9-NEXT: [[DOTOMP_UB5:%.*]] = alloca i32, align 4
4603 // CHECK9-NEXT: [[DOTOMP_IV6:%.*]] = alloca i32, align 4
4604 // CHECK9-NEXT: [[I7:%.*]] = alloca i32, align 4
4605 // CHECK9-NEXT: [[_TMP20:%.*]] = alloca i32, align 4
4606 // CHECK9-NEXT: [[DOTOMP_LB21:%.*]] = alloca i32, align 4
4607 // CHECK9-NEXT: [[DOTOMP_UB22:%.*]] = alloca i32, align 4
4608 // CHECK9-NEXT: [[DOTOMP_IV23:%.*]] = alloca i32, align 4
4609 // CHECK9-NEXT: [[I24:%.*]] = alloca i32, align 4
4610 // CHECK9-NEXT: [[_TMP37:%.*]] = alloca i32, align 4
4611 // CHECK9-NEXT: [[DOTOMP_LB38:%.*]] = alloca i32, align 4
4612 // CHECK9-NEXT: [[DOTOMP_UB39:%.*]] = alloca i32, align 4
4613 // CHECK9-NEXT: [[DOTOMP_IV40:%.*]] = alloca i32, align 4
4614 // CHECK9-NEXT: [[I41:%.*]] = alloca i32, align 4
4615 // CHECK9-NEXT: [[_TMP54:%.*]] = alloca i32, align 4
4616 // CHECK9-NEXT: [[DOTOMP_LB55:%.*]] = alloca i32, align 4
4617 // CHECK9-NEXT: [[DOTOMP_UB56:%.*]] = alloca i32, align 4
4618 // CHECK9-NEXT: [[DOTOMP_IV57:%.*]] = alloca i32, align 4
4619 // CHECK9-NEXT: [[I58:%.*]] = alloca i32, align 4
4620 // CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
4621 // CHECK9-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
4622 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
4623 // CHECK9-NEXT: store i32 122, ptr [[DOTOMP_UB]], align 4
4624 // CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
4625 // CHECK9-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4
4626 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
4627 // CHECK9: omp.inner.for.cond:
4628 // CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2:![0-9]+]]
4629 // CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP2]]
4630 // CHECK9-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
4631 // CHECK9-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4632 // CHECK9: omp.inner.for.body:
4633 // CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]]
4634 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
4635 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
4636 // CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP2]]
4637 // CHECK9-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[THIS1]], i32 0, i32 0
4638 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP2]]
4639 // CHECK9-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP4]] to i64
4640 // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A]], i64 0, i64 [[IDXPROM]]
4641 // CHECK9-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP2]]
4642 // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
4643 // CHECK9: omp.body.continue:
4644 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
4645 // CHECK9: omp.inner.for.inc:
4646 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]]
4647 // CHECK9-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP5]], 1
4648 // CHECK9-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]]
4649 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
4650 // CHECK9: omp.inner.for.end:
4651 // CHECK9-NEXT: store i32 123, ptr [[I]], align 4
4652 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB4]], align 4
4653 // CHECK9-NEXT: store i32 122, ptr [[DOTOMP_UB5]], align 4
4654 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB4]], align 4
4655 // CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV6]], align 4
4656 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND8:%.*]]
4657 // CHECK9: omp.inner.for.cond8:
4658 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6:![0-9]+]]
4659 // CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB5]], align 4, !llvm.access.group [[ACC_GRP6]]
4660 // CHECK9-NEXT: [[CMP9:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
4661 // CHECK9-NEXT: br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY10:%.*]], label [[OMP_INNER_FOR_END19:%.*]]
4662 // CHECK9: omp.inner.for.body10:
4663 // CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6]]
4664 // CHECK9-NEXT: [[MUL11:%.*]] = mul nsw i32 [[TMP9]], 1
4665 // CHECK9-NEXT: [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
4666 // CHECK9-NEXT: store i32 [[ADD12]], ptr [[I7]], align 4, !llvm.access.group [[ACC_GRP6]]
4667 // CHECK9-NEXT: [[A13:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
4668 // CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[I7]], align 4, !llvm.access.group [[ACC_GRP6]]
4669 // CHECK9-NEXT: [[IDXPROM14:%.*]] = sext i32 [[TMP10]] to i64
4670 // CHECK9-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds [123 x i32], ptr [[A13]], i64 0, i64 [[IDXPROM14]]
4671 // CHECK9-NEXT: store i32 0, ptr [[ARRAYIDX15]], align 4, !llvm.access.group [[ACC_GRP6]]
4672 // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE16:%.*]]
4673 // CHECK9: omp.body.continue16:
4674 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC17:%.*]]
4675 // CHECK9: omp.inner.for.inc17:
4676 // CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6]]
4677 // CHECK9-NEXT: [[ADD18:%.*]] = add nsw i32 [[TMP11]], 1
4678 // CHECK9-NEXT: store i32 [[ADD18]], ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6]]
4679 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND8]], !llvm.loop [[LOOP7:![0-9]+]]
4680 // CHECK9: omp.inner.for.end19:
4681 // CHECK9-NEXT: store i32 123, ptr [[I7]], align 4
4682 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB21]], align 4
4683 // CHECK9-NEXT: store i32 122, ptr [[DOTOMP_UB22]], align 4
4684 // CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_LB21]], align 4
4685 // CHECK9-NEXT: store i32 [[TMP12]], ptr [[DOTOMP_IV23]], align 4
4686 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND25:%.*]]
4687 // CHECK9: omp.inner.for.cond25:
4688 // CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV23]], align 4, !llvm.access.group [[ACC_GRP9:![0-9]+]]
4689 // CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB22]], align 4, !llvm.access.group [[ACC_GRP9]]
4690 // CHECK9-NEXT: [[CMP26:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
4691 // CHECK9-NEXT: br i1 [[CMP26]], label [[OMP_INNER_FOR_BODY27:%.*]], label [[OMP_INNER_FOR_END36:%.*]]
4692 // CHECK9: omp.inner.for.body27:
4693 // CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV23]], align 4, !llvm.access.group [[ACC_GRP9]]
4694 // CHECK9-NEXT: [[MUL28:%.*]] = mul nsw i32 [[TMP15]], 1
4695 // CHECK9-NEXT: [[ADD29:%.*]] = add nsw i32 0, [[MUL28]]
4696 // CHECK9-NEXT: store i32 [[ADD29]], ptr [[I24]], align 4, !llvm.access.group [[ACC_GRP9]]
4697 // CHECK9-NEXT: [[A30:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
4698 // CHECK9-NEXT: [[TMP16:%.*]] = load i32, ptr [[I24]], align 4, !llvm.access.group [[ACC_GRP9]]
4699 // CHECK9-NEXT: [[IDXPROM31:%.*]] = sext i32 [[TMP16]] to i64
4700 // CHECK9-NEXT: [[ARRAYIDX32:%.*]] = getelementptr inbounds [123 x i32], ptr [[A30]], i64 0, i64 [[IDXPROM31]]
4701 // CHECK9-NEXT: store i32 0, ptr [[ARRAYIDX32]], align 4, !llvm.access.group [[ACC_GRP9]]
4702 // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE33:%.*]]
4703 // CHECK9: omp.body.continue33:
4704 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC34:%.*]]
4705 // CHECK9: omp.inner.for.inc34:
4706 // CHECK9-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV23]], align 4, !llvm.access.group [[ACC_GRP9]]
4707 // CHECK9-NEXT: [[ADD35:%.*]] = add nsw i32 [[TMP17]], 1
4708 // CHECK9-NEXT: store i32 [[ADD35]], ptr [[DOTOMP_IV23]], align 4, !llvm.access.group [[ACC_GRP9]]
4709 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND25]], !llvm.loop [[LOOP10:![0-9]+]]
4710 // CHECK9: omp.inner.for.end36:
4711 // CHECK9-NEXT: store i32 123, ptr [[I24]], align 4
4712 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB38]], align 4
4713 // CHECK9-NEXT: store i32 122, ptr [[DOTOMP_UB39]], align 4
4714 // CHECK9-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_LB38]], align 4
4715 // CHECK9-NEXT: store i32 [[TMP18]], ptr [[DOTOMP_IV40]], align 4
4716 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND42:%.*]]
4717 // CHECK9: omp.inner.for.cond42:
4718 // CHECK9-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV40]], align 4, !llvm.access.group [[ACC_GRP12:![0-9]+]]
4719 // CHECK9-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_UB39]], align 4, !llvm.access.group [[ACC_GRP12]]
4720 // CHECK9-NEXT: [[CMP43:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
4721 // CHECK9-NEXT: br i1 [[CMP43]], label [[OMP_INNER_FOR_BODY44:%.*]], label [[OMP_INNER_FOR_END53:%.*]]
4722 // CHECK9: omp.inner.for.body44:
4723 // CHECK9-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IV40]], align 4, !llvm.access.group [[ACC_GRP12]]
4724 // CHECK9-NEXT: [[MUL45:%.*]] = mul nsw i32 [[TMP21]], 1
4725 // CHECK9-NEXT: [[ADD46:%.*]] = add nsw i32 0, [[MUL45]]
4726 // CHECK9-NEXT: store i32 [[ADD46]], ptr [[I41]], align 4, !llvm.access.group [[ACC_GRP12]]
4727 // CHECK9-NEXT: [[A47:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
4728 // CHECK9-NEXT: [[TMP22:%.*]] = load i32, ptr [[I41]], align 4, !llvm.access.group [[ACC_GRP12]]
4729 // CHECK9-NEXT: [[IDXPROM48:%.*]] = sext i32 [[TMP22]] to i64
4730 // CHECK9-NEXT: [[ARRAYIDX49:%.*]] = getelementptr inbounds [123 x i32], ptr [[A47]], i64 0, i64 [[IDXPROM48]]
4731 // CHECK9-NEXT: store i32 0, ptr [[ARRAYIDX49]], align 4, !llvm.access.group [[ACC_GRP12]]
4732 // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE50:%.*]]
4733 // CHECK9: omp.body.continue50:
4734 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC51:%.*]]
4735 // CHECK9: omp.inner.for.inc51:
4736 // CHECK9-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV40]], align 4, !llvm.access.group [[ACC_GRP12]]
4737 // CHECK9-NEXT: [[ADD52:%.*]] = add nsw i32 [[TMP23]], 1
4738 // CHECK9-NEXT: store i32 [[ADD52]], ptr [[DOTOMP_IV40]], align 4, !llvm.access.group [[ACC_GRP12]]
4739 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND42]], !llvm.loop [[LOOP13:![0-9]+]]
4740 // CHECK9: omp.inner.for.end53:
4741 // CHECK9-NEXT: store i32 123, ptr [[I41]], align 4
4742 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB55]], align 4
4743 // CHECK9-NEXT: store i32 122, ptr [[DOTOMP_UB56]], align 4
4744 // CHECK9-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_LB55]], align 4
4745 // CHECK9-NEXT: store i32 [[TMP24]], ptr [[DOTOMP_IV57]], align 4
4746 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND59:%.*]]
4747 // CHECK9: omp.inner.for.cond59:
4748 // CHECK9-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IV57]], align 4, !llvm.access.group [[ACC_GRP15:![0-9]+]]
4749 // CHECK9-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_UB56]], align 4, !llvm.access.group [[ACC_GRP15]]
4750 // CHECK9-NEXT: [[CMP60:%.*]] = icmp sle i32 [[TMP25]], [[TMP26]]
4751 // CHECK9-NEXT: br i1 [[CMP60]], label [[OMP_INNER_FOR_BODY61:%.*]], label [[OMP_INNER_FOR_END70:%.*]]
4752 // CHECK9: omp.inner.for.body61:
4753 // CHECK9-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV57]], align 4, !llvm.access.group [[ACC_GRP15]]
4754 // CHECK9-NEXT: [[MUL62:%.*]] = mul nsw i32 [[TMP27]], 1
4755 // CHECK9-NEXT: [[ADD63:%.*]] = add nsw i32 0, [[MUL62]]
4756 // CHECK9-NEXT: store i32 [[ADD63]], ptr [[I58]], align 4, !llvm.access.group [[ACC_GRP15]]
4757 // CHECK9-NEXT: [[A64:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
4758 // CHECK9-NEXT: [[TMP28:%.*]] = load i32, ptr [[I58]], align 4, !llvm.access.group [[ACC_GRP15]]
4759 // CHECK9-NEXT: [[IDXPROM65:%.*]] = sext i32 [[TMP28]] to i64
4760 // CHECK9-NEXT: [[ARRAYIDX66:%.*]] = getelementptr inbounds [123 x i32], ptr [[A64]], i64 0, i64 [[IDXPROM65]]
4761 // CHECK9-NEXT: store i32 0, ptr [[ARRAYIDX66]], align 4, !llvm.access.group [[ACC_GRP15]]
4762 // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE67:%.*]]
4763 // CHECK9: omp.body.continue67:
4764 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC68:%.*]]
4765 // CHECK9: omp.inner.for.inc68:
4766 // CHECK9-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_IV57]], align 4, !llvm.access.group [[ACC_GRP15]]
4767 // CHECK9-NEXT: [[ADD69:%.*]] = add nsw i32 [[TMP29]], 1
4768 // CHECK9-NEXT: store i32 [[ADD69]], ptr [[DOTOMP_IV57]], align 4, !llvm.access.group [[ACC_GRP15]]
4769 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND59]], !llvm.loop [[LOOP16:![0-9]+]]
4770 // CHECK9: omp.inner.for.end70:
4771 // CHECK9-NEXT: store i32 123, ptr [[I58]], align 4
4772 // CHECK9-NEXT: [[A71:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
4773 // CHECK9-NEXT: [[ARRAYIDX72:%.*]] = getelementptr inbounds [123 x i32], ptr [[A71]], i64 0, i64 0
4774 // CHECK9-NEXT: [[TMP30:%.*]] = load i32, ptr [[ARRAYIDX72]], align 4
4775 // CHECK9-NEXT: ret i32 [[TMP30]]
4778 // CHECK11-LABEL: define {{[^@]+}}@_Z21teams_template_structv
4779 // CHECK11-SAME: () #[[ATTR0:[0-9]+]] {
4780 // CHECK11-NEXT: entry:
4781 // CHECK11-NEXT: [[V:%.*]] = alloca [[STRUCT_SS:%.*]], align 4
4782 // CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_ZN2SSIiLi123ELx456EE3fooEv(ptr noundef nonnull align 4 dereferenceable(496) [[V]])
4783 // CHECK11-NEXT: ret i32 [[CALL]]
4786 // CHECK11-LABEL: define {{[^@]+}}@_ZN2SSIiLi123ELx456EE3fooEv
4787 // CHECK11-SAME: (ptr noundef nonnull align 4 dereferenceable(496) [[THIS:%.*]]) #[[ATTR0]] comdat align 2 {
4788 // CHECK11-NEXT: entry:
4789 // CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4
4790 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
4791 // CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
4792 // CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
4793 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
4794 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4
4795 // CHECK11-NEXT: [[_TMP3:%.*]] = alloca i32, align 4
4796 // CHECK11-NEXT: [[DOTOMP_LB4:%.*]] = alloca i32, align 4
4797 // CHECK11-NEXT: [[DOTOMP_UB5:%.*]] = alloca i32, align 4
4798 // CHECK11-NEXT: [[DOTOMP_IV6:%.*]] = alloca i32, align 4
4799 // CHECK11-NEXT: [[I7:%.*]] = alloca i32, align 4
4800 // CHECK11-NEXT: [[_TMP19:%.*]] = alloca i32, align 4
4801 // CHECK11-NEXT: [[DOTOMP_LB20:%.*]] = alloca i32, align 4
4802 // CHECK11-NEXT: [[DOTOMP_UB21:%.*]] = alloca i32, align 4
4803 // CHECK11-NEXT: [[DOTOMP_IV22:%.*]] = alloca i32, align 4
4804 // CHECK11-NEXT: [[I23:%.*]] = alloca i32, align 4
4805 // CHECK11-NEXT: [[_TMP35:%.*]] = alloca i32, align 4
4806 // CHECK11-NEXT: [[DOTOMP_LB36:%.*]] = alloca i32, align 4
4807 // CHECK11-NEXT: [[DOTOMP_UB37:%.*]] = alloca i32, align 4
4808 // CHECK11-NEXT: [[DOTOMP_IV38:%.*]] = alloca i32, align 4
4809 // CHECK11-NEXT: [[I39:%.*]] = alloca i32, align 4
4810 // CHECK11-NEXT: [[_TMP51:%.*]] = alloca i32, align 4
4811 // CHECK11-NEXT: [[DOTOMP_LB52:%.*]] = alloca i32, align 4
4812 // CHECK11-NEXT: [[DOTOMP_UB53:%.*]] = alloca i32, align 4
4813 // CHECK11-NEXT: [[DOTOMP_IV54:%.*]] = alloca i32, align 4
4814 // CHECK11-NEXT: [[I55:%.*]] = alloca i32, align 4
4815 // CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4
4816 // CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4
4817 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
4818 // CHECK11-NEXT: store i32 122, ptr [[DOTOMP_UB]], align 4
4819 // CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
4820 // CHECK11-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4
4821 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
4822 // CHECK11: omp.inner.for.cond:
4823 // CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]]
4824 // CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP3]]
4825 // CHECK11-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
4826 // CHECK11-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4827 // CHECK11: omp.inner.for.body:
4828 // CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]]
4829 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
4830 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
4831 // CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]]
4832 // CHECK11-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[THIS1]], i32 0, i32 0
4833 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]]
4834 // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A]], i32 0, i32 [[TMP4]]
4835 // CHECK11-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP3]]
4836 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
4837 // CHECK11: omp.body.continue:
4838 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
4839 // CHECK11: omp.inner.for.inc:
4840 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]]
4841 // CHECK11-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP5]], 1
4842 // CHECK11-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]]
4843 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
4844 // CHECK11: omp.inner.for.end:
4845 // CHECK11-NEXT: store i32 123, ptr [[I]], align 4
4846 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB4]], align 4
4847 // CHECK11-NEXT: store i32 122, ptr [[DOTOMP_UB5]], align 4
4848 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB4]], align 4
4849 // CHECK11-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV6]], align 4
4850 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND8:%.*]]
4851 // CHECK11: omp.inner.for.cond8:
4852 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP7:![0-9]+]]
4853 // CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB5]], align 4, !llvm.access.group [[ACC_GRP7]]
4854 // CHECK11-NEXT: [[CMP9:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
4855 // CHECK11-NEXT: br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY10:%.*]], label [[OMP_INNER_FOR_END18:%.*]]
4856 // CHECK11: omp.inner.for.body10:
4857 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP7]]
4858 // CHECK11-NEXT: [[MUL11:%.*]] = mul nsw i32 [[TMP9]], 1
4859 // CHECK11-NEXT: [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
4860 // CHECK11-NEXT: store i32 [[ADD12]], ptr [[I7]], align 4, !llvm.access.group [[ACC_GRP7]]
4861 // CHECK11-NEXT: [[A13:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
4862 // CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[I7]], align 4, !llvm.access.group [[ACC_GRP7]]
4863 // CHECK11-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [123 x i32], ptr [[A13]], i32 0, i32 [[TMP10]]
4864 // CHECK11-NEXT: store i32 0, ptr [[ARRAYIDX14]], align 4, !llvm.access.group [[ACC_GRP7]]
4865 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE15:%.*]]
4866 // CHECK11: omp.body.continue15:
4867 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC16:%.*]]
4868 // CHECK11: omp.inner.for.inc16:
4869 // CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP7]]
4870 // CHECK11-NEXT: [[ADD17:%.*]] = add nsw i32 [[TMP11]], 1
4871 // CHECK11-NEXT: store i32 [[ADD17]], ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP7]]
4872 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND8]], !llvm.loop [[LOOP8:![0-9]+]]
4873 // CHECK11: omp.inner.for.end18:
4874 // CHECK11-NEXT: store i32 123, ptr [[I7]], align 4
4875 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB20]], align 4
4876 // CHECK11-NEXT: store i32 122, ptr [[DOTOMP_UB21]], align 4
4877 // CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_LB20]], align 4
4878 // CHECK11-NEXT: store i32 [[TMP12]], ptr [[DOTOMP_IV22]], align 4
4879 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND24:%.*]]
4880 // CHECK11: omp.inner.for.cond24:
4881 // CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV22]], align 4, !llvm.access.group [[ACC_GRP10:![0-9]+]]
4882 // CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB21]], align 4, !llvm.access.group [[ACC_GRP10]]
4883 // CHECK11-NEXT: [[CMP25:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
4884 // CHECK11-NEXT: br i1 [[CMP25]], label [[OMP_INNER_FOR_BODY26:%.*]], label [[OMP_INNER_FOR_END34:%.*]]
4885 // CHECK11: omp.inner.for.body26:
4886 // CHECK11-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV22]], align 4, !llvm.access.group [[ACC_GRP10]]
4887 // CHECK11-NEXT: [[MUL27:%.*]] = mul nsw i32 [[TMP15]], 1
4888 // CHECK11-NEXT: [[ADD28:%.*]] = add nsw i32 0, [[MUL27]]
4889 // CHECK11-NEXT: store i32 [[ADD28]], ptr [[I23]], align 4, !llvm.access.group [[ACC_GRP10]]
4890 // CHECK11-NEXT: [[A29:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
4891 // CHECK11-NEXT: [[TMP16:%.*]] = load i32, ptr [[I23]], align 4, !llvm.access.group [[ACC_GRP10]]
4892 // CHECK11-NEXT: [[ARRAYIDX30:%.*]] = getelementptr inbounds [123 x i32], ptr [[A29]], i32 0, i32 [[TMP16]]
4893 // CHECK11-NEXT: store i32 0, ptr [[ARRAYIDX30]], align 4, !llvm.access.group [[ACC_GRP10]]
4894 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE31:%.*]]
4895 // CHECK11: omp.body.continue31:
4896 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC32:%.*]]
4897 // CHECK11: omp.inner.for.inc32:
4898 // CHECK11-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV22]], align 4, !llvm.access.group [[ACC_GRP10]]
4899 // CHECK11-NEXT: [[ADD33:%.*]] = add nsw i32 [[TMP17]], 1
4900 // CHECK11-NEXT: store i32 [[ADD33]], ptr [[DOTOMP_IV22]], align 4, !llvm.access.group [[ACC_GRP10]]
4901 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND24]], !llvm.loop [[LOOP11:![0-9]+]]
4902 // CHECK11: omp.inner.for.end34:
4903 // CHECK11-NEXT: store i32 123, ptr [[I23]], align 4
4904 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB36]], align 4
4905 // CHECK11-NEXT: store i32 122, ptr [[DOTOMP_UB37]], align 4
4906 // CHECK11-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_LB36]], align 4
4907 // CHECK11-NEXT: store i32 [[TMP18]], ptr [[DOTOMP_IV38]], align 4
4908 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND40:%.*]]
4909 // CHECK11: omp.inner.for.cond40:
4910 // CHECK11-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV38]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]]
4911 // CHECK11-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_UB37]], align 4, !llvm.access.group [[ACC_GRP13]]
4912 // CHECK11-NEXT: [[CMP41:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
4913 // CHECK11-NEXT: br i1 [[CMP41]], label [[OMP_INNER_FOR_BODY42:%.*]], label [[OMP_INNER_FOR_END50:%.*]]
4914 // CHECK11: omp.inner.for.body42:
4915 // CHECK11-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IV38]], align 4, !llvm.access.group [[ACC_GRP13]]
4916 // CHECK11-NEXT: [[MUL43:%.*]] = mul nsw i32 [[TMP21]], 1
4917 // CHECK11-NEXT: [[ADD44:%.*]] = add nsw i32 0, [[MUL43]]
4918 // CHECK11-NEXT: store i32 [[ADD44]], ptr [[I39]], align 4, !llvm.access.group [[ACC_GRP13]]
4919 // CHECK11-NEXT: [[A45:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
4920 // CHECK11-NEXT: [[TMP22:%.*]] = load i32, ptr [[I39]], align 4, !llvm.access.group [[ACC_GRP13]]
4921 // CHECK11-NEXT: [[ARRAYIDX46:%.*]] = getelementptr inbounds [123 x i32], ptr [[A45]], i32 0, i32 [[TMP22]]
4922 // CHECK11-NEXT: store i32 0, ptr [[ARRAYIDX46]], align 4, !llvm.access.group [[ACC_GRP13]]
4923 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE47:%.*]]
4924 // CHECK11: omp.body.continue47:
4925 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC48:%.*]]
4926 // CHECK11: omp.inner.for.inc48:
4927 // CHECK11-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV38]], align 4, !llvm.access.group [[ACC_GRP13]]
4928 // CHECK11-NEXT: [[ADD49:%.*]] = add nsw i32 [[TMP23]], 1
4929 // CHECK11-NEXT: store i32 [[ADD49]], ptr [[DOTOMP_IV38]], align 4, !llvm.access.group [[ACC_GRP13]]
4930 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND40]], !llvm.loop [[LOOP14:![0-9]+]]
4931 // CHECK11: omp.inner.for.end50:
4932 // CHECK11-NEXT: store i32 123, ptr [[I39]], align 4
4933 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB52]], align 4
4934 // CHECK11-NEXT: store i32 122, ptr [[DOTOMP_UB53]], align 4
4935 // CHECK11-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_LB52]], align 4
4936 // CHECK11-NEXT: store i32 [[TMP24]], ptr [[DOTOMP_IV54]], align 4
4937 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND56:%.*]]
4938 // CHECK11: omp.inner.for.cond56:
4939 // CHECK11-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IV54]], align 4, !llvm.access.group [[ACC_GRP16:![0-9]+]]
4940 // CHECK11-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_UB53]], align 4, !llvm.access.group [[ACC_GRP16]]
4941 // CHECK11-NEXT: [[CMP57:%.*]] = icmp sle i32 [[TMP25]], [[TMP26]]
4942 // CHECK11-NEXT: br i1 [[CMP57]], label [[OMP_INNER_FOR_BODY58:%.*]], label [[OMP_INNER_FOR_END66:%.*]]
4943 // CHECK11: omp.inner.for.body58:
4944 // CHECK11-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV54]], align 4, !llvm.access.group [[ACC_GRP16]]
4945 // CHECK11-NEXT: [[MUL59:%.*]] = mul nsw i32 [[TMP27]], 1
4946 // CHECK11-NEXT: [[ADD60:%.*]] = add nsw i32 0, [[MUL59]]
4947 // CHECK11-NEXT: store i32 [[ADD60]], ptr [[I55]], align 4, !llvm.access.group [[ACC_GRP16]]
4948 // CHECK11-NEXT: [[A61:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
4949 // CHECK11-NEXT: [[TMP28:%.*]] = load i32, ptr [[I55]], align 4, !llvm.access.group [[ACC_GRP16]]
4950 // CHECK11-NEXT: [[ARRAYIDX62:%.*]] = getelementptr inbounds [123 x i32], ptr [[A61]], i32 0, i32 [[TMP28]]
4951 // CHECK11-NEXT: store i32 0, ptr [[ARRAYIDX62]], align 4, !llvm.access.group [[ACC_GRP16]]
4952 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE63:%.*]]
4953 // CHECK11: omp.body.continue63:
4954 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC64:%.*]]
4955 // CHECK11: omp.inner.for.inc64:
4956 // CHECK11-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_IV54]], align 4, !llvm.access.group [[ACC_GRP16]]
4957 // CHECK11-NEXT: [[ADD65:%.*]] = add nsw i32 [[TMP29]], 1
4958 // CHECK11-NEXT: store i32 [[ADD65]], ptr [[DOTOMP_IV54]], align 4, !llvm.access.group [[ACC_GRP16]]
4959 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND56]], !llvm.loop [[LOOP17:![0-9]+]]
4960 // CHECK11: omp.inner.for.end66:
4961 // CHECK11-NEXT: store i32 123, ptr [[I55]], align 4
4962 // CHECK11-NEXT: [[A67:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0
4963 // CHECK11-NEXT: [[ARRAYIDX68:%.*]] = getelementptr inbounds [123 x i32], ptr [[A67]], i32 0, i32 0
4964 // CHECK11-NEXT: [[TMP30:%.*]] = load i32, ptr [[ARRAYIDX68]], align 4
4965 // CHECK11-NEXT: ret i32 [[TMP30]]
4968 // CHECK13-LABEL: define {{[^@]+}}@main
4969 // CHECK13-SAME: (i32 noundef signext [[ARGC:%.*]], ptr noundef [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] {
4970 // CHECK13-NEXT: entry:
4971 // CHECK13-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
4972 // CHECK13-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
4973 // CHECK13-NEXT: [[ARGV_ADDR:%.*]] = alloca ptr, align 8
4974 // CHECK13-NEXT: [[N:%.*]] = alloca i32, align 4
4975 // CHECK13-NEXT: [[SAVED_STACK:%.*]] = alloca ptr, align 8
4976 // CHECK13-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
4977 // CHECK13-NEXT: [[M:%.*]] = alloca i32, align 4
4978 // CHECK13-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
4979 // CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x ptr], align 8
4980 // CHECK13-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x ptr], align 8
4981 // CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x ptr], align 8
4982 // CHECK13-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8
4983 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
4984 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
4985 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
4986 // CHECK13-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
4987 // CHECK13-NEXT: [[N_CASTED3:%.*]] = alloca i64, align 8
4988 // CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [3 x ptr], align 8
4989 // CHECK13-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [3 x ptr], align 8
4990 // CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [3 x ptr], align 8
4991 // CHECK13-NEXT: [[DOTOFFLOAD_SIZES7:%.*]] = alloca [3 x i64], align 8
4992 // CHECK13-NEXT: [[_TMP8:%.*]] = alloca i32, align 4
4993 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4
4994 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4
4995 // CHECK13-NEXT: [[KERNEL_ARGS15:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
4996 // CHECK13-NEXT: [[M_CASTED:%.*]] = alloca i64, align 8
4997 // CHECK13-NEXT: [[N_CASTED18:%.*]] = alloca i64, align 8
4998 // CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS19:%.*]] = alloca [4 x ptr], align 8
4999 // CHECK13-NEXT: [[DOTOFFLOAD_PTRS20:%.*]] = alloca [4 x ptr], align 8
5000 // CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS21:%.*]] = alloca [4 x ptr], align 8
5001 // CHECK13-NEXT: [[DOTOFFLOAD_SIZES22:%.*]] = alloca [4 x i64], align 8
5002 // CHECK13-NEXT: [[_TMP23:%.*]] = alloca i32, align 4
5003 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4
5004 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4
5005 // CHECK13-NEXT: [[KERNEL_ARGS30:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
5006 // CHECK13-NEXT: [[N_CASTED33:%.*]] = alloca i64, align 8
5007 // CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS34:%.*]] = alloca [3 x ptr], align 8
5008 // CHECK13-NEXT: [[DOTOFFLOAD_PTRS35:%.*]] = alloca [3 x ptr], align 8
5009 // CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS36:%.*]] = alloca [3 x ptr], align 8
5010 // CHECK13-NEXT: [[DOTOFFLOAD_SIZES37:%.*]] = alloca [3 x i64], align 8
5011 // CHECK13-NEXT: [[_TMP38:%.*]] = alloca i32, align 4
5012 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4
5013 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_40:%.*]] = alloca i32, align 4
5014 // CHECK13-NEXT: [[KERNEL_ARGS45:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
5015 // CHECK13-NEXT: [[M_CASTED48:%.*]] = alloca i64, align 8
5016 // CHECK13-NEXT: [[N_CASTED49:%.*]] = alloca i64, align 8
5017 // CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS50:%.*]] = alloca [4 x ptr], align 8
5018 // CHECK13-NEXT: [[DOTOFFLOAD_PTRS51:%.*]] = alloca [4 x ptr], align 8
5019 // CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS52:%.*]] = alloca [4 x ptr], align 8
5020 // CHECK13-NEXT: [[DOTOFFLOAD_SIZES53:%.*]] = alloca [4 x i64], align 8
5021 // CHECK13-NEXT: [[_TMP54:%.*]] = alloca i32, align 4
5022 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_55:%.*]] = alloca i32, align 4
5023 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_56:%.*]] = alloca i32, align 4
5024 // CHECK13-NEXT: [[KERNEL_ARGS61:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
5025 // CHECK13-NEXT: store i32 0, ptr [[RETVAL]], align 4
5026 // CHECK13-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4
5027 // CHECK13-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8
5028 // CHECK13-NEXT: store i32 100, ptr [[N]], align 4
5029 // CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4
5030 // CHECK13-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
5031 // CHECK13-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0()
5032 // CHECK13-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8
5033 // CHECK13-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4
5034 // CHECK13-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8
5035 // CHECK13-NEXT: store i32 10, ptr [[M]], align 4
5036 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[N]], align 4
5037 // CHECK13-NEXT: store i32 [[TMP3]], ptr [[N_CASTED]], align 4
5038 // CHECK13-NEXT: [[TMP4:%.*]] = load i64, ptr [[N_CASTED]], align 8
5039 // CHECK13-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP1]], 4
5040 // CHECK13-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[DOTOFFLOAD_SIZES]], ptr align 8 @.offload_sizes, i64 24, i1 false)
5041 // CHECK13-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
5042 // CHECK13-NEXT: store i64 [[TMP4]], ptr [[TMP6]], align 8
5043 // CHECK13-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
5044 // CHECK13-NEXT: store i64 [[TMP4]], ptr [[TMP7]], align 8
5045 // CHECK13-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
5046 // CHECK13-NEXT: store ptr null, ptr [[TMP8]], align 8
5047 // CHECK13-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
5048 // CHECK13-NEXT: store i64 [[TMP1]], ptr [[TMP9]], align 8
5049 // CHECK13-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1
5050 // CHECK13-NEXT: store i64 [[TMP1]], ptr [[TMP10]], align 8
5051 // CHECK13-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
5052 // CHECK13-NEXT: store ptr null, ptr [[TMP11]], align 8
5053 // CHECK13-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
5054 // CHECK13-NEXT: store ptr [[VLA]], ptr [[TMP12]], align 8
5055 // CHECK13-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2
5056 // CHECK13-NEXT: store ptr [[VLA]], ptr [[TMP13]], align 8
5057 // CHECK13-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 2
5058 // CHECK13-NEXT: store i64 [[TMP5]], ptr [[TMP14]], align 8
5059 // CHECK13-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
5060 // CHECK13-NEXT: store ptr null, ptr [[TMP15]], align 8
5061 // CHECK13-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
5062 // CHECK13-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
5063 // CHECK13-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 0
5064 // CHECK13-NEXT: [[TMP19:%.*]] = load i32, ptr [[N]], align 4
5065 // CHECK13-NEXT: store i32 [[TMP19]], ptr [[DOTCAPTURE_EXPR_]], align 4
5066 // CHECK13-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
5067 // CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP20]], 0
5068 // CHECK13-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
5069 // CHECK13-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
5070 // CHECK13-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
5071 // CHECK13-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
5072 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP21]], 1
5073 // CHECK13-NEXT: [[TMP22:%.*]] = zext i32 [[ADD]] to i64
5074 // CHECK13-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
5075 // CHECK13-NEXT: store i32 2, ptr [[TMP23]], align 4
5076 // CHECK13-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
5077 // CHECK13-NEXT: store i32 3, ptr [[TMP24]], align 4
5078 // CHECK13-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
5079 // CHECK13-NEXT: store ptr [[TMP16]], ptr [[TMP25]], align 8
5080 // CHECK13-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
5081 // CHECK13-NEXT: store ptr [[TMP17]], ptr [[TMP26]], align 8
5082 // CHECK13-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
5083 // CHECK13-NEXT: store ptr [[TMP18]], ptr [[TMP27]], align 8
5084 // CHECK13-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
5085 // CHECK13-NEXT: store ptr @.offload_maptypes, ptr [[TMP28]], align 8
5086 // CHECK13-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
5087 // CHECK13-NEXT: store ptr null, ptr [[TMP29]], align 8
5088 // CHECK13-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
5089 // CHECK13-NEXT: store ptr null, ptr [[TMP30]], align 8
5090 // CHECK13-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
5091 // CHECK13-NEXT: store i64 [[TMP22]], ptr [[TMP31]], align 8
5092 // CHECK13-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
5093 // CHECK13-NEXT: store i64 0, ptr [[TMP32]], align 8
5094 // CHECK13-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
5095 // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP33]], align 4
5096 // CHECK13-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
5097 // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP34]], align 4
5098 // CHECK13-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
5099 // CHECK13-NEXT: store i32 0, ptr [[TMP35]], align 4
5100 // CHECK13-NEXT: [[TMP36:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.region_id, ptr [[KERNEL_ARGS]])
5101 // CHECK13-NEXT: [[TMP37:%.*]] = icmp ne i32 [[TMP36]], 0
5102 // CHECK13-NEXT: br i1 [[TMP37]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
5103 // CHECK13: omp_offload.failed:
5104 // CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154(i64 [[TMP4]], i64 [[TMP1]], ptr [[VLA]]) #[[ATTR4:[0-9]+]]
5105 // CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT]]
5106 // CHECK13: omp_offload.cont:
5107 // CHECK13-NEXT: [[TMP38:%.*]] = load i32, ptr [[N]], align 4
5108 // CHECK13-NEXT: store i32 [[TMP38]], ptr [[N_CASTED3]], align 4
5109 // CHECK13-NEXT: [[TMP39:%.*]] = load i64, ptr [[N_CASTED3]], align 8
5110 // CHECK13-NEXT: [[TMP40:%.*]] = mul nuw i64 [[TMP1]], 4
5111 // CHECK13-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[DOTOFFLOAD_SIZES7]], ptr align 8 @.offload_sizes.1, i64 24, i1 false)
5112 // CHECK13-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
5113 // CHECK13-NEXT: store i64 [[TMP39]], ptr [[TMP41]], align 8
5114 // CHECK13-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
5115 // CHECK13-NEXT: store i64 [[TMP39]], ptr [[TMP42]], align 8
5116 // CHECK13-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS6]], i64 0, i64 0
5117 // CHECK13-NEXT: store ptr null, ptr [[TMP43]], align 8
5118 // CHECK13-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1
5119 // CHECK13-NEXT: store i64 [[TMP1]], ptr [[TMP44]], align 8
5120 // CHECK13-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS5]], i32 0, i32 1
5121 // CHECK13-NEXT: store i64 [[TMP1]], ptr [[TMP45]], align 8
5122 // CHECK13-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS6]], i64 0, i64 1
5123 // CHECK13-NEXT: store ptr null, ptr [[TMP46]], align 8
5124 // CHECK13-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2
5125 // CHECK13-NEXT: store ptr [[VLA]], ptr [[TMP47]], align 8
5126 // CHECK13-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS5]], i32 0, i32 2
5127 // CHECK13-NEXT: store ptr [[VLA]], ptr [[TMP48]], align 8
5128 // CHECK13-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES7]], i32 0, i32 2
5129 // CHECK13-NEXT: store i64 [[TMP40]], ptr [[TMP49]], align 8
5130 // CHECK13-NEXT: [[TMP50:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS6]], i64 0, i64 2
5131 // CHECK13-NEXT: store ptr null, ptr [[TMP50]], align 8
5132 // CHECK13-NEXT: [[TMP51:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
5133 // CHECK13-NEXT: [[TMP52:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
5134 // CHECK13-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES7]], i32 0, i32 0
5135 // CHECK13-NEXT: [[TMP54:%.*]] = load i32, ptr [[N]], align 4
5136 // CHECK13-NEXT: store i32 [[TMP54]], ptr [[DOTCAPTURE_EXPR_9]], align 4
5137 // CHECK13-NEXT: [[TMP55:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_9]], align 4
5138 // CHECK13-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP55]], 0
5139 // CHECK13-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1
5140 // CHECK13-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1
5141 // CHECK13-NEXT: store i32 [[SUB13]], ptr [[DOTCAPTURE_EXPR_10]], align 4
5142 // CHECK13-NEXT: [[TMP56:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_10]], align 4
5143 // CHECK13-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP56]], 1
5144 // CHECK13-NEXT: [[TMP57:%.*]] = zext i32 [[ADD14]] to i64
5145 // CHECK13-NEXT: [[TMP58:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 0
5146 // CHECK13-NEXT: store i32 2, ptr [[TMP58]], align 4
5147 // CHECK13-NEXT: [[TMP59:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 1
5148 // CHECK13-NEXT: store i32 3, ptr [[TMP59]], align 4
5149 // CHECK13-NEXT: [[TMP60:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 2
5150 // CHECK13-NEXT: store ptr [[TMP51]], ptr [[TMP60]], align 8
5151 // CHECK13-NEXT: [[TMP61:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 3
5152 // CHECK13-NEXT: store ptr [[TMP52]], ptr [[TMP61]], align 8
5153 // CHECK13-NEXT: [[TMP62:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 4
5154 // CHECK13-NEXT: store ptr [[TMP53]], ptr [[TMP62]], align 8
5155 // CHECK13-NEXT: [[TMP63:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 5
5156 // CHECK13-NEXT: store ptr @.offload_maptypes.2, ptr [[TMP63]], align 8
5157 // CHECK13-NEXT: [[TMP64:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 6
5158 // CHECK13-NEXT: store ptr null, ptr [[TMP64]], align 8
5159 // CHECK13-NEXT: [[TMP65:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 7
5160 // CHECK13-NEXT: store ptr null, ptr [[TMP65]], align 8
5161 // CHECK13-NEXT: [[TMP66:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 8
5162 // CHECK13-NEXT: store i64 [[TMP57]], ptr [[TMP66]], align 8
5163 // CHECK13-NEXT: [[TMP67:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 9
5164 // CHECK13-NEXT: store i64 0, ptr [[TMP67]], align 8
5165 // CHECK13-NEXT: [[TMP68:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 10
5166 // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP68]], align 4
5167 // CHECK13-NEXT: [[TMP69:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 11
5168 // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP69]], align 4
5169 // CHECK13-NEXT: [[TMP70:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 12
5170 // CHECK13-NEXT: store i32 0, ptr [[TMP70]], align 4
5171 // CHECK13-NEXT: [[TMP71:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.region_id, ptr [[KERNEL_ARGS15]])
5172 // CHECK13-NEXT: [[TMP72:%.*]] = icmp ne i32 [[TMP71]], 0
5173 // CHECK13-NEXT: br i1 [[TMP72]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]]
5174 // CHECK13: omp_offload.failed16:
5175 // CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159(i64 [[TMP39]], i64 [[TMP1]], ptr [[VLA]]) #[[ATTR4]]
5176 // CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT17]]
5177 // CHECK13: omp_offload.cont17:
5178 // CHECK13-NEXT: [[TMP73:%.*]] = load i32, ptr [[M]], align 4
5179 // CHECK13-NEXT: store i32 [[TMP73]], ptr [[M_CASTED]], align 4
5180 // CHECK13-NEXT: [[TMP74:%.*]] = load i64, ptr [[M_CASTED]], align 8
5181 // CHECK13-NEXT: [[TMP75:%.*]] = load i32, ptr [[N]], align 4
5182 // CHECK13-NEXT: store i32 [[TMP75]], ptr [[N_CASTED18]], align 4
5183 // CHECK13-NEXT: [[TMP76:%.*]] = load i64, ptr [[N_CASTED18]], align 8
5184 // CHECK13-NEXT: [[TMP77:%.*]] = mul nuw i64 [[TMP1]], 4
5185 // CHECK13-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[DOTOFFLOAD_SIZES22]], ptr align 8 @.offload_sizes.3, i64 32, i1 false)
5186 // CHECK13-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0
5187 // CHECK13-NEXT: store i64 [[TMP74]], ptr [[TMP78]], align 8
5188 // CHECK13-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 0
5189 // CHECK13-NEXT: store i64 [[TMP74]], ptr [[TMP79]], align 8
5190 // CHECK13-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS21]], i64 0, i64 0
5191 // CHECK13-NEXT: store ptr null, ptr [[TMP80]], align 8
5192 // CHECK13-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 1
5193 // CHECK13-NEXT: store i64 [[TMP76]], ptr [[TMP81]], align 8
5194 // CHECK13-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 1
5195 // CHECK13-NEXT: store i64 [[TMP76]], ptr [[TMP82]], align 8
5196 // CHECK13-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS21]], i64 0, i64 1
5197 // CHECK13-NEXT: store ptr null, ptr [[TMP83]], align 8
5198 // CHECK13-NEXT: [[TMP84:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 2
5199 // CHECK13-NEXT: store i64 [[TMP1]], ptr [[TMP84]], align 8
5200 // CHECK13-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 2
5201 // CHECK13-NEXT: store i64 [[TMP1]], ptr [[TMP85]], align 8
5202 // CHECK13-NEXT: [[TMP86:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS21]], i64 0, i64 2
5203 // CHECK13-NEXT: store ptr null, ptr [[TMP86]], align 8
5204 // CHECK13-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 3
5205 // CHECK13-NEXT: store ptr [[VLA]], ptr [[TMP87]], align 8
5206 // CHECK13-NEXT: [[TMP88:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 3
5207 // CHECK13-NEXT: store ptr [[VLA]], ptr [[TMP88]], align 8
5208 // CHECK13-NEXT: [[TMP89:%.*]] = getelementptr inbounds [4 x i64], ptr [[DOTOFFLOAD_SIZES22]], i32 0, i32 3
5209 // CHECK13-NEXT: store i64 [[TMP77]], ptr [[TMP89]], align 8
5210 // CHECK13-NEXT: [[TMP90:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS21]], i64 0, i64 3
5211 // CHECK13-NEXT: store ptr null, ptr [[TMP90]], align 8
5212 // CHECK13-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0
5213 // CHECK13-NEXT: [[TMP92:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 0
5214 // CHECK13-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i64], ptr [[DOTOFFLOAD_SIZES22]], i32 0, i32 0
5215 // CHECK13-NEXT: [[TMP94:%.*]] = load i32, ptr [[N]], align 4
5216 // CHECK13-NEXT: store i32 [[TMP94]], ptr [[DOTCAPTURE_EXPR_24]], align 4
5217 // CHECK13-NEXT: [[TMP95:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_24]], align 4
5218 // CHECK13-NEXT: [[SUB26:%.*]] = sub nsw i32 [[TMP95]], 0
5219 // CHECK13-NEXT: [[DIV27:%.*]] = sdiv i32 [[SUB26]], 1
5220 // CHECK13-NEXT: [[SUB28:%.*]] = sub nsw i32 [[DIV27]], 1
5221 // CHECK13-NEXT: store i32 [[SUB28]], ptr [[DOTCAPTURE_EXPR_25]], align 4
5222 // CHECK13-NEXT: [[TMP96:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_25]], align 4
5223 // CHECK13-NEXT: [[ADD29:%.*]] = add nsw i32 [[TMP96]], 1
5224 // CHECK13-NEXT: [[TMP97:%.*]] = zext i32 [[ADD29]] to i64
5225 // CHECK13-NEXT: [[TMP98:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 0
5226 // CHECK13-NEXT: store i32 2, ptr [[TMP98]], align 4
5227 // CHECK13-NEXT: [[TMP99:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 1
5228 // CHECK13-NEXT: store i32 4, ptr [[TMP99]], align 4
5229 // CHECK13-NEXT: [[TMP100:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 2
5230 // CHECK13-NEXT: store ptr [[TMP91]], ptr [[TMP100]], align 8
5231 // CHECK13-NEXT: [[TMP101:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 3
5232 // CHECK13-NEXT: store ptr [[TMP92]], ptr [[TMP101]], align 8
5233 // CHECK13-NEXT: [[TMP102:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 4
5234 // CHECK13-NEXT: store ptr [[TMP93]], ptr [[TMP102]], align 8
5235 // CHECK13-NEXT: [[TMP103:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 5
5236 // CHECK13-NEXT: store ptr @.offload_maptypes.4, ptr [[TMP103]], align 8
5237 // CHECK13-NEXT: [[TMP104:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 6
5238 // CHECK13-NEXT: store ptr null, ptr [[TMP104]], align 8
5239 // CHECK13-NEXT: [[TMP105:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 7
5240 // CHECK13-NEXT: store ptr null, ptr [[TMP105]], align 8
5241 // CHECK13-NEXT: [[TMP106:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 8
5242 // CHECK13-NEXT: store i64 [[TMP97]], ptr [[TMP106]], align 8
5243 // CHECK13-NEXT: [[TMP107:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 9
5244 // CHECK13-NEXT: store i64 0, ptr [[TMP107]], align 8
5245 // CHECK13-NEXT: [[TMP108:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 10
5246 // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP108]], align 4
5247 // CHECK13-NEXT: [[TMP109:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 11
5248 // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP109]], align 4
5249 // CHECK13-NEXT: [[TMP110:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 12
5250 // CHECK13-NEXT: store i32 0, ptr [[TMP110]], align 4
5251 // CHECK13-NEXT: [[TMP111:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.region_id, ptr [[KERNEL_ARGS30]])
5252 // CHECK13-NEXT: [[TMP112:%.*]] = icmp ne i32 [[TMP111]], 0
5253 // CHECK13-NEXT: br i1 [[TMP112]], label [[OMP_OFFLOAD_FAILED31:%.*]], label [[OMP_OFFLOAD_CONT32:%.*]]
5254 // CHECK13: omp_offload.failed31:
5255 // CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164(i64 [[TMP74]], i64 [[TMP76]], i64 [[TMP1]], ptr [[VLA]]) #[[ATTR4]]
5256 // CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT32]]
5257 // CHECK13: omp_offload.cont32:
5258 // CHECK13-NEXT: [[TMP113:%.*]] = load i32, ptr [[N]], align 4
5259 // CHECK13-NEXT: store i32 [[TMP113]], ptr [[N_CASTED33]], align 4
5260 // CHECK13-NEXT: [[TMP114:%.*]] = load i64, ptr [[N_CASTED33]], align 8
5261 // CHECK13-NEXT: [[TMP115:%.*]] = mul nuw i64 [[TMP1]], 4
5262 // CHECK13-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[DOTOFFLOAD_SIZES37]], ptr align 8 @.offload_sizes.5, i64 24, i1 false)
5263 // CHECK13-NEXT: [[TMP116:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 0
5264 // CHECK13-NEXT: store i64 [[TMP114]], ptr [[TMP116]], align 8
5265 // CHECK13-NEXT: [[TMP117:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS35]], i32 0, i32 0
5266 // CHECK13-NEXT: store i64 [[TMP114]], ptr [[TMP117]], align 8
5267 // CHECK13-NEXT: [[TMP118:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 0
5268 // CHECK13-NEXT: store ptr null, ptr [[TMP118]], align 8
5269 // CHECK13-NEXT: [[TMP119:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 1
5270 // CHECK13-NEXT: store i64 [[TMP1]], ptr [[TMP119]], align 8
5271 // CHECK13-NEXT: [[TMP120:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS35]], i32 0, i32 1
5272 // CHECK13-NEXT: store i64 [[TMP1]], ptr [[TMP120]], align 8
5273 // CHECK13-NEXT: [[TMP121:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 1
5274 // CHECK13-NEXT: store ptr null, ptr [[TMP121]], align 8
5275 // CHECK13-NEXT: [[TMP122:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 2
5276 // CHECK13-NEXT: store ptr [[VLA]], ptr [[TMP122]], align 8
5277 // CHECK13-NEXT: [[TMP123:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS35]], i32 0, i32 2
5278 // CHECK13-NEXT: store ptr [[VLA]], ptr [[TMP123]], align 8
5279 // CHECK13-NEXT: [[TMP124:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES37]], i32 0, i32 2
5280 // CHECK13-NEXT: store i64 [[TMP115]], ptr [[TMP124]], align 8
5281 // CHECK13-NEXT: [[TMP125:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 2
5282 // CHECK13-NEXT: store ptr null, ptr [[TMP125]], align 8
5283 // CHECK13-NEXT: [[TMP126:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 0
5284 // CHECK13-NEXT: [[TMP127:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS35]], i32 0, i32 0
5285 // CHECK13-NEXT: [[TMP128:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES37]], i32 0, i32 0
5286 // CHECK13-NEXT: [[TMP129:%.*]] = load i32, ptr [[N]], align 4
5287 // CHECK13-NEXT: store i32 [[TMP129]], ptr [[DOTCAPTURE_EXPR_39]], align 4
5288 // CHECK13-NEXT: [[TMP130:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_39]], align 4
5289 // CHECK13-NEXT: [[SUB41:%.*]] = sub nsw i32 [[TMP130]], 0
5290 // CHECK13-NEXT: [[DIV42:%.*]] = sdiv i32 [[SUB41]], 1
5291 // CHECK13-NEXT: [[SUB43:%.*]] = sub nsw i32 [[DIV42]], 1
5292 // CHECK13-NEXT: store i32 [[SUB43]], ptr [[DOTCAPTURE_EXPR_40]], align 4
5293 // CHECK13-NEXT: [[TMP131:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_40]], align 4
5294 // CHECK13-NEXT: [[ADD44:%.*]] = add nsw i32 [[TMP131]], 1
5295 // CHECK13-NEXT: [[TMP132:%.*]] = zext i32 [[ADD44]] to i64
5296 // CHECK13-NEXT: [[TMP133:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 0
5297 // CHECK13-NEXT: store i32 2, ptr [[TMP133]], align 4
5298 // CHECK13-NEXT: [[TMP134:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 1
5299 // CHECK13-NEXT: store i32 3, ptr [[TMP134]], align 4
5300 // CHECK13-NEXT: [[TMP135:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 2
5301 // CHECK13-NEXT: store ptr [[TMP126]], ptr [[TMP135]], align 8
5302 // CHECK13-NEXT: [[TMP136:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 3
5303 // CHECK13-NEXT: store ptr [[TMP127]], ptr [[TMP136]], align 8
5304 // CHECK13-NEXT: [[TMP137:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 4
5305 // CHECK13-NEXT: store ptr [[TMP128]], ptr [[TMP137]], align 8
5306 // CHECK13-NEXT: [[TMP138:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 5
5307 // CHECK13-NEXT: store ptr @.offload_maptypes.6, ptr [[TMP138]], align 8
5308 // CHECK13-NEXT: [[TMP139:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 6
5309 // CHECK13-NEXT: store ptr null, ptr [[TMP139]], align 8
5310 // CHECK13-NEXT: [[TMP140:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 7
5311 // CHECK13-NEXT: store ptr null, ptr [[TMP140]], align 8
5312 // CHECK13-NEXT: [[TMP141:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 8
5313 // CHECK13-NEXT: store i64 [[TMP132]], ptr [[TMP141]], align 8
5314 // CHECK13-NEXT: [[TMP142:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 9
5315 // CHECK13-NEXT: store i64 0, ptr [[TMP142]], align 8
5316 // CHECK13-NEXT: [[TMP143:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 10
5317 // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP143]], align 4
5318 // CHECK13-NEXT: [[TMP144:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 11
5319 // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP144]], align 4
5320 // CHECK13-NEXT: [[TMP145:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 12
5321 // CHECK13-NEXT: store i32 0, ptr [[TMP145]], align 4
5322 // CHECK13-NEXT: [[TMP146:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.region_id, ptr [[KERNEL_ARGS45]])
5323 // CHECK13-NEXT: [[TMP147:%.*]] = icmp ne i32 [[TMP146]], 0
5324 // CHECK13-NEXT: br i1 [[TMP147]], label [[OMP_OFFLOAD_FAILED46:%.*]], label [[OMP_OFFLOAD_CONT47:%.*]]
5325 // CHECK13: omp_offload.failed46:
5326 // CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169(i64 [[TMP114]], i64 [[TMP1]], ptr [[VLA]]) #[[ATTR4]]
5327 // CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT47]]
5328 // CHECK13: omp_offload.cont47:
5329 // CHECK13-NEXT: [[TMP148:%.*]] = load i32, ptr [[M]], align 4
5330 // CHECK13-NEXT: store i32 [[TMP148]], ptr [[M_CASTED48]], align 4
5331 // CHECK13-NEXT: [[TMP149:%.*]] = load i64, ptr [[M_CASTED48]], align 8
5332 // CHECK13-NEXT: [[TMP150:%.*]] = load i32, ptr [[N]], align 4
5333 // CHECK13-NEXT: store i32 [[TMP150]], ptr [[N_CASTED49]], align 4
5334 // CHECK13-NEXT: [[TMP151:%.*]] = load i64, ptr [[N_CASTED49]], align 8
5335 // CHECK13-NEXT: [[TMP152:%.*]] = mul nuw i64 [[TMP1]], 4
5336 // CHECK13-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[DOTOFFLOAD_SIZES53]], ptr align 8 @.offload_sizes.7, i64 32, i1 false)
5337 // CHECK13-NEXT: [[TMP153:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0
5338 // CHECK13-NEXT: store i64 [[TMP149]], ptr [[TMP153]], align 8
5339 // CHECK13-NEXT: [[TMP154:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS51]], i32 0, i32 0
5340 // CHECK13-NEXT: store i64 [[TMP149]], ptr [[TMP154]], align 8
5341 // CHECK13-NEXT: [[TMP155:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 0
5342 // CHECK13-NEXT: store ptr null, ptr [[TMP155]], align 8
5343 // CHECK13-NEXT: [[TMP156:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 1
5344 // CHECK13-NEXT: store i64 [[TMP151]], ptr [[TMP156]], align 8
5345 // CHECK13-NEXT: [[TMP157:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS51]], i32 0, i32 1
5346 // CHECK13-NEXT: store i64 [[TMP151]], ptr [[TMP157]], align 8
5347 // CHECK13-NEXT: [[TMP158:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 1
5348 // CHECK13-NEXT: store ptr null, ptr [[TMP158]], align 8
5349 // CHECK13-NEXT: [[TMP159:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 2
5350 // CHECK13-NEXT: store i64 [[TMP1]], ptr [[TMP159]], align 8
5351 // CHECK13-NEXT: [[TMP160:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS51]], i32 0, i32 2
5352 // CHECK13-NEXT: store i64 [[TMP1]], ptr [[TMP160]], align 8
5353 // CHECK13-NEXT: [[TMP161:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 2
5354 // CHECK13-NEXT: store ptr null, ptr [[TMP161]], align 8
5355 // CHECK13-NEXT: [[TMP162:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 3
5356 // CHECK13-NEXT: store ptr [[VLA]], ptr [[TMP162]], align 8
5357 // CHECK13-NEXT: [[TMP163:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS51]], i32 0, i32 3
5358 // CHECK13-NEXT: store ptr [[VLA]], ptr [[TMP163]], align 8
5359 // CHECK13-NEXT: [[TMP164:%.*]] = getelementptr inbounds [4 x i64], ptr [[DOTOFFLOAD_SIZES53]], i32 0, i32 3
5360 // CHECK13-NEXT: store i64 [[TMP152]], ptr [[TMP164]], align 8
5361 // CHECK13-NEXT: [[TMP165:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 3
5362 // CHECK13-NEXT: store ptr null, ptr [[TMP165]], align 8
5363 // CHECK13-NEXT: [[TMP166:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0
5364 // CHECK13-NEXT: [[TMP167:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS51]], i32 0, i32 0
5365 // CHECK13-NEXT: [[TMP168:%.*]] = getelementptr inbounds [4 x i64], ptr [[DOTOFFLOAD_SIZES53]], i32 0, i32 0
5366 // CHECK13-NEXT: [[TMP169:%.*]] = load i32, ptr [[N]], align 4
5367 // CHECK13-NEXT: store i32 [[TMP169]], ptr [[DOTCAPTURE_EXPR_55]], align 4
5368 // CHECK13-NEXT: [[TMP170:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_55]], align 4
5369 // CHECK13-NEXT: [[SUB57:%.*]] = sub nsw i32 [[TMP170]], 0
5370 // CHECK13-NEXT: [[DIV58:%.*]] = sdiv i32 [[SUB57]], 1
5371 // CHECK13-NEXT: [[SUB59:%.*]] = sub nsw i32 [[DIV58]], 1
5372 // CHECK13-NEXT: store i32 [[SUB59]], ptr [[DOTCAPTURE_EXPR_56]], align 4
5373 // CHECK13-NEXT: [[TMP171:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_56]], align 4
5374 // CHECK13-NEXT: [[ADD60:%.*]] = add nsw i32 [[TMP171]], 1
5375 // CHECK13-NEXT: [[TMP172:%.*]] = zext i32 [[ADD60]] to i64
5376 // CHECK13-NEXT: [[TMP173:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 0
5377 // CHECK13-NEXT: store i32 2, ptr [[TMP173]], align 4
5378 // CHECK13-NEXT: [[TMP174:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 1
5379 // CHECK13-NEXT: store i32 4, ptr [[TMP174]], align 4
5380 // CHECK13-NEXT: [[TMP175:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 2
5381 // CHECK13-NEXT: store ptr [[TMP166]], ptr [[TMP175]], align 8
5382 // CHECK13-NEXT: [[TMP176:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 3
5383 // CHECK13-NEXT: store ptr [[TMP167]], ptr [[TMP176]], align 8
5384 // CHECK13-NEXT: [[TMP177:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 4
5385 // CHECK13-NEXT: store ptr [[TMP168]], ptr [[TMP177]], align 8
5386 // CHECK13-NEXT: [[TMP178:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 5
5387 // CHECK13-NEXT: store ptr @.offload_maptypes.8, ptr [[TMP178]], align 8
5388 // CHECK13-NEXT: [[TMP179:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 6
5389 // CHECK13-NEXT: store ptr null, ptr [[TMP179]], align 8
5390 // CHECK13-NEXT: [[TMP180:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 7
5391 // CHECK13-NEXT: store ptr null, ptr [[TMP180]], align 8
5392 // CHECK13-NEXT: [[TMP181:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 8
5393 // CHECK13-NEXT: store i64 [[TMP172]], ptr [[TMP181]], align 8
5394 // CHECK13-NEXT: [[TMP182:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 9
5395 // CHECK13-NEXT: store i64 0, ptr [[TMP182]], align 8
5396 // CHECK13-NEXT: [[TMP183:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 10
5397 // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP183]], align 4
5398 // CHECK13-NEXT: [[TMP184:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 11
5399 // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP184]], align 4
5400 // CHECK13-NEXT: [[TMP185:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 12
5401 // CHECK13-NEXT: store i32 0, ptr [[TMP185]], align 4
5402 // CHECK13-NEXT: [[TMP186:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.region_id, ptr [[KERNEL_ARGS61]])
5403 // CHECK13-NEXT: [[TMP187:%.*]] = icmp ne i32 [[TMP186]], 0
5404 // CHECK13-NEXT: br i1 [[TMP187]], label [[OMP_OFFLOAD_FAILED62:%.*]], label [[OMP_OFFLOAD_CONT63:%.*]]
5405 // CHECK13: omp_offload.failed62:
5406 // CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174(i64 [[TMP149]], i64 [[TMP151]], i64 [[TMP1]], ptr [[VLA]]) #[[ATTR4]]
5407 // CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT63]]
5408 // CHECK13: omp_offload.cont63:
5409 // CHECK13-NEXT: [[TMP188:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4
5410 // CHECK13-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP188]])
5411 // CHECK13-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4
5412 // CHECK13-NEXT: [[TMP189:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8
5413 // CHECK13-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP189]])
5414 // CHECK13-NEXT: [[TMP190:%.*]] = load i32, ptr [[RETVAL]], align 4
5415 // CHECK13-NEXT: ret i32 [[TMP190]]
5418 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154
5419 // CHECK13-SAME: (i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2:[0-9]+]] {
5420 // CHECK13-NEXT: entry:
5421 // CHECK13-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
5422 // CHECK13-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
5423 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
5424 // CHECK13-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
5425 // CHECK13-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
5426 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
5427 // CHECK13-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
5428 // CHECK13-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8
5429 // CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.omp_outlined, ptr [[N_ADDR]], i64 [[TMP0]], ptr [[TMP1]])
5430 // CHECK13-NEXT: ret void
5433 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.omp_outlined
5434 // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR3:[0-9]+]] {
5435 // CHECK13-NEXT: entry:
5436 // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
5437 // CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
5438 // CHECK13-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 8
5439 // CHECK13-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
5440 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
5441 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
5442 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
5443 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
5444 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
5445 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
5446 // CHECK13-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
5447 // CHECK13-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
5448 // CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5449 // CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5450 // CHECK13-NEXT: [[I3:%.*]] = alloca i32, align 4
5451 // CHECK13-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
5452 // CHECK13-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
5453 // CHECK13-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 8
5454 // CHECK13-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
5455 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
5456 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 8
5457 // CHECK13-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
5458 // CHECK13-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 8
5459 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
5460 // CHECK13-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4
5461 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
5462 // CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
5463 // CHECK13-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
5464 // CHECK13-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
5465 // CHECK13-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
5466 // CHECK13-NEXT: store i32 0, ptr [[I]], align 4
5467 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
5468 // CHECK13-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
5469 // CHECK13-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
5470 // CHECK13: omp.precond.then:
5471 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
5472 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
5473 // CHECK13-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_COMB_UB]], align 4
5474 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
5475 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
5476 // CHECK13-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
5477 // CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
5478 // CHECK13-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
5479 // CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
5480 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
5481 // CHECK13-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
5482 // CHECK13-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5483 // CHECK13: cond.true:
5484 // CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
5485 // CHECK13-NEXT: br label [[COND_END:%.*]]
5486 // CHECK13: cond.false:
5487 // CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
5488 // CHECK13-NEXT: br label [[COND_END]]
5489 // CHECK13: cond.end:
5490 // CHECK13-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
5491 // CHECK13-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
5492 // CHECK13-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
5493 // CHECK13-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4
5494 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
5495 // CHECK13: omp.inner.for.cond:
5496 // CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]]
5497 // CHECK13-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP13]]
5498 // CHECK13-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
5499 // CHECK13-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5500 // CHECK13: omp.inner.for.body:
5501 // CHECK13-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP13]]
5502 // CHECK13-NEXT: [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
5503 // CHECK13-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP13]]
5504 // CHECK13-NEXT: [[TMP19:%.*]] = zext i32 [[TMP18]] to i64
5505 // CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 5, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.omp_outlined.omp_outlined, i64 [[TMP17]], i64 [[TMP19]], ptr [[TMP0]], i64 [[TMP1]], ptr [[TMP2]]), !llvm.access.group [[ACC_GRP13]]
5506 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
5507 // CHECK13: omp.inner.for.inc:
5508 // CHECK13-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]]
5509 // CHECK13-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP13]]
5510 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
5511 // CHECK13-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]]
5512 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]]
5513 // CHECK13: omp.inner.for.end:
5514 // CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
5515 // CHECK13: omp.loop.exit:
5516 // CHECK13-NEXT: [[TMP22:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
5517 // CHECK13-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4
5518 // CHECK13-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP23]])
5519 // CHECK13-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
5520 // CHECK13-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
5521 // CHECK13-NEXT: br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5522 // CHECK13: .omp.final.then:
5523 // CHECK13-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
5524 // CHECK13-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP26]], 0
5525 // CHECK13-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
5526 // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
5527 // CHECK13-NEXT: [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
5528 // CHECK13-NEXT: store i32 [[ADD8]], ptr [[I3]], align 4
5529 // CHECK13-NEXT: br label [[DOTOMP_FINAL_DONE]]
5530 // CHECK13: .omp.final.done:
5531 // CHECK13-NEXT: br label [[OMP_PRECOND_END]]
5532 // CHECK13: omp.precond.end:
5533 // CHECK13-NEXT: ret void
5536 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.omp_outlined.omp_outlined
5537 // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR3]] {
5538 // CHECK13-NEXT: entry:
5539 // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
5540 // CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
5541 // CHECK13-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
5542 // CHECK13-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
5543 // CHECK13-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 8
5544 // CHECK13-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
5545 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
5546 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
5547 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
5548 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
5549 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
5550 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
5551 // CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
5552 // CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
5553 // CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5554 // CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5555 // CHECK13-NEXT: [[I4:%.*]] = alloca i32, align 4
5556 // CHECK13-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
5557 // CHECK13-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
5558 // CHECK13-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
5559 // CHECK13-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
5560 // CHECK13-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 8
5561 // CHECK13-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
5562 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
5563 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 8
5564 // CHECK13-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
5565 // CHECK13-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 8
5566 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
5567 // CHECK13-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4
5568 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
5569 // CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
5570 // CHECK13-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
5571 // CHECK13-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
5572 // CHECK13-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
5573 // CHECK13-NEXT: store i32 0, ptr [[I]], align 4
5574 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
5575 // CHECK13-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
5576 // CHECK13-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
5577 // CHECK13: omp.precond.then:
5578 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
5579 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
5580 // CHECK13-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4
5581 // CHECK13-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
5582 // CHECK13-NEXT: [[CONV:%.*]] = trunc i64 [[TMP7]] to i32
5583 // CHECK13-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
5584 // CHECK13-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP8]] to i32
5585 // CHECK13-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
5586 // CHECK13-NEXT: store i32 [[CONV3]], ptr [[DOTOMP_UB]], align 4
5587 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
5588 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
5589 // CHECK13-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
5590 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4
5591 // CHECK13-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP10]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
5592 // CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
5593 // CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
5594 // CHECK13-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP11]], [[TMP12]]
5595 // CHECK13-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5596 // CHECK13: cond.true:
5597 // CHECK13-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
5598 // CHECK13-NEXT: br label [[COND_END:%.*]]
5599 // CHECK13: cond.false:
5600 // CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
5601 // CHECK13-NEXT: br label [[COND_END]]
5602 // CHECK13: cond.end:
5603 // CHECK13-NEXT: [[COND:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ]
5604 // CHECK13-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
5605 // CHECK13-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
5606 // CHECK13-NEXT: store i32 [[TMP15]], ptr [[DOTOMP_IV]], align 4
5607 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
5608 // CHECK13: omp.inner.for.cond:
5609 // CHECK13-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP17:![0-9]+]]
5610 // CHECK13-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP17]]
5611 // CHECK13-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
5612 // CHECK13-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5613 // CHECK13: omp.inner.for.body:
5614 // CHECK13-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP17]]
5615 // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
5616 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
5617 // CHECK13-NEXT: store i32 [[ADD]], ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP17]]
5618 // CHECK13-NEXT: [[TMP19:%.*]] = load i32, ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP17]]
5619 // CHECK13-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP19]] to i64
5620 // CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i64 [[IDXPROM]]
5621 // CHECK13-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP17]]
5622 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
5623 // CHECK13: omp.body.continue:
5624 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
5625 // CHECK13: omp.inner.for.inc:
5626 // CHECK13-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP17]]
5627 // CHECK13-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP20]], 1
5628 // CHECK13-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP17]]
5629 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP18:![0-9]+]]
5630 // CHECK13: omp.inner.for.end:
5631 // CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
5632 // CHECK13: omp.loop.exit:
5633 // CHECK13-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
5634 // CHECK13-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
5635 // CHECK13-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP22]])
5636 // CHECK13-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
5637 // CHECK13-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
5638 // CHECK13-NEXT: br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5639 // CHECK13: .omp.final.then:
5640 // CHECK13-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
5641 // CHECK13-NEXT: [[SUB8:%.*]] = sub nsw i32 [[TMP25]], 0
5642 // CHECK13-NEXT: [[DIV9:%.*]] = sdiv i32 [[SUB8]], 1
5643 // CHECK13-NEXT: [[MUL10:%.*]] = mul nsw i32 [[DIV9]], 1
5644 // CHECK13-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]]
5645 // CHECK13-NEXT: store i32 [[ADD11]], ptr [[I4]], align 4
5646 // CHECK13-NEXT: br label [[DOTOMP_FINAL_DONE]]
5647 // CHECK13: .omp.final.done:
5648 // CHECK13-NEXT: br label [[OMP_PRECOND_END]]
5649 // CHECK13: omp.precond.end:
5650 // CHECK13-NEXT: ret void
5653 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159
5654 // CHECK13-SAME: (i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
5655 // CHECK13-NEXT: entry:
5656 // CHECK13-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
5657 // CHECK13-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
5658 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
5659 // CHECK13-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
5660 // CHECK13-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
5661 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
5662 // CHECK13-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
5663 // CHECK13-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8
5664 // CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.omp_outlined, ptr [[N_ADDR]], i64 [[TMP0]], ptr [[TMP1]])
5665 // CHECK13-NEXT: ret void
5668 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.omp_outlined
5669 // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR3]] {
5670 // CHECK13-NEXT: entry:
5671 // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
5672 // CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
5673 // CHECK13-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 8
5674 // CHECK13-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
5675 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
5676 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
5677 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
5678 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
5679 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
5680 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
5681 // CHECK13-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
5682 // CHECK13-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
5683 // CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5684 // CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5685 // CHECK13-NEXT: [[I3:%.*]] = alloca i32, align 4
5686 // CHECK13-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
5687 // CHECK13-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
5688 // CHECK13-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 8
5689 // CHECK13-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
5690 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
5691 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 8
5692 // CHECK13-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
5693 // CHECK13-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 8
5694 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
5695 // CHECK13-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4
5696 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
5697 // CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
5698 // CHECK13-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
5699 // CHECK13-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
5700 // CHECK13-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
5701 // CHECK13-NEXT: store i32 0, ptr [[I]], align 4
5702 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
5703 // CHECK13-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
5704 // CHECK13-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
5705 // CHECK13: omp.precond.then:
5706 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
5707 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
5708 // CHECK13-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_COMB_UB]], align 4
5709 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
5710 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
5711 // CHECK13-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
5712 // CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
5713 // CHECK13-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
5714 // CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
5715 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
5716 // CHECK13-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
5717 // CHECK13-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5718 // CHECK13: cond.true:
5719 // CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
5720 // CHECK13-NEXT: br label [[COND_END:%.*]]
5721 // CHECK13: cond.false:
5722 // CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
5723 // CHECK13-NEXT: br label [[COND_END]]
5724 // CHECK13: cond.end:
5725 // CHECK13-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
5726 // CHECK13-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
5727 // CHECK13-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
5728 // CHECK13-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4
5729 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
5730 // CHECK13: omp.inner.for.cond:
5731 // CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22:![0-9]+]]
5732 // CHECK13-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP22]]
5733 // CHECK13-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
5734 // CHECK13-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5735 // CHECK13: omp.inner.for.body:
5736 // CHECK13-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP22]]
5737 // CHECK13-NEXT: [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
5738 // CHECK13-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP22]]
5739 // CHECK13-NEXT: [[TMP19:%.*]] = zext i32 [[TMP18]] to i64
5740 // CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 5, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.omp_outlined.omp_outlined, i64 [[TMP17]], i64 [[TMP19]], ptr [[TMP0]], i64 [[TMP1]], ptr [[TMP2]]), !llvm.access.group [[ACC_GRP22]]
5741 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
5742 // CHECK13: omp.inner.for.inc:
5743 // CHECK13-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]]
5744 // CHECK13-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP22]]
5745 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
5746 // CHECK13-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]]
5747 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]]
5748 // CHECK13: omp.inner.for.end:
5749 // CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
5750 // CHECK13: omp.loop.exit:
5751 // CHECK13-NEXT: [[TMP22:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
5752 // CHECK13-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4
5753 // CHECK13-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP23]])
5754 // CHECK13-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
5755 // CHECK13-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
5756 // CHECK13-NEXT: br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5757 // CHECK13: .omp.final.then:
5758 // CHECK13-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
5759 // CHECK13-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP26]], 0
5760 // CHECK13-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
5761 // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
5762 // CHECK13-NEXT: [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
5763 // CHECK13-NEXT: store i32 [[ADD8]], ptr [[I3]], align 4
5764 // CHECK13-NEXT: br label [[DOTOMP_FINAL_DONE]]
5765 // CHECK13: .omp.final.done:
5766 // CHECK13-NEXT: br label [[OMP_PRECOND_END]]
5767 // CHECK13: omp.precond.end:
5768 // CHECK13-NEXT: ret void
5771 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.omp_outlined.omp_outlined
5772 // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR3]] {
5773 // CHECK13-NEXT: entry:
5774 // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
5775 // CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
5776 // CHECK13-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
5777 // CHECK13-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
5778 // CHECK13-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 8
5779 // CHECK13-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
5780 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
5781 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
5782 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
5783 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
5784 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
5785 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
5786 // CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
5787 // CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
5788 // CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5789 // CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5790 // CHECK13-NEXT: [[I4:%.*]] = alloca i32, align 4
5791 // CHECK13-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
5792 // CHECK13-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
5793 // CHECK13-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
5794 // CHECK13-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
5795 // CHECK13-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 8
5796 // CHECK13-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
5797 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
5798 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 8
5799 // CHECK13-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
5800 // CHECK13-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 8
5801 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
5802 // CHECK13-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4
5803 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
5804 // CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
5805 // CHECK13-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
5806 // CHECK13-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
5807 // CHECK13-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
5808 // CHECK13-NEXT: store i32 0, ptr [[I]], align 4
5809 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
5810 // CHECK13-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
5811 // CHECK13-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
5812 // CHECK13: omp.precond.then:
5813 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
5814 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
5815 // CHECK13-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4
5816 // CHECK13-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
5817 // CHECK13-NEXT: [[CONV:%.*]] = trunc i64 [[TMP7]] to i32
5818 // CHECK13-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
5819 // CHECK13-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP8]] to i32
5820 // CHECK13-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
5821 // CHECK13-NEXT: store i32 [[CONV3]], ptr [[DOTOMP_UB]], align 4
5822 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
5823 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
5824 // CHECK13-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
5825 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4
5826 // CHECK13-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP10]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
5827 // CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
5828 // CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
5829 // CHECK13-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP11]], [[TMP12]]
5830 // CHECK13-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5831 // CHECK13: cond.true:
5832 // CHECK13-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
5833 // CHECK13-NEXT: br label [[COND_END:%.*]]
5834 // CHECK13: cond.false:
5835 // CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
5836 // CHECK13-NEXT: br label [[COND_END]]
5837 // CHECK13: cond.end:
5838 // CHECK13-NEXT: [[COND:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ]
5839 // CHECK13-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
5840 // CHECK13-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
5841 // CHECK13-NEXT: store i32 [[TMP15]], ptr [[DOTOMP_IV]], align 4
5842 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
5843 // CHECK13: omp.inner.for.cond:
5844 // CHECK13-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25:![0-9]+]]
5845 // CHECK13-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP25]]
5846 // CHECK13-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
5847 // CHECK13-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5848 // CHECK13: omp.inner.for.body:
5849 // CHECK13-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]]
5850 // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
5851 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
5852 // CHECK13-NEXT: store i32 [[ADD]], ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP25]]
5853 // CHECK13-NEXT: [[TMP19:%.*]] = load i32, ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP25]]
5854 // CHECK13-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP19]] to i64
5855 // CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i64 [[IDXPROM]]
5856 // CHECK13-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP25]]
5857 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
5858 // CHECK13: omp.body.continue:
5859 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
5860 // CHECK13: omp.inner.for.inc:
5861 // CHECK13-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]]
5862 // CHECK13-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP20]], 1
5863 // CHECK13-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]]
5864 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]]
5865 // CHECK13: omp.inner.for.end:
5866 // CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
5867 // CHECK13: omp.loop.exit:
5868 // CHECK13-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
5869 // CHECK13-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
5870 // CHECK13-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP22]])
5871 // CHECK13-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
5872 // CHECK13-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
5873 // CHECK13-NEXT: br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5874 // CHECK13: .omp.final.then:
5875 // CHECK13-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
5876 // CHECK13-NEXT: [[SUB8:%.*]] = sub nsw i32 [[TMP25]], 0
5877 // CHECK13-NEXT: [[DIV9:%.*]] = sdiv i32 [[SUB8]], 1
5878 // CHECK13-NEXT: [[MUL10:%.*]] = mul nsw i32 [[DIV9]], 1
5879 // CHECK13-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]]
5880 // CHECK13-NEXT: store i32 [[ADD11]], ptr [[I4]], align 4
5881 // CHECK13-NEXT: br label [[DOTOMP_FINAL_DONE]]
5882 // CHECK13: .omp.final.done:
5883 // CHECK13-NEXT: br label [[OMP_PRECOND_END]]
5884 // CHECK13: omp.precond.end:
5885 // CHECK13-NEXT: ret void
5888 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164
5889 // CHECK13-SAME: (i64 noundef [[M:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
5890 // CHECK13-NEXT: entry:
5891 // CHECK13-NEXT: [[M_ADDR:%.*]] = alloca i64, align 8
5892 // CHECK13-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
5893 // CHECK13-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
5894 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
5895 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
5896 // CHECK13-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
5897 // CHECK13-NEXT: store i64 [[M]], ptr [[M_ADDR]], align 8
5898 // CHECK13-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
5899 // CHECK13-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
5900 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
5901 // CHECK13-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
5902 // CHECK13-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8
5903 // CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[M_ADDR]], align 4
5904 // CHECK13-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 4
5905 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
5906 // CHECK13-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
5907 // CHECK13-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8
5908 // CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.omp_outlined, ptr [[N_ADDR]], i64 [[TMP0]], ptr [[TMP1]], i64 [[TMP4]])
5909 // CHECK13-NEXT: ret void
5912 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.omp_outlined
5913 // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
5914 // CHECK13-NEXT: entry:
5915 // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
5916 // CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
5917 // CHECK13-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 8
5918 // CHECK13-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
5919 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
5920 // CHECK13-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
5921 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
5922 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
5923 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
5924 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
5925 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
5926 // CHECK13-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
5927 // CHECK13-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
5928 // CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5929 // CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5930 // CHECK13-NEXT: [[I4:%.*]] = alloca i32, align 4
5931 // CHECK13-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
5932 // CHECK13-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
5933 // CHECK13-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
5934 // CHECK13-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 8
5935 // CHECK13-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
5936 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
5937 // CHECK13-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
5938 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 8
5939 // CHECK13-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
5940 // CHECK13-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 8
5941 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
5942 // CHECK13-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_1]], align 4
5943 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
5944 // CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
5945 // CHECK13-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
5946 // CHECK13-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
5947 // CHECK13-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_2]], align 4
5948 // CHECK13-NEXT: store i32 0, ptr [[I]], align 4
5949 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
5950 // CHECK13-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
5951 // CHECK13-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
5952 // CHECK13: omp.precond.then:
5953 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
5954 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
5955 // CHECK13-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_COMB_UB]], align 4
5956 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
5957 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
5958 // CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
5959 // CHECK13-NEXT: [[TMP8:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
5960 // CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4
5961 // CHECK13-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP9]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[TMP7]])
5962 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
5963 // CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
5964 // CHECK13-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
5965 // CHECK13-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5966 // CHECK13: cond.true:
5967 // CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
5968 // CHECK13-NEXT: br label [[COND_END:%.*]]
5969 // CHECK13: cond.false:
5970 // CHECK13-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
5971 // CHECK13-NEXT: br label [[COND_END]]
5972 // CHECK13: cond.end:
5973 // CHECK13-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
5974 // CHECK13-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
5975 // CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
5976 // CHECK13-NEXT: store i32 [[TMP14]], ptr [[DOTOMP_IV]], align 4
5977 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
5978 // CHECK13: omp.inner.for.cond:
5979 // CHECK13-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28:![0-9]+]]
5980 // CHECK13-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP28]]
5981 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP16]], 1
5982 // CHECK13-NEXT: [[CMP6:%.*]] = icmp slt i32 [[TMP15]], [[ADD]]
5983 // CHECK13-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5984 // CHECK13: omp.inner.for.body:
5985 // CHECK13-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP28]]
5986 // CHECK13-NEXT: [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
5987 // CHECK13-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP28]]
5988 // CHECK13-NEXT: [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
5989 // CHECK13-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group [[ACC_GRP28]]
5990 // CHECK13-NEXT: store i32 [[TMP21]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group [[ACC_GRP28]]
5991 // CHECK13-NEXT: [[TMP22:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group [[ACC_GRP28]]
5992 // CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 6, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.omp_outlined.omp_outlined, i64 [[TMP18]], i64 [[TMP20]], ptr [[TMP0]], i64 [[TMP1]], ptr [[TMP2]], i64 [[TMP22]]), !llvm.access.group [[ACC_GRP28]]
5993 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
5994 // CHECK13: omp.inner.for.inc:
5995 // CHECK13-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
5996 // CHECK13-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP28]]
5997 // CHECK13-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
5998 // CHECK13-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
5999 // CHECK13-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP28]]
6000 // CHECK13-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP28]]
6001 // CHECK13-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
6002 // CHECK13-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP28]]
6003 // CHECK13-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP28]]
6004 // CHECK13-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP28]]
6005 // CHECK13-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP27]], [[TMP28]]
6006 // CHECK13-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP28]]
6007 // CHECK13-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP28]]
6008 // CHECK13-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP28]]
6009 // CHECK13-NEXT: [[CMP10:%.*]] = icmp sgt i32 [[TMP29]], [[TMP30]]
6010 // CHECK13-NEXT: br i1 [[CMP10]], label [[COND_TRUE11:%.*]], label [[COND_FALSE12:%.*]]
6011 // CHECK13: cond.true11:
6012 // CHECK13-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP28]]
6013 // CHECK13-NEXT: br label [[COND_END13:%.*]]
6014 // CHECK13: cond.false12:
6015 // CHECK13-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP28]]
6016 // CHECK13-NEXT: br label [[COND_END13]]
6017 // CHECK13: cond.end13:
6018 // CHECK13-NEXT: [[COND14:%.*]] = phi i32 [ [[TMP31]], [[COND_TRUE11]] ], [ [[TMP32]], [[COND_FALSE12]] ]
6019 // CHECK13-NEXT: store i32 [[COND14]], ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP28]]
6020 // CHECK13-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP28]]
6021 // CHECK13-NEXT: store i32 [[TMP33]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
6022 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]]
6023 // CHECK13: omp.inner.for.end:
6024 // CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
6025 // CHECK13: omp.loop.exit:
6026 // CHECK13-NEXT: [[TMP34:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
6027 // CHECK13-NEXT: [[TMP35:%.*]] = load i32, ptr [[TMP34]], align 4
6028 // CHECK13-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP35]])
6029 // CHECK13-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
6030 // CHECK13-NEXT: [[TMP37:%.*]] = icmp ne i32 [[TMP36]], 0
6031 // CHECK13-NEXT: br i1 [[TMP37]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6032 // CHECK13: .omp.final.then:
6033 // CHECK13-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
6034 // CHECK13-NEXT: [[SUB15:%.*]] = sub nsw i32 [[TMP38]], 0
6035 // CHECK13-NEXT: [[DIV16:%.*]] = sdiv i32 [[SUB15]], 1
6036 // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV16]], 1
6037 // CHECK13-NEXT: [[ADD17:%.*]] = add nsw i32 0, [[MUL]]
6038 // CHECK13-NEXT: store i32 [[ADD17]], ptr [[I4]], align 4
6039 // CHECK13-NEXT: br label [[DOTOMP_FINAL_DONE]]
6040 // CHECK13: .omp.final.done:
6041 // CHECK13-NEXT: br label [[OMP_PRECOND_END]]
6042 // CHECK13: omp.precond.end:
6043 // CHECK13-NEXT: ret void
6046 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.omp_outlined.omp_outlined
6047 // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
6048 // CHECK13-NEXT: entry:
6049 // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
6050 // CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
6051 // CHECK13-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
6052 // CHECK13-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
6053 // CHECK13-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 8
6054 // CHECK13-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
6055 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
6056 // CHECK13-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
6057 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
6058 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
6059 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
6060 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
6061 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
6062 // CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
6063 // CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
6064 // CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6065 // CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6066 // CHECK13-NEXT: [[I5:%.*]] = alloca i32, align 4
6067 // CHECK13-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
6068 // CHECK13-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
6069 // CHECK13-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
6070 // CHECK13-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
6071 // CHECK13-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 8
6072 // CHECK13-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
6073 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
6074 // CHECK13-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
6075 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 8
6076 // CHECK13-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
6077 // CHECK13-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 8
6078 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
6079 // CHECK13-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_1]], align 4
6080 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
6081 // CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
6082 // CHECK13-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
6083 // CHECK13-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
6084 // CHECK13-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_2]], align 4
6085 // CHECK13-NEXT: store i32 0, ptr [[I]], align 4
6086 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
6087 // CHECK13-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
6088 // CHECK13-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
6089 // CHECK13: omp.precond.then:
6090 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
6091 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
6092 // CHECK13-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4
6093 // CHECK13-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
6094 // CHECK13-NEXT: [[CONV:%.*]] = trunc i64 [[TMP7]] to i32
6095 // CHECK13-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
6096 // CHECK13-NEXT: [[CONV4:%.*]] = trunc i64 [[TMP8]] to i32
6097 // CHECK13-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
6098 // CHECK13-NEXT: store i32 [[CONV4]], ptr [[DOTOMP_UB]], align 4
6099 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
6100 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
6101 // CHECK13-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
6102 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4
6103 // CHECK13-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP10]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
6104 // CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
6105 // CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
6106 // CHECK13-NEXT: [[CMP6:%.*]] = icmp sgt i32 [[TMP11]], [[TMP12]]
6107 // CHECK13-NEXT: br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
6108 // CHECK13: cond.true:
6109 // CHECK13-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
6110 // CHECK13-NEXT: br label [[COND_END:%.*]]
6111 // CHECK13: cond.false:
6112 // CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
6113 // CHECK13-NEXT: br label [[COND_END]]
6114 // CHECK13: cond.end:
6115 // CHECK13-NEXT: [[COND:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ]
6116 // CHECK13-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
6117 // CHECK13-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
6118 // CHECK13-NEXT: store i32 [[TMP15]], ptr [[DOTOMP_IV]], align 4
6119 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
6120 // CHECK13: omp.inner.for.cond:
6121 // CHECK13-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31:![0-9]+]]
6122 // CHECK13-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP31]]
6123 // CHECK13-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
6124 // CHECK13-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6125 // CHECK13: omp.inner.for.body:
6126 // CHECK13-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]]
6127 // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
6128 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
6129 // CHECK13-NEXT: store i32 [[ADD]], ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP31]]
6130 // CHECK13-NEXT: [[TMP19:%.*]] = load i32, ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP31]]
6131 // CHECK13-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP19]] to i64
6132 // CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i64 [[IDXPROM]]
6133 // CHECK13-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP31]]
6134 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
6135 // CHECK13: omp.body.continue:
6136 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
6137 // CHECK13: omp.inner.for.inc:
6138 // CHECK13-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]]
6139 // CHECK13-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP20]], 1
6140 // CHECK13-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]]
6141 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP32:![0-9]+]]
6142 // CHECK13: omp.inner.for.end:
6143 // CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
6144 // CHECK13: omp.loop.exit:
6145 // CHECK13-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
6146 // CHECK13-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
6147 // CHECK13-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP22]])
6148 // CHECK13-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
6149 // CHECK13-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
6150 // CHECK13-NEXT: br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6151 // CHECK13: .omp.final.then:
6152 // CHECK13-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
6153 // CHECK13-NEXT: [[SUB9:%.*]] = sub nsw i32 [[TMP25]], 0
6154 // CHECK13-NEXT: [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1
6155 // CHECK13-NEXT: [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1
6156 // CHECK13-NEXT: [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
6157 // CHECK13-NEXT: store i32 [[ADD12]], ptr [[I5]], align 4
6158 // CHECK13-NEXT: br label [[DOTOMP_FINAL_DONE]]
6159 // CHECK13: .omp.final.done:
6160 // CHECK13-NEXT: br label [[OMP_PRECOND_END]]
6161 // CHECK13: omp.precond.end:
6162 // CHECK13-NEXT: ret void
6165 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169
6166 // CHECK13-SAME: (i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
6167 // CHECK13-NEXT: entry:
6168 // CHECK13-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
6169 // CHECK13-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
6170 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
6171 // CHECK13-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
6172 // CHECK13-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
6173 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
6174 // CHECK13-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
6175 // CHECK13-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8
6176 // CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.omp_outlined, ptr [[N_ADDR]], i64 [[TMP0]], ptr [[TMP1]])
6177 // CHECK13-NEXT: ret void
6180 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.omp_outlined
6181 // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR3]] {
6182 // CHECK13-NEXT: entry:
6183 // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
6184 // CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
6185 // CHECK13-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 8
6186 // CHECK13-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
6187 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
6188 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
6189 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
6190 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
6191 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
6192 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
6193 // CHECK13-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
6194 // CHECK13-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
6195 // CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6196 // CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6197 // CHECK13-NEXT: [[I3:%.*]] = alloca i32, align 4
6198 // CHECK13-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
6199 // CHECK13-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
6200 // CHECK13-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 8
6201 // CHECK13-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
6202 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
6203 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 8
6204 // CHECK13-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
6205 // CHECK13-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 8
6206 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
6207 // CHECK13-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4
6208 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
6209 // CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
6210 // CHECK13-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
6211 // CHECK13-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
6212 // CHECK13-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
6213 // CHECK13-NEXT: store i32 0, ptr [[I]], align 4
6214 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
6215 // CHECK13-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
6216 // CHECK13-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
6217 // CHECK13: omp.precond.then:
6218 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
6219 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
6220 // CHECK13-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_COMB_UB]], align 4
6221 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
6222 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
6223 // CHECK13-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
6224 // CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
6225 // CHECK13-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
6226 // CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
6227 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
6228 // CHECK13-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
6229 // CHECK13-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
6230 // CHECK13: cond.true:
6231 // CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
6232 // CHECK13-NEXT: br label [[COND_END:%.*]]
6233 // CHECK13: cond.false:
6234 // CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
6235 // CHECK13-NEXT: br label [[COND_END]]
6236 // CHECK13: cond.end:
6237 // CHECK13-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
6238 // CHECK13-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
6239 // CHECK13-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
6240 // CHECK13-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4
6241 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
6242 // CHECK13: omp.inner.for.cond:
6243 // CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34:![0-9]+]]
6244 // CHECK13-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP34]]
6245 // CHECK13-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
6246 // CHECK13-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6247 // CHECK13: omp.inner.for.body:
6248 // CHECK13-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP34]]
6249 // CHECK13-NEXT: [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
6250 // CHECK13-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP34]]
6251 // CHECK13-NEXT: [[TMP19:%.*]] = zext i32 [[TMP18]] to i64
6252 // CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 5, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.omp_outlined.omp_outlined, i64 [[TMP17]], i64 [[TMP19]], ptr [[TMP0]], i64 [[TMP1]], ptr [[TMP2]]), !llvm.access.group [[ACC_GRP34]]
6253 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
6254 // CHECK13: omp.inner.for.inc:
6255 // CHECK13-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]]
6256 // CHECK13-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP34]]
6257 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
6258 // CHECK13-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]]
6259 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP35:![0-9]+]]
6260 // CHECK13: omp.inner.for.end:
6261 // CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
6262 // CHECK13: omp.loop.exit:
6263 // CHECK13-NEXT: [[TMP22:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
6264 // CHECK13-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4
6265 // CHECK13-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP23]])
6266 // CHECK13-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
6267 // CHECK13-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
6268 // CHECK13-NEXT: br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6269 // CHECK13: .omp.final.then:
6270 // CHECK13-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
6271 // CHECK13-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP26]], 0
6272 // CHECK13-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
6273 // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
6274 // CHECK13-NEXT: [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
6275 // CHECK13-NEXT: store i32 [[ADD8]], ptr [[I3]], align 4
6276 // CHECK13-NEXT: br label [[DOTOMP_FINAL_DONE]]
6277 // CHECK13: .omp.final.done:
6278 // CHECK13-NEXT: br label [[OMP_PRECOND_END]]
6279 // CHECK13: omp.precond.end:
6280 // CHECK13-NEXT: ret void
6283 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.omp_outlined.omp_outlined
6284 // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR3]] {
6285 // CHECK13-NEXT: entry:
6286 // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
6287 // CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
6288 // CHECK13-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
6289 // CHECK13-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
6290 // CHECK13-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 8
6291 // CHECK13-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
6292 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
6293 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
6294 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
6295 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
6296 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
6297 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
6298 // CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
6299 // CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
6300 // CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6301 // CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6302 // CHECK13-NEXT: [[I4:%.*]] = alloca i32, align 4
6303 // CHECK13-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
6304 // CHECK13-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
6305 // CHECK13-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
6306 // CHECK13-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
6307 // CHECK13-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 8
6308 // CHECK13-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
6309 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
6310 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 8
6311 // CHECK13-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
6312 // CHECK13-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 8
6313 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
6314 // CHECK13-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4
6315 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
6316 // CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
6317 // CHECK13-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
6318 // CHECK13-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
6319 // CHECK13-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
6320 // CHECK13-NEXT: store i32 0, ptr [[I]], align 4
6321 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
6322 // CHECK13-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
6323 // CHECK13-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
6324 // CHECK13: omp.precond.then:
6325 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
6326 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
6327 // CHECK13-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4
6328 // CHECK13-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
6329 // CHECK13-NEXT: [[CONV:%.*]] = trunc i64 [[TMP7]] to i32
6330 // CHECK13-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
6331 // CHECK13-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP8]] to i32
6332 // CHECK13-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
6333 // CHECK13-NEXT: store i32 [[CONV3]], ptr [[DOTOMP_UB]], align 4
6334 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
6335 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
6336 // CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
6337 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
6338 // CHECK13-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
6339 // CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP11]], align 4
6340 // CHECK13-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB3]], i32 [[TMP12]], i32 35, i32 [[TMP9]], i32 [[TMP10]], i32 1, i32 1)
6341 // CHECK13-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
6342 // CHECK13: omp.dispatch.cond:
6343 // CHECK13-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
6344 // CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 4
6345 // CHECK13-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB3]], i32 [[TMP14]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
6346 // CHECK13-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP15]], 0
6347 // CHECK13-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
6348 // CHECK13: omp.dispatch.body:
6349 // CHECK13-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
6350 // CHECK13-NEXT: store i32 [[TMP16]], ptr [[DOTOMP_IV]], align 4
6351 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
6352 // CHECK13: omp.inner.for.cond:
6353 // CHECK13-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP37:![0-9]+]]
6354 // CHECK13-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP37]]
6355 // CHECK13-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
6356 // CHECK13-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6357 // CHECK13: omp.inner.for.body:
6358 // CHECK13-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP37]]
6359 // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
6360 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
6361 // CHECK13-NEXT: store i32 [[ADD]], ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP37]]
6362 // CHECK13-NEXT: [[TMP20:%.*]] = load i32, ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP37]]
6363 // CHECK13-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP20]] to i64
6364 // CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i64 [[IDXPROM]]
6365 // CHECK13-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP37]]
6366 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
6367 // CHECK13: omp.body.continue:
6368 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
6369 // CHECK13: omp.inner.for.inc:
6370 // CHECK13-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP37]]
6371 // CHECK13-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP21]], 1
6372 // CHECK13-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP37]]
6373 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP38:![0-9]+]]
6374 // CHECK13: omp.inner.for.end:
6375 // CHECK13-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
6376 // CHECK13: omp.dispatch.inc:
6377 // CHECK13-NEXT: br label [[OMP_DISPATCH_COND]]
6378 // CHECK13: omp.dispatch.end:
6379 // CHECK13-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
6380 // CHECK13-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
6381 // CHECK13-NEXT: br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6382 // CHECK13: .omp.final.then:
6383 // CHECK13-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
6384 // CHECK13-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP24]], 0
6385 // CHECK13-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
6386 // CHECK13-NEXT: [[MUL9:%.*]] = mul nsw i32 [[DIV8]], 1
6387 // CHECK13-NEXT: [[ADD10:%.*]] = add nsw i32 0, [[MUL9]]
6388 // CHECK13-NEXT: store i32 [[ADD10]], ptr [[I4]], align 4
6389 // CHECK13-NEXT: br label [[DOTOMP_FINAL_DONE]]
6390 // CHECK13: .omp.final.done:
6391 // CHECK13-NEXT: br label [[OMP_PRECOND_END]]
6392 // CHECK13: omp.precond.end:
6393 // CHECK13-NEXT: ret void
6396 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174
6397 // CHECK13-SAME: (i64 noundef [[M:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
6398 // CHECK13-NEXT: entry:
6399 // CHECK13-NEXT: [[M_ADDR:%.*]] = alloca i64, align 8
6400 // CHECK13-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
6401 // CHECK13-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
6402 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
6403 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
6404 // CHECK13-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
6405 // CHECK13-NEXT: store i64 [[M]], ptr [[M_ADDR]], align 8
6406 // CHECK13-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
6407 // CHECK13-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
6408 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
6409 // CHECK13-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
6410 // CHECK13-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8
6411 // CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[M_ADDR]], align 4
6412 // CHECK13-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 4
6413 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
6414 // CHECK13-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
6415 // CHECK13-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8
6416 // CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.omp_outlined, ptr [[N_ADDR]], i64 [[TMP0]], ptr [[TMP1]], i64 [[TMP4]])
6417 // CHECK13-NEXT: ret void
6420 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.omp_outlined
6421 // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
6422 // CHECK13-NEXT: entry:
6423 // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
6424 // CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
6425 // CHECK13-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 8
6426 // CHECK13-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
6427 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
6428 // CHECK13-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
6429 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
6430 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
6431 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
6432 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
6433 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
6434 // CHECK13-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
6435 // CHECK13-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
6436 // CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6437 // CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6438 // CHECK13-NEXT: [[I4:%.*]] = alloca i32, align 4
6439 // CHECK13-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
6440 // CHECK13-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
6441 // CHECK13-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
6442 // CHECK13-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 8
6443 // CHECK13-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
6444 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
6445 // CHECK13-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
6446 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 8
6447 // CHECK13-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
6448 // CHECK13-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 8
6449 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
6450 // CHECK13-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_1]], align 4
6451 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
6452 // CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
6453 // CHECK13-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
6454 // CHECK13-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
6455 // CHECK13-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_2]], align 4
6456 // CHECK13-NEXT: store i32 0, ptr [[I]], align 4
6457 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
6458 // CHECK13-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
6459 // CHECK13-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
6460 // CHECK13: omp.precond.then:
6461 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
6462 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
6463 // CHECK13-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_COMB_UB]], align 4
6464 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
6465 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
6466 // CHECK13-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
6467 // CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
6468 // CHECK13-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
6469 // CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
6470 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
6471 // CHECK13-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
6472 // CHECK13-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
6473 // CHECK13: cond.true:
6474 // CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
6475 // CHECK13-NEXT: br label [[COND_END:%.*]]
6476 // CHECK13: cond.false:
6477 // CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
6478 // CHECK13-NEXT: br label [[COND_END]]
6479 // CHECK13: cond.end:
6480 // CHECK13-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
6481 // CHECK13-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
6482 // CHECK13-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
6483 // CHECK13-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4
6484 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
6485 // CHECK13: omp.inner.for.cond:
6486 // CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP40:![0-9]+]]
6487 // CHECK13-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP40]]
6488 // CHECK13-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
6489 // CHECK13-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6490 // CHECK13: omp.inner.for.body:
6491 // CHECK13-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP40]]
6492 // CHECK13-NEXT: [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
6493 // CHECK13-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP40]]
6494 // CHECK13-NEXT: [[TMP19:%.*]] = zext i32 [[TMP18]] to i64
6495 // CHECK13-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group [[ACC_GRP40]]
6496 // CHECK13-NEXT: store i32 [[TMP20]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group [[ACC_GRP40]]
6497 // CHECK13-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group [[ACC_GRP40]]
6498 // CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 6, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.omp_outlined.omp_outlined, i64 [[TMP17]], i64 [[TMP19]], ptr [[TMP0]], i64 [[TMP1]], ptr [[TMP2]], i64 [[TMP21]]), !llvm.access.group [[ACC_GRP40]]
6499 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
6500 // CHECK13: omp.inner.for.inc:
6501 // CHECK13-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP40]]
6502 // CHECK13-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP40]]
6503 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
6504 // CHECK13-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP40]]
6505 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP41:![0-9]+]]
6506 // CHECK13: omp.inner.for.end:
6507 // CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
6508 // CHECK13: omp.loop.exit:
6509 // CHECK13-NEXT: [[TMP24:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
6510 // CHECK13-NEXT: [[TMP25:%.*]] = load i32, ptr [[TMP24]], align 4
6511 // CHECK13-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP25]])
6512 // CHECK13-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
6513 // CHECK13-NEXT: [[TMP27:%.*]] = icmp ne i32 [[TMP26]], 0
6514 // CHECK13-NEXT: br i1 [[TMP27]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6515 // CHECK13: .omp.final.then:
6516 // CHECK13-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
6517 // CHECK13-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP28]], 0
6518 // CHECK13-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
6519 // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
6520 // CHECK13-NEXT: [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
6521 // CHECK13-NEXT: store i32 [[ADD9]], ptr [[I4]], align 4
6522 // CHECK13-NEXT: br label [[DOTOMP_FINAL_DONE]]
6523 // CHECK13: .omp.final.done:
6524 // CHECK13-NEXT: br label [[OMP_PRECOND_END]]
6525 // CHECK13: omp.precond.end:
6526 // CHECK13-NEXT: ret void
6529 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.omp_outlined.omp_outlined
6530 // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
6531 // CHECK13-NEXT: entry:
6532 // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
6533 // CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
6534 // CHECK13-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
6535 // CHECK13-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
6536 // CHECK13-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 8
6537 // CHECK13-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
6538 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
6539 // CHECK13-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
6540 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
6541 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
6542 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
6543 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
6544 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
6545 // CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
6546 // CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
6547 // CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6548 // CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6549 // CHECK13-NEXT: [[I5:%.*]] = alloca i32, align 4
6550 // CHECK13-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
6551 // CHECK13-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
6552 // CHECK13-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
6553 // CHECK13-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
6554 // CHECK13-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 8
6555 // CHECK13-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
6556 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
6557 // CHECK13-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
6558 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 8
6559 // CHECK13-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
6560 // CHECK13-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 8
6561 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
6562 // CHECK13-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_1]], align 4
6563 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
6564 // CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
6565 // CHECK13-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
6566 // CHECK13-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
6567 // CHECK13-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_2]], align 4
6568 // CHECK13-NEXT: store i32 0, ptr [[I]], align 4
6569 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
6570 // CHECK13-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
6571 // CHECK13-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
6572 // CHECK13: omp.precond.then:
6573 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
6574 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
6575 // CHECK13-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4
6576 // CHECK13-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
6577 // CHECK13-NEXT: [[CONV:%.*]] = trunc i64 [[TMP7]] to i32
6578 // CHECK13-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
6579 // CHECK13-NEXT: [[CONV4:%.*]] = trunc i64 [[TMP8]] to i32
6580 // CHECK13-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
6581 // CHECK13-NEXT: store i32 [[CONV4]], ptr [[DOTOMP_UB]], align 4
6582 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
6583 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
6584 // CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
6585 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
6586 // CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
6587 // CHECK13-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
6588 // CHECK13-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4
6589 // CHECK13-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB3]], i32 [[TMP13]], i32 35, i32 [[TMP10]], i32 [[TMP11]], i32 1, i32 [[TMP9]])
6590 // CHECK13-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
6591 // CHECK13: omp.dispatch.cond:
6592 // CHECK13-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
6593 // CHECK13-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 4
6594 // CHECK13-NEXT: [[TMP16:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB3]], i32 [[TMP15]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
6595 // CHECK13-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP16]], 0
6596 // CHECK13-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
6597 // CHECK13: omp.dispatch.body:
6598 // CHECK13-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
6599 // CHECK13-NEXT: store i32 [[TMP17]], ptr [[DOTOMP_IV]], align 4
6600 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
6601 // CHECK13: omp.inner.for.cond:
6602 // CHECK13-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP43:![0-9]+]]
6603 // CHECK13-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP43]]
6604 // CHECK13-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
6605 // CHECK13-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6606 // CHECK13: omp.inner.for.body:
6607 // CHECK13-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP43]]
6608 // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP20]], 1
6609 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
6610 // CHECK13-NEXT: store i32 [[ADD]], ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP43]]
6611 // CHECK13-NEXT: [[TMP21:%.*]] = load i32, ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP43]]
6612 // CHECK13-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
6613 // CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i64 [[IDXPROM]]
6614 // CHECK13-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP43]]
6615 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
6616 // CHECK13: omp.body.continue:
6617 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
6618 // CHECK13: omp.inner.for.inc:
6619 // CHECK13-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP43]]
6620 // CHECK13-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP22]], 1
6621 // CHECK13-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP43]]
6622 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP44:![0-9]+]]
6623 // CHECK13: omp.inner.for.end:
6624 // CHECK13-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
6625 // CHECK13: omp.dispatch.inc:
6626 // CHECK13-NEXT: br label [[OMP_DISPATCH_COND]]
6627 // CHECK13: omp.dispatch.end:
6628 // CHECK13-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
6629 // CHECK13-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
6630 // CHECK13-NEXT: br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6631 // CHECK13: .omp.final.then:
6632 // CHECK13-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
6633 // CHECK13-NEXT: [[SUB8:%.*]] = sub nsw i32 [[TMP25]], 0
6634 // CHECK13-NEXT: [[DIV9:%.*]] = sdiv i32 [[SUB8]], 1
6635 // CHECK13-NEXT: [[MUL10:%.*]] = mul nsw i32 [[DIV9]], 1
6636 // CHECK13-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]]
6637 // CHECK13-NEXT: store i32 [[ADD11]], ptr [[I5]], align 4
6638 // CHECK13-NEXT: br label [[DOTOMP_FINAL_DONE]]
6639 // CHECK13: .omp.final.done:
6640 // CHECK13-NEXT: br label [[OMP_PRECOND_END]]
6641 // CHECK13: omp.precond.end:
6642 // CHECK13-NEXT: ret void
6645 // CHECK13-LABEL: define {{[^@]+}}@_Z5tmainIiLi10EEiT_
6646 // CHECK13-SAME: (i32 noundef signext [[ARGC:%.*]]) #[[ATTR6:[0-9]+]] comdat {
6647 // CHECK13-NEXT: entry:
6648 // CHECK13-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
6649 // CHECK13-NEXT: [[A:%.*]] = alloca [10 x i32], align 4
6650 // CHECK13-NEXT: [[M:%.*]] = alloca i32, align 4
6651 // CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8
6652 // CHECK13-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8
6653 // CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8
6654 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
6655 // CHECK13-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
6656 // CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS1:%.*]] = alloca [1 x ptr], align 8
6657 // CHECK13-NEXT: [[DOTOFFLOAD_PTRS2:%.*]] = alloca [1 x ptr], align 8
6658 // CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS3:%.*]] = alloca [1 x ptr], align 8
6659 // CHECK13-NEXT: [[_TMP4:%.*]] = alloca i32, align 4
6660 // CHECK13-NEXT: [[KERNEL_ARGS5:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
6661 // CHECK13-NEXT: [[M_CASTED:%.*]] = alloca i64, align 8
6662 // CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS8:%.*]] = alloca [2 x ptr], align 8
6663 // CHECK13-NEXT: [[DOTOFFLOAD_PTRS9:%.*]] = alloca [2 x ptr], align 8
6664 // CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS10:%.*]] = alloca [2 x ptr], align 8
6665 // CHECK13-NEXT: [[_TMP11:%.*]] = alloca i32, align 4
6666 // CHECK13-NEXT: [[KERNEL_ARGS12:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
6667 // CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS15:%.*]] = alloca [1 x ptr], align 8
6668 // CHECK13-NEXT: [[DOTOFFLOAD_PTRS16:%.*]] = alloca [1 x ptr], align 8
6669 // CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS17:%.*]] = alloca [1 x ptr], align 8
6670 // CHECK13-NEXT: [[_TMP18:%.*]] = alloca i32, align 4
6671 // CHECK13-NEXT: [[KERNEL_ARGS19:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
6672 // CHECK13-NEXT: [[M_CASTED22:%.*]] = alloca i64, align 8
6673 // CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS23:%.*]] = alloca [2 x ptr], align 8
6674 // CHECK13-NEXT: [[DOTOFFLOAD_PTRS24:%.*]] = alloca [2 x ptr], align 8
6675 // CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS25:%.*]] = alloca [2 x ptr], align 8
6676 // CHECK13-NEXT: [[_TMP26:%.*]] = alloca i32, align 4
6677 // CHECK13-NEXT: [[KERNEL_ARGS27:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
6678 // CHECK13-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4
6679 // CHECK13-NEXT: store i32 10, ptr [[M]], align 4
6680 // CHECK13-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
6681 // CHECK13-NEXT: store ptr [[A]], ptr [[TMP0]], align 8
6682 // CHECK13-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
6683 // CHECK13-NEXT: store ptr [[A]], ptr [[TMP1]], align 8
6684 // CHECK13-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
6685 // CHECK13-NEXT: store ptr null, ptr [[TMP2]], align 8
6686 // CHECK13-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
6687 // CHECK13-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
6688 // CHECK13-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
6689 // CHECK13-NEXT: store i32 2, ptr [[TMP5]], align 4
6690 // CHECK13-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
6691 // CHECK13-NEXT: store i32 1, ptr [[TMP6]], align 4
6692 // CHECK13-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
6693 // CHECK13-NEXT: store ptr [[TMP3]], ptr [[TMP7]], align 8
6694 // CHECK13-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
6695 // CHECK13-NEXT: store ptr [[TMP4]], ptr [[TMP8]], align 8
6696 // CHECK13-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
6697 // CHECK13-NEXT: store ptr @.offload_sizes.9, ptr [[TMP9]], align 8
6698 // CHECK13-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
6699 // CHECK13-NEXT: store ptr @.offload_maptypes.10, ptr [[TMP10]], align 8
6700 // CHECK13-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
6701 // CHECK13-NEXT: store ptr null, ptr [[TMP11]], align 8
6702 // CHECK13-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
6703 // CHECK13-NEXT: store ptr null, ptr [[TMP12]], align 8
6704 // CHECK13-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
6705 // CHECK13-NEXT: store i64 10, ptr [[TMP13]], align 8
6706 // CHECK13-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
6707 // CHECK13-NEXT: store i64 0, ptr [[TMP14]], align 8
6708 // CHECK13-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
6709 // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP15]], align 4
6710 // CHECK13-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
6711 // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP16]], align 4
6712 // CHECK13-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
6713 // CHECK13-NEXT: store i32 0, ptr [[TMP17]], align 4
6714 // CHECK13-NEXT: [[TMP18:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.region_id, ptr [[KERNEL_ARGS]])
6715 // CHECK13-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
6716 // CHECK13-NEXT: br i1 [[TMP19]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
6717 // CHECK13: omp_offload.failed:
6718 // CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122(ptr [[A]]) #[[ATTR4]]
6719 // CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT]]
6720 // CHECK13: omp_offload.cont:
6721 // CHECK13-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
6722 // CHECK13-NEXT: store ptr [[A]], ptr [[TMP20]], align 8
6723 // CHECK13-NEXT: [[TMP21:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
6724 // CHECK13-NEXT: store ptr [[A]], ptr [[TMP21]], align 8
6725 // CHECK13-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS3]], i64 0, i64 0
6726 // CHECK13-NEXT: store ptr null, ptr [[TMP22]], align 8
6727 // CHECK13-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
6728 // CHECK13-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
6729 // CHECK13-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 0
6730 // CHECK13-NEXT: store i32 2, ptr [[TMP25]], align 4
6731 // CHECK13-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 1
6732 // CHECK13-NEXT: store i32 1, ptr [[TMP26]], align 4
6733 // CHECK13-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 2
6734 // CHECK13-NEXT: store ptr [[TMP23]], ptr [[TMP27]], align 8
6735 // CHECK13-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 3
6736 // CHECK13-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 8
6737 // CHECK13-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 4
6738 // CHECK13-NEXT: store ptr @.offload_sizes.11, ptr [[TMP29]], align 8
6739 // CHECK13-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 5
6740 // CHECK13-NEXT: store ptr @.offload_maptypes.12, ptr [[TMP30]], align 8
6741 // CHECK13-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 6
6742 // CHECK13-NEXT: store ptr null, ptr [[TMP31]], align 8
6743 // CHECK13-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 7
6744 // CHECK13-NEXT: store ptr null, ptr [[TMP32]], align 8
6745 // CHECK13-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 8
6746 // CHECK13-NEXT: store i64 10, ptr [[TMP33]], align 8
6747 // CHECK13-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 9
6748 // CHECK13-NEXT: store i64 0, ptr [[TMP34]], align 8
6749 // CHECK13-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 10
6750 // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP35]], align 4
6751 // CHECK13-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 11
6752 // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP36]], align 4
6753 // CHECK13-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 12
6754 // CHECK13-NEXT: store i32 0, ptr [[TMP37]], align 4
6755 // CHECK13-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.region_id, ptr [[KERNEL_ARGS5]])
6756 // CHECK13-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
6757 // CHECK13-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED6:%.*]], label [[OMP_OFFLOAD_CONT7:%.*]]
6758 // CHECK13: omp_offload.failed6:
6759 // CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127(ptr [[A]]) #[[ATTR4]]
6760 // CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT7]]
6761 // CHECK13: omp_offload.cont7:
6762 // CHECK13-NEXT: [[TMP40:%.*]] = load i32, ptr [[M]], align 4
6763 // CHECK13-NEXT: store i32 [[TMP40]], ptr [[M_CASTED]], align 4
6764 // CHECK13-NEXT: [[TMP41:%.*]] = load i64, ptr [[M_CASTED]], align 8
6765 // CHECK13-NEXT: [[TMP42:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS8]], i32 0, i32 0
6766 // CHECK13-NEXT: store i64 [[TMP41]], ptr [[TMP42]], align 8
6767 // CHECK13-NEXT: [[TMP43:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS9]], i32 0, i32 0
6768 // CHECK13-NEXT: store i64 [[TMP41]], ptr [[TMP43]], align 8
6769 // CHECK13-NEXT: [[TMP44:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS10]], i64 0, i64 0
6770 // CHECK13-NEXT: store ptr null, ptr [[TMP44]], align 8
6771 // CHECK13-NEXT: [[TMP45:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS8]], i32 0, i32 1
6772 // CHECK13-NEXT: store ptr [[A]], ptr [[TMP45]], align 8
6773 // CHECK13-NEXT: [[TMP46:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS9]], i32 0, i32 1
6774 // CHECK13-NEXT: store ptr [[A]], ptr [[TMP46]], align 8
6775 // CHECK13-NEXT: [[TMP47:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS10]], i64 0, i64 1
6776 // CHECK13-NEXT: store ptr null, ptr [[TMP47]], align 8
6777 // CHECK13-NEXT: [[TMP48:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS8]], i32 0, i32 0
6778 // CHECK13-NEXT: [[TMP49:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS9]], i32 0, i32 0
6779 // CHECK13-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 0
6780 // CHECK13-NEXT: store i32 2, ptr [[TMP50]], align 4
6781 // CHECK13-NEXT: [[TMP51:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 1
6782 // CHECK13-NEXT: store i32 2, ptr [[TMP51]], align 4
6783 // CHECK13-NEXT: [[TMP52:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 2
6784 // CHECK13-NEXT: store ptr [[TMP48]], ptr [[TMP52]], align 8
6785 // CHECK13-NEXT: [[TMP53:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 3
6786 // CHECK13-NEXT: store ptr [[TMP49]], ptr [[TMP53]], align 8
6787 // CHECK13-NEXT: [[TMP54:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 4
6788 // CHECK13-NEXT: store ptr @.offload_sizes.13, ptr [[TMP54]], align 8
6789 // CHECK13-NEXT: [[TMP55:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 5
6790 // CHECK13-NEXT: store ptr @.offload_maptypes.14, ptr [[TMP55]], align 8
6791 // CHECK13-NEXT: [[TMP56:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 6
6792 // CHECK13-NEXT: store ptr null, ptr [[TMP56]], align 8
6793 // CHECK13-NEXT: [[TMP57:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 7
6794 // CHECK13-NEXT: store ptr null, ptr [[TMP57]], align 8
6795 // CHECK13-NEXT: [[TMP58:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 8
6796 // CHECK13-NEXT: store i64 10, ptr [[TMP58]], align 8
6797 // CHECK13-NEXT: [[TMP59:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 9
6798 // CHECK13-NEXT: store i64 0, ptr [[TMP59]], align 8
6799 // CHECK13-NEXT: [[TMP60:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 10
6800 // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP60]], align 4
6801 // CHECK13-NEXT: [[TMP61:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 11
6802 // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP61]], align 4
6803 // CHECK13-NEXT: [[TMP62:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 12
6804 // CHECK13-NEXT: store i32 0, ptr [[TMP62]], align 4
6805 // CHECK13-NEXT: [[TMP63:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.region_id, ptr [[KERNEL_ARGS12]])
6806 // CHECK13-NEXT: [[TMP64:%.*]] = icmp ne i32 [[TMP63]], 0
6807 // CHECK13-NEXT: br i1 [[TMP64]], label [[OMP_OFFLOAD_FAILED13:%.*]], label [[OMP_OFFLOAD_CONT14:%.*]]
6808 // CHECK13: omp_offload.failed13:
6809 // CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132(i64 [[TMP41]], ptr [[A]]) #[[ATTR4]]
6810 // CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT14]]
6811 // CHECK13: omp_offload.cont14:
6812 // CHECK13-NEXT: [[TMP65:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS15]], i32 0, i32 0
6813 // CHECK13-NEXT: store ptr [[A]], ptr [[TMP65]], align 8
6814 // CHECK13-NEXT: [[TMP66:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS16]], i32 0, i32 0
6815 // CHECK13-NEXT: store ptr [[A]], ptr [[TMP66]], align 8
6816 // CHECK13-NEXT: [[TMP67:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS17]], i64 0, i64 0
6817 // CHECK13-NEXT: store ptr null, ptr [[TMP67]], align 8
6818 // CHECK13-NEXT: [[TMP68:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS15]], i32 0, i32 0
6819 // CHECK13-NEXT: [[TMP69:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS16]], i32 0, i32 0
6820 // CHECK13-NEXT: [[TMP70:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 0
6821 // CHECK13-NEXT: store i32 2, ptr [[TMP70]], align 4
6822 // CHECK13-NEXT: [[TMP71:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 1
6823 // CHECK13-NEXT: store i32 1, ptr [[TMP71]], align 4
6824 // CHECK13-NEXT: [[TMP72:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 2
6825 // CHECK13-NEXT: store ptr [[TMP68]], ptr [[TMP72]], align 8
6826 // CHECK13-NEXT: [[TMP73:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 3
6827 // CHECK13-NEXT: store ptr [[TMP69]], ptr [[TMP73]], align 8
6828 // CHECK13-NEXT: [[TMP74:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 4
6829 // CHECK13-NEXT: store ptr @.offload_sizes.15, ptr [[TMP74]], align 8
6830 // CHECK13-NEXT: [[TMP75:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 5
6831 // CHECK13-NEXT: store ptr @.offload_maptypes.16, ptr [[TMP75]], align 8
6832 // CHECK13-NEXT: [[TMP76:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 6
6833 // CHECK13-NEXT: store ptr null, ptr [[TMP76]], align 8
6834 // CHECK13-NEXT: [[TMP77:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 7
6835 // CHECK13-NEXT: store ptr null, ptr [[TMP77]], align 8
6836 // CHECK13-NEXT: [[TMP78:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 8
6837 // CHECK13-NEXT: store i64 10, ptr [[TMP78]], align 8
6838 // CHECK13-NEXT: [[TMP79:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 9
6839 // CHECK13-NEXT: store i64 0, ptr [[TMP79]], align 8
6840 // CHECK13-NEXT: [[TMP80:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 10
6841 // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP80]], align 4
6842 // CHECK13-NEXT: [[TMP81:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 11
6843 // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP81]], align 4
6844 // CHECK13-NEXT: [[TMP82:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 12
6845 // CHECK13-NEXT: store i32 0, ptr [[TMP82]], align 4
6846 // CHECK13-NEXT: [[TMP83:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.region_id, ptr [[KERNEL_ARGS19]])
6847 // CHECK13-NEXT: [[TMP84:%.*]] = icmp ne i32 [[TMP83]], 0
6848 // CHECK13-NEXT: br i1 [[TMP84]], label [[OMP_OFFLOAD_FAILED20:%.*]], label [[OMP_OFFLOAD_CONT21:%.*]]
6849 // CHECK13: omp_offload.failed20:
6850 // CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137(ptr [[A]]) #[[ATTR4]]
6851 // CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT21]]
6852 // CHECK13: omp_offload.cont21:
6853 // CHECK13-NEXT: [[TMP85:%.*]] = load i32, ptr [[M]], align 4
6854 // CHECK13-NEXT: store i32 [[TMP85]], ptr [[M_CASTED22]], align 4
6855 // CHECK13-NEXT: [[TMP86:%.*]] = load i64, ptr [[M_CASTED22]], align 8
6856 // CHECK13-NEXT: [[TMP87:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 0
6857 // CHECK13-NEXT: store i64 [[TMP86]], ptr [[TMP87]], align 8
6858 // CHECK13-NEXT: [[TMP88:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS24]], i32 0, i32 0
6859 // CHECK13-NEXT: store i64 [[TMP86]], ptr [[TMP88]], align 8
6860 // CHECK13-NEXT: [[TMP89:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS25]], i64 0, i64 0
6861 // CHECK13-NEXT: store ptr null, ptr [[TMP89]], align 8
6862 // CHECK13-NEXT: [[TMP90:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 1
6863 // CHECK13-NEXT: store ptr [[A]], ptr [[TMP90]], align 8
6864 // CHECK13-NEXT: [[TMP91:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS24]], i32 0, i32 1
6865 // CHECK13-NEXT: store ptr [[A]], ptr [[TMP91]], align 8
6866 // CHECK13-NEXT: [[TMP92:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS25]], i64 0, i64 1
6867 // CHECK13-NEXT: store ptr null, ptr [[TMP92]], align 8
6868 // CHECK13-NEXT: [[TMP93:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 0
6869 // CHECK13-NEXT: [[TMP94:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS24]], i32 0, i32 0
6870 // CHECK13-NEXT: [[TMP95:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 0
6871 // CHECK13-NEXT: store i32 2, ptr [[TMP95]], align 4
6872 // CHECK13-NEXT: [[TMP96:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 1
6873 // CHECK13-NEXT: store i32 2, ptr [[TMP96]], align 4
6874 // CHECK13-NEXT: [[TMP97:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 2
6875 // CHECK13-NEXT: store ptr [[TMP93]], ptr [[TMP97]], align 8
6876 // CHECK13-NEXT: [[TMP98:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 3
6877 // CHECK13-NEXT: store ptr [[TMP94]], ptr [[TMP98]], align 8
6878 // CHECK13-NEXT: [[TMP99:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 4
6879 // CHECK13-NEXT: store ptr @.offload_sizes.17, ptr [[TMP99]], align 8
6880 // CHECK13-NEXT: [[TMP100:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 5
6881 // CHECK13-NEXT: store ptr @.offload_maptypes.18, ptr [[TMP100]], align 8
6882 // CHECK13-NEXT: [[TMP101:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 6
6883 // CHECK13-NEXT: store ptr null, ptr [[TMP101]], align 8
6884 // CHECK13-NEXT: [[TMP102:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 7
6885 // CHECK13-NEXT: store ptr null, ptr [[TMP102]], align 8
6886 // CHECK13-NEXT: [[TMP103:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 8
6887 // CHECK13-NEXT: store i64 10, ptr [[TMP103]], align 8
6888 // CHECK13-NEXT: [[TMP104:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 9
6889 // CHECK13-NEXT: store i64 0, ptr [[TMP104]], align 8
6890 // CHECK13-NEXT: [[TMP105:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 10
6891 // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP105]], align 4
6892 // CHECK13-NEXT: [[TMP106:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 11
6893 // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP106]], align 4
6894 // CHECK13-NEXT: [[TMP107:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 12
6895 // CHECK13-NEXT: store i32 0, ptr [[TMP107]], align 4
6896 // CHECK13-NEXT: [[TMP108:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.region_id, ptr [[KERNEL_ARGS27]])
6897 // CHECK13-NEXT: [[TMP109:%.*]] = icmp ne i32 [[TMP108]], 0
6898 // CHECK13-NEXT: br i1 [[TMP109]], label [[OMP_OFFLOAD_FAILED28:%.*]], label [[OMP_OFFLOAD_CONT29:%.*]]
6899 // CHECK13: omp_offload.failed28:
6900 // CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142(i64 [[TMP86]], ptr [[A]]) #[[ATTR4]]
6901 // CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT29]]
6902 // CHECK13: omp_offload.cont29:
6903 // CHECK13-NEXT: ret i32 0
6906 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122
6907 // CHECK13-SAME: (ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
6908 // CHECK13-NEXT: entry:
6909 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
6910 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
6911 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
6912 // CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.omp_outlined, ptr [[TMP0]])
6913 // CHECK13-NEXT: ret void
6916 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.omp_outlined
6917 // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR3]] {
6918 // CHECK13-NEXT: entry:
6919 // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
6920 // CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
6921 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
6922 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
6923 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
6924 // CHECK13-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
6925 // CHECK13-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
6926 // CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6927 // CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6928 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
6929 // CHECK13-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
6930 // CHECK13-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
6931 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
6932 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
6933 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
6934 // CHECK13-NEXT: store i32 9, ptr [[DOTOMP_COMB_UB]], align 4
6935 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
6936 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
6937 // CHECK13-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
6938 // CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
6939 // CHECK13-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
6940 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
6941 // CHECK13-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
6942 // CHECK13-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
6943 // CHECK13: cond.true:
6944 // CHECK13-NEXT: br label [[COND_END:%.*]]
6945 // CHECK13: cond.false:
6946 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
6947 // CHECK13-NEXT: br label [[COND_END]]
6948 // CHECK13: cond.end:
6949 // CHECK13-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
6950 // CHECK13-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
6951 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
6952 // CHECK13-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
6953 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
6954 // CHECK13: omp.inner.for.cond:
6955 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP46:![0-9]+]]
6956 // CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP46]]
6957 // CHECK13-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
6958 // CHECK13-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6959 // CHECK13: omp.inner.for.body:
6960 // CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP46]]
6961 // CHECK13-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
6962 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP46]]
6963 // CHECK13-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
6964 // CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP46]]
6965 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
6966 // CHECK13: omp.inner.for.inc:
6967 // CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP46]]
6968 // CHECK13-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP46]]
6969 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
6970 // CHECK13-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP46]]
6971 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP47:![0-9]+]]
6972 // CHECK13: omp.inner.for.end:
6973 // CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
6974 // CHECK13: omp.loop.exit:
6975 // CHECK13-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
6976 // CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
6977 // CHECK13-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
6978 // CHECK13-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6979 // CHECK13: .omp.final.then:
6980 // CHECK13-NEXT: store i32 10, ptr [[I]], align 4
6981 // CHECK13-NEXT: br label [[DOTOMP_FINAL_DONE]]
6982 // CHECK13: .omp.final.done:
6983 // CHECK13-NEXT: ret void
6986 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.omp_outlined.omp_outlined
6987 // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR3]] {
6988 // CHECK13-NEXT: entry:
6989 // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
6990 // CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
6991 // CHECK13-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
6992 // CHECK13-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
6993 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
6994 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
6995 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
6996 // CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
6997 // CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
6998 // CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6999 // CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7000 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
7001 // CHECK13-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
7002 // CHECK13-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
7003 // CHECK13-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
7004 // CHECK13-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
7005 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
7006 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
7007 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
7008 // CHECK13-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
7009 // CHECK13-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
7010 // CHECK13-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
7011 // CHECK13-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
7012 // CHECK13-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
7013 // CHECK13-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
7014 // CHECK13-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
7015 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
7016 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
7017 // CHECK13-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
7018 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
7019 // CHECK13-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
7020 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
7021 // CHECK13-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 9
7022 // CHECK13-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
7023 // CHECK13: cond.true:
7024 // CHECK13-NEXT: br label [[COND_END:%.*]]
7025 // CHECK13: cond.false:
7026 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
7027 // CHECK13-NEXT: br label [[COND_END]]
7028 // CHECK13: cond.end:
7029 // CHECK13-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
7030 // CHECK13-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
7031 // CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
7032 // CHECK13-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4
7033 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
7034 // CHECK13: omp.inner.for.cond:
7035 // CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP49:![0-9]+]]
7036 // CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP49]]
7037 // CHECK13-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
7038 // CHECK13-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7039 // CHECK13: omp.inner.for.body:
7040 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP49]]
7041 // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
7042 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
7043 // CHECK13-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP49]]
7044 // CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP49]]
7045 // CHECK13-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
7046 // CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
7047 // CHECK13-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP49]]
7048 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
7049 // CHECK13: omp.body.continue:
7050 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
7051 // CHECK13: omp.inner.for.inc:
7052 // CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP49]]
7053 // CHECK13-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
7054 // CHECK13-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP49]]
7055 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP50:![0-9]+]]
7056 // CHECK13: omp.inner.for.end:
7057 // CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
7058 // CHECK13: omp.loop.exit:
7059 // CHECK13-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP4]])
7060 // CHECK13-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
7061 // CHECK13-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
7062 // CHECK13-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7063 // CHECK13: .omp.final.then:
7064 // CHECK13-NEXT: store i32 10, ptr [[I]], align 4
7065 // CHECK13-NEXT: br label [[DOTOMP_FINAL_DONE]]
7066 // CHECK13: .omp.final.done:
7067 // CHECK13-NEXT: ret void
7070 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127
7071 // CHECK13-SAME: (ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
7072 // CHECK13-NEXT: entry:
7073 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
7074 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
7075 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
7076 // CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.omp_outlined, ptr [[TMP0]])
7077 // CHECK13-NEXT: ret void
7080 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.omp_outlined
7081 // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR3]] {
7082 // CHECK13-NEXT: entry:
7083 // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
7084 // CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
7085 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
7086 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
7087 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
7088 // CHECK13-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
7089 // CHECK13-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
7090 // CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7091 // CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7092 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
7093 // CHECK13-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
7094 // CHECK13-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
7095 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
7096 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
7097 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
7098 // CHECK13-NEXT: store i32 9, ptr [[DOTOMP_COMB_UB]], align 4
7099 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
7100 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
7101 // CHECK13-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
7102 // CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
7103 // CHECK13-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
7104 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
7105 // CHECK13-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
7106 // CHECK13-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
7107 // CHECK13: cond.true:
7108 // CHECK13-NEXT: br label [[COND_END:%.*]]
7109 // CHECK13: cond.false:
7110 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
7111 // CHECK13-NEXT: br label [[COND_END]]
7112 // CHECK13: cond.end:
7113 // CHECK13-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
7114 // CHECK13-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
7115 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
7116 // CHECK13-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
7117 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
7118 // CHECK13: omp.inner.for.cond:
7119 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP52:![0-9]+]]
7120 // CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP52]]
7121 // CHECK13-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
7122 // CHECK13-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7123 // CHECK13: omp.inner.for.body:
7124 // CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP52]]
7125 // CHECK13-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
7126 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP52]]
7127 // CHECK13-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
7128 // CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP52]]
7129 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
7130 // CHECK13: omp.inner.for.inc:
7131 // CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP52]]
7132 // CHECK13-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP52]]
7133 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
7134 // CHECK13-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP52]]
7135 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP53:![0-9]+]]
7136 // CHECK13: omp.inner.for.end:
7137 // CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
7138 // CHECK13: omp.loop.exit:
7139 // CHECK13-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
7140 // CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
7141 // CHECK13-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
7142 // CHECK13-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7143 // CHECK13: .omp.final.then:
7144 // CHECK13-NEXT: store i32 10, ptr [[I]], align 4
7145 // CHECK13-NEXT: br label [[DOTOMP_FINAL_DONE]]
7146 // CHECK13: .omp.final.done:
7147 // CHECK13-NEXT: ret void
7150 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.omp_outlined.omp_outlined
7151 // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR3]] {
7152 // CHECK13-NEXT: entry:
7153 // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
7154 // CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
7155 // CHECK13-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
7156 // CHECK13-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
7157 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
7158 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
7159 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
7160 // CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
7161 // CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
7162 // CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7163 // CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7164 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
7165 // CHECK13-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
7166 // CHECK13-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
7167 // CHECK13-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
7168 // CHECK13-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
7169 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
7170 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
7171 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
7172 // CHECK13-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
7173 // CHECK13-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
7174 // CHECK13-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
7175 // CHECK13-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
7176 // CHECK13-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
7177 // CHECK13-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
7178 // CHECK13-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
7179 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
7180 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
7181 // CHECK13-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
7182 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
7183 // CHECK13-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
7184 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
7185 // CHECK13-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 9
7186 // CHECK13-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
7187 // CHECK13: cond.true:
7188 // CHECK13-NEXT: br label [[COND_END:%.*]]
7189 // CHECK13: cond.false:
7190 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
7191 // CHECK13-NEXT: br label [[COND_END]]
7192 // CHECK13: cond.end:
7193 // CHECK13-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
7194 // CHECK13-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
7195 // CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
7196 // CHECK13-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4
7197 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
7198 // CHECK13: omp.inner.for.cond:
7199 // CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP55:![0-9]+]]
7200 // CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP55]]
7201 // CHECK13-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
7202 // CHECK13-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7203 // CHECK13: omp.inner.for.body:
7204 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP55]]
7205 // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
7206 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
7207 // CHECK13-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP55]]
7208 // CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP55]]
7209 // CHECK13-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
7210 // CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
7211 // CHECK13-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP55]]
7212 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
7213 // CHECK13: omp.body.continue:
7214 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
7215 // CHECK13: omp.inner.for.inc:
7216 // CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP55]]
7217 // CHECK13-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
7218 // CHECK13-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP55]]
7219 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP56:![0-9]+]]
7220 // CHECK13: omp.inner.for.end:
7221 // CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
7222 // CHECK13: omp.loop.exit:
7223 // CHECK13-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP4]])
7224 // CHECK13-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
7225 // CHECK13-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
7226 // CHECK13-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7227 // CHECK13: .omp.final.then:
7228 // CHECK13-NEXT: store i32 10, ptr [[I]], align 4
7229 // CHECK13-NEXT: br label [[DOTOMP_FINAL_DONE]]
7230 // CHECK13: .omp.final.done:
7231 // CHECK13-NEXT: ret void
7234 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132
7235 // CHECK13-SAME: (i64 noundef [[M:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
7236 // CHECK13-NEXT: entry:
7237 // CHECK13-NEXT: [[M_ADDR:%.*]] = alloca i64, align 8
7238 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
7239 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
7240 // CHECK13-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
7241 // CHECK13-NEXT: store i64 [[M]], ptr [[M_ADDR]], align 8
7242 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
7243 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
7244 // CHECK13-NEXT: [[TMP1:%.*]] = load i32, ptr [[M_ADDR]], align 4
7245 // CHECK13-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
7246 // CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
7247 // CHECK13-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
7248 // CHECK13-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8
7249 // CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.omp_outlined, ptr [[TMP0]], i64 [[TMP3]])
7250 // CHECK13-NEXT: ret void
7253 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.omp_outlined
7254 // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
7255 // CHECK13-NEXT: entry:
7256 // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
7257 // CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
7258 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
7259 // CHECK13-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
7260 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
7261 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
7262 // CHECK13-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
7263 // CHECK13-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
7264 // CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7265 // CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7266 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
7267 // CHECK13-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
7268 // CHECK13-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
7269 // CHECK13-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
7270 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
7271 // CHECK13-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
7272 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
7273 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
7274 // CHECK13-NEXT: store i32 9, ptr [[DOTOMP_COMB_UB]], align 4
7275 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
7276 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
7277 // CHECK13-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
7278 // CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
7279 // CHECK13-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
7280 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
7281 // CHECK13-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
7282 // CHECK13-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
7283 // CHECK13: cond.true:
7284 // CHECK13-NEXT: br label [[COND_END:%.*]]
7285 // CHECK13: cond.false:
7286 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
7287 // CHECK13-NEXT: br label [[COND_END]]
7288 // CHECK13: cond.end:
7289 // CHECK13-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
7290 // CHECK13-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
7291 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
7292 // CHECK13-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
7293 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
7294 // CHECK13: omp.inner.for.cond:
7295 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP58:![0-9]+]]
7296 // CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP58]]
7297 // CHECK13-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
7298 // CHECK13-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7299 // CHECK13: omp.inner.for.body:
7300 // CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP58]]
7301 // CHECK13-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
7302 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP58]]
7303 // CHECK13-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
7304 // CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group [[ACC_GRP58]]
7305 // CHECK13-NEXT: store i32 [[TMP12]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group [[ACC_GRP58]]
7306 // CHECK13-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group [[ACC_GRP58]]
7307 // CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]], ptr [[TMP0]], i64 [[TMP13]]), !llvm.access.group [[ACC_GRP58]]
7308 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
7309 // CHECK13: omp.inner.for.inc:
7310 // CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP58]]
7311 // CHECK13-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP58]]
7312 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
7313 // CHECK13-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP58]]
7314 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP59:![0-9]+]]
7315 // CHECK13: omp.inner.for.end:
7316 // CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
7317 // CHECK13: omp.loop.exit:
7318 // CHECK13-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
7319 // CHECK13-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
7320 // CHECK13-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
7321 // CHECK13-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7322 // CHECK13: .omp.final.then:
7323 // CHECK13-NEXT: store i32 10, ptr [[I]], align 4
7324 // CHECK13-NEXT: br label [[DOTOMP_FINAL_DONE]]
7325 // CHECK13: .omp.final.done:
7326 // CHECK13-NEXT: ret void
7329 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.omp_outlined.omp_outlined
7330 // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
7331 // CHECK13-NEXT: entry:
7332 // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
7333 // CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
7334 // CHECK13-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
7335 // CHECK13-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
7336 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
7337 // CHECK13-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
7338 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
7339 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
7340 // CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
7341 // CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
7342 // CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7343 // CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7344 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
7345 // CHECK13-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
7346 // CHECK13-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
7347 // CHECK13-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
7348 // CHECK13-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
7349 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
7350 // CHECK13-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
7351 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
7352 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
7353 // CHECK13-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
7354 // CHECK13-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
7355 // CHECK13-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
7356 // CHECK13-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
7357 // CHECK13-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
7358 // CHECK13-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
7359 // CHECK13-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
7360 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
7361 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
7362 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
7363 // CHECK13-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
7364 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4
7365 // CHECK13-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP5]], i32 33, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[TMP3]])
7366 // CHECK13-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
7367 // CHECK13: omp.dispatch.cond:
7368 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
7369 // CHECK13-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
7370 // CHECK13-NEXT: [[CONV2:%.*]] = trunc i64 [[TMP7]] to i32
7371 // CHECK13-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], [[CONV2]]
7372 // CHECK13-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
7373 // CHECK13: cond.true:
7374 // CHECK13-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
7375 // CHECK13-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP8]] to i32
7376 // CHECK13-NEXT: br label [[COND_END:%.*]]
7377 // CHECK13: cond.false:
7378 // CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
7379 // CHECK13-NEXT: br label [[COND_END]]
7380 // CHECK13: cond.end:
7381 // CHECK13-NEXT: [[COND:%.*]] = phi i32 [ [[CONV3]], [[COND_TRUE]] ], [ [[TMP9]], [[COND_FALSE]] ]
7382 // CHECK13-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
7383 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
7384 // CHECK13-NEXT: store i32 [[TMP10]], ptr [[DOTOMP_IV]], align 4
7385 // CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
7386 // CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
7387 // CHECK13-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]]
7388 // CHECK13-NEXT: br i1 [[CMP4]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
7389 // CHECK13: omp.dispatch.body:
7390 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
7391 // CHECK13: omp.inner.for.cond:
7392 // CHECK13-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP61:![0-9]+]]
7393 // CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP61]]
7394 // CHECK13-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
7395 // CHECK13-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7396 // CHECK13: omp.inner.for.body:
7397 // CHECK13-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP61]]
7398 // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1
7399 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
7400 // CHECK13-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP61]]
7401 // CHECK13-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP61]]
7402 // CHECK13-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP16]] to i64
7403 // CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
7404 // CHECK13-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP61]]
7405 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
7406 // CHECK13: omp.body.continue:
7407 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
7408 // CHECK13: omp.inner.for.inc:
7409 // CHECK13-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP61]]
7410 // CHECK13-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP17]], 1
7411 // CHECK13-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP61]]
7412 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP62:![0-9]+]]
7413 // CHECK13: omp.inner.for.end:
7414 // CHECK13-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
7415 // CHECK13: omp.dispatch.inc:
7416 // CHECK13-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
7417 // CHECK13-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
7418 // CHECK13-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
7419 // CHECK13-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_LB]], align 4
7420 // CHECK13-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
7421 // CHECK13-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
7422 // CHECK13-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
7423 // CHECK13-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_UB]], align 4
7424 // CHECK13-NEXT: br label [[OMP_DISPATCH_COND]]
7425 // CHECK13: omp.dispatch.end:
7426 // CHECK13-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP5]])
7427 // CHECK13-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
7428 // CHECK13-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
7429 // CHECK13-NEXT: br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7430 // CHECK13: .omp.final.then:
7431 // CHECK13-NEXT: store i32 10, ptr [[I]], align 4
7432 // CHECK13-NEXT: br label [[DOTOMP_FINAL_DONE]]
7433 // CHECK13: .omp.final.done:
7434 // CHECK13-NEXT: ret void
7437 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137
7438 // CHECK13-SAME: (ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
7439 // CHECK13-NEXT: entry:
7440 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
7441 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
7442 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
7443 // CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.omp_outlined, ptr [[TMP0]])
7444 // CHECK13-NEXT: ret void
7447 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.omp_outlined
7448 // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR3]] {
7449 // CHECK13-NEXT: entry:
7450 // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
7451 // CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
7452 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
7453 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
7454 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
7455 // CHECK13-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
7456 // CHECK13-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
7457 // CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7458 // CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7459 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
7460 // CHECK13-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
7461 // CHECK13-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
7462 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
7463 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
7464 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
7465 // CHECK13-NEXT: store i32 9, ptr [[DOTOMP_COMB_UB]], align 4
7466 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
7467 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
7468 // CHECK13-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
7469 // CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
7470 // CHECK13-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
7471 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
7472 // CHECK13-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
7473 // CHECK13-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
7474 // CHECK13: cond.true:
7475 // CHECK13-NEXT: br label [[COND_END:%.*]]
7476 // CHECK13: cond.false:
7477 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
7478 // CHECK13-NEXT: br label [[COND_END]]
7479 // CHECK13: cond.end:
7480 // CHECK13-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
7481 // CHECK13-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
7482 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
7483 // CHECK13-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
7484 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
7485 // CHECK13: omp.inner.for.cond:
7486 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP64:![0-9]+]]
7487 // CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP64]]
7488 // CHECK13-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
7489 // CHECK13-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7490 // CHECK13: omp.inner.for.body:
7491 // CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP64]]
7492 // CHECK13-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
7493 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP64]]
7494 // CHECK13-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
7495 // CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP64]]
7496 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
7497 // CHECK13: omp.inner.for.inc:
7498 // CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP64]]
7499 // CHECK13-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP64]]
7500 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
7501 // CHECK13-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP64]]
7502 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP65:![0-9]+]]
7503 // CHECK13: omp.inner.for.end:
7504 // CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
7505 // CHECK13: omp.loop.exit:
7506 // CHECK13-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
7507 // CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
7508 // CHECK13-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
7509 // CHECK13-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7510 // CHECK13: .omp.final.then:
7511 // CHECK13-NEXT: store i32 10, ptr [[I]], align 4
7512 // CHECK13-NEXT: br label [[DOTOMP_FINAL_DONE]]
7513 // CHECK13: .omp.final.done:
7514 // CHECK13-NEXT: ret void
7517 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.omp_outlined.omp_outlined
7518 // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR3]] {
7519 // CHECK13-NEXT: entry:
7520 // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
7521 // CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
7522 // CHECK13-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
7523 // CHECK13-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
7524 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
7525 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
7526 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
7527 // CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
7528 // CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
7529 // CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7530 // CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7531 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
7532 // CHECK13-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
7533 // CHECK13-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
7534 // CHECK13-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
7535 // CHECK13-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
7536 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
7537 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
7538 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
7539 // CHECK13-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
7540 // CHECK13-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
7541 // CHECK13-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
7542 // CHECK13-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
7543 // CHECK13-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
7544 // CHECK13-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
7545 // CHECK13-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
7546 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
7547 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
7548 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
7549 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
7550 // CHECK13-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
7551 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
7552 // CHECK13-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB3]], i32 [[TMP6]], i32 35, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 1)
7553 // CHECK13-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
7554 // CHECK13: omp.dispatch.cond:
7555 // CHECK13-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB3]], i32 [[TMP6]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
7556 // CHECK13-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
7557 // CHECK13-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
7558 // CHECK13: omp.dispatch.body:
7559 // CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
7560 // CHECK13-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4
7561 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
7562 // CHECK13: omp.inner.for.cond:
7563 // CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP67:![0-9]+]]
7564 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP67]]
7565 // CHECK13-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
7566 // CHECK13-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7567 // CHECK13: omp.inner.for.body:
7568 // CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP67]]
7569 // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
7570 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
7571 // CHECK13-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP67]]
7572 // CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP67]]
7573 // CHECK13-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP12]] to i64
7574 // CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
7575 // CHECK13-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP67]]
7576 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
7577 // CHECK13: omp.body.continue:
7578 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
7579 // CHECK13: omp.inner.for.inc:
7580 // CHECK13-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP67]]
7581 // CHECK13-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP13]], 1
7582 // CHECK13-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP67]]
7583 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP68:![0-9]+]]
7584 // CHECK13: omp.inner.for.end:
7585 // CHECK13-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
7586 // CHECK13: omp.dispatch.inc:
7587 // CHECK13-NEXT: br label [[OMP_DISPATCH_COND]]
7588 // CHECK13: omp.dispatch.end:
7589 // CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
7590 // CHECK13-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
7591 // CHECK13-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7592 // CHECK13: .omp.final.then:
7593 // CHECK13-NEXT: store i32 10, ptr [[I]], align 4
7594 // CHECK13-NEXT: br label [[DOTOMP_FINAL_DONE]]
7595 // CHECK13: .omp.final.done:
7596 // CHECK13-NEXT: ret void
7599 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142
7600 // CHECK13-SAME: (i64 noundef [[M:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
7601 // CHECK13-NEXT: entry:
7602 // CHECK13-NEXT: [[M_ADDR:%.*]] = alloca i64, align 8
7603 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
7604 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
7605 // CHECK13-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
7606 // CHECK13-NEXT: store i64 [[M]], ptr [[M_ADDR]], align 8
7607 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
7608 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
7609 // CHECK13-NEXT: [[TMP1:%.*]] = load i32, ptr [[M_ADDR]], align 4
7610 // CHECK13-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
7611 // CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
7612 // CHECK13-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
7613 // CHECK13-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8
7614 // CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.omp_outlined, ptr [[TMP0]], i64 [[TMP3]])
7615 // CHECK13-NEXT: ret void
7618 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.omp_outlined
7619 // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
7620 // CHECK13-NEXT: entry:
7621 // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
7622 // CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
7623 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
7624 // CHECK13-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
7625 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
7626 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
7627 // CHECK13-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
7628 // CHECK13-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
7629 // CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7630 // CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7631 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
7632 // CHECK13-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
7633 // CHECK13-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
7634 // CHECK13-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
7635 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
7636 // CHECK13-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
7637 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
7638 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
7639 // CHECK13-NEXT: store i32 9, ptr [[DOTOMP_COMB_UB]], align 4
7640 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
7641 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
7642 // CHECK13-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
7643 // CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
7644 // CHECK13-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
7645 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
7646 // CHECK13-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
7647 // CHECK13-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
7648 // CHECK13: cond.true:
7649 // CHECK13-NEXT: br label [[COND_END:%.*]]
7650 // CHECK13: cond.false:
7651 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
7652 // CHECK13-NEXT: br label [[COND_END]]
7653 // CHECK13: cond.end:
7654 // CHECK13-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
7655 // CHECK13-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
7656 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
7657 // CHECK13-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
7658 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
7659 // CHECK13: omp.inner.for.cond:
7660 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP70:![0-9]+]]
7661 // CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP70]]
7662 // CHECK13-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
7663 // CHECK13-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7664 // CHECK13: omp.inner.for.body:
7665 // CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP70]]
7666 // CHECK13-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
7667 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP70]]
7668 // CHECK13-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
7669 // CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group [[ACC_GRP70]]
7670 // CHECK13-NEXT: store i32 [[TMP12]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group [[ACC_GRP70]]
7671 // CHECK13-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group [[ACC_GRP70]]
7672 // CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]], ptr [[TMP0]], i64 [[TMP13]]), !llvm.access.group [[ACC_GRP70]]
7673 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
7674 // CHECK13: omp.inner.for.inc:
7675 // CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP70]]
7676 // CHECK13-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP70]]
7677 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
7678 // CHECK13-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP70]]
7679 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP71:![0-9]+]]
7680 // CHECK13: omp.inner.for.end:
7681 // CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
7682 // CHECK13: omp.loop.exit:
7683 // CHECK13-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
7684 // CHECK13-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
7685 // CHECK13-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
7686 // CHECK13-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7687 // CHECK13: .omp.final.then:
7688 // CHECK13-NEXT: store i32 10, ptr [[I]], align 4
7689 // CHECK13-NEXT: br label [[DOTOMP_FINAL_DONE]]
7690 // CHECK13: .omp.final.done:
7691 // CHECK13-NEXT: ret void
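// The nested parallel-for outlined function checked below consumes the
// forwarded bounds and captured value: __kmpc_dispatch_init_4 is called with
// sched kind 35 (dynamic, chunked) and the captured value as the chunk
// argument, the loop is driven by __kmpc_dispatch_next_4, and each iteration
// stores 0 into a[i]; this is consistent with a schedule(dynamic, <chunk>)
// clause on the source loop.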
7694 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.omp_outlined.omp_outlined
7695 // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
7696 // CHECK13-NEXT: entry:
7697 // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
7698 // CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
7699 // CHECK13-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
7700 // CHECK13-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
7701 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
7702 // CHECK13-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
7703 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
7704 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
7705 // CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
7706 // CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
7707 // CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
7708 // CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
7709 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
7710 // CHECK13-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
7711 // CHECK13-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
7712 // CHECK13-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
7713 // CHECK13-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
7714 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
7715 // CHECK13-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
7716 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
7717 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
7718 // CHECK13-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
7719 // CHECK13-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
7720 // CHECK13-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
7721 // CHECK13-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
7722 // CHECK13-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
7723 // CHECK13-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
7724 // CHECK13-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
7725 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
7726 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
7727 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
7728 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
7729 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
7730 // CHECK13-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
7731 // CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 4
7732 // CHECK13-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB3]], i32 [[TMP7]], i32 35, i32 [[TMP4]], i32 [[TMP5]], i32 1, i32 [[TMP3]])
7733 // CHECK13-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
7734 // CHECK13: omp.dispatch.cond:
7735 // CHECK13-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB3]], i32 [[TMP7]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
7736 // CHECK13-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP8]], 0
7737 // CHECK13-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
7738 // CHECK13: omp.dispatch.body:
7739 // CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
7740 // CHECK13-NEXT: store i32 [[TMP9]], ptr [[DOTOMP_IV]], align 4
7741 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
7742 // CHECK13: omp.inner.for.cond:
7743 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP73:![0-9]+]]
7744 // CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP73]]
7745 // CHECK13-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
7746 // CHECK13-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
7747 // CHECK13: omp.inner.for.body:
7748 // CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP73]]
7749 // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
7750 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
7751 // CHECK13-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP73]]
7752 // CHECK13-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP73]]
7753 // CHECK13-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64
7754 // CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
7755 // CHECK13-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP73]]
7756 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
7757 // CHECK13: omp.body.continue:
7758 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
7759 // CHECK13: omp.inner.for.inc:
7760 // CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP73]]
7761 // CHECK13-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP14]], 1
7762 // CHECK13-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP73]]
7763 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP74:![0-9]+]]
7764 // CHECK13: omp.inner.for.end:
7765 // CHECK13-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
7766 // CHECK13: omp.dispatch.inc:
7767 // CHECK13-NEXT: br label [[OMP_DISPATCH_COND]]
7768 // CHECK13: omp.dispatch.end:
7769 // CHECK13-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
7770 // CHECK13-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0
7771 // CHECK13-NEXT: br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
7772 // CHECK13: .omp.final.then:
7773 // CHECK13-NEXT: store i32 10, ptr [[I]], align 4
7774 // CHECK13-NEXT: br label [[DOTOMP_FINAL_DONE]]
7775 // CHECK13: .omp.final.done:
7776 // CHECK13-NEXT: ret void
7779 // CHECK13-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
7780 // CHECK13-SAME: () #[[ATTR7:[0-9]+]] {
7781 // CHECK13-NEXT: entry:
7782 // CHECK13-NEXT: call void @__tgt_register_requires(i64 1)
7783 // CHECK13-NEXT: ret void
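// The CHECK14 assertions that follow cover @main for a 64-bit configuration
// (align-8 pointers, i64 casts). Each of the five target regions (_l154,
// _l159, _l164, _l169, _l174) gets its own offload base-pointer/pointer/
// mapper/size arrays plus a __tgt_kernel_arguments struct; the trip count
// computed from n is zero-extended and stored into field 8 of that struct
// before the __tgt_target_kernel call, with a direct host fallback call in
// the omp_offload.failed block.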
7786 // CHECK14-LABEL: define {{[^@]+}}@main
7787 // CHECK14-SAME: (i32 noundef signext [[ARGC:%.*]], ptr noundef [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] {
7788 // CHECK14-NEXT: entry:
7789 // CHECK14-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
7790 // CHECK14-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
7791 // CHECK14-NEXT: [[ARGV_ADDR:%.*]] = alloca ptr, align 8
7792 // CHECK14-NEXT: [[N:%.*]] = alloca i32, align 4
7793 // CHECK14-NEXT: [[SAVED_STACK:%.*]] = alloca ptr, align 8
7794 // CHECK14-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
7795 // CHECK14-NEXT: [[M:%.*]] = alloca i32, align 4
7796 // CHECK14-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
7797 // CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x ptr], align 8
7798 // CHECK14-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x ptr], align 8
7799 // CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x ptr], align 8
7800 // CHECK14-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8
7801 // CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
7802 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
7803 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
7804 // CHECK14-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
7805 // CHECK14-NEXT: [[N_CASTED3:%.*]] = alloca i64, align 8
7806 // CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [3 x ptr], align 8
7807 // CHECK14-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [3 x ptr], align 8
7808 // CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [3 x ptr], align 8
7809 // CHECK14-NEXT: [[DOTOFFLOAD_SIZES7:%.*]] = alloca [3 x i64], align 8
7810 // CHECK14-NEXT: [[_TMP8:%.*]] = alloca i32, align 4
7811 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4
7812 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4
7813 // CHECK14-NEXT: [[KERNEL_ARGS15:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
7814 // CHECK14-NEXT: [[M_CASTED:%.*]] = alloca i64, align 8
7815 // CHECK14-NEXT: [[N_CASTED18:%.*]] = alloca i64, align 8
7816 // CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS19:%.*]] = alloca [4 x ptr], align 8
7817 // CHECK14-NEXT: [[DOTOFFLOAD_PTRS20:%.*]] = alloca [4 x ptr], align 8
7818 // CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS21:%.*]] = alloca [4 x ptr], align 8
7819 // CHECK14-NEXT: [[DOTOFFLOAD_SIZES22:%.*]] = alloca [4 x i64], align 8
7820 // CHECK14-NEXT: [[_TMP23:%.*]] = alloca i32, align 4
7821 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4
7822 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4
7823 // CHECK14-NEXT: [[KERNEL_ARGS30:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
7824 // CHECK14-NEXT: [[N_CASTED33:%.*]] = alloca i64, align 8
7825 // CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS34:%.*]] = alloca [3 x ptr], align 8
7826 // CHECK14-NEXT: [[DOTOFFLOAD_PTRS35:%.*]] = alloca [3 x ptr], align 8
7827 // CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS36:%.*]] = alloca [3 x ptr], align 8
7828 // CHECK14-NEXT: [[DOTOFFLOAD_SIZES37:%.*]] = alloca [3 x i64], align 8
7829 // CHECK14-NEXT: [[_TMP38:%.*]] = alloca i32, align 4
7830 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4
7831 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_40:%.*]] = alloca i32, align 4
7832 // CHECK14-NEXT: [[KERNEL_ARGS45:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
7833 // CHECK14-NEXT: [[M_CASTED48:%.*]] = alloca i64, align 8
7834 // CHECK14-NEXT: [[N_CASTED49:%.*]] = alloca i64, align 8
7835 // CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS50:%.*]] = alloca [4 x ptr], align 8
7836 // CHECK14-NEXT: [[DOTOFFLOAD_PTRS51:%.*]] = alloca [4 x ptr], align 8
7837 // CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS52:%.*]] = alloca [4 x ptr], align 8
7838 // CHECK14-NEXT: [[DOTOFFLOAD_SIZES53:%.*]] = alloca [4 x i64], align 8
7839 // CHECK14-NEXT: [[_TMP54:%.*]] = alloca i32, align 4
7840 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_55:%.*]] = alloca i32, align 4
7841 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_56:%.*]] = alloca i32, align 4
7842 // CHECK14-NEXT: [[KERNEL_ARGS61:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
7843 // CHECK14-NEXT: store i32 0, ptr [[RETVAL]], align 4
7844 // CHECK14-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4
7845 // CHECK14-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8
7846 // CHECK14-NEXT: store i32 100, ptr [[N]], align 4
7847 // CHECK14-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4
7848 // CHECK14-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
7849 // CHECK14-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0()
7850 // CHECK14-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8
7851 // CHECK14-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4
7852 // CHECK14-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8
7853 // CHECK14-NEXT: store i32 10, ptr [[M]], align 4
7854 // CHECK14-NEXT: [[TMP3:%.*]] = load i32, ptr [[N]], align 4
7855 // CHECK14-NEXT: store i32 [[TMP3]], ptr [[N_CASTED]], align 4
7856 // CHECK14-NEXT: [[TMP4:%.*]] = load i64, ptr [[N_CASTED]], align 8
7857 // CHECK14-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP1]], 4
7858 // CHECK14-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[DOTOFFLOAD_SIZES]], ptr align 8 @.offload_sizes, i64 24, i1 false)
7859 // CHECK14-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
7860 // CHECK14-NEXT: store i64 [[TMP4]], ptr [[TMP6]], align 8
7861 // CHECK14-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
7862 // CHECK14-NEXT: store i64 [[TMP4]], ptr [[TMP7]], align 8
7863 // CHECK14-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
7864 // CHECK14-NEXT: store ptr null, ptr [[TMP8]], align 8
7865 // CHECK14-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
7866 // CHECK14-NEXT: store i64 [[TMP1]], ptr [[TMP9]], align 8
7867 // CHECK14-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1
7868 // CHECK14-NEXT: store i64 [[TMP1]], ptr [[TMP10]], align 8
7869 // CHECK14-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
7870 // CHECK14-NEXT: store ptr null, ptr [[TMP11]], align 8
7871 // CHECK14-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
7872 // CHECK14-NEXT: store ptr [[VLA]], ptr [[TMP12]], align 8
7873 // CHECK14-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2
7874 // CHECK14-NEXT: store ptr [[VLA]], ptr [[TMP13]], align 8
7875 // CHECK14-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 2
7876 // CHECK14-NEXT: store i64 [[TMP5]], ptr [[TMP14]], align 8
7877 // CHECK14-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
7878 // CHECK14-NEXT: store ptr null, ptr [[TMP15]], align 8
7879 // CHECK14-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
7880 // CHECK14-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
7881 // CHECK14-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 0
7882 // CHECK14-NEXT: [[TMP19:%.*]] = load i32, ptr [[N]], align 4
7883 // CHECK14-NEXT: store i32 [[TMP19]], ptr [[DOTCAPTURE_EXPR_]], align 4
7884 // CHECK14-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
7885 // CHECK14-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP20]], 0
7886 // CHECK14-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
7887 // CHECK14-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
7888 // CHECK14-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
7889 // CHECK14-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
7890 // CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP21]], 1
7891 // CHECK14-NEXT: [[TMP22:%.*]] = zext i32 [[ADD]] to i64
7892 // CHECK14-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
7893 // CHECK14-NEXT: store i32 2, ptr [[TMP23]], align 4
7894 // CHECK14-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
7895 // CHECK14-NEXT: store i32 3, ptr [[TMP24]], align 4
7896 // CHECK14-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
7897 // CHECK14-NEXT: store ptr [[TMP16]], ptr [[TMP25]], align 8
7898 // CHECK14-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
7899 // CHECK14-NEXT: store ptr [[TMP17]], ptr [[TMP26]], align 8
7900 // CHECK14-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
7901 // CHECK14-NEXT: store ptr [[TMP18]], ptr [[TMP27]], align 8
7902 // CHECK14-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
7903 // CHECK14-NEXT: store ptr @.offload_maptypes, ptr [[TMP28]], align 8
7904 // CHECK14-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
7905 // CHECK14-NEXT: store ptr null, ptr [[TMP29]], align 8
7906 // CHECK14-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
7907 // CHECK14-NEXT: store ptr null, ptr [[TMP30]], align 8
7908 // CHECK14-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
7909 // CHECK14-NEXT: store i64 [[TMP22]], ptr [[TMP31]], align 8
7910 // CHECK14-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
7911 // CHECK14-NEXT: store i64 0, ptr [[TMP32]], align 8
7912 // CHECK14-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
7913 // CHECK14-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP33]], align 4
7914 // CHECK14-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
7915 // CHECK14-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP34]], align 4
7916 // CHECK14-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
7917 // CHECK14-NEXT: store i32 0, ptr [[TMP35]], align 4
7918 // CHECK14-NEXT: [[TMP36:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.region_id, ptr [[KERNEL_ARGS]])
7919 // CHECK14-NEXT: [[TMP37:%.*]] = icmp ne i32 [[TMP36]], 0
7920 // CHECK14-NEXT: br i1 [[TMP37]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
7921 // CHECK14: omp_offload.failed:
7922 // CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154(i64 [[TMP4]], i64 [[TMP1]], ptr [[VLA]]) #[[ATTR4:[0-9]+]]
7923 // CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT]]
7924 // CHECK14: omp_offload.cont:
7925 // CHECK14-NEXT: [[TMP38:%.*]] = load i32, ptr [[N]], align 4
7926 // CHECK14-NEXT: store i32 [[TMP38]], ptr [[N_CASTED3]], align 4
7927 // CHECK14-NEXT: [[TMP39:%.*]] = load i64, ptr [[N_CASTED3]], align 8
7928 // CHECK14-NEXT: [[TMP40:%.*]] = mul nuw i64 [[TMP1]], 4
7929 // CHECK14-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[DOTOFFLOAD_SIZES7]], ptr align 8 @.offload_sizes.1, i64 24, i1 false)
7930 // CHECK14-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
7931 // CHECK14-NEXT: store i64 [[TMP39]], ptr [[TMP41]], align 8
7932 // CHECK14-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
7933 // CHECK14-NEXT: store i64 [[TMP39]], ptr [[TMP42]], align 8
7934 // CHECK14-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS6]], i64 0, i64 0
7935 // CHECK14-NEXT: store ptr null, ptr [[TMP43]], align 8
7936 // CHECK14-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1
7937 // CHECK14-NEXT: store i64 [[TMP1]], ptr [[TMP44]], align 8
7938 // CHECK14-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS5]], i32 0, i32 1
7939 // CHECK14-NEXT: store i64 [[TMP1]], ptr [[TMP45]], align 8
7940 // CHECK14-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS6]], i64 0, i64 1
7941 // CHECK14-NEXT: store ptr null, ptr [[TMP46]], align 8
7942 // CHECK14-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2
7943 // CHECK14-NEXT: store ptr [[VLA]], ptr [[TMP47]], align 8
7944 // CHECK14-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS5]], i32 0, i32 2
7945 // CHECK14-NEXT: store ptr [[VLA]], ptr [[TMP48]], align 8
7946 // CHECK14-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES7]], i32 0, i32 2
7947 // CHECK14-NEXT: store i64 [[TMP40]], ptr [[TMP49]], align 8
7948 // CHECK14-NEXT: [[TMP50:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS6]], i64 0, i64 2
7949 // CHECK14-NEXT: store ptr null, ptr [[TMP50]], align 8
7950 // CHECK14-NEXT: [[TMP51:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
7951 // CHECK14-NEXT: [[TMP52:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
7952 // CHECK14-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES7]], i32 0, i32 0
7953 // CHECK14-NEXT: [[TMP54:%.*]] = load i32, ptr [[N]], align 4
7954 // CHECK14-NEXT: store i32 [[TMP54]], ptr [[DOTCAPTURE_EXPR_9]], align 4
7955 // CHECK14-NEXT: [[TMP55:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_9]], align 4
7956 // CHECK14-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP55]], 0
7957 // CHECK14-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1
7958 // CHECK14-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1
7959 // CHECK14-NEXT: store i32 [[SUB13]], ptr [[DOTCAPTURE_EXPR_10]], align 4
7960 // CHECK14-NEXT: [[TMP56:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_10]], align 4
7961 // CHECK14-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP56]], 1
7962 // CHECK14-NEXT: [[TMP57:%.*]] = zext i32 [[ADD14]] to i64
7963 // CHECK14-NEXT: [[TMP58:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 0
7964 // CHECK14-NEXT: store i32 2, ptr [[TMP58]], align 4
7965 // CHECK14-NEXT: [[TMP59:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 1
7966 // CHECK14-NEXT: store i32 3, ptr [[TMP59]], align 4
7967 // CHECK14-NEXT: [[TMP60:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 2
7968 // CHECK14-NEXT: store ptr [[TMP51]], ptr [[TMP60]], align 8
7969 // CHECK14-NEXT: [[TMP61:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 3
7970 // CHECK14-NEXT: store ptr [[TMP52]], ptr [[TMP61]], align 8
7971 // CHECK14-NEXT: [[TMP62:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 4
7972 // CHECK14-NEXT: store ptr [[TMP53]], ptr [[TMP62]], align 8
7973 // CHECK14-NEXT: [[TMP63:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 5
7974 // CHECK14-NEXT: store ptr @.offload_maptypes.2, ptr [[TMP63]], align 8
7975 // CHECK14-NEXT: [[TMP64:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 6
7976 // CHECK14-NEXT: store ptr null, ptr [[TMP64]], align 8
7977 // CHECK14-NEXT: [[TMP65:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 7
7978 // CHECK14-NEXT: store ptr null, ptr [[TMP65]], align 8
7979 // CHECK14-NEXT: [[TMP66:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 8
7980 // CHECK14-NEXT: store i64 [[TMP57]], ptr [[TMP66]], align 8
7981 // CHECK14-NEXT: [[TMP67:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 9
7982 // CHECK14-NEXT: store i64 0, ptr [[TMP67]], align 8
7983 // CHECK14-NEXT: [[TMP68:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 10
7984 // CHECK14-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP68]], align 4
7985 // CHECK14-NEXT: [[TMP69:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 11
7986 // CHECK14-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP69]], align 4
7987 // CHECK14-NEXT: [[TMP70:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 12
7988 // CHECK14-NEXT: store i32 0, ptr [[TMP70]], align 4
7989 // CHECK14-NEXT: [[TMP71:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.region_id, ptr [[KERNEL_ARGS15]])
7990 // CHECK14-NEXT: [[TMP72:%.*]] = icmp ne i32 [[TMP71]], 0
7991 // CHECK14-NEXT: br i1 [[TMP72]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]]
7992 // CHECK14: omp_offload.failed16:
7993 // CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159(i64 [[TMP39]], i64 [[TMP1]], ptr [[VLA]]) #[[ATTR4]]
7994 // CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT17]]
7995 // CHECK14: omp_offload.cont17:
7996 // CHECK14-NEXT: [[TMP73:%.*]] = load i32, ptr [[M]], align 4
7997 // CHECK14-NEXT: store i32 [[TMP73]], ptr [[M_CASTED]], align 4
7998 // CHECK14-NEXT: [[TMP74:%.*]] = load i64, ptr [[M_CASTED]], align 8
7999 // CHECK14-NEXT: [[TMP75:%.*]] = load i32, ptr [[N]], align 4
8000 // CHECK14-NEXT: store i32 [[TMP75]], ptr [[N_CASTED18]], align 4
8001 // CHECK14-NEXT: [[TMP76:%.*]] = load i64, ptr [[N_CASTED18]], align 8
8002 // CHECK14-NEXT: [[TMP77:%.*]] = mul nuw i64 [[TMP1]], 4
8003 // CHECK14-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[DOTOFFLOAD_SIZES22]], ptr align 8 @.offload_sizes.3, i64 32, i1 false)
8004 // CHECK14-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0
8005 // CHECK14-NEXT: store i64 [[TMP74]], ptr [[TMP78]], align 8
8006 // CHECK14-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 0
8007 // CHECK14-NEXT: store i64 [[TMP74]], ptr [[TMP79]], align 8
8008 // CHECK14-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS21]], i64 0, i64 0
8009 // CHECK14-NEXT: store ptr null, ptr [[TMP80]], align 8
8010 // CHECK14-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 1
8011 // CHECK14-NEXT: store i64 [[TMP76]], ptr [[TMP81]], align 8
8012 // CHECK14-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 1
8013 // CHECK14-NEXT: store i64 [[TMP76]], ptr [[TMP82]], align 8
8014 // CHECK14-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS21]], i64 0, i64 1
8015 // CHECK14-NEXT: store ptr null, ptr [[TMP83]], align 8
8016 // CHECK14-NEXT: [[TMP84:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 2
8017 // CHECK14-NEXT: store i64 [[TMP1]], ptr [[TMP84]], align 8
8018 // CHECK14-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 2
8019 // CHECK14-NEXT: store i64 [[TMP1]], ptr [[TMP85]], align 8
8020 // CHECK14-NEXT: [[TMP86:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS21]], i64 0, i64 2
8021 // CHECK14-NEXT: store ptr null, ptr [[TMP86]], align 8
8022 // CHECK14-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 3
8023 // CHECK14-NEXT: store ptr [[VLA]], ptr [[TMP87]], align 8
8024 // CHECK14-NEXT: [[TMP88:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 3
8025 // CHECK14-NEXT: store ptr [[VLA]], ptr [[TMP88]], align 8
8026 // CHECK14-NEXT: [[TMP89:%.*]] = getelementptr inbounds [4 x i64], ptr [[DOTOFFLOAD_SIZES22]], i32 0, i32 3
8027 // CHECK14-NEXT: store i64 [[TMP77]], ptr [[TMP89]], align 8
8028 // CHECK14-NEXT: [[TMP90:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS21]], i64 0, i64 3
8029 // CHECK14-NEXT: store ptr null, ptr [[TMP90]], align 8
8030 // CHECK14-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0
8031 // CHECK14-NEXT: [[TMP92:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 0
8032 // CHECK14-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i64], ptr [[DOTOFFLOAD_SIZES22]], i32 0, i32 0
8033 // CHECK14-NEXT: [[TMP94:%.*]] = load i32, ptr [[N]], align 4
8034 // CHECK14-NEXT: store i32 [[TMP94]], ptr [[DOTCAPTURE_EXPR_24]], align 4
8035 // CHECK14-NEXT: [[TMP95:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_24]], align 4
8036 // CHECK14-NEXT: [[SUB26:%.*]] = sub nsw i32 [[TMP95]], 0
8037 // CHECK14-NEXT: [[DIV27:%.*]] = sdiv i32 [[SUB26]], 1
8038 // CHECK14-NEXT: [[SUB28:%.*]] = sub nsw i32 [[DIV27]], 1
8039 // CHECK14-NEXT: store i32 [[SUB28]], ptr [[DOTCAPTURE_EXPR_25]], align 4
8040 // CHECK14-NEXT: [[TMP96:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_25]], align 4
8041 // CHECK14-NEXT: [[ADD29:%.*]] = add nsw i32 [[TMP96]], 1
8042 // CHECK14-NEXT: [[TMP97:%.*]] = zext i32 [[ADD29]] to i64
8043 // CHECK14-NEXT: [[TMP98:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 0
8044 // CHECK14-NEXT: store i32 2, ptr [[TMP98]], align 4
8045 // CHECK14-NEXT: [[TMP99:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 1
8046 // CHECK14-NEXT: store i32 4, ptr [[TMP99]], align 4
8047 // CHECK14-NEXT: [[TMP100:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 2
8048 // CHECK14-NEXT: store ptr [[TMP91]], ptr [[TMP100]], align 8
8049 // CHECK14-NEXT: [[TMP101:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 3
8050 // CHECK14-NEXT: store ptr [[TMP92]], ptr [[TMP101]], align 8
8051 // CHECK14-NEXT: [[TMP102:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 4
8052 // CHECK14-NEXT: store ptr [[TMP93]], ptr [[TMP102]], align 8
8053 // CHECK14-NEXT: [[TMP103:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 5
8054 // CHECK14-NEXT: store ptr @.offload_maptypes.4, ptr [[TMP103]], align 8
8055 // CHECK14-NEXT: [[TMP104:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 6
8056 // CHECK14-NEXT: store ptr null, ptr [[TMP104]], align 8
8057 // CHECK14-NEXT: [[TMP105:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 7
8058 // CHECK14-NEXT: store ptr null, ptr [[TMP105]], align 8
8059 // CHECK14-NEXT: [[TMP106:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 8
8060 // CHECK14-NEXT: store i64 [[TMP97]], ptr [[TMP106]], align 8
8061 // CHECK14-NEXT: [[TMP107:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 9
8062 // CHECK14-NEXT: store i64 0, ptr [[TMP107]], align 8
8063 // CHECK14-NEXT: [[TMP108:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 10
8064 // CHECK14-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP108]], align 4
8065 // CHECK14-NEXT: [[TMP109:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 11
8066 // CHECK14-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP109]], align 4
8067 // CHECK14-NEXT: [[TMP110:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 12
8068 // CHECK14-NEXT: store i32 0, ptr [[TMP110]], align 4
8069 // CHECK14-NEXT: [[TMP111:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.region_id, ptr [[KERNEL_ARGS30]])
8070 // CHECK14-NEXT: [[TMP112:%.*]] = icmp ne i32 [[TMP111]], 0
8071 // CHECK14-NEXT: br i1 [[TMP112]], label [[OMP_OFFLOAD_FAILED31:%.*]], label [[OMP_OFFLOAD_CONT32:%.*]]
8072 // CHECK14: omp_offload.failed31:
8073 // CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164(i64 [[TMP74]], i64 [[TMP76]], i64 [[TMP1]], ptr [[VLA]]) #[[ATTR4]]
8074 // CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT32]]
8075 // CHECK14: omp_offload.cont32:
8076 // CHECK14-NEXT: [[TMP113:%.*]] = load i32, ptr [[N]], align 4
8077 // CHECK14-NEXT: store i32 [[TMP113]], ptr [[N_CASTED33]], align 4
8078 // CHECK14-NEXT: [[TMP114:%.*]] = load i64, ptr [[N_CASTED33]], align 8
8079 // CHECK14-NEXT: [[TMP115:%.*]] = mul nuw i64 [[TMP1]], 4
8080 // CHECK14-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[DOTOFFLOAD_SIZES37]], ptr align 8 @.offload_sizes.5, i64 24, i1 false)
8081 // CHECK14-NEXT: [[TMP116:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 0
8082 // CHECK14-NEXT: store i64 [[TMP114]], ptr [[TMP116]], align 8
8083 // CHECK14-NEXT: [[TMP117:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS35]], i32 0, i32 0
8084 // CHECK14-NEXT: store i64 [[TMP114]], ptr [[TMP117]], align 8
8085 // CHECK14-NEXT: [[TMP118:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 0
8086 // CHECK14-NEXT: store ptr null, ptr [[TMP118]], align 8
8087 // CHECK14-NEXT: [[TMP119:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 1
8088 // CHECK14-NEXT: store i64 [[TMP1]], ptr [[TMP119]], align 8
8089 // CHECK14-NEXT: [[TMP120:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS35]], i32 0, i32 1
8090 // CHECK14-NEXT: store i64 [[TMP1]], ptr [[TMP120]], align 8
8091 // CHECK14-NEXT: [[TMP121:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 1
8092 // CHECK14-NEXT: store ptr null, ptr [[TMP121]], align 8
8093 // CHECK14-NEXT: [[TMP122:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 2
8094 // CHECK14-NEXT: store ptr [[VLA]], ptr [[TMP122]], align 8
8095 // CHECK14-NEXT: [[TMP123:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS35]], i32 0, i32 2
8096 // CHECK14-NEXT: store ptr [[VLA]], ptr [[TMP123]], align 8
8097 // CHECK14-NEXT: [[TMP124:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES37]], i32 0, i32 2
8098 // CHECK14-NEXT: store i64 [[TMP115]], ptr [[TMP124]], align 8
8099 // CHECK14-NEXT: [[TMP125:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 2
8100 // CHECK14-NEXT: store ptr null, ptr [[TMP125]], align 8
8101 // CHECK14-NEXT: [[TMP126:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 0
8102 // CHECK14-NEXT: [[TMP127:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS35]], i32 0, i32 0
8103 // CHECK14-NEXT: [[TMP128:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES37]], i32 0, i32 0
8104 // CHECK14-NEXT: [[TMP129:%.*]] = load i32, ptr [[N]], align 4
8105 // CHECK14-NEXT: store i32 [[TMP129]], ptr [[DOTCAPTURE_EXPR_39]], align 4
8106 // CHECK14-NEXT: [[TMP130:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_39]], align 4
8107 // CHECK14-NEXT: [[SUB41:%.*]] = sub nsw i32 [[TMP130]], 0
8108 // CHECK14-NEXT: [[DIV42:%.*]] = sdiv i32 [[SUB41]], 1
8109 // CHECK14-NEXT: [[SUB43:%.*]] = sub nsw i32 [[DIV42]], 1
8110 // CHECK14-NEXT: store i32 [[SUB43]], ptr [[DOTCAPTURE_EXPR_40]], align 4
8111 // CHECK14-NEXT: [[TMP131:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_40]], align 4
8112 // CHECK14-NEXT: [[ADD44:%.*]] = add nsw i32 [[TMP131]], 1
8113 // CHECK14-NEXT: [[TMP132:%.*]] = zext i32 [[ADD44]] to i64
8114 // CHECK14-NEXT: [[TMP133:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 0
8115 // CHECK14-NEXT: store i32 2, ptr [[TMP133]], align 4
8116 // CHECK14-NEXT: [[TMP134:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 1
8117 // CHECK14-NEXT: store i32 3, ptr [[TMP134]], align 4
8118 // CHECK14-NEXT: [[TMP135:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 2
8119 // CHECK14-NEXT: store ptr [[TMP126]], ptr [[TMP135]], align 8
8120 // CHECK14-NEXT: [[TMP136:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 3
8121 // CHECK14-NEXT: store ptr [[TMP127]], ptr [[TMP136]], align 8
8122 // CHECK14-NEXT: [[TMP137:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 4
8123 // CHECK14-NEXT: store ptr [[TMP128]], ptr [[TMP137]], align 8
8124 // CHECK14-NEXT: [[TMP138:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 5
8125 // CHECK14-NEXT: store ptr @.offload_maptypes.6, ptr [[TMP138]], align 8
8126 // CHECK14-NEXT: [[TMP139:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 6
8127 // CHECK14-NEXT: store ptr null, ptr [[TMP139]], align 8
8128 // CHECK14-NEXT: [[TMP140:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 7
8129 // CHECK14-NEXT: store ptr null, ptr [[TMP140]], align 8
8130 // CHECK14-NEXT: [[TMP141:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 8
8131 // CHECK14-NEXT: store i64 [[TMP132]], ptr [[TMP141]], align 8
8132 // CHECK14-NEXT: [[TMP142:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 9
8133 // CHECK14-NEXT: store i64 0, ptr [[TMP142]], align 8
8134 // CHECK14-NEXT: [[TMP143:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 10
8135 // CHECK14-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP143]], align 4
8136 // CHECK14-NEXT: [[TMP144:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 11
8137 // CHECK14-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP144]], align 4
8138 // CHECK14-NEXT: [[TMP145:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 12
8139 // CHECK14-NEXT: store i32 0, ptr [[TMP145]], align 4
8140 // CHECK14-NEXT: [[TMP146:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.region_id, ptr [[KERNEL_ARGS45]])
8141 // CHECK14-NEXT: [[TMP147:%.*]] = icmp ne i32 [[TMP146]], 0
8142 // CHECK14-NEXT: br i1 [[TMP147]], label [[OMP_OFFLOAD_FAILED46:%.*]], label [[OMP_OFFLOAD_CONT47:%.*]]
8143 // CHECK14: omp_offload.failed46:
8144 // CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169(i64 [[TMP114]], i64 [[TMP1]], ptr [[VLA]]) #[[ATTR4]]
8145 // CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT47]]
8146 // CHECK14: omp_offload.cont47:
8147 // CHECK14-NEXT: [[TMP148:%.*]] = load i32, ptr [[M]], align 4
8148 // CHECK14-NEXT: store i32 [[TMP148]], ptr [[M_CASTED48]], align 4
8149 // CHECK14-NEXT: [[TMP149:%.*]] = load i64, ptr [[M_CASTED48]], align 8
8150 // CHECK14-NEXT: [[TMP150:%.*]] = load i32, ptr [[N]], align 4
8151 // CHECK14-NEXT: store i32 [[TMP150]], ptr [[N_CASTED49]], align 4
8152 // CHECK14-NEXT: [[TMP151:%.*]] = load i64, ptr [[N_CASTED49]], align 8
8153 // CHECK14-NEXT: [[TMP152:%.*]] = mul nuw i64 [[TMP1]], 4
8154 // CHECK14-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[DOTOFFLOAD_SIZES53]], ptr align 8 @.offload_sizes.7, i64 32, i1 false)
8155 // CHECK14-NEXT: [[TMP153:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0
8156 // CHECK14-NEXT: store i64 [[TMP149]], ptr [[TMP153]], align 8
8157 // CHECK14-NEXT: [[TMP154:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS51]], i32 0, i32 0
8158 // CHECK14-NEXT: store i64 [[TMP149]], ptr [[TMP154]], align 8
8159 // CHECK14-NEXT: [[TMP155:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 0
8160 // CHECK14-NEXT: store ptr null, ptr [[TMP155]], align 8
8161 // CHECK14-NEXT: [[TMP156:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 1
8162 // CHECK14-NEXT: store i64 [[TMP151]], ptr [[TMP156]], align 8
8163 // CHECK14-NEXT: [[TMP157:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS51]], i32 0, i32 1
8164 // CHECK14-NEXT: store i64 [[TMP151]], ptr [[TMP157]], align 8
8165 // CHECK14-NEXT: [[TMP158:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 1
8166 // CHECK14-NEXT: store ptr null, ptr [[TMP158]], align 8
8167 // CHECK14-NEXT: [[TMP159:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 2
8168 // CHECK14-NEXT: store i64 [[TMP1]], ptr [[TMP159]], align 8
8169 // CHECK14-NEXT: [[TMP160:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS51]], i32 0, i32 2
8170 // CHECK14-NEXT: store i64 [[TMP1]], ptr [[TMP160]], align 8
8171 // CHECK14-NEXT: [[TMP161:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 2
8172 // CHECK14-NEXT: store ptr null, ptr [[TMP161]], align 8
8173 // CHECK14-NEXT: [[TMP162:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 3
8174 // CHECK14-NEXT: store ptr [[VLA]], ptr [[TMP162]], align 8
8175 // CHECK14-NEXT: [[TMP163:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS51]], i32 0, i32 3
8176 // CHECK14-NEXT: store ptr [[VLA]], ptr [[TMP163]], align 8
8177 // CHECK14-NEXT: [[TMP164:%.*]] = getelementptr inbounds [4 x i64], ptr [[DOTOFFLOAD_SIZES53]], i32 0, i32 3
8178 // CHECK14-NEXT: store i64 [[TMP152]], ptr [[TMP164]], align 8
8179 // CHECK14-NEXT: [[TMP165:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 3
8180 // CHECK14-NEXT: store ptr null, ptr [[TMP165]], align 8
8181 // CHECK14-NEXT: [[TMP166:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0
8182 // CHECK14-NEXT: [[TMP167:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS51]], i32 0, i32 0
8183 // CHECK14-NEXT: [[TMP168:%.*]] = getelementptr inbounds [4 x i64], ptr [[DOTOFFLOAD_SIZES53]], i32 0, i32 0
8184 // CHECK14-NEXT: [[TMP169:%.*]] = load i32, ptr [[N]], align 4
8185 // CHECK14-NEXT: store i32 [[TMP169]], ptr [[DOTCAPTURE_EXPR_55]], align 4
8186 // CHECK14-NEXT: [[TMP170:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_55]], align 4
8187 // CHECK14-NEXT: [[SUB57:%.*]] = sub nsw i32 [[TMP170]], 0
8188 // CHECK14-NEXT: [[DIV58:%.*]] = sdiv i32 [[SUB57]], 1
8189 // CHECK14-NEXT: [[SUB59:%.*]] = sub nsw i32 [[DIV58]], 1
8190 // CHECK14-NEXT: store i32 [[SUB59]], ptr [[DOTCAPTURE_EXPR_56]], align 4
8191 // CHECK14-NEXT: [[TMP171:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_56]], align 4
8192 // CHECK14-NEXT: [[ADD60:%.*]] = add nsw i32 [[TMP171]], 1
8193 // CHECK14-NEXT: [[TMP172:%.*]] = zext i32 [[ADD60]] to i64
8194 // CHECK14-NEXT: [[TMP173:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 0
8195 // CHECK14-NEXT: store i32 2, ptr [[TMP173]], align 4
8196 // CHECK14-NEXT: [[TMP174:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 1
8197 // CHECK14-NEXT: store i32 4, ptr [[TMP174]], align 4
8198 // CHECK14-NEXT: [[TMP175:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 2
8199 // CHECK14-NEXT: store ptr [[TMP166]], ptr [[TMP175]], align 8
8200 // CHECK14-NEXT: [[TMP176:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 3
8201 // CHECK14-NEXT: store ptr [[TMP167]], ptr [[TMP176]], align 8
8202 // CHECK14-NEXT: [[TMP177:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 4
8203 // CHECK14-NEXT: store ptr [[TMP168]], ptr [[TMP177]], align 8
8204 // CHECK14-NEXT: [[TMP178:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 5
8205 // CHECK14-NEXT: store ptr @.offload_maptypes.8, ptr [[TMP178]], align 8
8206 // CHECK14-NEXT: [[TMP179:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 6
8207 // CHECK14-NEXT: store ptr null, ptr [[TMP179]], align 8
8208 // CHECK14-NEXT: [[TMP180:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 7
8209 // CHECK14-NEXT: store ptr null, ptr [[TMP180]], align 8
8210 // CHECK14-NEXT: [[TMP181:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 8
8211 // CHECK14-NEXT: store i64 [[TMP172]], ptr [[TMP181]], align 8
8212 // CHECK14-NEXT: [[TMP182:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 9
8213 // CHECK14-NEXT: store i64 0, ptr [[TMP182]], align 8
8214 // CHECK14-NEXT: [[TMP183:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 10
8215 // CHECK14-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP183]], align 4
8216 // CHECK14-NEXT: [[TMP184:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 11
8217 // CHECK14-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP184]], align 4
8218 // CHECK14-NEXT: [[TMP185:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 12
8219 // CHECK14-NEXT: store i32 0, ptr [[TMP185]], align 4
8220 // CHECK14-NEXT: [[TMP186:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.region_id, ptr [[KERNEL_ARGS61]])
8221 // CHECK14-NEXT: [[TMP187:%.*]] = icmp ne i32 [[TMP186]], 0
8222 // CHECK14-NEXT: br i1 [[TMP187]], label [[OMP_OFFLOAD_FAILED62:%.*]], label [[OMP_OFFLOAD_CONT63:%.*]]
8223 // CHECK14: omp_offload.failed62:
8224 // CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174(i64 [[TMP149]], i64 [[TMP151]], i64 [[TMP1]], ptr [[VLA]]) #[[ATTR4]]
8225 // CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT63]]
8226 // CHECK14: omp_offload.cont63:
8227 // CHECK14-NEXT: [[TMP188:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4
8228 // CHECK14-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP188]])
8229 // CHECK14-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4
8230 // CHECK14-NEXT: [[TMP189:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8
8231 // CHECK14-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP189]])
8232 // CHECK14-NEXT: [[TMP190:%.*]] = load i32, ptr [[RETVAL]], align 4
8233 // CHECK14-NEXT: ret i32 [[TMP190]]
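// The _l154 target entry point below simply forwards its three captures
// (n by address, the VLA extent, and the array pointer) into __kmpc_fork_teams.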
8236 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154
8237 // CHECK14-SAME: (i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2:[0-9]+]] {
8238 // CHECK14-NEXT: entry:
8239 // CHECK14-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
8240 // CHECK14-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
8241 // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
8242 // CHECK14-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
8243 // CHECK14-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
8244 // CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
8245 // CHECK14-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
8246 // CHECK14-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8
8247 // CHECK14-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.omp_outlined, ptr [[N_ADDR]], i64 [[TMP0]], ptr [[TMP1]])
8248 // CHECK14-NEXT: ret void
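// Teams "distribute" outlined function for _l154: the trip count is recomputed
// from n, a precondition guard skips the loop when n <= 0, the distribute
// bounds come from __kmpc_for_static_init_4 with sched kind 92, and each chunk
// is handed to the parallel outlined function via __kmpc_fork_call together
// with n, the VLA extent, and the array.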
8251 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.omp_outlined
8252 // CHECK14-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR3:[0-9]+]] {
8253 // CHECK14-NEXT: entry:
8254 // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
8255 // CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
8256 // CHECK14-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 8
8257 // CHECK14-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
8258 // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
8259 // CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
8260 // CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
8261 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
8262 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
8263 // CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
8264 // CHECK14-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
8265 // CHECK14-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
8266 // CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
8267 // CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
8268 // CHECK14-NEXT: [[I3:%.*]] = alloca i32, align 4
8269 // CHECK14-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
8270 // CHECK14-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
8271 // CHECK14-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 8
8272 // CHECK14-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
8273 // CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
8274 // CHECK14-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 8
8275 // CHECK14-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
8276 // CHECK14-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 8
8277 // CHECK14-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
8278 // CHECK14-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4
8279 // CHECK14-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
8280 // CHECK14-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
8281 // CHECK14-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
8282 // CHECK14-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
8283 // CHECK14-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
8284 // CHECK14-NEXT: store i32 0, ptr [[I]], align 4
8285 // CHECK14-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
8286 // CHECK14-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
8287 // CHECK14-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
8288 // CHECK14: omp.precond.then:
8289 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
8290 // CHECK14-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
8291 // CHECK14-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_COMB_UB]], align 4
8292 // CHECK14-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
8293 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
8294 // CHECK14-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
8295 // CHECK14-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
8296 // CHECK14-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
8297 // CHECK14-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
8298 // CHECK14-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
8299 // CHECK14-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
8300 // CHECK14-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
8301 // CHECK14: cond.true:
8302 // CHECK14-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
8303 // CHECK14-NEXT: br label [[COND_END:%.*]]
8304 // CHECK14: cond.false:
8305 // CHECK14-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
8306 // CHECK14-NEXT: br label [[COND_END]]
8307 // CHECK14: cond.end:
8308 // CHECK14-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
8309 // CHECK14-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
8310 // CHECK14-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
8311 // CHECK14-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4
8312 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
8313 // CHECK14: omp.inner.for.cond:
8314 // CHECK14-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]]
8315 // CHECK14-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP13]]
8316 // CHECK14-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
8317 // CHECK14-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8318 // CHECK14: omp.inner.for.body:
8319 // CHECK14-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP13]]
8320 // CHECK14-NEXT: [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
8321 // CHECK14-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP13]]
8322 // CHECK14-NEXT: [[TMP19:%.*]] = zext i32 [[TMP18]] to i64
8323 // CHECK14-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 5, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.omp_outlined.omp_outlined, i64 [[TMP17]], i64 [[TMP19]], ptr [[TMP0]], i64 [[TMP1]], ptr [[TMP2]]), !llvm.access.group [[ACC_GRP13]]
8324 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
8325 // CHECK14: omp.inner.for.inc:
8326 // CHECK14-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]]
8327 // CHECK14-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP13]]
8328 // CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
8329 // CHECK14-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]]
8330 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]]
8331 // CHECK14: omp.inner.for.end:
8332 // CHECK14-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
8333 // CHECK14: omp.loop.exit:
8334 // CHECK14-NEXT: [[TMP22:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
8335 // CHECK14-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4
8336 // CHECK14-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP23]])
8337 // CHECK14-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
8338 // CHECK14-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
8339 // CHECK14-NEXT: br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
8340 // CHECK14: .omp.final.then:
8341 // CHECK14-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
8342 // CHECK14-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP26]], 0
8343 // CHECK14-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
8344 // CHECK14-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
8345 // CHECK14-NEXT: [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
8346 // CHECK14-NEXT: store i32 [[ADD8]], ptr [[I3]], align 4
8347 // CHECK14-NEXT: br label [[DOTOMP_FINAL_DONE]]
8348 // CHECK14: .omp.final.done:
8349 // CHECK14-NEXT: br label [[OMP_PRECOND_END]]
8350 // CHECK14: omp.precond.end:
8351 // CHECK14-NEXT: ret void
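// The nested parallel-for outlined function for _l154 starts below: the 64-bit
// team bounds are truncated back to i32 before the worksharing init, and
// __kmpc_for_static_init_4 is invoked with sched kind 34 (the runtime's
// unchunked static schedule).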
8354 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.omp_outlined.omp_outlined
8355 // CHECK14-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR3]] {
8356 // CHECK14-NEXT: entry:
8357 // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
8358 // CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
8359 // CHECK14-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
8360 // CHECK14-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
8361 // CHECK14-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 8
8362 // CHECK14-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
8363 // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
8364 // CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
8365 // CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
8366 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
8367 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
8368 // CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
8369 // CHECK14-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
8370 // CHECK14-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
8371 // CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
8372 // CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
8373 // CHECK14-NEXT: [[I4:%.*]] = alloca i32, align 4
8374 // CHECK14-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
8375 // CHECK14-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
8376 // CHECK14-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
8377 // CHECK14-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
8378 // CHECK14-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 8
8379 // CHECK14-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
8380 // CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
8381 // CHECK14-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 8
8382 // CHECK14-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
8383 // CHECK14-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 8
8384 // CHECK14-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
8385 // CHECK14-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4
8386 // CHECK14-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
8387 // CHECK14-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
8388 // CHECK14-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
8389 // CHECK14-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
8390 // CHECK14-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
8391 // CHECK14-NEXT: store i32 0, ptr [[I]], align 4
8392 // CHECK14-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
8393 // CHECK14-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
8394 // CHECK14-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
8395 // CHECK14: omp.precond.then:
8396 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
8397 // CHECK14-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
8398 // CHECK14-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4
8399 // CHECK14-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
8400 // CHECK14-NEXT: [[CONV:%.*]] = trunc i64 [[TMP7]] to i32
8401 // CHECK14-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
8402 // CHECK14-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP8]] to i32
8403 // CHECK14-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
8404 // CHECK14-NEXT: store i32 [[CONV3]], ptr [[DOTOMP_UB]], align 4
8405 // CHECK14-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
8406 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
8407 // CHECK14-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
8408 // CHECK14-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4
8409 // CHECK14-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP10]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
8410 // CHECK14-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
8411 // CHECK14-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
8412 // CHECK14-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP11]], [[TMP12]]
8413 // CHECK14-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
8414 // CHECK14: cond.true:
8415 // CHECK14-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
8416 // CHECK14-NEXT: br label [[COND_END:%.*]]
8417 // CHECK14: cond.false:
8418 // CHECK14-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
8419 // CHECK14-NEXT: br label [[COND_END]]
8420 // CHECK14: cond.end:
8421 // CHECK14-NEXT: [[COND:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ]
8422 // CHECK14-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
8423 // CHECK14-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
8424 // CHECK14-NEXT: store i32 [[TMP15]], ptr [[DOTOMP_IV]], align 4
8425 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
8426 // CHECK14: omp.inner.for.cond:
8427 // CHECK14-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP17:![0-9]+]]
8428 // CHECK14-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP17]]
8429 // CHECK14-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
8430 // CHECK14-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8431 // CHECK14: omp.inner.for.body:
8432 // CHECK14-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP17]]
8433 // CHECK14-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
8434 // CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
8435 // CHECK14-NEXT: store i32 [[ADD]], ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP17]]
8436 // CHECK14-NEXT: [[TMP19:%.*]] = load i32, ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP17]]
8437 // CHECK14-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP19]] to i64
8438 // CHECK14-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i64 [[IDXPROM]]
8439 // CHECK14-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP17]]
8440 // CHECK14-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
8441 // CHECK14: omp.body.continue:
8442 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
8443 // CHECK14: omp.inner.for.inc:
8444 // CHECK14-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP17]]
8445 // CHECK14-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP20]], 1
8446 // CHECK14-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP17]]
8447 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP18:![0-9]+]]
8448 // CHECK14: omp.inner.for.end:
8449 // CHECK14-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
8450 // CHECK14: omp.loop.exit:
8451 // CHECK14-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
8452 // CHECK14-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
8453 // CHECK14-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP22]])
8454 // CHECK14-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
8455 // CHECK14-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
8456 // CHECK14-NEXT: br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
8457 // CHECK14: .omp.final.then:
8458 // CHECK14-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
8459 // CHECK14-NEXT: [[SUB8:%.*]] = sub nsw i32 [[TMP25]], 0
8460 // CHECK14-NEXT: [[DIV9:%.*]] = sdiv i32 [[SUB8]], 1
8461 // CHECK14-NEXT: [[MUL10:%.*]] = mul nsw i32 [[DIV9]], 1
8462 // CHECK14-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]]
8463 // CHECK14-NEXT: store i32 [[ADD11]], ptr [[I4]], align 4
8464 // CHECK14-NEXT: br label [[DOTOMP_FINAL_DONE]]
8465 // CHECK14: .omp.final.done:
8466 // CHECK14-NEXT: br label [[OMP_PRECOND_END]]
8467 // CHECK14: omp.precond.end:
8468 // CHECK14-NEXT: ret void
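// Assertions for the l159 target region begin here. The kernel stub forwards n by
// address together with the VLA extent and the array pointer to __kmpc_fork_teams;
// the teams-level outlined function then drives the distribute loop via
// __kmpc_for_static_init_4 (schedule value 92) and forks the parallel body with
// __kmpc_fork_call, passing the chunk bounds as zero-extended i64 values.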
8471 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159
8472 // CHECK14-SAME: (i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
8473 // CHECK14-NEXT: entry:
8474 // CHECK14-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
8475 // CHECK14-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
8476 // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
8477 // CHECK14-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
8478 // CHECK14-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
8479 // CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
8480 // CHECK14-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
8481 // CHECK14-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8
8482 // CHECK14-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.omp_outlined, ptr [[N_ADDR]], i64 [[TMP0]], ptr [[TMP1]])
8483 // CHECK14-NEXT: ret void
8486 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.omp_outlined
8487 // CHECK14-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR3]] {
8488 // CHECK14-NEXT: entry:
8489 // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
8490 // CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
8491 // CHECK14-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 8
8492 // CHECK14-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
8493 // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
8494 // CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
8495 // CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
8496 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
8497 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
8498 // CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
8499 // CHECK14-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
8500 // CHECK14-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
8501 // CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
8502 // CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
8503 // CHECK14-NEXT: [[I3:%.*]] = alloca i32, align 4
8504 // CHECK14-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
8505 // CHECK14-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
8506 // CHECK14-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 8
8507 // CHECK14-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
8508 // CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
8509 // CHECK14-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 8
8510 // CHECK14-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
8511 // CHECK14-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 8
8512 // CHECK14-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
8513 // CHECK14-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4
8514 // CHECK14-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
8515 // CHECK14-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
8516 // CHECK14-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
8517 // CHECK14-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
8518 // CHECK14-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
8519 // CHECK14-NEXT: store i32 0, ptr [[I]], align 4
8520 // CHECK14-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
8521 // CHECK14-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
8522 // CHECK14-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
8523 // CHECK14: omp.precond.then:
8524 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
8525 // CHECK14-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
8526 // CHECK14-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_COMB_UB]], align 4
8527 // CHECK14-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
8528 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
8529 // CHECK14-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
8530 // CHECK14-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
8531 // CHECK14-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
8532 // CHECK14-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
8533 // CHECK14-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
8534 // CHECK14-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
8535 // CHECK14-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
8536 // CHECK14: cond.true:
8537 // CHECK14-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
8538 // CHECK14-NEXT: br label [[COND_END:%.*]]
8539 // CHECK14: cond.false:
8540 // CHECK14-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
8541 // CHECK14-NEXT: br label [[COND_END]]
8542 // CHECK14: cond.end:
8543 // CHECK14-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
8544 // CHECK14-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
8545 // CHECK14-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
8546 // CHECK14-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4
8547 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
8548 // CHECK14: omp.inner.for.cond:
8549 // CHECK14-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22:![0-9]+]]
8550 // CHECK14-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP22]]
8551 // CHECK14-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
8552 // CHECK14-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8553 // CHECK14: omp.inner.for.body:
8554 // CHECK14-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP22]]
8555 // CHECK14-NEXT: [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
8556 // CHECK14-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP22]]
8557 // CHECK14-NEXT: [[TMP19:%.*]] = zext i32 [[TMP18]] to i64
8558 // CHECK14-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 5, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.omp_outlined.omp_outlined, i64 [[TMP17]], i64 [[TMP19]], ptr [[TMP0]], i64 [[TMP1]], ptr [[TMP2]]), !llvm.access.group [[ACC_GRP22]]
8559 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
8560 // CHECK14: omp.inner.for.inc:
8561 // CHECK14-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]]
8562 // CHECK14-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP22]]
8563 // CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
8564 // CHECK14-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]]
8565 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]]
8566 // CHECK14: omp.inner.for.end:
8567 // CHECK14-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
8568 // CHECK14: omp.loop.exit:
8569 // CHECK14-NEXT: [[TMP22:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
8570 // CHECK14-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4
8571 // CHECK14-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP23]])
8572 // CHECK14-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
8573 // CHECK14-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
8574 // CHECK14-NEXT: br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
8575 // CHECK14: .omp.final.then:
8576 // CHECK14-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
8577 // CHECK14-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP26]], 0
8578 // CHECK14-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
8579 // CHECK14-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
8580 // CHECK14-NEXT: [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
8581 // CHECK14-NEXT: store i32 [[ADD8]], ptr [[I3]], align 4
8582 // CHECK14-NEXT: br label [[DOTOMP_FINAL_DONE]]
8583 // CHECK14: .omp.final.done:
8584 // CHECK14-NEXT: br label [[OMP_PRECOND_END]]
8585 // CHECK14: omp.precond.end:
8586 // CHECK14-NEXT: ret void
8589 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.omp_outlined.omp_outlined
8590 // CHECK14-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR3]] {
8591 // CHECK14-NEXT: entry:
8592 // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
8593 // CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
8594 // CHECK14-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
8595 // CHECK14-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
8596 // CHECK14-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 8
8597 // CHECK14-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
8598 // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
8599 // CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
8600 // CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
8601 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
8602 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
8603 // CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
8604 // CHECK14-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
8605 // CHECK14-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
8606 // CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
8607 // CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
8608 // CHECK14-NEXT: [[I4:%.*]] = alloca i32, align 4
8609 // CHECK14-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
8610 // CHECK14-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
8611 // CHECK14-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
8612 // CHECK14-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
8613 // CHECK14-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 8
8614 // CHECK14-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
8615 // CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
8616 // CHECK14-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 8
8617 // CHECK14-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
8618 // CHECK14-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 8
8619 // CHECK14-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
8620 // CHECK14-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4
8621 // CHECK14-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
8622 // CHECK14-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
8623 // CHECK14-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
8624 // CHECK14-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
8625 // CHECK14-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
8626 // CHECK14-NEXT: store i32 0, ptr [[I]], align 4
8627 // CHECK14-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
8628 // CHECK14-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
8629 // CHECK14-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
8630 // CHECK14: omp.precond.then:
8631 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
8632 // CHECK14-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
8633 // CHECK14-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4
8634 // CHECK14-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
8635 // CHECK14-NEXT: [[CONV:%.*]] = trunc i64 [[TMP7]] to i32
8636 // CHECK14-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
8637 // CHECK14-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP8]] to i32
8638 // CHECK14-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
8639 // CHECK14-NEXT: store i32 [[CONV3]], ptr [[DOTOMP_UB]], align 4
8640 // CHECK14-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
8641 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
8642 // CHECK14-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
8643 // CHECK14-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4
8644 // CHECK14-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP10]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
8645 // CHECK14-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
8646 // CHECK14-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
8647 // CHECK14-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP11]], [[TMP12]]
8648 // CHECK14-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
8649 // CHECK14: cond.true:
8650 // CHECK14-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
8651 // CHECK14-NEXT: br label [[COND_END:%.*]]
8652 // CHECK14: cond.false:
8653 // CHECK14-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
8654 // CHECK14-NEXT: br label [[COND_END]]
8655 // CHECK14: cond.end:
8656 // CHECK14-NEXT: [[COND:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ]
8657 // CHECK14-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
8658 // CHECK14-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
8659 // CHECK14-NEXT: store i32 [[TMP15]], ptr [[DOTOMP_IV]], align 4
8660 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
8661 // CHECK14: omp.inner.for.cond:
8662 // CHECK14-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25:![0-9]+]]
8663 // CHECK14-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP25]]
8664 // CHECK14-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
8665 // CHECK14-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8666 // CHECK14: omp.inner.for.body:
8667 // CHECK14-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]]
8668 // CHECK14-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
8669 // CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
8670 // CHECK14-NEXT: store i32 [[ADD]], ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP25]]
8671 // CHECK14-NEXT: [[TMP19:%.*]] = load i32, ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP25]]
8672 // CHECK14-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP19]] to i64
8673 // CHECK14-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i64 [[IDXPROM]]
8674 // CHECK14-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP25]]
8675 // CHECK14-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
8676 // CHECK14: omp.body.continue:
8677 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
8678 // CHECK14: omp.inner.for.inc:
8679 // CHECK14-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]]
8680 // CHECK14-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP20]], 1
8681 // CHECK14-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]]
8682 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]]
8683 // CHECK14: omp.inner.for.end:
8684 // CHECK14-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
8685 // CHECK14: omp.loop.exit:
8686 // CHECK14-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
8687 // CHECK14-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
8688 // CHECK14-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP22]])
8689 // CHECK14-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
8690 // CHECK14-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
8691 // CHECK14-NEXT: br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
8692 // CHECK14: .omp.final.then:
8693 // CHECK14-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
8694 // CHECK14-NEXT: [[SUB8:%.*]] = sub nsw i32 [[TMP25]], 0
8695 // CHECK14-NEXT: [[DIV9:%.*]] = sdiv i32 [[SUB8]], 1
8696 // CHECK14-NEXT: [[MUL10:%.*]] = mul nsw i32 [[DIV9]], 1
8697 // CHECK14-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]]
8698 // CHECK14-NEXT: store i32 [[ADD11]], ptr [[I4]], align 4
8699 // CHECK14-NEXT: br label [[DOTOMP_FINAL_DONE]]
8700 // CHECK14: .omp.final.done:
8701 // CHECK14-NEXT: br label [[OMP_PRECOND_END]]
8702 // CHECK14: omp.precond.end:
8703 // CHECK14-NEXT: ret void
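// Assertions for the l164 target region begin here. Unlike l159, the kernel also
// captures m: its value is copied into a captured temporary and passed as an extra
// i64 argument to __kmpc_fork_teams. The teams-level outlined function forwards
// that value as the chunk argument of __kmpc_for_static_init_4 (schedule value 91),
// consistent with a chunked dist_schedule, and after each __kmpc_fork_call the
// omp.inner.for.inc block advances the distribute bounds by the stride and clamps
// the upper bound to the loop trip count.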
8706 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164
8707 // CHECK14-SAME: (i64 noundef [[M:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
8708 // CHECK14-NEXT: entry:
8709 // CHECK14-NEXT: [[M_ADDR:%.*]] = alloca i64, align 8
8710 // CHECK14-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
8711 // CHECK14-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
8712 // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
8713 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
8714 // CHECK14-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
8715 // CHECK14-NEXT: store i64 [[M]], ptr [[M_ADDR]], align 8
8716 // CHECK14-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
8717 // CHECK14-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
8718 // CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
8719 // CHECK14-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
8720 // CHECK14-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8
8721 // CHECK14-NEXT: [[TMP2:%.*]] = load i32, ptr [[M_ADDR]], align 4
8722 // CHECK14-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 4
8723 // CHECK14-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
8724 // CHECK14-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
8725 // CHECK14-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8
8726 // CHECK14-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.omp_outlined, ptr [[N_ADDR]], i64 [[TMP0]], ptr [[TMP1]], i64 [[TMP4]])
8727 // CHECK14-NEXT: ret void
8730 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.omp_outlined
8731 // CHECK14-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
8732 // CHECK14-NEXT: entry:
8733 // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
8734 // CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
8735 // CHECK14-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 8
8736 // CHECK14-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
8737 // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
8738 // CHECK14-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
8739 // CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
8740 // CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
8741 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
8742 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
8743 // CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
8744 // CHECK14-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
8745 // CHECK14-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
8746 // CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
8747 // CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
8748 // CHECK14-NEXT: [[I4:%.*]] = alloca i32, align 4
8749 // CHECK14-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
8750 // CHECK14-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
8751 // CHECK14-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
8752 // CHECK14-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 8
8753 // CHECK14-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
8754 // CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
8755 // CHECK14-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
8756 // CHECK14-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 8
8757 // CHECK14-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
8758 // CHECK14-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 8
8759 // CHECK14-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
8760 // CHECK14-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_1]], align 4
8761 // CHECK14-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
8762 // CHECK14-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
8763 // CHECK14-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
8764 // CHECK14-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
8765 // CHECK14-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_2]], align 4
8766 // CHECK14-NEXT: store i32 0, ptr [[I]], align 4
8767 // CHECK14-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
8768 // CHECK14-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
8769 // CHECK14-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
8770 // CHECK14: omp.precond.then:
8771 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
8772 // CHECK14-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
8773 // CHECK14-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_COMB_UB]], align 4
8774 // CHECK14-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
8775 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
8776 // CHECK14-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
8777 // CHECK14-NEXT: [[TMP8:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
8778 // CHECK14-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4
8779 // CHECK14-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP9]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[TMP7]])
8780 // CHECK14-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
8781 // CHECK14-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
8782 // CHECK14-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
8783 // CHECK14-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
8784 // CHECK14: cond.true:
8785 // CHECK14-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
8786 // CHECK14-NEXT: br label [[COND_END:%.*]]
8787 // CHECK14: cond.false:
8788 // CHECK14-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
8789 // CHECK14-NEXT: br label [[COND_END]]
8790 // CHECK14: cond.end:
8791 // CHECK14-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
8792 // CHECK14-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
8793 // CHECK14-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
8794 // CHECK14-NEXT: store i32 [[TMP14]], ptr [[DOTOMP_IV]], align 4
8795 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
8796 // CHECK14: omp.inner.for.cond:
8797 // CHECK14-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28:![0-9]+]]
8798 // CHECK14-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP28]]
8799 // CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP16]], 1
8800 // CHECK14-NEXT: [[CMP6:%.*]] = icmp slt i32 [[TMP15]], [[ADD]]
8801 // CHECK14-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8802 // CHECK14: omp.inner.for.body:
8803 // CHECK14-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP28]]
8804 // CHECK14-NEXT: [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
8805 // CHECK14-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP28]]
8806 // CHECK14-NEXT: [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
8807 // CHECK14-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group [[ACC_GRP28]]
8808 // CHECK14-NEXT: store i32 [[TMP21]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group [[ACC_GRP28]]
8809 // CHECK14-NEXT: [[TMP22:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group [[ACC_GRP28]]
8810 // CHECK14-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 6, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.omp_outlined.omp_outlined, i64 [[TMP18]], i64 [[TMP20]], ptr [[TMP0]], i64 [[TMP1]], ptr [[TMP2]], i64 [[TMP22]]), !llvm.access.group [[ACC_GRP28]]
8811 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
8812 // CHECK14: omp.inner.for.inc:
8813 // CHECK14-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
8814 // CHECK14-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP28]]
8815 // CHECK14-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
8816 // CHECK14-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
8817 // CHECK14-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP28]]
8818 // CHECK14-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP28]]
8819 // CHECK14-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
8820 // CHECK14-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP28]]
8821 // CHECK14-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP28]]
8822 // CHECK14-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP28]]
8823 // CHECK14-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP27]], [[TMP28]]
8824 // CHECK14-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP28]]
8825 // CHECK14-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP28]]
8826 // CHECK14-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP28]]
8827 // CHECK14-NEXT: [[CMP10:%.*]] = icmp sgt i32 [[TMP29]], [[TMP30]]
8828 // CHECK14-NEXT: br i1 [[CMP10]], label [[COND_TRUE11:%.*]], label [[COND_FALSE12:%.*]]
8829 // CHECK14: cond.true11:
8830 // CHECK14-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP28]]
8831 // CHECK14-NEXT: br label [[COND_END13:%.*]]
8832 // CHECK14: cond.false12:
8833 // CHECK14-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP28]]
8834 // CHECK14-NEXT: br label [[COND_END13]]
8835 // CHECK14: cond.end13:
8836 // CHECK14-NEXT: [[COND14:%.*]] = phi i32 [ [[TMP31]], [[COND_TRUE11]] ], [ [[TMP32]], [[COND_FALSE12]] ]
8837 // CHECK14-NEXT: store i32 [[COND14]], ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP28]]
8838 // CHECK14-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP28]]
8839 // CHECK14-NEXT: store i32 [[TMP33]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
8840 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]]
8841 // CHECK14: omp.inner.for.end:
8842 // CHECK14-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
8843 // CHECK14: omp.loop.exit:
8844 // CHECK14-NEXT: [[TMP34:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
8845 // CHECK14-NEXT: [[TMP35:%.*]] = load i32, ptr [[TMP34]], align 4
8846 // CHECK14-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP35]])
8847 // CHECK14-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
8848 // CHECK14-NEXT: [[TMP37:%.*]] = icmp ne i32 [[TMP36]], 0
8849 // CHECK14-NEXT: br i1 [[TMP37]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
8850 // CHECK14: .omp.final.then:
8851 // CHECK14-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
8852 // CHECK14-NEXT: [[SUB15:%.*]] = sub nsw i32 [[TMP38]], 0
8853 // CHECK14-NEXT: [[DIV16:%.*]] = sdiv i32 [[SUB15]], 1
8854 // CHECK14-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV16]], 1
8855 // CHECK14-NEXT: [[ADD17:%.*]] = add nsw i32 0, [[MUL]]
8856 // CHECK14-NEXT: store i32 [[ADD17]], ptr [[I4]], align 4
8857 // CHECK14-NEXT: br label [[DOTOMP_FINAL_DONE]]
8858 // CHECK14: .omp.final.done:
8859 // CHECK14-NEXT: br label [[OMP_PRECOND_END]]
8860 // CHECK14: omp.precond.end:
8861 // CHECK14-NEXT: ret void
8864 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.omp_outlined.omp_outlined
8865 // CHECK14-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
8866 // CHECK14-NEXT: entry:
8867 // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
8868 // CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
8869 // CHECK14-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
8870 // CHECK14-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
8871 // CHECK14-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 8
8872 // CHECK14-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
8873 // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
8874 // CHECK14-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
8875 // CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
8876 // CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
8877 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
8878 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
8879 // CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
8880 // CHECK14-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
8881 // CHECK14-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
8882 // CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
8883 // CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
8884 // CHECK14-NEXT: [[I5:%.*]] = alloca i32, align 4
8885 // CHECK14-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
8886 // CHECK14-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
8887 // CHECK14-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
8888 // CHECK14-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
8889 // CHECK14-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 8
8890 // CHECK14-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
8891 // CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
8892 // CHECK14-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
8893 // CHECK14-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 8
8894 // CHECK14-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
8895 // CHECK14-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 8
8896 // CHECK14-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
8897 // CHECK14-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_1]], align 4
8898 // CHECK14-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
8899 // CHECK14-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
8900 // CHECK14-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
8901 // CHECK14-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
8902 // CHECK14-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_2]], align 4
8903 // CHECK14-NEXT: store i32 0, ptr [[I]], align 4
8904 // CHECK14-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
8905 // CHECK14-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
8906 // CHECK14-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
8907 // CHECK14: omp.precond.then:
8908 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
8909 // CHECK14-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
8910 // CHECK14-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4
8911 // CHECK14-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
8912 // CHECK14-NEXT: [[CONV:%.*]] = trunc i64 [[TMP7]] to i32
8913 // CHECK14-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
8914 // CHECK14-NEXT: [[CONV4:%.*]] = trunc i64 [[TMP8]] to i32
8915 // CHECK14-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
8916 // CHECK14-NEXT: store i32 [[CONV4]], ptr [[DOTOMP_UB]], align 4
8917 // CHECK14-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
8918 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
8919 // CHECK14-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
8920 // CHECK14-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4
8921 // CHECK14-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP10]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
8922 // CHECK14-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
8923 // CHECK14-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
8924 // CHECK14-NEXT: [[CMP6:%.*]] = icmp sgt i32 [[TMP11]], [[TMP12]]
8925 // CHECK14-NEXT: br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
8926 // CHECK14: cond.true:
8927 // CHECK14-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
8928 // CHECK14-NEXT: br label [[COND_END:%.*]]
8929 // CHECK14: cond.false:
8930 // CHECK14-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
8931 // CHECK14-NEXT: br label [[COND_END]]
8932 // CHECK14: cond.end:
8933 // CHECK14-NEXT: [[COND:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ]
8934 // CHECK14-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
8935 // CHECK14-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
8936 // CHECK14-NEXT: store i32 [[TMP15]], ptr [[DOTOMP_IV]], align 4
8937 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
8938 // CHECK14: omp.inner.for.cond:
8939 // CHECK14-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31:![0-9]+]]
8940 // CHECK14-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP31]]
8941 // CHECK14-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
8942 // CHECK14-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
8943 // CHECK14: omp.inner.for.body:
8944 // CHECK14-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]]
8945 // CHECK14-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
8946 // CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
8947 // CHECK14-NEXT: store i32 [[ADD]], ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP31]]
8948 // CHECK14-NEXT: [[TMP19:%.*]] = load i32, ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP31]]
8949 // CHECK14-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP19]] to i64
8950 // CHECK14-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i64 [[IDXPROM]]
8951 // CHECK14-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP31]]
8952 // CHECK14-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
8953 // CHECK14: omp.body.continue:
8954 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
8955 // CHECK14: omp.inner.for.inc:
8956 // CHECK14-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]]
8957 // CHECK14-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP20]], 1
8958 // CHECK14-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]]
8959 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP32:![0-9]+]]
8960 // CHECK14: omp.inner.for.end:
8961 // CHECK14-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
8962 // CHECK14: omp.loop.exit:
8963 // CHECK14-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
8964 // CHECK14-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
8965 // CHECK14-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP22]])
8966 // CHECK14-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
8967 // CHECK14-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
8968 // CHECK14-NEXT: br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
8969 // CHECK14: .omp.final.then:
8970 // CHECK14-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
8971 // CHECK14-NEXT: [[SUB9:%.*]] = sub nsw i32 [[TMP25]], 0
8972 // CHECK14-NEXT: [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1
8973 // CHECK14-NEXT: [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1
8974 // CHECK14-NEXT: [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
8975 // CHECK14-NEXT: store i32 [[ADD12]], ptr [[I5]], align 4
8976 // CHECK14-NEXT: br label [[DOTOMP_FINAL_DONE]]
8977 // CHECK14: .omp.final.done:
8978 // CHECK14-NEXT: br label [[OMP_PRECOND_END]]
8979 // CHECK14: omp.precond.end:
8980 // CHECK14-NEXT: ret void
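// Assertions for the l169 target region begin here. The kernel stub and the
// teams-level outlined function match the l159 pattern (three captures,
// __kmpc_for_static_init_4 with schedule value 92); the difference shows up in the
// parallel body that follows.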
8983 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169
8984 // CHECK14-SAME: (i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
8985 // CHECK14-NEXT: entry:
8986 // CHECK14-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
8987 // CHECK14-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
8988 // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
8989 // CHECK14-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
8990 // CHECK14-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
8991 // CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
8992 // CHECK14-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
8993 // CHECK14-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8
8994 // CHECK14-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.omp_outlined, ptr [[N_ADDR]], i64 [[TMP0]], ptr [[TMP1]])
8995 // CHECK14-NEXT: ret void
8998 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.omp_outlined
8999 // CHECK14-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR3]] {
9000 // CHECK14-NEXT: entry:
9001 // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
9002 // CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
9003 // CHECK14-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 8
9004 // CHECK14-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
9005 // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
9006 // CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
9007 // CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
9008 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
9009 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
9010 // CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
9011 // CHECK14-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
9012 // CHECK14-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
9013 // CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
9014 // CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
9015 // CHECK14-NEXT: [[I3:%.*]] = alloca i32, align 4
9016 // CHECK14-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
9017 // CHECK14-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
9018 // CHECK14-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 8
9019 // CHECK14-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
9020 // CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
9021 // CHECK14-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 8
9022 // CHECK14-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
9023 // CHECK14-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 8
9024 // CHECK14-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
9025 // CHECK14-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4
9026 // CHECK14-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
9027 // CHECK14-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
9028 // CHECK14-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
9029 // CHECK14-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
9030 // CHECK14-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
9031 // CHECK14-NEXT: store i32 0, ptr [[I]], align 4
9032 // CHECK14-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
9033 // CHECK14-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
9034 // CHECK14-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
9035 // CHECK14: omp.precond.then:
9036 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
9037 // CHECK14-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
9038 // CHECK14-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_COMB_UB]], align 4
9039 // CHECK14-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
9040 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
9041 // CHECK14-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
9042 // CHECK14-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
9043 // CHECK14-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
9044 // CHECK14-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
9045 // CHECK14-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
9046 // CHECK14-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
9047 // CHECK14-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
9048 // CHECK14: cond.true:
9049 // CHECK14-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
9050 // CHECK14-NEXT: br label [[COND_END:%.*]]
9051 // CHECK14: cond.false:
9052 // CHECK14-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
9053 // CHECK14-NEXT: br label [[COND_END]]
9054 // CHECK14: cond.end:
9055 // CHECK14-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
9056 // CHECK14-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
9057 // CHECK14-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
9058 // CHECK14-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4
9059 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
9060 // CHECK14: omp.inner.for.cond:
9061 // CHECK14-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34:![0-9]+]]
9062 // CHECK14-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP34]]
9063 // CHECK14-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
9064 // CHECK14-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
9065 // CHECK14: omp.inner.for.body:
9066 // CHECK14-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP34]]
9067 // CHECK14-NEXT: [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
9068 // CHECK14-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP34]]
9069 // CHECK14-NEXT: [[TMP19:%.*]] = zext i32 [[TMP18]] to i64
9070 // CHECK14-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 5, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.omp_outlined.omp_outlined, i64 [[TMP17]], i64 [[TMP19]], ptr [[TMP0]], i64 [[TMP1]], ptr [[TMP2]]), !llvm.access.group [[ACC_GRP34]]
9071 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
9072 // CHECK14: omp.inner.for.inc:
9073 // CHECK14-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]]
9074 // CHECK14-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP34]]
9075 // CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
9076 // CHECK14-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]]
9077 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP35:![0-9]+]]
9078 // CHECK14: omp.inner.for.end:
9079 // CHECK14-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
9080 // CHECK14: omp.loop.exit:
9081 // CHECK14-NEXT: [[TMP22:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
9082 // CHECK14-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4
9083 // CHECK14-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP23]])
9084 // CHECK14-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
9085 // CHECK14-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
9086 // CHECK14-NEXT: br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
9087 // CHECK14: .omp.final.then:
9088 // CHECK14-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
9089 // CHECK14-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP26]], 0
9090 // CHECK14-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
9091 // CHECK14-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
9092 // CHECK14-NEXT: [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
9093 // CHECK14-NEXT: store i32 [[ADD8]], ptr [[I3]], align 4
9094 // CHECK14-NEXT: br label [[DOTOMP_FINAL_DONE]]
9095 // CHECK14: .omp.final.done:
9096 // CHECK14-NEXT: br label [[OMP_PRECOND_END]]
9097 // CHECK14: omp.precond.end:
9098 // CHECK14-NEXT: ret void
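// The parallel body for l169 does not use a static worksharing loop: after the
// usual bound setup it calls __kmpc_dispatch_init_4 with schedule value 1073741859
// and repeatedly claims chunks through __kmpc_dispatch_next_4 in the
// omp.dispatch.cond / omp.dispatch.body loop, consistent with a non-static
// schedule clause on the inner loop.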
9101 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.omp_outlined.omp_outlined
9102 // CHECK14-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR3]] {
9103 // CHECK14-NEXT: entry:
9104 // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
9105 // CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
9106 // CHECK14-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
9107 // CHECK14-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
9108 // CHECK14-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 8
9109 // CHECK14-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
9110 // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
9111 // CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
9112 // CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
9113 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
9114 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
9115 // CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
9116 // CHECK14-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
9117 // CHECK14-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
9118 // CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
9119 // CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
9120 // CHECK14-NEXT: [[I4:%.*]] = alloca i32, align 4
9121 // CHECK14-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
9122 // CHECK14-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
9123 // CHECK14-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
9124 // CHECK14-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
9125 // CHECK14-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 8
9126 // CHECK14-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
9127 // CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
9128 // CHECK14-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 8
9129 // CHECK14-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
9130 // CHECK14-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 8
9131 // CHECK14-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
9132 // CHECK14-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4
9133 // CHECK14-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
9134 // CHECK14-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
9135 // CHECK14-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
9136 // CHECK14-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
9137 // CHECK14-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
9138 // CHECK14-NEXT: store i32 0, ptr [[I]], align 4
9139 // CHECK14-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
9140 // CHECK14-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
9141 // CHECK14-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
9142 // CHECK14: omp.precond.then:
9143 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
9144 // CHECK14-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
9145 // CHECK14-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4
9146 // CHECK14-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
9147 // CHECK14-NEXT: [[CONV:%.*]] = trunc i64 [[TMP7]] to i32
9148 // CHECK14-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
9149 // CHECK14-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP8]] to i32
9150 // CHECK14-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
9151 // CHECK14-NEXT: store i32 [[CONV3]], ptr [[DOTOMP_UB]], align 4
9152 // CHECK14-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
9153 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
9154 // CHECK14-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
9155 // CHECK14-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
9156 // CHECK14-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
9157 // CHECK14-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP11]], align 4
9158 // CHECK14-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB3]], i32 [[TMP12]], i32 1073741859, i32 [[TMP9]], i32 [[TMP10]], i32 1, i32 1)
9159 // CHECK14-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
9160 // CHECK14: omp.dispatch.cond:
9161 // CHECK14-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
9162 // CHECK14-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 4
9163 // CHECK14-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB3]], i32 [[TMP14]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
9164 // CHECK14-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP15]], 0
9165 // CHECK14-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
9166 // CHECK14: omp.dispatch.body:
9167 // CHECK14-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
9168 // CHECK14-NEXT: store i32 [[TMP16]], ptr [[DOTOMP_IV]], align 4
9169 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
9170 // CHECK14: omp.inner.for.cond:
9171 // CHECK14-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP37:![0-9]+]]
9172 // CHECK14-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP37]]
9173 // CHECK14-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
9174 // CHECK14-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
9175 // CHECK14: omp.inner.for.body:
9176 // CHECK14-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP37]]
9177 // CHECK14-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
9178 // CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
9179 // CHECK14-NEXT: store i32 [[ADD]], ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP37]]
9180 // CHECK14-NEXT: [[TMP20:%.*]] = load i32, ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP37]]
9181 // CHECK14-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP20]] to i64
9182 // CHECK14-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i64 [[IDXPROM]]
9183 // CHECK14-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP37]]
9184 // CHECK14-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
9185 // CHECK14: omp.body.continue:
9186 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
9187 // CHECK14: omp.inner.for.inc:
9188 // CHECK14-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP37]]
9189 // CHECK14-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP21]], 1
9190 // CHECK14-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP37]]
9191 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP38:![0-9]+]]
9192 // CHECK14: omp.inner.for.end:
9193 // CHECK14-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
9194 // CHECK14: omp.dispatch.inc:
9195 // CHECK14-NEXT: br label [[OMP_DISPATCH_COND]]
9196 // CHECK14: omp.dispatch.end:
9197 // CHECK14-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
9198 // CHECK14-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
9199 // CHECK14-NEXT: br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
9200 // CHECK14: .omp.final.then:
9201 // CHECK14-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
9202 // CHECK14-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP24]], 0
9203 // CHECK14-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
9204 // CHECK14-NEXT: [[MUL9:%.*]] = mul nsw i32 [[DIV8]], 1
9205 // CHECK14-NEXT: [[ADD10:%.*]] = add nsw i32 0, [[MUL9]]
9206 // CHECK14-NEXT: store i32 [[ADD10]], ptr [[I4]], align 4
9207 // CHECK14-NEXT: br label [[DOTOMP_FINAL_DONE]]
9208 // CHECK14: .omp.final.done:
9209 // CHECK14-NEXT: br label [[OMP_PRECOND_END]]
9210 // CHECK14: omp.precond.end:
9211 // CHECK14-NEXT: ret void
9214 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174
9215 // CHECK14-SAME: (i64 noundef [[M:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
9216 // CHECK14-NEXT: entry:
9217 // CHECK14-NEXT: [[M_ADDR:%.*]] = alloca i64, align 8
9218 // CHECK14-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
9219 // CHECK14-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
9220 // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
9221 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
9222 // CHECK14-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
9223 // CHECK14-NEXT: store i64 [[M]], ptr [[M_ADDR]], align 8
9224 // CHECK14-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
9225 // CHECK14-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
9226 // CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
9227 // CHECK14-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
9228 // CHECK14-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8
9229 // CHECK14-NEXT: [[TMP2:%.*]] = load i32, ptr [[M_ADDR]], align 4
9230 // CHECK14-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 4
9231 // CHECK14-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
9232 // CHECK14-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
9233 // CHECK14-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8
9234 // CHECK14-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.omp_outlined, ptr [[N_ADDR]], i64 [[TMP0]], ptr [[TMP1]], i64 [[TMP4]])
9235 // CHECK14-NEXT: ret void
9238 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.omp_outlined
9239 // CHECK14-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
9240 // CHECK14-NEXT: entry:
9241 // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
9242 // CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
9243 // CHECK14-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 8
9244 // CHECK14-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
9245 // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
9246 // CHECK14-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
9247 // CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
9248 // CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
9249 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
9250 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
9251 // CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
9252 // CHECK14-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
9253 // CHECK14-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
9254 // CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
9255 // CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
9256 // CHECK14-NEXT: [[I4:%.*]] = alloca i32, align 4
9257 // CHECK14-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
9258 // CHECK14-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
9259 // CHECK14-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
9260 // CHECK14-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 8
9261 // CHECK14-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
9262 // CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
9263 // CHECK14-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
9264 // CHECK14-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 8
9265 // CHECK14-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
9266 // CHECK14-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 8
9267 // CHECK14-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
9268 // CHECK14-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_1]], align 4
9269 // CHECK14-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
9270 // CHECK14-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
9271 // CHECK14-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
9272 // CHECK14-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
9273 // CHECK14-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_2]], align 4
9274 // CHECK14-NEXT: store i32 0, ptr [[I]], align 4
9275 // CHECK14-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
9276 // CHECK14-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
9277 // CHECK14-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
9278 // CHECK14: omp.precond.then:
9279 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
9280 // CHECK14-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
9281 // CHECK14-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_COMB_UB]], align 4
9282 // CHECK14-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
9283 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
9284 // CHECK14-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
9285 // CHECK14-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
9286 // CHECK14-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
9287 // CHECK14-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
9288 // CHECK14-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
9289 // CHECK14-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
9290 // CHECK14-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
9291 // CHECK14: cond.true:
9292 // CHECK14-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
9293 // CHECK14-NEXT: br label [[COND_END:%.*]]
9294 // CHECK14: cond.false:
9295 // CHECK14-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
9296 // CHECK14-NEXT: br label [[COND_END]]
9297 // CHECK14: cond.end:
9298 // CHECK14-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
9299 // CHECK14-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
9300 // CHECK14-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
9301 // CHECK14-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4
9302 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
9303 // CHECK14: omp.inner.for.cond:
9304 // CHECK14-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP40:![0-9]+]]
9305 // CHECK14-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP40]]
9306 // CHECK14-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
9307 // CHECK14-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
9308 // CHECK14: omp.inner.for.body:
9309 // CHECK14-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP40]]
9310 // CHECK14-NEXT: [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
9311 // CHECK14-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP40]]
9312 // CHECK14-NEXT: [[TMP19:%.*]] = zext i32 [[TMP18]] to i64
9313 // CHECK14-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group [[ACC_GRP40]]
9314 // CHECK14-NEXT: store i32 [[TMP20]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group [[ACC_GRP40]]
9315 // CHECK14-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group [[ACC_GRP40]]
9316 // CHECK14-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 6, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.omp_outlined.omp_outlined, i64 [[TMP17]], i64 [[TMP19]], ptr [[TMP0]], i64 [[TMP1]], ptr [[TMP2]], i64 [[TMP21]]), !llvm.access.group [[ACC_GRP40]]
9317 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
9318 // CHECK14: omp.inner.for.inc:
9319 // CHECK14-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP40]]
9320 // CHECK14-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP40]]
9321 // CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
9322 // CHECK14-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP40]]
9323 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP41:![0-9]+]]
9324 // CHECK14: omp.inner.for.end:
9325 // CHECK14-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
9326 // CHECK14: omp.loop.exit:
9327 // CHECK14-NEXT: [[TMP24:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
9328 // CHECK14-NEXT: [[TMP25:%.*]] = load i32, ptr [[TMP24]], align 4
9329 // CHECK14-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP25]])
9330 // CHECK14-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
9331 // CHECK14-NEXT: [[TMP27:%.*]] = icmp ne i32 [[TMP26]], 0
9332 // CHECK14-NEXT: br i1 [[TMP27]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
9333 // CHECK14: .omp.final.then:
9334 // CHECK14-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
9335 // CHECK14-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP28]], 0
9336 // CHECK14-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
9337 // CHECK14-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
9338 // CHECK14-NEXT: [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
9339 // CHECK14-NEXT: store i32 [[ADD9]], ptr [[I4]], align 4
9340 // CHECK14-NEXT: br label [[DOTOMP_FINAL_DONE]]
9341 // CHECK14: .omp.final.done:
9342 // CHECK14-NEXT: br label [[OMP_PRECOND_END]]
9343 // CHECK14: omp.precond.end:
9344 // CHECK14-NEXT: ret void
9347 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.omp_outlined.omp_outlined
9348 // CHECK14-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
9349 // CHECK14-NEXT: entry:
9350 // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
9351 // CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
9352 // CHECK14-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
9353 // CHECK14-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
9354 // CHECK14-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 8
9355 // CHECK14-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
9356 // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
9357 // CHECK14-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
9358 // CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
9359 // CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
9360 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
9361 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
9362 // CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
9363 // CHECK14-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
9364 // CHECK14-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
9365 // CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
9366 // CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
9367 // CHECK14-NEXT: [[I5:%.*]] = alloca i32, align 4
9368 // CHECK14-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
9369 // CHECK14-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
9370 // CHECK14-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
9371 // CHECK14-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
9372 // CHECK14-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 8
9373 // CHECK14-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
9374 // CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
9375 // CHECK14-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
9376 // CHECK14-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 8
9377 // CHECK14-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
9378 // CHECK14-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 8
9379 // CHECK14-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
9380 // CHECK14-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_1]], align 4
9381 // CHECK14-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
9382 // CHECK14-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
9383 // CHECK14-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
9384 // CHECK14-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
9385 // CHECK14-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_2]], align 4
9386 // CHECK14-NEXT: store i32 0, ptr [[I]], align 4
9387 // CHECK14-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
9388 // CHECK14-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
9389 // CHECK14-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
9390 // CHECK14: omp.precond.then:
9391 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
9392 // CHECK14-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
9393 // CHECK14-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4
9394 // CHECK14-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
9395 // CHECK14-NEXT: [[CONV:%.*]] = trunc i64 [[TMP7]] to i32
9396 // CHECK14-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
9397 // CHECK14-NEXT: [[CONV4:%.*]] = trunc i64 [[TMP8]] to i32
9398 // CHECK14-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
9399 // CHECK14-NEXT: store i32 [[CONV4]], ptr [[DOTOMP_UB]], align 4
9400 // CHECK14-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
9401 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
9402 // CHECK14-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
9403 // CHECK14-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
9404 // CHECK14-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
9405 // CHECK14-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
9406 // CHECK14-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4
9407 // CHECK14-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB3]], i32 [[TMP13]], i32 1073741859, i32 [[TMP10]], i32 [[TMP11]], i32 1, i32 [[TMP9]])
9408 // CHECK14-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
9409 // CHECK14: omp.dispatch.cond:
9410 // CHECK14-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
9411 // CHECK14-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 4
9412 // CHECK14-NEXT: [[TMP16:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB3]], i32 [[TMP15]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
9413 // CHECK14-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP16]], 0
9414 // CHECK14-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
9415 // CHECK14: omp.dispatch.body:
9416 // CHECK14-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
9417 // CHECK14-NEXT: store i32 [[TMP17]], ptr [[DOTOMP_IV]], align 4
9418 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
9419 // CHECK14: omp.inner.for.cond:
9420 // CHECK14-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP43:![0-9]+]]
9421 // CHECK14-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP43]]
9422 // CHECK14-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
9423 // CHECK14-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
9424 // CHECK14: omp.inner.for.body:
9425 // CHECK14-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP43]]
9426 // CHECK14-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP20]], 1
9427 // CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
9428 // CHECK14-NEXT: store i32 [[ADD]], ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP43]]
9429 // CHECK14-NEXT: [[TMP21:%.*]] = load i32, ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP43]]
9430 // CHECK14-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
9431 // CHECK14-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i64 [[IDXPROM]]
9432 // CHECK14-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP43]]
9433 // CHECK14-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
9434 // CHECK14: omp.body.continue:
9435 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
9436 // CHECK14: omp.inner.for.inc:
9437 // CHECK14-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP43]]
9438 // CHECK14-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP22]], 1
9439 // CHECK14-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP43]]
9440 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP44:![0-9]+]]
9441 // CHECK14: omp.inner.for.end:
9442 // CHECK14-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
9443 // CHECK14: omp.dispatch.inc:
9444 // CHECK14-NEXT: br label [[OMP_DISPATCH_COND]]
9445 // CHECK14: omp.dispatch.end:
9446 // CHECK14-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
9447 // CHECK14-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
9448 // CHECK14-NEXT: br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
9449 // CHECK14: .omp.final.then:
9450 // CHECK14-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
9451 // CHECK14-NEXT: [[SUB8:%.*]] = sub nsw i32 [[TMP25]], 0
9452 // CHECK14-NEXT: [[DIV9:%.*]] = sdiv i32 [[SUB8]], 1
9453 // CHECK14-NEXT: [[MUL10:%.*]] = mul nsw i32 [[DIV9]], 1
9454 // CHECK14-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]]
9455 // CHECK14-NEXT: store i32 [[ADD11]], ptr [[I5]], align 4
9456 // CHECK14-NEXT: br label [[DOTOMP_FINAL_DONE]]
9457 // CHECK14: .omp.final.done:
9458 // CHECK14-NEXT: br label [[OMP_PRECOND_END]]
9459 // CHECK14: omp.precond.end:
9460 // CHECK14-NEXT: ret void
9463 // CHECK14-LABEL: define {{[^@]+}}@_Z5tmainIiLi10EEiT_
9464 // CHECK14-SAME: (i32 noundef signext [[ARGC:%.*]]) #[[ATTR6:[0-9]+]] comdat {
9465 // CHECK14-NEXT: entry:
9466 // CHECK14-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
9467 // CHECK14-NEXT: [[A:%.*]] = alloca [10 x i32], align 4
9468 // CHECK14-NEXT: [[M:%.*]] = alloca i32, align 4
9469 // CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8
9470 // CHECK14-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8
9471 // CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8
9472 // CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
9473 // CHECK14-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
9474 // CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS1:%.*]] = alloca [1 x ptr], align 8
9475 // CHECK14-NEXT: [[DOTOFFLOAD_PTRS2:%.*]] = alloca [1 x ptr], align 8
9476 // CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS3:%.*]] = alloca [1 x ptr], align 8
9477 // CHECK14-NEXT: [[_TMP4:%.*]] = alloca i32, align 4
9478 // CHECK14-NEXT: [[KERNEL_ARGS5:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
9479 // CHECK14-NEXT: [[M_CASTED:%.*]] = alloca i64, align 8
9480 // CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS8:%.*]] = alloca [2 x ptr], align 8
9481 // CHECK14-NEXT: [[DOTOFFLOAD_PTRS9:%.*]] = alloca [2 x ptr], align 8
9482 // CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS10:%.*]] = alloca [2 x ptr], align 8
9483 // CHECK14-NEXT: [[_TMP11:%.*]] = alloca i32, align 4
9484 // CHECK14-NEXT: [[KERNEL_ARGS12:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
9485 // CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS15:%.*]] = alloca [1 x ptr], align 8
9486 // CHECK14-NEXT: [[DOTOFFLOAD_PTRS16:%.*]] = alloca [1 x ptr], align 8
9487 // CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS17:%.*]] = alloca [1 x ptr], align 8
9488 // CHECK14-NEXT: [[_TMP18:%.*]] = alloca i32, align 4
9489 // CHECK14-NEXT: [[KERNEL_ARGS19:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
9490 // CHECK14-NEXT: [[M_CASTED22:%.*]] = alloca i64, align 8
9491 // CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS23:%.*]] = alloca [2 x ptr], align 8
9492 // CHECK14-NEXT: [[DOTOFFLOAD_PTRS24:%.*]] = alloca [2 x ptr], align 8
9493 // CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS25:%.*]] = alloca [2 x ptr], align 8
9494 // CHECK14-NEXT: [[_TMP26:%.*]] = alloca i32, align 4
9495 // CHECK14-NEXT: [[KERNEL_ARGS27:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
9496 // CHECK14-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4
9497 // CHECK14-NEXT: store i32 10, ptr [[M]], align 4
9498 // CHECK14-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
9499 // CHECK14-NEXT: store ptr [[A]], ptr [[TMP0]], align 8
9500 // CHECK14-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
9501 // CHECK14-NEXT: store ptr [[A]], ptr [[TMP1]], align 8
9502 // CHECK14-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
9503 // CHECK14-NEXT: store ptr null, ptr [[TMP2]], align 8
9504 // CHECK14-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
9505 // CHECK14-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
9506 // CHECK14-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
9507 // CHECK14-NEXT: store i32 2, ptr [[TMP5]], align 4
9508 // CHECK14-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
9509 // CHECK14-NEXT: store i32 1, ptr [[TMP6]], align 4
9510 // CHECK14-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
9511 // CHECK14-NEXT: store ptr [[TMP3]], ptr [[TMP7]], align 8
9512 // CHECK14-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
9513 // CHECK14-NEXT: store ptr [[TMP4]], ptr [[TMP8]], align 8
9514 // CHECK14-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
9515 // CHECK14-NEXT: store ptr @.offload_sizes.9, ptr [[TMP9]], align 8
9516 // CHECK14-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
9517 // CHECK14-NEXT: store ptr @.offload_maptypes.10, ptr [[TMP10]], align 8
9518 // CHECK14-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
9519 // CHECK14-NEXT: store ptr null, ptr [[TMP11]], align 8
9520 // CHECK14-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
9521 // CHECK14-NEXT: store ptr null, ptr [[TMP12]], align 8
9522 // CHECK14-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
9523 // CHECK14-NEXT: store i64 10, ptr [[TMP13]], align 8
9524 // CHECK14-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
9525 // CHECK14-NEXT: store i64 0, ptr [[TMP14]], align 8
9526 // CHECK14-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
9527 // CHECK14-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP15]], align 4
9528 // CHECK14-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
9529 // CHECK14-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP16]], align 4
9530 // CHECK14-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
9531 // CHECK14-NEXT: store i32 0, ptr [[TMP17]], align 4
9532 // CHECK14-NEXT: [[TMP18:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.region_id, ptr [[KERNEL_ARGS]])
9533 // CHECK14-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
9534 // CHECK14-NEXT: br i1 [[TMP19]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
9535 // CHECK14: omp_offload.failed:
9536 // CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122(ptr [[A]]) #[[ATTR4]]
9537 // CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT]]
9538 // CHECK14: omp_offload.cont:
9539 // CHECK14-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
9540 // CHECK14-NEXT: store ptr [[A]], ptr [[TMP20]], align 8
9541 // CHECK14-NEXT: [[TMP21:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
9542 // CHECK14-NEXT: store ptr [[A]], ptr [[TMP21]], align 8
9543 // CHECK14-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS3]], i64 0, i64 0
9544 // CHECK14-NEXT: store ptr null, ptr [[TMP22]], align 8
9545 // CHECK14-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
9546 // CHECK14-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
9547 // CHECK14-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 0
9548 // CHECK14-NEXT: store i32 2, ptr [[TMP25]], align 4
9549 // CHECK14-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 1
9550 // CHECK14-NEXT: store i32 1, ptr [[TMP26]], align 4
9551 // CHECK14-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 2
9552 // CHECK14-NEXT: store ptr [[TMP23]], ptr [[TMP27]], align 8
9553 // CHECK14-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 3
9554 // CHECK14-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 8
9555 // CHECK14-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 4
9556 // CHECK14-NEXT: store ptr @.offload_sizes.11, ptr [[TMP29]], align 8
9557 // CHECK14-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 5
9558 // CHECK14-NEXT: store ptr @.offload_maptypes.12, ptr [[TMP30]], align 8
9559 // CHECK14-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 6
9560 // CHECK14-NEXT: store ptr null, ptr [[TMP31]], align 8
9561 // CHECK14-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 7
9562 // CHECK14-NEXT: store ptr null, ptr [[TMP32]], align 8
9563 // CHECK14-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 8
9564 // CHECK14-NEXT: store i64 10, ptr [[TMP33]], align 8
9565 // CHECK14-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 9
9566 // CHECK14-NEXT: store i64 0, ptr [[TMP34]], align 8
9567 // CHECK14-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 10
9568 // CHECK14-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP35]], align 4
9569 // CHECK14-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 11
9570 // CHECK14-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP36]], align 4
9571 // CHECK14-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 12
9572 // CHECK14-NEXT: store i32 0, ptr [[TMP37]], align 4
9573 // CHECK14-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.region_id, ptr [[KERNEL_ARGS5]])
9574 // CHECK14-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
9575 // CHECK14-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED6:%.*]], label [[OMP_OFFLOAD_CONT7:%.*]]
9576 // CHECK14: omp_offload.failed6:
9577 // CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127(ptr [[A]]) #[[ATTR4]]
9578 // CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT7]]
9579 // CHECK14: omp_offload.cont7:
9580 // CHECK14-NEXT: [[TMP40:%.*]] = load i32, ptr [[M]], align 4
9581 // CHECK14-NEXT: store i32 [[TMP40]], ptr [[M_CASTED]], align 4
9582 // CHECK14-NEXT: [[TMP41:%.*]] = load i64, ptr [[M_CASTED]], align 8
9583 // CHECK14-NEXT: [[TMP42:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS8]], i32 0, i32 0
9584 // CHECK14-NEXT: store i64 [[TMP41]], ptr [[TMP42]], align 8
9585 // CHECK14-NEXT: [[TMP43:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS9]], i32 0, i32 0
9586 // CHECK14-NEXT: store i64 [[TMP41]], ptr [[TMP43]], align 8
9587 // CHECK14-NEXT: [[TMP44:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS10]], i64 0, i64 0
9588 // CHECK14-NEXT: store ptr null, ptr [[TMP44]], align 8
9589 // CHECK14-NEXT: [[TMP45:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS8]], i32 0, i32 1
9590 // CHECK14-NEXT: store ptr [[A]], ptr [[TMP45]], align 8
9591 // CHECK14-NEXT: [[TMP46:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS9]], i32 0, i32 1
9592 // CHECK14-NEXT: store ptr [[A]], ptr [[TMP46]], align 8
9593 // CHECK14-NEXT: [[TMP47:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS10]], i64 0, i64 1
9594 // CHECK14-NEXT: store ptr null, ptr [[TMP47]], align 8
9595 // CHECK14-NEXT: [[TMP48:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS8]], i32 0, i32 0
9596 // CHECK14-NEXT: [[TMP49:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS9]], i32 0, i32 0
9597 // CHECK14-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 0
9598 // CHECK14-NEXT: store i32 2, ptr [[TMP50]], align 4
9599 // CHECK14-NEXT: [[TMP51:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 1
9600 // CHECK14-NEXT: store i32 2, ptr [[TMP51]], align 4
9601 // CHECK14-NEXT: [[TMP52:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 2
9602 // CHECK14-NEXT: store ptr [[TMP48]], ptr [[TMP52]], align 8
9603 // CHECK14-NEXT: [[TMP53:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 3
9604 // CHECK14-NEXT: store ptr [[TMP49]], ptr [[TMP53]], align 8
9605 // CHECK14-NEXT: [[TMP54:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 4
9606 // CHECK14-NEXT: store ptr @.offload_sizes.13, ptr [[TMP54]], align 8
9607 // CHECK14-NEXT: [[TMP55:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 5
9608 // CHECK14-NEXT: store ptr @.offload_maptypes.14, ptr [[TMP55]], align 8
9609 // CHECK14-NEXT: [[TMP56:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 6
9610 // CHECK14-NEXT: store ptr null, ptr [[TMP56]], align 8
9611 // CHECK14-NEXT: [[TMP57:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 7
9612 // CHECK14-NEXT: store ptr null, ptr [[TMP57]], align 8
9613 // CHECK14-NEXT: [[TMP58:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 8
9614 // CHECK14-NEXT: store i64 10, ptr [[TMP58]], align 8
9615 // CHECK14-NEXT: [[TMP59:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 9
9616 // CHECK14-NEXT: store i64 0, ptr [[TMP59]], align 8
9617 // CHECK14-NEXT: [[TMP60:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 10
9618 // CHECK14-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP60]], align 4
9619 // CHECK14-NEXT: [[TMP61:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 11
9620 // CHECK14-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP61]], align 4
9621 // CHECK14-NEXT: [[TMP62:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 12
9622 // CHECK14-NEXT: store i32 0, ptr [[TMP62]], align 4
9623 // CHECK14-NEXT: [[TMP63:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.region_id, ptr [[KERNEL_ARGS12]])
9624 // CHECK14-NEXT: [[TMP64:%.*]] = icmp ne i32 [[TMP63]], 0
9625 // CHECK14-NEXT: br i1 [[TMP64]], label [[OMP_OFFLOAD_FAILED13:%.*]], label [[OMP_OFFLOAD_CONT14:%.*]]
9626 // CHECK14: omp_offload.failed13:
9627 // CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132(i64 [[TMP41]], ptr [[A]]) #[[ATTR4]]
9628 // CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT14]]
9629 // CHECK14: omp_offload.cont14:
9630 // CHECK14-NEXT: [[TMP65:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS15]], i32 0, i32 0
9631 // CHECK14-NEXT: store ptr [[A]], ptr [[TMP65]], align 8
9632 // CHECK14-NEXT: [[TMP66:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS16]], i32 0, i32 0
9633 // CHECK14-NEXT: store ptr [[A]], ptr [[TMP66]], align 8
9634 // CHECK14-NEXT: [[TMP67:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS17]], i64 0, i64 0
9635 // CHECK14-NEXT: store ptr null, ptr [[TMP67]], align 8
9636 // CHECK14-NEXT: [[TMP68:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS15]], i32 0, i32 0
9637 // CHECK14-NEXT: [[TMP69:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS16]], i32 0, i32 0
9638 // CHECK14-NEXT: [[TMP70:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 0
9639 // CHECK14-NEXT: store i32 2, ptr [[TMP70]], align 4
9640 // CHECK14-NEXT: [[TMP71:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 1
9641 // CHECK14-NEXT: store i32 1, ptr [[TMP71]], align 4
9642 // CHECK14-NEXT: [[TMP72:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 2
9643 // CHECK14-NEXT: store ptr [[TMP68]], ptr [[TMP72]], align 8
9644 // CHECK14-NEXT: [[TMP73:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 3
9645 // CHECK14-NEXT: store ptr [[TMP69]], ptr [[TMP73]], align 8
9646 // CHECK14-NEXT: [[TMP74:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 4
9647 // CHECK14-NEXT: store ptr @.offload_sizes.15, ptr [[TMP74]], align 8
9648 // CHECK14-NEXT: [[TMP75:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 5
9649 // CHECK14-NEXT: store ptr @.offload_maptypes.16, ptr [[TMP75]], align 8
9650 // CHECK14-NEXT: [[TMP76:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 6
9651 // CHECK14-NEXT: store ptr null, ptr [[TMP76]], align 8
9652 // CHECK14-NEXT: [[TMP77:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 7
9653 // CHECK14-NEXT: store ptr null, ptr [[TMP77]], align 8
9654 // CHECK14-NEXT: [[TMP78:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 8
9655 // CHECK14-NEXT: store i64 10, ptr [[TMP78]], align 8
9656 // CHECK14-NEXT: [[TMP79:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 9
9657 // CHECK14-NEXT: store i64 0, ptr [[TMP79]], align 8
9658 // CHECK14-NEXT: [[TMP80:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 10
9659 // CHECK14-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP80]], align 4
9660 // CHECK14-NEXT: [[TMP81:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 11
9661 // CHECK14-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP81]], align 4
9662 // CHECK14-NEXT: [[TMP82:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 12
9663 // CHECK14-NEXT: store i32 0, ptr [[TMP82]], align 4
9664 // CHECK14-NEXT: [[TMP83:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.region_id, ptr [[KERNEL_ARGS19]])
9665 // CHECK14-NEXT: [[TMP84:%.*]] = icmp ne i32 [[TMP83]], 0
9666 // CHECK14-NEXT: br i1 [[TMP84]], label [[OMP_OFFLOAD_FAILED20:%.*]], label [[OMP_OFFLOAD_CONT21:%.*]]
9667 // CHECK14: omp_offload.failed20:
9668 // CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137(ptr [[A]]) #[[ATTR4]]
9669 // CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT21]]
9670 // CHECK14: omp_offload.cont21:
9671 // CHECK14-NEXT: [[TMP85:%.*]] = load i32, ptr [[M]], align 4
9672 // CHECK14-NEXT: store i32 [[TMP85]], ptr [[M_CASTED22]], align 4
9673 // CHECK14-NEXT: [[TMP86:%.*]] = load i64, ptr [[M_CASTED22]], align 8
9674 // CHECK14-NEXT: [[TMP87:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 0
9675 // CHECK14-NEXT: store i64 [[TMP86]], ptr [[TMP87]], align 8
9676 // CHECK14-NEXT: [[TMP88:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS24]], i32 0, i32 0
9677 // CHECK14-NEXT: store i64 [[TMP86]], ptr [[TMP88]], align 8
9678 // CHECK14-NEXT: [[TMP89:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS25]], i64 0, i64 0
9679 // CHECK14-NEXT: store ptr null, ptr [[TMP89]], align 8
9680 // CHECK14-NEXT: [[TMP90:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 1
9681 // CHECK14-NEXT: store ptr [[A]], ptr [[TMP90]], align 8
9682 // CHECK14-NEXT: [[TMP91:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS24]], i32 0, i32 1
9683 // CHECK14-NEXT: store ptr [[A]], ptr [[TMP91]], align 8
9684 // CHECK14-NEXT: [[TMP92:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS25]], i64 0, i64 1
9685 // CHECK14-NEXT: store ptr null, ptr [[TMP92]], align 8
9686 // CHECK14-NEXT: [[TMP93:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 0
9687 // CHECK14-NEXT: [[TMP94:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS24]], i32 0, i32 0
9688 // CHECK14-NEXT: [[TMP95:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 0
9689 // CHECK14-NEXT: store i32 2, ptr [[TMP95]], align 4
9690 // CHECK14-NEXT: [[TMP96:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 1
9691 // CHECK14-NEXT: store i32 2, ptr [[TMP96]], align 4
9692 // CHECK14-NEXT: [[TMP97:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 2
9693 // CHECK14-NEXT: store ptr [[TMP93]], ptr [[TMP97]], align 8
9694 // CHECK14-NEXT: [[TMP98:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 3
9695 // CHECK14-NEXT: store ptr [[TMP94]], ptr [[TMP98]], align 8
9696 // CHECK14-NEXT: [[TMP99:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 4
9697 // CHECK14-NEXT: store ptr @.offload_sizes.17, ptr [[TMP99]], align 8
9698 // CHECK14-NEXT: [[TMP100:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 5
9699 // CHECK14-NEXT: store ptr @.offload_maptypes.18, ptr [[TMP100]], align 8
9700 // CHECK14-NEXT: [[TMP101:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 6
9701 // CHECK14-NEXT: store ptr null, ptr [[TMP101]], align 8
9702 // CHECK14-NEXT: [[TMP102:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 7
9703 // CHECK14-NEXT: store ptr null, ptr [[TMP102]], align 8
9704 // CHECK14-NEXT: [[TMP103:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 8
9705 // CHECK14-NEXT: store i64 10, ptr [[TMP103]], align 8
9706 // CHECK14-NEXT: [[TMP104:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 9
9707 // CHECK14-NEXT: store i64 0, ptr [[TMP104]], align 8
9708 // CHECK14-NEXT: [[TMP105:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 10
9709 // CHECK14-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP105]], align 4
9710 // CHECK14-NEXT: [[TMP106:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 11
9711 // CHECK14-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP106]], align 4
9712 // CHECK14-NEXT: [[TMP107:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 12
9713 // CHECK14-NEXT: store i32 0, ptr [[TMP107]], align 4
9714 // CHECK14-NEXT: [[TMP108:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.region_id, ptr [[KERNEL_ARGS27]])
9715 // CHECK14-NEXT: [[TMP109:%.*]] = icmp ne i32 [[TMP108]], 0
9716 // CHECK14-NEXT: br i1 [[TMP109]], label [[OMP_OFFLOAD_FAILED28:%.*]], label [[OMP_OFFLOAD_CONT29:%.*]]
9717 // CHECK14: omp_offload.failed28:
9718 // CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142(i64 [[TMP86]], ptr [[A]]) #[[ATTR4]]
9719 // CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT29]]
9720 // CHECK14: omp_offload.cont29:
9721 // CHECK14-NEXT: ret i32 0
9724 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122
9725 // CHECK14-SAME: (ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
9726 // CHECK14-NEXT: entry:
9727 // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
9728 // CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
9729 // CHECK14-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
9730 // CHECK14-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.omp_outlined, ptr [[TMP0]])
9731 // CHECK14-NEXT: ret void
9734 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.omp_outlined
9735 // CHECK14-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR3]] {
9736 // CHECK14-NEXT: entry:
9737 // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
9738 // CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
9739 // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
9740 // CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
9741 // CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
9742 // CHECK14-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
9743 // CHECK14-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
9744 // CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
9745 // CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
9746 // CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
9747 // CHECK14-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
9748 // CHECK14-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
9749 // CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
9750 // CHECK14-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
9751 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
9752 // CHECK14-NEXT: store i32 9, ptr [[DOTOMP_COMB_UB]], align 4
9753 // CHECK14-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
9754 // CHECK14-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
9755 // CHECK14-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
9756 // CHECK14-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
9757 // CHECK14-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
9758 // CHECK14-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
9759 // CHECK14-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
9760 // CHECK14-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
9761 // CHECK14: cond.true:
9762 // CHECK14-NEXT: br label [[COND_END:%.*]]
9763 // CHECK14: cond.false:
9764 // CHECK14-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
9765 // CHECK14-NEXT: br label [[COND_END]]
9766 // CHECK14: cond.end:
9767 // CHECK14-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
9768 // CHECK14-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
9769 // CHECK14-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
9770 // CHECK14-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
9771 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
9772 // CHECK14: omp.inner.for.cond:
9773 // CHECK14-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP46:![0-9]+]]
9774 // CHECK14-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP46]]
9775 // CHECK14-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
9776 // CHECK14-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
9777 // CHECK14: omp.inner.for.body:
9778 // CHECK14-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP46]]
9779 // CHECK14-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
9780 // CHECK14-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP46]]
9781 // CHECK14-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
9782 // CHECK14-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP46]]
9783 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
9784 // CHECK14: omp.inner.for.inc:
9785 // CHECK14-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP46]]
9786 // CHECK14-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP46]]
9787 // CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
9788 // CHECK14-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP46]]
9789 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP47:![0-9]+]]
9790 // CHECK14: omp.inner.for.end:
9791 // CHECK14-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
9792 // CHECK14: omp.loop.exit:
9793 // CHECK14-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
9794 // CHECK14-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
9795 // CHECK14-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
9796 // CHECK14-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
9797 // CHECK14: .omp.final.then:
9798 // CHECK14-NEXT: store i32 10, ptr [[I]], align 4
9799 // CHECK14-NEXT: br label [[DOTOMP_FINAL_DONE]]
9800 // CHECK14: .omp.final.done:
9801 // CHECK14-NEXT: ret void
9804 // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.omp_outlined.omp_outlined
9805 // CHECK14-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR3]] {
9806 // CHECK14-NEXT: entry:
9807 // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
9808 // CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
9809 // CHECK14-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
9810 // CHECK14-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
9811 // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
9812 // CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
9813 // CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
9814 // CHECK14-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
9815 // CHECK14-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
9816 // CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
9817 // CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
9818 // CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK14-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK14-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK14-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK14-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK14-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK14-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// CHECK14-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK14-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK14-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK14-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
// CHECK14-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
// CHECK14-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
// CHECK14-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK14-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK14-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
// CHECK14-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK14-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK14-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 9
// CHECK14-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK14: cond.true:
// CHECK14-NEXT: br label [[COND_END:%.*]]
// CHECK14: cond.false:
// CHECK14-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK14-NEXT: br label [[COND_END]]
// CHECK14: cond.end:
// CHECK14-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
// CHECK14-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK14-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK14-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK14: omp.inner.for.cond:
// CHECK14-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP49:![0-9]+]]
// CHECK14-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP49]]
// CHECK14-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
// CHECK14-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK14: omp.inner.for.body:
// CHECK14-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP49]]
// CHECK14-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
// CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK14-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP49]]
// CHECK14-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP49]]
// CHECK14-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
// CHECK14-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
// CHECK14-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP49]]
// CHECK14-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK14: omp.body.continue:
// CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK14: omp.inner.for.inc:
// CHECK14-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP49]]
// CHECK14-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK14-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP49]]
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP50:![0-9]+]]
// CHECK14: omp.inner.for.end:
// CHECK14-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK14: omp.loop.exit:
// CHECK14-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP4]])
// CHECK14-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK14-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
// CHECK14-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK14: .omp.final.then:
// CHECK14-NEXT: store i32 10, ptr [[I]], align 4
// CHECK14-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK14: .omp.final.done:
// CHECK14-NEXT: ret void
//
//
// CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127
// CHECK14-SAME: (ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK14-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK14-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.omp_outlined, ptr [[TMP0]])
// CHECK14-NEXT: ret void
//
//
// CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.omp_outlined
// CHECK14-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR3]] {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK14-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK14-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK14-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
// CHECK14-NEXT: store i32 9, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK14-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK14-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
// CHECK14-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK14-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
// CHECK14-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK14: cond.true:
// CHECK14-NEXT: br label [[COND_END:%.*]]
// CHECK14: cond.false:
// CHECK14-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: br label [[COND_END]]
// CHECK14: cond.end:
// CHECK14-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK14-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
// CHECK14-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK14: omp.inner.for.cond:
// CHECK14-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP52:![0-9]+]]
// CHECK14-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP52]]
// CHECK14-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK14-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK14: omp.inner.for.body:
// CHECK14-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP52]]
// CHECK14-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK14-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP52]]
// CHECK14-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
// CHECK14-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP52]]
// CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK14: omp.inner.for.inc:
// CHECK14-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP52]]
// CHECK14-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP52]]
// CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK14-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP52]]
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP53:![0-9]+]]
// CHECK14: omp.inner.for.end:
// CHECK14-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK14: omp.loop.exit:
// CHECK14-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
// CHECK14-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK14-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
// CHECK14-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK14: .omp.final.then:
// CHECK14-NEXT: store i32 10, ptr [[I]], align 4
// CHECK14-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK14: .omp.final.done:
// CHECK14-NEXT: ret void
//
//
// CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.omp_outlined.omp_outlined
// CHECK14-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR3]] {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK14-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK14-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK14-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK14-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK14-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK14-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK14-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// CHECK14-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK14-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK14-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK14-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
// CHECK14-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
// CHECK14-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
// CHECK14-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK14-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK14-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
// CHECK14-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK14-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK14-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 9
// CHECK14-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK14: cond.true:
// CHECK14-NEXT: br label [[COND_END:%.*]]
// CHECK14: cond.false:
// CHECK14-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK14-NEXT: br label [[COND_END]]
// CHECK14: cond.end:
// CHECK14-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
// CHECK14-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK14-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK14-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK14: omp.inner.for.cond:
// CHECK14-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP55:![0-9]+]]
// CHECK14-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP55]]
// CHECK14-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
// CHECK14-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK14: omp.inner.for.body:
// CHECK14-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP55]]
// CHECK14-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
// CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK14-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP55]]
// CHECK14-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP55]]
// CHECK14-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
// CHECK14-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
// CHECK14-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP55]]
// CHECK14-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK14: omp.body.continue:
// CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK14: omp.inner.for.inc:
// CHECK14-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP55]]
// CHECK14-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK14-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP55]]
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP56:![0-9]+]]
// CHECK14: omp.inner.for.end:
// CHECK14-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK14: omp.loop.exit:
// CHECK14-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP4]])
// CHECK14-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK14-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
// CHECK14-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK14: .omp.final.then:
// CHECK14-NEXT: store i32 10, ptr [[I]], align 4
// CHECK14-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK14: .omp.final.done:
// CHECK14-NEXT: ret void
//
//
// CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132
// CHECK14-SAME: (i64 noundef [[M:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[M_ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK14-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK14-NEXT: store i64 [[M]], ptr [[M_ADDR]], align 8
// CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK14-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK14-NEXT: [[TMP1:%.*]] = load i32, ptr [[M_ADDR]], align 4
// CHECK14-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK14-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK14-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK14-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK14-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.omp_outlined, ptr [[TMP0]], i64 [[TMP3]])
// CHECK14-NEXT: ret void
//
//
// CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.omp_outlined
// CHECK14-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK14-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK14-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK14-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK14-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK14-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
// CHECK14-NEXT: store i32 9, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK14-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK14-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
// CHECK14-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK14-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
// CHECK14-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK14: cond.true:
// CHECK14-NEXT: br label [[COND_END:%.*]]
// CHECK14: cond.false:
// CHECK14-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: br label [[COND_END]]
// CHECK14: cond.end:
// CHECK14-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK14-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
// CHECK14-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK14: omp.inner.for.cond:
// CHECK14-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP58:![0-9]+]]
// CHECK14-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP58]]
// CHECK14-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK14-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK14: omp.inner.for.body:
// CHECK14-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP58]]
// CHECK14-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK14-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP58]]
// CHECK14-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
// CHECK14-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group [[ACC_GRP58]]
// CHECK14-NEXT: store i32 [[TMP12]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group [[ACC_GRP58]]
// CHECK14-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group [[ACC_GRP58]]
// CHECK14-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]], ptr [[TMP0]], i64 [[TMP13]]), !llvm.access.group [[ACC_GRP58]]
// CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK14: omp.inner.for.inc:
// CHECK14-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP58]]
// CHECK14-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP58]]
// CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
// CHECK14-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP58]]
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP59:![0-9]+]]
// CHECK14: omp.inner.for.end:
// CHECK14-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK14: omp.loop.exit:
// CHECK14-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
// CHECK14-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK14-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
// CHECK14-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK14: .omp.final.then:
// CHECK14-NEXT: store i32 10, ptr [[I]], align 4
// CHECK14-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK14: .omp.final.done:
// CHECK14-NEXT: ret void
//
//
// CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.omp_outlined.omp_outlined
// CHECK14-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK14-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK14-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK14-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK14-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK14-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK14-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK14-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK14-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK14-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// CHECK14-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK14-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK14-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK14-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
// CHECK14-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
// CHECK14-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
// CHECK14-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK14-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK14-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK14-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4
// CHECK14-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP5]], i32 33, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[TMP3]])
// CHECK14-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK14: omp.dispatch.cond:
// CHECK14-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK14-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK14-NEXT: [[CONV2:%.*]] = trunc i64 [[TMP7]] to i32
// CHECK14-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], [[CONV2]]
// CHECK14-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK14: cond.true:
// CHECK14-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK14-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP8]] to i32
// CHECK14-NEXT: br label [[COND_END:%.*]]
// CHECK14: cond.false:
// CHECK14-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK14-NEXT: br label [[COND_END]]
// CHECK14: cond.end:
// CHECK14-NEXT: [[COND:%.*]] = phi i32 [ [[CONV3]], [[COND_TRUE]] ], [ [[TMP9]], [[COND_FALSE]] ]
// CHECK14-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK14-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK14-NEXT: store i32 [[TMP10]], ptr [[DOTOMP_IV]], align 4
// CHECK14-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK14-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK14-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]]
// CHECK14-NEXT: br i1 [[CMP4]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK14: omp.dispatch.body:
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK14: omp.inner.for.cond:
// CHECK14-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP61:![0-9]+]]
// CHECK14-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP61]]
// CHECK14-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
// CHECK14-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK14: omp.inner.for.body:
// CHECK14-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP61]]
// CHECK14-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1
// CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK14-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP61]]
// CHECK14-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP61]]
// CHECK14-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP16]] to i64
// CHECK14-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
// CHECK14-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP61]]
// CHECK14-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK14: omp.body.continue:
// CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK14: omp.inner.for.inc:
// CHECK14-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP61]]
// CHECK14-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP17]], 1
// CHECK14-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP61]]
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP62:![0-9]+]]
// CHECK14: omp.inner.for.end:
// CHECK14-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK14: omp.dispatch.inc:
// CHECK14-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK14-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
// CHECK14-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
// CHECK14-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_LB]], align 4
// CHECK14-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK14-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
// CHECK14-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
// CHECK14-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_UB]], align 4
// CHECK14-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK14: omp.dispatch.end:
// CHECK14-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP5]])
// CHECK14-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK14-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
// CHECK14-NEXT: br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK14: .omp.final.then:
// CHECK14-NEXT: store i32 10, ptr [[I]], align 4
// CHECK14-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK14: .omp.final.done:
// CHECK14-NEXT: ret void
//
//
// CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137
// CHECK14-SAME: (ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK14-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK14-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.omp_outlined, ptr [[TMP0]])
// CHECK14-NEXT: ret void
//
//
// CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.omp_outlined
// CHECK14-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR3]] {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK14-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK14-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK14-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
// CHECK14-NEXT: store i32 9, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK14-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK14-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
// CHECK14-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK14-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
// CHECK14-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK14: cond.true:
// CHECK14-NEXT: br label [[COND_END:%.*]]
// CHECK14: cond.false:
// CHECK14-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: br label [[COND_END]]
// CHECK14: cond.end:
// CHECK14-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK14-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
// CHECK14-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK14: omp.inner.for.cond:
// CHECK14-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP64:![0-9]+]]
// CHECK14-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP64]]
// CHECK14-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK14-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK14: omp.inner.for.body:
// CHECK14-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP64]]
// CHECK14-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK14-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP64]]
// CHECK14-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
// CHECK14-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP64]]
// CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK14: omp.inner.for.inc:
// CHECK14-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP64]]
// CHECK14-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP64]]
// CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK14-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP64]]
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP65:![0-9]+]]
// CHECK14: omp.inner.for.end:
// CHECK14-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK14: omp.loop.exit:
// CHECK14-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
// CHECK14-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK14-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
// CHECK14-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK14: .omp.final.then:
// CHECK14-NEXT: store i32 10, ptr [[I]], align 4
// CHECK14-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK14: .omp.final.done:
// CHECK14-NEXT: ret void
//
//
// CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.omp_outlined.omp_outlined
// CHECK14-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR3]] {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK14-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK14-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK14-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK14-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK14-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK14-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK14-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// CHECK14-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK14-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK14-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK14-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
// CHECK14-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
// CHECK14-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
// CHECK14-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK14-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK14-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK14-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK14-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
// CHECK14-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB3]], i32 [[TMP6]], i32 1073741859, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 1)
// CHECK14-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK14: omp.dispatch.cond:
// CHECK14-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB3]], i32 [[TMP6]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
// CHECK14-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK14-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK14: omp.dispatch.body:
// CHECK14-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK14-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK14: omp.inner.for.cond:
// CHECK14-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP67:![0-9]+]]
// CHECK14-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP67]]
// CHECK14-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
// CHECK14-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK14: omp.inner.for.body:
// CHECK14-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP67]]
// CHECK14-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
// CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK14-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP67]]
// CHECK14-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP67]]
// CHECK14-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP12]] to i64
// CHECK14-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
// CHECK14-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP67]]
// CHECK14-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK14: omp.body.continue:
// CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK14: omp.inner.for.inc:
// CHECK14-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP67]]
// CHECK14-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP13]], 1
// CHECK14-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP67]]
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP68:![0-9]+]]
// CHECK14: omp.inner.for.end:
// CHECK14-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK14: omp.dispatch.inc:
// CHECK14-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK14: omp.dispatch.end:
// CHECK14-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK14-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
// CHECK14-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK14: .omp.final.then:
// CHECK14-NEXT: store i32 10, ptr [[I]], align 4
// CHECK14-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK14: .omp.final.done:
// CHECK14-NEXT: ret void
//
//
// CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142
// CHECK14-SAME: (i64 noundef [[M:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[M_ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK14-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK14-NEXT: store i64 [[M]], ptr [[M_ADDR]], align 8
// CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK14-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK14-NEXT: [[TMP1:%.*]] = load i32, ptr [[M_ADDR]], align 4
// CHECK14-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK14-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK14-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK14-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK14-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.omp_outlined, ptr [[TMP0]], i64 [[TMP3]])
// CHECK14-NEXT: ret void
//
//
// CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.omp_outlined
// CHECK14-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK14-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK14-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK14-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK14-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK14-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
// CHECK14-NEXT: store i32 9, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK14-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK14-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
// CHECK14-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK14-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
// CHECK14-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK14: cond.true:
// CHECK14-NEXT: br label [[COND_END:%.*]]
// CHECK14: cond.false:
// CHECK14-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: br label [[COND_END]]
// CHECK14: cond.end:
// CHECK14-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK14-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
// CHECK14-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
// CHECK14-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK14: omp.inner.for.cond:
// CHECK14-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP70:![0-9]+]]
// CHECK14-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP70]]
// CHECK14-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK14-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK14: omp.inner.for.body:
// CHECK14-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP70]]
// CHECK14-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK14-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP70]]
// CHECK14-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
// CHECK14-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group [[ACC_GRP70]]
// CHECK14-NEXT: store i32 [[TMP12]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group [[ACC_GRP70]]
// CHECK14-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group [[ACC_GRP70]]
// CHECK14-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]], ptr [[TMP0]], i64 [[TMP13]]), !llvm.access.group [[ACC_GRP70]]
// CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK14: omp.inner.for.inc:
// CHECK14-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP70]]
// CHECK14-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP70]]
// CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
// CHECK14-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP70]]
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP71:![0-9]+]]
// CHECK14: omp.inner.for.end:
// CHECK14-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK14: omp.loop.exit:
// CHECK14-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
// CHECK14-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK14-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
// CHECK14-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK14: .omp.final.then:
// CHECK14-NEXT: store i32 10, ptr [[I]], align 4
// CHECK14-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK14: .omp.final.done:
// CHECK14-NEXT: ret void
//
//
// CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.omp_outlined.omp_outlined
// CHECK14-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK14-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK14-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK14-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK14-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK14-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK14-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK14-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK14-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK14-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK14-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK14-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// CHECK14-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK14-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK14-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK14-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
// CHECK14-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
// CHECK14-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
// CHECK14-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK14-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK14-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK14-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK14-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK14-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK14-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 4
// CHECK14-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB3]], i32 [[TMP7]], i32 1073741859, i32 [[TMP4]], i32 [[TMP5]], i32 1, i32 [[TMP3]])
// CHECK14-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK14: omp.dispatch.cond:
// CHECK14-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB3]], i32 [[TMP7]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
// CHECK14-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP8]], 0
// CHECK14-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK14: omp.dispatch.body:
// CHECK14-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK14-NEXT: store i32 [[TMP9]], ptr [[DOTOMP_IV]], align 4
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK14: omp.inner.for.cond:
// CHECK14-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP73:![0-9]+]]
// CHECK14-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP73]]
// CHECK14-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
// CHECK14-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK14: omp.inner.for.body:
// CHECK14-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP73]]
// CHECK14-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
// CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK14-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP73]]
// CHECK14-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP73]]
// CHECK14-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64
// CHECK14-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
// CHECK14-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP73]]
// CHECK14-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK14: omp.body.continue:
// CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK14: omp.inner.for.inc:
// CHECK14-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP73]]
// CHECK14-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP14]], 1
// CHECK14-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP73]]
// CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP74:![0-9]+]]
// CHECK14: omp.inner.for.end:
// CHECK14-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK14: omp.dispatch.inc:
// CHECK14-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK14: omp.dispatch.end:
// CHECK14-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK14-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0
// CHECK14-NEXT: br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK14: .omp.final.then:
// CHECK14-NEXT: store i32 10, ptr [[I]], align 4
// CHECK14-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK14: .omp.final.done:
// CHECK14-NEXT: ret void
//
//
// CHECK14-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK14-SAME: () #[[ATTR7:[0-9]+]] {
// CHECK14-NEXT: entry:
// CHECK14-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK14-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@main
// CHECK17-SAME: (i32 noundef [[ARGC:%.*]], ptr noundef [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[ARGV_ADDR:%.*]] = alloca ptr, align 4
// CHECK17-NEXT: [[N:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[SAVED_STACK:%.*]] = alloca ptr, align 4
// CHECK17-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[M:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x ptr], align 4
// CHECK17-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x ptr], align 4
// CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x ptr], align 4
// CHECK17-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4
// CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK17-NEXT: [[N_CASTED3:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [3 x ptr], align 4
// CHECK17-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [3 x ptr], align 4
// CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [3 x ptr], align 4
// CHECK17-NEXT: [[DOTOFFLOAD_SIZES7:%.*]] = alloca [3 x i64], align 4
// CHECK17-NEXT: [[_TMP8:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[KERNEL_ARGS15:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
// CHECK17-NEXT: [[M_CASTED:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[N_CASTED18:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS19:%.*]] = alloca [4 x ptr], align 4
// CHECK17-NEXT: [[DOTOFFLOAD_PTRS20:%.*]] = alloca [4 x ptr], align 4
// CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS21:%.*]] = alloca [4 x ptr], align 4
// CHECK17-NEXT: [[DOTOFFLOAD_SIZES22:%.*]] = alloca [4 x i64], align 4
// CHECK17-NEXT: [[_TMP23:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[KERNEL_ARGS30:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
// CHECK17-NEXT: [[N_CASTED33:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS34:%.*]] = alloca [3 x ptr], align 4
// CHECK17-NEXT: [[DOTOFFLOAD_PTRS35:%.*]] = alloca [3 x ptr], align 4
// CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS36:%.*]] = alloca [3 x ptr], align 4
// CHECK17-NEXT: [[DOTOFFLOAD_SIZES37:%.*]] = alloca [3 x i64], align 4
// CHECK17-NEXT: [[_TMP38:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTCAPTURE_EXPR_40:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[KERNEL_ARGS45:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
// CHECK17-NEXT: [[M_CASTED48:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[N_CASTED49:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS50:%.*]] = alloca [4 x ptr], align 4
// CHECK17-NEXT: [[DOTOFFLOAD_PTRS51:%.*]] = alloca [4 x ptr], align 4
// CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS52:%.*]] = alloca [4 x ptr], align 4
// CHECK17-NEXT: [[DOTOFFLOAD_SIZES53:%.*]] = alloca [4 x i64], align 4
// CHECK17-NEXT: [[_TMP54:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTCAPTURE_EXPR_55:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTCAPTURE_EXPR_56:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[KERNEL_ARGS61:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
// CHECK17-NEXT: store i32 0, ptr [[RETVAL]], align 4
// CHECK17-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4
// CHECK17-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 4
// CHECK17-NEXT: store i32 100, ptr [[N]], align 4
// CHECK17-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4
// CHECK17-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0()
// CHECK17-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4
// CHECK17-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4
// CHECK17-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4
// CHECK17-NEXT: store i32 10, ptr [[M]], align 4
// CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[N]], align 4
// CHECK17-NEXT: store i32 [[TMP2]], ptr [[N_CASTED]], align 4
// CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_CASTED]], align 4
// CHECK17-NEXT: [[TMP4:%.*]] = mul nuw i32 [[TMP0]], 4
// CHECK17-NEXT: [[TMP5:%.*]] = sext i32 [[TMP4]] to i64
// CHECK17-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[DOTOFFLOAD_SIZES]], ptr align 4 @.offload_sizes, i32 24, i1 false)
// CHECK17-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK17-NEXT: store i32 [[TMP3]], ptr [[TMP6]], align 4
// CHECK17-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK17-NEXT: store i32 [[TMP3]], ptr [[TMP7]], align 4
// CHECK17-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK17-NEXT: store ptr null, ptr [[TMP8]], align 4
// CHECK17-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK17-NEXT: store i32 [[TMP0]], ptr [[TMP9]], align 4
// CHECK17-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK17-NEXT: store i32 [[TMP0]], ptr [[TMP10]], align 4
// CHECK17-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK17-NEXT: store ptr null, ptr [[TMP11]], align 4
// CHECK17-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK17-NEXT: store ptr [[VLA]], ptr [[TMP12]], align 4
// CHECK17-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK17-NEXT: store ptr [[VLA]], ptr [[TMP13]], align 4
// CHECK17-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 2
// CHECK17-NEXT: store i64 [[TMP5]], ptr [[TMP14]], align 4
// CHECK17-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
// CHECK17-NEXT: store ptr null, ptr [[TMP15]], align 4
// CHECK17-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
10698 // CHECK17-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
10699 // CHECK17-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 0
10700 // CHECK17-NEXT: [[TMP19:%.*]] = load i32, ptr [[N]], align 4
10701 // CHECK17-NEXT: store i32 [[TMP19]], ptr [[DOTCAPTURE_EXPR_]], align 4
10702 // CHECK17-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
10703 // CHECK17-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP20]], 0
10704 // CHECK17-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
10705 // CHECK17-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
10706 // CHECK17-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
10707 // CHECK17-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
10708 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP21]], 1
10709 // CHECK17-NEXT: [[TMP22:%.*]] = zext i32 [[ADD]] to i64
10710 // CHECK17-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
10711 // CHECK17-NEXT: store i32 2, ptr [[TMP23]], align 4
10712 // CHECK17-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
10713 // CHECK17-NEXT: store i32 3, ptr [[TMP24]], align 4
10714 // CHECK17-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
10715 // CHECK17-NEXT: store ptr [[TMP16]], ptr [[TMP25]], align 4
10716 // CHECK17-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
10717 // CHECK17-NEXT: store ptr [[TMP17]], ptr [[TMP26]], align 4
10718 // CHECK17-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
10719 // CHECK17-NEXT: store ptr [[TMP18]], ptr [[TMP27]], align 4
10720 // CHECK17-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
10721 // CHECK17-NEXT: store ptr @.offload_maptypes, ptr [[TMP28]], align 4
10722 // CHECK17-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
10723 // CHECK17-NEXT: store ptr null, ptr [[TMP29]], align 4
10724 // CHECK17-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
10725 // CHECK17-NEXT: store ptr null, ptr [[TMP30]], align 4
10726 // CHECK17-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
10727 // CHECK17-NEXT: store i64 [[TMP22]], ptr [[TMP31]], align 8
10728 // CHECK17-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
10729 // CHECK17-NEXT: store i64 0, ptr [[TMP32]], align 8
10730 // CHECK17-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
10731 // CHECK17-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP33]], align 4
10732 // CHECK17-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
10733 // CHECK17-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP34]], align 4
10734 // CHECK17-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
10735 // CHECK17-NEXT: store i32 0, ptr [[TMP35]], align 4
10736 // CHECK17-NEXT: [[TMP36:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.region_id, ptr [[KERNEL_ARGS]])
10737 // CHECK17-NEXT: [[TMP37:%.*]] = icmp ne i32 [[TMP36]], 0
10738 // CHECK17-NEXT: br i1 [[TMP37]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
10739 // CHECK17: omp_offload.failed:
10740 // CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154(i32 [[TMP3]], i32 [[TMP0]], ptr [[VLA]]) #[[ATTR4:[0-9]+]]
10741 // CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT]]
10742 // CHECK17: omp_offload.cont:
10743 // CHECK17-NEXT: [[TMP38:%.*]] = load i32, ptr [[N]], align 4
10744 // CHECK17-NEXT: store i32 [[TMP38]], ptr [[N_CASTED3]], align 4
10745 // CHECK17-NEXT: [[TMP39:%.*]] = load i32, ptr [[N_CASTED3]], align 4
10746 // CHECK17-NEXT: [[TMP40:%.*]] = mul nuw i32 [[TMP0]], 4
10747 // CHECK17-NEXT: [[TMP41:%.*]] = sext i32 [[TMP40]] to i64
10748 // CHECK17-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[DOTOFFLOAD_SIZES7]], ptr align 4 @.offload_sizes.1, i32 24, i1 false)
10749 // CHECK17-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
10750 // CHECK17-NEXT: store i32 [[TMP39]], ptr [[TMP42]], align 4
10751 // CHECK17-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
10752 // CHECK17-NEXT: store i32 [[TMP39]], ptr [[TMP43]], align 4
10753 // CHECK17-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0
10754 // CHECK17-NEXT: store ptr null, ptr [[TMP44]], align 4
10755 // CHECK17-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1
10756 // CHECK17-NEXT: store i32 [[TMP0]], ptr [[TMP45]], align 4
10757 // CHECK17-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS5]], i32 0, i32 1
10758 // CHECK17-NEXT: store i32 [[TMP0]], ptr [[TMP46]], align 4
10759 // CHECK17-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1
10760 // CHECK17-NEXT: store ptr null, ptr [[TMP47]], align 4
10761 // CHECK17-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2
10762 // CHECK17-NEXT: store ptr [[VLA]], ptr [[TMP48]], align 4
10763 // CHECK17-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS5]], i32 0, i32 2
10764 // CHECK17-NEXT: store ptr [[VLA]], ptr [[TMP49]], align 4
10765 // CHECK17-NEXT: [[TMP50:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES7]], i32 0, i32 2
10766 // CHECK17-NEXT: store i64 [[TMP41]], ptr [[TMP50]], align 4
10767 // CHECK17-NEXT: [[TMP51:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2
10768 // CHECK17-NEXT: store ptr null, ptr [[TMP51]], align 4
10769 // CHECK17-NEXT: [[TMP52:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
10770 // CHECK17-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
10771 // CHECK17-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES7]], i32 0, i32 0
10772 // CHECK17-NEXT: [[TMP55:%.*]] = load i32, ptr [[N]], align 4
10773 // CHECK17-NEXT: store i32 [[TMP55]], ptr [[DOTCAPTURE_EXPR_9]], align 4
10774 // CHECK17-NEXT: [[TMP56:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_9]], align 4
10775 // CHECK17-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP56]], 0
10776 // CHECK17-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1
10777 // CHECK17-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1
10778 // CHECK17-NEXT: store i32 [[SUB13]], ptr [[DOTCAPTURE_EXPR_10]], align 4
10779 // CHECK17-NEXT: [[TMP57:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_10]], align 4
10780 // CHECK17-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP57]], 1
10781 // CHECK17-NEXT: [[TMP58:%.*]] = zext i32 [[ADD14]] to i64
10782 // CHECK17-NEXT: [[TMP59:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 0
10783 // CHECK17-NEXT: store i32 2, ptr [[TMP59]], align 4
10784 // CHECK17-NEXT: [[TMP60:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 1
10785 // CHECK17-NEXT: store i32 3, ptr [[TMP60]], align 4
10786 // CHECK17-NEXT: [[TMP61:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 2
10787 // CHECK17-NEXT: store ptr [[TMP52]], ptr [[TMP61]], align 4
10788 // CHECK17-NEXT: [[TMP62:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 3
10789 // CHECK17-NEXT: store ptr [[TMP53]], ptr [[TMP62]], align 4
10790 // CHECK17-NEXT: [[TMP63:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 4
10791 // CHECK17-NEXT: store ptr [[TMP54]], ptr [[TMP63]], align 4
10792 // CHECK17-NEXT: [[TMP64:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 5
10793 // CHECK17-NEXT: store ptr @.offload_maptypes.2, ptr [[TMP64]], align 4
10794 // CHECK17-NEXT: [[TMP65:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 6
10795 // CHECK17-NEXT: store ptr null, ptr [[TMP65]], align 4
10796 // CHECK17-NEXT: [[TMP66:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 7
10797 // CHECK17-NEXT: store ptr null, ptr [[TMP66]], align 4
10798 // CHECK17-NEXT: [[TMP67:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 8
10799 // CHECK17-NEXT: store i64 [[TMP58]], ptr [[TMP67]], align 8
10800 // CHECK17-NEXT: [[TMP68:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 9
10801 // CHECK17-NEXT: store i64 0, ptr [[TMP68]], align 8
10802 // CHECK17-NEXT: [[TMP69:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 10
10803 // CHECK17-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP69]], align 4
10804 // CHECK17-NEXT: [[TMP70:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 11
10805 // CHECK17-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP70]], align 4
10806 // CHECK17-NEXT: [[TMP71:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 12
10807 // CHECK17-NEXT: store i32 0, ptr [[TMP71]], align 4
10808 // CHECK17-NEXT: [[TMP72:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.region_id, ptr [[KERNEL_ARGS15]])
10809 // CHECK17-NEXT: [[TMP73:%.*]] = icmp ne i32 [[TMP72]], 0
10810 // CHECK17-NEXT: br i1 [[TMP73]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]]
10811 // CHECK17: omp_offload.failed16:
10812 // CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159(i32 [[TMP39]], i32 [[TMP0]], ptr [[VLA]]) #[[ATTR4]]
10813 // CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT17]]
10814 // CHECK17: omp_offload.cont17:
10815 // CHECK17-NEXT: [[TMP74:%.*]] = load i32, ptr [[M]], align 4
10816 // CHECK17-NEXT: store i32 [[TMP74]], ptr [[M_CASTED]], align 4
10817 // CHECK17-NEXT: [[TMP75:%.*]] = load i32, ptr [[M_CASTED]], align 4
10818 // CHECK17-NEXT: [[TMP76:%.*]] = load i32, ptr [[N]], align 4
10819 // CHECK17-NEXT: store i32 [[TMP76]], ptr [[N_CASTED18]], align 4
10820 // CHECK17-NEXT: [[TMP77:%.*]] = load i32, ptr [[N_CASTED18]], align 4
10821 // CHECK17-NEXT: [[TMP78:%.*]] = mul nuw i32 [[TMP0]], 4
10822 // CHECK17-NEXT: [[TMP79:%.*]] = sext i32 [[TMP78]] to i64
10823 // CHECK17-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[DOTOFFLOAD_SIZES22]], ptr align 4 @.offload_sizes.3, i32 32, i1 false)
10824 // CHECK17-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0
10825 // CHECK17-NEXT: store i32 [[TMP75]], ptr [[TMP80]], align 4
10826 // CHECK17-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 0
10827 // CHECK17-NEXT: store i32 [[TMP75]], ptr [[TMP81]], align 4
10828 // CHECK17-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 0
10829 // CHECK17-NEXT: store ptr null, ptr [[TMP82]], align 4
10830 // CHECK17-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 1
10831 // CHECK17-NEXT: store i32 [[TMP77]], ptr [[TMP83]], align 4
10832 // CHECK17-NEXT: [[TMP84:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 1
10833 // CHECK17-NEXT: store i32 [[TMP77]], ptr [[TMP84]], align 4
10834 // CHECK17-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 1
10835 // CHECK17-NEXT: store ptr null, ptr [[TMP85]], align 4
10836 // CHECK17-NEXT: [[TMP86:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 2
10837 // CHECK17-NEXT: store i32 [[TMP0]], ptr [[TMP86]], align 4
10838 // CHECK17-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 2
10839 // CHECK17-NEXT: store i32 [[TMP0]], ptr [[TMP87]], align 4
10840 // CHECK17-NEXT: [[TMP88:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 2
10841 // CHECK17-NEXT: store ptr null, ptr [[TMP88]], align 4
10842 // CHECK17-NEXT: [[TMP89:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 3
10843 // CHECK17-NEXT: store ptr [[VLA]], ptr [[TMP89]], align 4
10844 // CHECK17-NEXT: [[TMP90:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 3
10845 // CHECK17-NEXT: store ptr [[VLA]], ptr [[TMP90]], align 4
10846 // CHECK17-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i64], ptr [[DOTOFFLOAD_SIZES22]], i32 0, i32 3
10847 // CHECK17-NEXT: store i64 [[TMP79]], ptr [[TMP91]], align 4
10848 // CHECK17-NEXT: [[TMP92:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 3
10849 // CHECK17-NEXT: store ptr null, ptr [[TMP92]], align 4
10850 // CHECK17-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0
10851 // CHECK17-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 0
10852 // CHECK17-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i64], ptr [[DOTOFFLOAD_SIZES22]], i32 0, i32 0
10853 // CHECK17-NEXT: [[TMP96:%.*]] = load i32, ptr [[N]], align 4
10854 // CHECK17-NEXT: store i32 [[TMP96]], ptr [[DOTCAPTURE_EXPR_24]], align 4
10855 // CHECK17-NEXT: [[TMP97:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_24]], align 4
10856 // CHECK17-NEXT: [[SUB26:%.*]] = sub nsw i32 [[TMP97]], 0
10857 // CHECK17-NEXT: [[DIV27:%.*]] = sdiv i32 [[SUB26]], 1
10858 // CHECK17-NEXT: [[SUB28:%.*]] = sub nsw i32 [[DIV27]], 1
10859 // CHECK17-NEXT: store i32 [[SUB28]], ptr [[DOTCAPTURE_EXPR_25]], align 4
10860 // CHECK17-NEXT: [[TMP98:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_25]], align 4
10861 // CHECK17-NEXT: [[ADD29:%.*]] = add nsw i32 [[TMP98]], 1
10862 // CHECK17-NEXT: [[TMP99:%.*]] = zext i32 [[ADD29]] to i64
10863 // CHECK17-NEXT: [[TMP100:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 0
10864 // CHECK17-NEXT: store i32 2, ptr [[TMP100]], align 4
10865 // CHECK17-NEXT: [[TMP101:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 1
10866 // CHECK17-NEXT: store i32 4, ptr [[TMP101]], align 4
10867 // CHECK17-NEXT: [[TMP102:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 2
10868 // CHECK17-NEXT: store ptr [[TMP93]], ptr [[TMP102]], align 4
10869 // CHECK17-NEXT: [[TMP103:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 3
10870 // CHECK17-NEXT: store ptr [[TMP94]], ptr [[TMP103]], align 4
10871 // CHECK17-NEXT: [[TMP104:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 4
10872 // CHECK17-NEXT: store ptr [[TMP95]], ptr [[TMP104]], align 4
10873 // CHECK17-NEXT: [[TMP105:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 5
10874 // CHECK17-NEXT: store ptr @.offload_maptypes.4, ptr [[TMP105]], align 4
10875 // CHECK17-NEXT: [[TMP106:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 6
10876 // CHECK17-NEXT: store ptr null, ptr [[TMP106]], align 4
10877 // CHECK17-NEXT: [[TMP107:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 7
10878 // CHECK17-NEXT: store ptr null, ptr [[TMP107]], align 4
10879 // CHECK17-NEXT: [[TMP108:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 8
10880 // CHECK17-NEXT: store i64 [[TMP99]], ptr [[TMP108]], align 8
10881 // CHECK17-NEXT: [[TMP109:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 9
10882 // CHECK17-NEXT: store i64 0, ptr [[TMP109]], align 8
10883 // CHECK17-NEXT: [[TMP110:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 10
10884 // CHECK17-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP110]], align 4
10885 // CHECK17-NEXT: [[TMP111:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 11
10886 // CHECK17-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP111]], align 4
10887 // CHECK17-NEXT: [[TMP112:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 12
10888 // CHECK17-NEXT: store i32 0, ptr [[TMP112]], align 4
10889 // CHECK17-NEXT: [[TMP113:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.region_id, ptr [[KERNEL_ARGS30]])
10890 // CHECK17-NEXT: [[TMP114:%.*]] = icmp ne i32 [[TMP113]], 0
10891 // CHECK17-NEXT: br i1 [[TMP114]], label [[OMP_OFFLOAD_FAILED31:%.*]], label [[OMP_OFFLOAD_CONT32:%.*]]
10892 // CHECK17: omp_offload.failed31:
10893 // CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164(i32 [[TMP75]], i32 [[TMP77]], i32 [[TMP0]], ptr [[VLA]]) #[[ATTR4]]
10894 // CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT32]]
10895 // CHECK17: omp_offload.cont32:
10896 // CHECK17-NEXT: [[TMP115:%.*]] = load i32, ptr [[N]], align 4
10897 // CHECK17-NEXT: store i32 [[TMP115]], ptr [[N_CASTED33]], align 4
10898 // CHECK17-NEXT: [[TMP116:%.*]] = load i32, ptr [[N_CASTED33]], align 4
10899 // CHECK17-NEXT: [[TMP117:%.*]] = mul nuw i32 [[TMP0]], 4
10900 // CHECK17-NEXT: [[TMP118:%.*]] = sext i32 [[TMP117]] to i64
10901 // CHECK17-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[DOTOFFLOAD_SIZES37]], ptr align 4 @.offload_sizes.5, i32 24, i1 false)
10902 // CHECK17-NEXT: [[TMP119:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 0
10903 // CHECK17-NEXT: store i32 [[TMP116]], ptr [[TMP119]], align 4
10904 // CHECK17-NEXT: [[TMP120:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS35]], i32 0, i32 0
10905 // CHECK17-NEXT: store i32 [[TMP116]], ptr [[TMP120]], align 4
10906 // CHECK17-NEXT: [[TMP121:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS36]], i32 0, i32 0
10907 // CHECK17-NEXT: store ptr null, ptr [[TMP121]], align 4
10908 // CHECK17-NEXT: [[TMP122:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 1
10909 // CHECK17-NEXT: store i32 [[TMP0]], ptr [[TMP122]], align 4
10910 // CHECK17-NEXT: [[TMP123:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS35]], i32 0, i32 1
10911 // CHECK17-NEXT: store i32 [[TMP0]], ptr [[TMP123]], align 4
10912 // CHECK17-NEXT: [[TMP124:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS36]], i32 0, i32 1
10913 // CHECK17-NEXT: store ptr null, ptr [[TMP124]], align 4
10914 // CHECK17-NEXT: [[TMP125:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 2
10915 // CHECK17-NEXT: store ptr [[VLA]], ptr [[TMP125]], align 4
10916 // CHECK17-NEXT: [[TMP126:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS35]], i32 0, i32 2
10917 // CHECK17-NEXT: store ptr [[VLA]], ptr [[TMP126]], align 4
10918 // CHECK17-NEXT: [[TMP127:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES37]], i32 0, i32 2
10919 // CHECK17-NEXT: store i64 [[TMP118]], ptr [[TMP127]], align 4
10920 // CHECK17-NEXT: [[TMP128:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS36]], i32 0, i32 2
10921 // CHECK17-NEXT: store ptr null, ptr [[TMP128]], align 4
10922 // CHECK17-NEXT: [[TMP129:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 0
10923 // CHECK17-NEXT: [[TMP130:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS35]], i32 0, i32 0
10924 // CHECK17-NEXT: [[TMP131:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES37]], i32 0, i32 0
10925 // CHECK17-NEXT: [[TMP132:%.*]] = load i32, ptr [[N]], align 4
10926 // CHECK17-NEXT: store i32 [[TMP132]], ptr [[DOTCAPTURE_EXPR_39]], align 4
10927 // CHECK17-NEXT: [[TMP133:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_39]], align 4
10928 // CHECK17-NEXT: [[SUB41:%.*]] = sub nsw i32 [[TMP133]], 0
10929 // CHECK17-NEXT: [[DIV42:%.*]] = sdiv i32 [[SUB41]], 1
10930 // CHECK17-NEXT: [[SUB43:%.*]] = sub nsw i32 [[DIV42]], 1
10931 // CHECK17-NEXT: store i32 [[SUB43]], ptr [[DOTCAPTURE_EXPR_40]], align 4
10932 // CHECK17-NEXT: [[TMP134:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_40]], align 4
10933 // CHECK17-NEXT: [[ADD44:%.*]] = add nsw i32 [[TMP134]], 1
10934 // CHECK17-NEXT: [[TMP135:%.*]] = zext i32 [[ADD44]] to i64
10935 // CHECK17-NEXT: [[TMP136:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 0
10936 // CHECK17-NEXT: store i32 2, ptr [[TMP136]], align 4
10937 // CHECK17-NEXT: [[TMP137:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 1
10938 // CHECK17-NEXT: store i32 3, ptr [[TMP137]], align 4
10939 // CHECK17-NEXT: [[TMP138:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 2
10940 // CHECK17-NEXT: store ptr [[TMP129]], ptr [[TMP138]], align 4
10941 // CHECK17-NEXT: [[TMP139:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 3
10942 // CHECK17-NEXT: store ptr [[TMP130]], ptr [[TMP139]], align 4
10943 // CHECK17-NEXT: [[TMP140:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 4
10944 // CHECK17-NEXT: store ptr [[TMP131]], ptr [[TMP140]], align 4
10945 // CHECK17-NEXT: [[TMP141:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 5
10946 // CHECK17-NEXT: store ptr @.offload_maptypes.6, ptr [[TMP141]], align 4
10947 // CHECK17-NEXT: [[TMP142:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 6
10948 // CHECK17-NEXT: store ptr null, ptr [[TMP142]], align 4
10949 // CHECK17-NEXT: [[TMP143:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 7
10950 // CHECK17-NEXT: store ptr null, ptr [[TMP143]], align 4
10951 // CHECK17-NEXT: [[TMP144:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 8
10952 // CHECK17-NEXT: store i64 [[TMP135]], ptr [[TMP144]], align 8
10953 // CHECK17-NEXT: [[TMP145:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 9
10954 // CHECK17-NEXT: store i64 0, ptr [[TMP145]], align 8
10955 // CHECK17-NEXT: [[TMP146:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 10
10956 // CHECK17-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP146]], align 4
10957 // CHECK17-NEXT: [[TMP147:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 11
10958 // CHECK17-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP147]], align 4
10959 // CHECK17-NEXT: [[TMP148:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 12
10960 // CHECK17-NEXT: store i32 0, ptr [[TMP148]], align 4
10961 // CHECK17-NEXT: [[TMP149:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.region_id, ptr [[KERNEL_ARGS45]])
10962 // CHECK17-NEXT: [[TMP150:%.*]] = icmp ne i32 [[TMP149]], 0
10963 // CHECK17-NEXT: br i1 [[TMP150]], label [[OMP_OFFLOAD_FAILED46:%.*]], label [[OMP_OFFLOAD_CONT47:%.*]]
10964 // CHECK17: omp_offload.failed46:
10965 // CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169(i32 [[TMP116]], i32 [[TMP0]], ptr [[VLA]]) #[[ATTR4]]
10966 // CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT47]]
10967 // CHECK17: omp_offload.cont47:
10968 // CHECK17-NEXT: [[TMP151:%.*]] = load i32, ptr [[M]], align 4
10969 // CHECK17-NEXT: store i32 [[TMP151]], ptr [[M_CASTED48]], align 4
10970 // CHECK17-NEXT: [[TMP152:%.*]] = load i32, ptr [[M_CASTED48]], align 4
10971 // CHECK17-NEXT: [[TMP153:%.*]] = load i32, ptr [[N]], align 4
10972 // CHECK17-NEXT: store i32 [[TMP153]], ptr [[N_CASTED49]], align 4
10973 // CHECK17-NEXT: [[TMP154:%.*]] = load i32, ptr [[N_CASTED49]], align 4
10974 // CHECK17-NEXT: [[TMP155:%.*]] = mul nuw i32 [[TMP0]], 4
10975 // CHECK17-NEXT: [[TMP156:%.*]] = sext i32 [[TMP155]] to i64
10976 // CHECK17-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[DOTOFFLOAD_SIZES53]], ptr align 4 @.offload_sizes.7, i32 32, i1 false)
10977 // CHECK17-NEXT: [[TMP157:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0
10978 // CHECK17-NEXT: store i32 [[TMP152]], ptr [[TMP157]], align 4
10979 // CHECK17-NEXT: [[TMP158:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS51]], i32 0, i32 0
10980 // CHECK17-NEXT: store i32 [[TMP152]], ptr [[TMP158]], align 4
10981 // CHECK17-NEXT: [[TMP159:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS52]], i32 0, i32 0
10982 // CHECK17-NEXT: store ptr null, ptr [[TMP159]], align 4
10983 // CHECK17-NEXT: [[TMP160:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 1
10984 // CHECK17-NEXT: store i32 [[TMP154]], ptr [[TMP160]], align 4
10985 // CHECK17-NEXT: [[TMP161:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS51]], i32 0, i32 1
10986 // CHECK17-NEXT: store i32 [[TMP154]], ptr [[TMP161]], align 4
10987 // CHECK17-NEXT: [[TMP162:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS52]], i32 0, i32 1
10988 // CHECK17-NEXT: store ptr null, ptr [[TMP162]], align 4
10989 // CHECK17-NEXT: [[TMP163:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 2
10990 // CHECK17-NEXT: store i32 [[TMP0]], ptr [[TMP163]], align 4
10991 // CHECK17-NEXT: [[TMP164:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS51]], i32 0, i32 2
10992 // CHECK17-NEXT: store i32 [[TMP0]], ptr [[TMP164]], align 4
10993 // CHECK17-NEXT: [[TMP165:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS52]], i32 0, i32 2
10994 // CHECK17-NEXT: store ptr null, ptr [[TMP165]], align 4
10995 // CHECK17-NEXT: [[TMP166:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 3
10996 // CHECK17-NEXT: store ptr [[VLA]], ptr [[TMP166]], align 4
10997 // CHECK17-NEXT: [[TMP167:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS51]], i32 0, i32 3
10998 // CHECK17-NEXT: store ptr [[VLA]], ptr [[TMP167]], align 4
10999 // CHECK17-NEXT: [[TMP168:%.*]] = getelementptr inbounds [4 x i64], ptr [[DOTOFFLOAD_SIZES53]], i32 0, i32 3
11000 // CHECK17-NEXT: store i64 [[TMP156]], ptr [[TMP168]], align 4
11001 // CHECK17-NEXT: [[TMP169:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS52]], i32 0, i32 3
11002 // CHECK17-NEXT: store ptr null, ptr [[TMP169]], align 4
11003 // CHECK17-NEXT: [[TMP170:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0
11004 // CHECK17-NEXT: [[TMP171:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS51]], i32 0, i32 0
11005 // CHECK17-NEXT: [[TMP172:%.*]] = getelementptr inbounds [4 x i64], ptr [[DOTOFFLOAD_SIZES53]], i32 0, i32 0
11006 // CHECK17-NEXT: [[TMP173:%.*]] = load i32, ptr [[N]], align 4
11007 // CHECK17-NEXT: store i32 [[TMP173]], ptr [[DOTCAPTURE_EXPR_55]], align 4
11008 // CHECK17-NEXT: [[TMP174:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_55]], align 4
11009 // CHECK17-NEXT: [[SUB57:%.*]] = sub nsw i32 [[TMP174]], 0
11010 // CHECK17-NEXT: [[DIV58:%.*]] = sdiv i32 [[SUB57]], 1
11011 // CHECK17-NEXT: [[SUB59:%.*]] = sub nsw i32 [[DIV58]], 1
11012 // CHECK17-NEXT: store i32 [[SUB59]], ptr [[DOTCAPTURE_EXPR_56]], align 4
11013 // CHECK17-NEXT: [[TMP175:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_56]], align 4
11014 // CHECK17-NEXT: [[ADD60:%.*]] = add nsw i32 [[TMP175]], 1
11015 // CHECK17-NEXT: [[TMP176:%.*]] = zext i32 [[ADD60]] to i64
11016 // CHECK17-NEXT: [[TMP177:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 0
11017 // CHECK17-NEXT: store i32 2, ptr [[TMP177]], align 4
11018 // CHECK17-NEXT: [[TMP178:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 1
11019 // CHECK17-NEXT: store i32 4, ptr [[TMP178]], align 4
11020 // CHECK17-NEXT: [[TMP179:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 2
11021 // CHECK17-NEXT: store ptr [[TMP170]], ptr [[TMP179]], align 4
11022 // CHECK17-NEXT: [[TMP180:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 3
11023 // CHECK17-NEXT: store ptr [[TMP171]], ptr [[TMP180]], align 4
11024 // CHECK17-NEXT: [[TMP181:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 4
11025 // CHECK17-NEXT: store ptr [[TMP172]], ptr [[TMP181]], align 4
11026 // CHECK17-NEXT: [[TMP182:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 5
11027 // CHECK17-NEXT: store ptr @.offload_maptypes.8, ptr [[TMP182]], align 4
11028 // CHECK17-NEXT: [[TMP183:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 6
11029 // CHECK17-NEXT: store ptr null, ptr [[TMP183]], align 4
11030 // CHECK17-NEXT: [[TMP184:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 7
11031 // CHECK17-NEXT: store ptr null, ptr [[TMP184]], align 4
11032 // CHECK17-NEXT: [[TMP185:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 8
11033 // CHECK17-NEXT: store i64 [[TMP176]], ptr [[TMP185]], align 8
11034 // CHECK17-NEXT: [[TMP186:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 9
11035 // CHECK17-NEXT: store i64 0, ptr [[TMP186]], align 8
11036 // CHECK17-NEXT: [[TMP187:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 10
11037 // CHECK17-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP187]], align 4
11038 // CHECK17-NEXT: [[TMP188:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 11
11039 // CHECK17-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP188]], align 4
11040 // CHECK17-NEXT: [[TMP189:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 12
11041 // CHECK17-NEXT: store i32 0, ptr [[TMP189]], align 4
11042 // CHECK17-NEXT: [[TMP190:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.region_id, ptr [[KERNEL_ARGS61]])
11043 // CHECK17-NEXT: [[TMP191:%.*]] = icmp ne i32 [[TMP190]], 0
11044 // CHECK17-NEXT: br i1 [[TMP191]], label [[OMP_OFFLOAD_FAILED62:%.*]], label [[OMP_OFFLOAD_CONT63:%.*]]
11045 // CHECK17: omp_offload.failed62:
11046 // CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174(i32 [[TMP152]], i32 [[TMP154]], i32 [[TMP0]], ptr [[VLA]]) #[[ATTR4]]
11047 // CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT63]]
11048 // CHECK17: omp_offload.cont63:
11049 // CHECK17-NEXT: [[TMP192:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4
11050 // CHECK17-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP192]])
11051 // CHECK17-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4
11052 // CHECK17-NEXT: [[TMP193:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4
11053 // CHECK17-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP193]])
11054 // CHECK17-NEXT: [[TMP194:%.*]] = load i32, ptr [[RETVAL]], align 4
11055 // CHECK17-NEXT: ret i32 [[TMP194]]
11058 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154
11059 // CHECK17-SAME: (i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2:[0-9]+]] {
11060 // CHECK17-NEXT: entry:
11061 // CHECK17-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
11062 // CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
11063 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
11064 // CHECK17-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
11065 // CHECK17-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
11066 // CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
11067 // CHECK17-NEXT: [[TMP0:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
11068 // CHECK17-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4
11069 // CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.omp_outlined, ptr [[N_ADDR]], i32 [[TMP0]], ptr [[TMP1]])
11070 // CHECK17-NEXT: ret void
11073 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.omp_outlined
11074 // CHECK17-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR3:[0-9]+]] {
11075 // CHECK17-NEXT: entry:
11076 // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
11077 // CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
11078 // CHECK17-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 4
11079 // CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
11080 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
11081 // CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
11082 // CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
11083 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
11084 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
11085 // CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
11086 // CHECK17-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
11087 // CHECK17-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
11088 // CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11089 // CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11090 // CHECK17-NEXT: [[I3:%.*]] = alloca i32, align 4
11091 // CHECK17-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
11092 // CHECK17-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
11093 // CHECK17-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 4
11094 // CHECK17-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
11095 // CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
11096 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 4
11097 // CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
11098 // CHECK17-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 4
11099 // CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
11100 // CHECK17-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4
11101 // CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
11102 // CHECK17-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
11103 // CHECK17-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
11104 // CHECK17-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
11105 // CHECK17-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
11106 // CHECK17-NEXT: store i32 0, ptr [[I]], align 4
11107 // CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
11108 // CHECK17-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
11109 // CHECK17-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
11110 // CHECK17: omp.precond.then:
11111 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
11112 // CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
11113 // CHECK17-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_COMB_UB]], align 4
11114 // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
11115 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
11116 // CHECK17-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
11117 // CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
11118 // CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
11119 // CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
11120 // CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
11121 // CHECK17-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
11122 // CHECK17-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
11123 // CHECK17: cond.true:
11124 // CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
11125 // CHECK17-NEXT: br label [[COND_END:%.*]]
11126 // CHECK17: cond.false:
11127 // CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
11128 // CHECK17-NEXT: br label [[COND_END]]
11129 // CHECK17: cond.end:
11130 // CHECK17-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
11131 // CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
11132 // CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
11133 // CHECK17-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4
11134 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
11135 // CHECK17: omp.inner.for.cond:
11136 // CHECK17-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14:![0-9]+]]
11137 // CHECK17-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP14]]
11138 // CHECK17-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
11139 // CHECK17-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11140 // CHECK17: omp.inner.for.body:
11141 // CHECK17-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP14]]
11142 // CHECK17-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP14]]
11143 // CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 5, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.omp_outlined.omp_outlined, i32 [[TMP16]], i32 [[TMP17]], ptr [[TMP0]], i32 [[TMP1]], ptr [[TMP2]]), !llvm.access.group [[ACC_GRP14]]
11144 // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
11145 // CHECK17: omp.inner.for.inc:
11146 // CHECK17-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]]
11147 // CHECK17-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP14]]
11148 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
11149 // CHECK17-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]]
11150 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP15:![0-9]+]]
11151 // CHECK17: omp.inner.for.end:
11152 // CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
11153 // CHECK17: omp.loop.exit:
11154 // CHECK17-NEXT: [[TMP20:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
11155 // CHECK17-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP20]], align 4
11156 // CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP21]])
11157 // CHECK17-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
11158 // CHECK17-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
11159 // CHECK17-NEXT: br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
11160 // CHECK17: .omp.final.then:
11161 // CHECK17-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
11162 // CHECK17-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP24]], 0
11163 // CHECK17-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
11164 // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
11165 // CHECK17-NEXT: [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
11166 // CHECK17-NEXT: store i32 [[ADD8]], ptr [[I3]], align 4
11167 // CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]]
11168 // CHECK17: .omp.final.done:
11169 // CHECK17-NEXT: br label [[OMP_PRECOND_END]]
11170 // CHECK17: omp.precond.end:
11171 // CHECK17-NEXT: ret void
11174 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.omp_outlined.omp_outlined
11175 // CHECK17-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR3]] {
11176 // CHECK17-NEXT: entry:
11177 // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
11178 // CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
11179 // CHECK17-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
11180 // CHECK17-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
11181 // CHECK17-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 4
11182 // CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
11183 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
11184 // CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
11185 // CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
11186 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
11187 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
11188 // CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
11189 // CHECK17-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
11190 // CHECK17-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
11191 // CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11192 // CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11193 // CHECK17-NEXT: [[I3:%.*]] = alloca i32, align 4
11194 // CHECK17-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
11195 // CHECK17-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
11196 // CHECK17-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
11197 // CHECK17-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
11198 // CHECK17-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 4
11199 // CHECK17-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
11200 // CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
11201 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 4
11202 // CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
11203 // CHECK17-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 4
11204 // CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
11205 // CHECK17-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4
11206 // CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
11207 // CHECK17-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
11208 // CHECK17-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
11209 // CHECK17-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
11210 // CHECK17-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
11211 // CHECK17-NEXT: store i32 0, ptr [[I]], align 4
11212 // CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
11213 // CHECK17-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
11214 // CHECK17-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
11215 // CHECK17: omp.precond.then:
11216 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
11217 // CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
11218 // CHECK17-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4
11219 // CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
11220 // CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
11221 // CHECK17-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_LB]], align 4
11222 // CHECK17-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_UB]], align 4
11223 // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
11224 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
11225 // CHECK17-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
11226 // CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4
11227 // CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP10]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
11228 // CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
11229 // CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
11230 // CHECK17-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP11]], [[TMP12]]
11231 // CHECK17-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
11232 // CHECK17: cond.true:
11233 // CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
11234 // CHECK17-NEXT: br label [[COND_END:%.*]]
11235 // CHECK17: cond.false:
11236 // CHECK17-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
11237 // CHECK17-NEXT: br label [[COND_END]]
11238 // CHECK17: cond.end:
11239 // CHECK17-NEXT: [[COND:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ]
11240 // CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
11241 // CHECK17-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
11242 // CHECK17-NEXT: store i32 [[TMP15]], ptr [[DOTOMP_IV]], align 4
11243 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
11244 // CHECK17: omp.inner.for.cond:
11245 // CHECK17-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18:![0-9]+]]
11246 // CHECK17-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP18]]
11247 // CHECK17-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
11248 // CHECK17-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11249 // CHECK17: omp.inner.for.body:
11250 // CHECK17-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
11251 // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
11252 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
11253 // CHECK17-NEXT: store i32 [[ADD]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP18]]
11254 // CHECK17-NEXT: [[TMP19:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP18]]
11255 // CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 [[TMP19]]
11256 // CHECK17-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP18]]
11257 // CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
11258 // CHECK17: omp.body.continue:
11259 // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
11260 // CHECK17: omp.inner.for.inc:
11261 // CHECK17-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
11262 // CHECK17-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP20]], 1
11263 // CHECK17-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
11264 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
11265 // CHECK17: omp.inner.for.end:
11266 // CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
11267 // CHECK17: omp.loop.exit:
11268 // CHECK17-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
11269 // CHECK17-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
11270 // CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP22]])
11271 // CHECK17-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
11272 // CHECK17-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
11273 // CHECK17-NEXT: br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
11274 // CHECK17: .omp.final.then:
11275 // CHECK17-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
11276 // CHECK17-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP25]], 0
11277 // CHECK17-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
11278 // CHECK17-NEXT: [[MUL9:%.*]] = mul nsw i32 [[DIV8]], 1
11279 // CHECK17-NEXT: [[ADD10:%.*]] = add nsw i32 0, [[MUL9]]
11280 // CHECK17-NEXT: store i32 [[ADD10]], ptr [[I3]], align 4
11281 // CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]]
11282 // CHECK17: .omp.final.done:
11283 // CHECK17-NEXT: br label [[OMP_PRECOND_END]]
11284 // CHECK17: omp.precond.end:
11285 // CHECK17-NEXT: ret void
11288 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159
11289 // CHECK17-SAME: (i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
11290 // CHECK17-NEXT: entry:
11291 // CHECK17-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
11292 // CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
11293 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
11294 // CHECK17-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
11295 // CHECK17-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
11296 // CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
11297 // CHECK17-NEXT: [[TMP0:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
11298 // CHECK17-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4
11299 // CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.omp_outlined, ptr [[N_ADDR]], i32 [[TMP0]], ptr [[TMP1]])
11300 // CHECK17-NEXT: ret void
11303 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.omp_outlined
11304 // CHECK17-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR3]] {
11305 // CHECK17-NEXT: entry:
11306 // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
11307 // CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
11308 // CHECK17-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 4
11309 // CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
11310 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
11311 // CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
11312 // CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
11313 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
11314 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
11315 // CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
11316 // CHECK17-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
11317 // CHECK17-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
11318 // CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11319 // CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11320 // CHECK17-NEXT: [[I3:%.*]] = alloca i32, align 4
11321 // CHECK17-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
11322 // CHECK17-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
11323 // CHECK17-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 4
11324 // CHECK17-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
11325 // CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
11326 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 4
11327 // CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
11328 // CHECK17-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 4
11329 // CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
11330 // CHECK17-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4
11331 // CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
11332 // CHECK17-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
11333 // CHECK17-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
11334 // CHECK17-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
11335 // CHECK17-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
11336 // CHECK17-NEXT: store i32 0, ptr [[I]], align 4
11337 // CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
11338 // CHECK17-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
11339 // CHECK17-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
11340 // CHECK17: omp.precond.then:
11341 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
11342 // CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
11343 // CHECK17-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_COMB_UB]], align 4
11344 // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
11345 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
11346 // CHECK17-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
11347 // CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
11348 // CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
11349 // CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
11350 // CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
11351 // CHECK17-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
11352 // CHECK17-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
11353 // CHECK17: cond.true:
11354 // CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
11355 // CHECK17-NEXT: br label [[COND_END:%.*]]
11356 // CHECK17: cond.false:
11357 // CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
11358 // CHECK17-NEXT: br label [[COND_END]]
11359 // CHECK17: cond.end:
11360 // CHECK17-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
11361 // CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
11362 // CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
11363 // CHECK17-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4
11364 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
11365 // CHECK17: omp.inner.for.cond:
11366 // CHECK17-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23:![0-9]+]]
11367 // CHECK17-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP23]]
11368 // CHECK17-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
11369 // CHECK17-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11370 // CHECK17: omp.inner.for.body:
11371 // CHECK17-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP23]]
11372 // CHECK17-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP23]]
11373 // CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 5, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.omp_outlined.omp_outlined, i32 [[TMP16]], i32 [[TMP17]], ptr [[TMP0]], i32 [[TMP1]], ptr [[TMP2]]), !llvm.access.group [[ACC_GRP23]]
11374 // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
11375 // CHECK17: omp.inner.for.inc:
11376 // CHECK17-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]]
11377 // CHECK17-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP23]]
11378 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
11379 // CHECK17-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]]
11380 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]]
11381 // CHECK17: omp.inner.for.end:
11382 // CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
11383 // CHECK17: omp.loop.exit:
11384 // CHECK17-NEXT: [[TMP20:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
11385 // CHECK17-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP20]], align 4
11386 // CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP21]])
11387 // CHECK17-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
11388 // CHECK17-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
11389 // CHECK17-NEXT: br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
11390 // CHECK17: .omp.final.then:
11391 // CHECK17-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
11392 // CHECK17-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP24]], 0
11393 // CHECK17-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
11394 // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
11395 // CHECK17-NEXT: [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
11396 // CHECK17-NEXT: store i32 [[ADD8]], ptr [[I3]], align 4
11397 // CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]]
11398 // CHECK17: .omp.final.done:
11399 // CHECK17-NEXT: br label [[OMP_PRECOND_END]]
11400 // CHECK17: omp.precond.end:
11401 // CHECK17-NEXT: ret void
11404 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.omp_outlined.omp_outlined
11405 // CHECK17-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR3]] {
11406 // CHECK17-NEXT: entry:
11407 // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
11408 // CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
11409 // CHECK17-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
11410 // CHECK17-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
11411 // CHECK17-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 4
11412 // CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
11413 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
11414 // CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
11415 // CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
11416 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
11417 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
11418 // CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
11419 // CHECK17-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
11420 // CHECK17-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
11421 // CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11422 // CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11423 // CHECK17-NEXT: [[I3:%.*]] = alloca i32, align 4
11424 // CHECK17-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
11425 // CHECK17-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
11426 // CHECK17-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
11427 // CHECK17-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
11428 // CHECK17-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 4
11429 // CHECK17-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
11430 // CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
11431 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 4
11432 // CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
11433 // CHECK17-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 4
11434 // CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
11435 // CHECK17-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4
11436 // CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
11437 // CHECK17-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
11438 // CHECK17-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
11439 // CHECK17-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
11440 // CHECK17-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
11441 // CHECK17-NEXT: store i32 0, ptr [[I]], align 4
11442 // CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
11443 // CHECK17-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
11444 // CHECK17-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
11445 // CHECK17: omp.precond.then:
11446 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
11447 // CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
11448 // CHECK17-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4
11449 // CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
11450 // CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
11451 // CHECK17-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_LB]], align 4
11452 // CHECK17-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_UB]], align 4
11453 // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
11454 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
11455 // CHECK17-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
11456 // CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4
11457 // CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP10]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
11458 // CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
11459 // CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
11460 // CHECK17-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP11]], [[TMP12]]
11461 // CHECK17-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
11462 // CHECK17: cond.true:
11463 // CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
11464 // CHECK17-NEXT: br label [[COND_END:%.*]]
11465 // CHECK17: cond.false:
11466 // CHECK17-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
11467 // CHECK17-NEXT: br label [[COND_END]]
11468 // CHECK17: cond.end:
11469 // CHECK17-NEXT: [[COND:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ]
11470 // CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
11471 // CHECK17-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
11472 // CHECK17-NEXT: store i32 [[TMP15]], ptr [[DOTOMP_IV]], align 4
11473 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
11474 // CHECK17: omp.inner.for.cond:
11475 // CHECK17-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26:![0-9]+]]
11476 // CHECK17-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP26]]
11477 // CHECK17-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
11478 // CHECK17-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11479 // CHECK17: omp.inner.for.body:
11480 // CHECK17-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]]
11481 // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
11482 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
11483 // CHECK17-NEXT: store i32 [[ADD]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP26]]
11484 // CHECK17-NEXT: [[TMP19:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP26]]
11485 // CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 [[TMP19]]
11486 // CHECK17-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP26]]
11487 // CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
11488 // CHECK17: omp.body.continue:
11489 // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
11490 // CHECK17: omp.inner.for.inc:
11491 // CHECK17-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]]
11492 // CHECK17-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP20]], 1
11493 // CHECK17-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]]
11494 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]]
11495 // CHECK17: omp.inner.for.end:
11496 // CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
11497 // CHECK17: omp.loop.exit:
11498 // CHECK17-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
11499 // CHECK17-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
11500 // CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP22]])
11501 // CHECK17-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
11502 // CHECK17-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
11503 // CHECK17-NEXT: br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
11504 // CHECK17: .omp.final.then:
11505 // CHECK17-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
11506 // CHECK17-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP25]], 0
11507 // CHECK17-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
11508 // CHECK17-NEXT: [[MUL9:%.*]] = mul nsw i32 [[DIV8]], 1
11509 // CHECK17-NEXT: [[ADD10:%.*]] = add nsw i32 0, [[MUL9]]
11510 // CHECK17-NEXT: store i32 [[ADD10]], ptr [[I3]], align 4
11511 // CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]]
11512 // CHECK17: .omp.final.done:
11513 // CHECK17-NEXT: br label [[OMP_PRECOND_END]]
11514 // CHECK17: omp.precond.end:
11515 // CHECK17-NEXT: ret void
11518 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164
11519 // CHECK17-SAME: (i32 noundef [[M:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
11520 // CHECK17-NEXT: entry:
11521 // CHECK17-NEXT: [[M_ADDR:%.*]] = alloca i32, align 4
11522 // CHECK17-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
11523 // CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
11524 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
11525 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
11526 // CHECK17-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
11527 // CHECK17-NEXT: store i32 [[M]], ptr [[M_ADDR]], align 4
11528 // CHECK17-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
11529 // CHECK17-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
11530 // CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
11531 // CHECK17-NEXT: [[TMP0:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
11532 // CHECK17-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4
11533 // CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[M_ADDR]], align 4
11534 // CHECK17-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 4
11535 // CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
11536 // CHECK17-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
11537 // CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
11538 // CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.omp_outlined, ptr [[N_ADDR]], i32 [[TMP0]], ptr [[TMP1]], i32 [[TMP4]])
11539 // CHECK17-NEXT: ret void
11542 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.omp_outlined
11543 // CHECK17-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
11544 // CHECK17-NEXT: entry:
11545 // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
11546 // CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
11547 // CHECK17-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 4
11548 // CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
11549 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
11550 // CHECK17-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
11551 // CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
11552 // CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
11553 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
11554 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
11555 // CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
11556 // CHECK17-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
11557 // CHECK17-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
11558 // CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11559 // CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11560 // CHECK17-NEXT: [[I4:%.*]] = alloca i32, align 4
11561 // CHECK17-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
11562 // CHECK17-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
11563 // CHECK17-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
11564 // CHECK17-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 4
11565 // CHECK17-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
11566 // CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
11567 // CHECK17-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
11568 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 4
11569 // CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
11570 // CHECK17-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 4
11571 // CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
11572 // CHECK17-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_1]], align 4
11573 // CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
11574 // CHECK17-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
11575 // CHECK17-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
11576 // CHECK17-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
11577 // CHECK17-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_2]], align 4
11578 // CHECK17-NEXT: store i32 0, ptr [[I]], align 4
11579 // CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
11580 // CHECK17-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
11581 // CHECK17-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
11582 // CHECK17: omp.precond.then:
11583 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
11584 // CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
11585 // CHECK17-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_COMB_UB]], align 4
11586 // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
11587 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
11588 // CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
11589 // CHECK17-NEXT: [[TMP8:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
11590 // CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4
11591 // CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP9]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[TMP7]])
11592 // CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
11593 // CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
11594 // CHECK17-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
11595 // CHECK17-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
11596 // CHECK17: cond.true:
11597 // CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
11598 // CHECK17-NEXT: br label [[COND_END:%.*]]
11599 // CHECK17: cond.false:
11600 // CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
11601 // CHECK17-NEXT: br label [[COND_END]]
11602 // CHECK17: cond.end:
11603 // CHECK17-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
11604 // CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
11605 // CHECK17-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
11606 // CHECK17-NEXT: store i32 [[TMP14]], ptr [[DOTOMP_IV]], align 4
11607 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
11608 // CHECK17: omp.inner.for.cond:
11609 // CHECK17-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29:![0-9]+]]
11610 // CHECK17-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP29]]
11611 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP16]], 1
11612 // CHECK17-NEXT: [[CMP6:%.*]] = icmp slt i32 [[TMP15]], [[ADD]]
11613 // CHECK17-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11614 // CHECK17: omp.inner.for.body:
11615 // CHECK17-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP29]]
11616 // CHECK17-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP29]]
11617 // CHECK17-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group [[ACC_GRP29]]
11618 // CHECK17-NEXT: store i32 [[TMP19]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group [[ACC_GRP29]]
11619 // CHECK17-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group [[ACC_GRP29]]
11620 // CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 6, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.omp_outlined.omp_outlined, i32 [[TMP17]], i32 [[TMP18]], ptr [[TMP0]], i32 [[TMP1]], ptr [[TMP2]], i32 [[TMP20]]), !llvm.access.group [[ACC_GRP29]]
11621 // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
11622 // CHECK17: omp.inner.for.inc:
11623 // CHECK17-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]]
11624 // CHECK17-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP29]]
11625 // CHECK17-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
11626 // CHECK17-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]]
11627 // CHECK17-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP29]]
11628 // CHECK17-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP29]]
11629 // CHECK17-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
11630 // CHECK17-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP29]]
11631 // CHECK17-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP29]]
11632 // CHECK17-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP29]]
11633 // CHECK17-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
11634 // CHECK17-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP29]]
11635 // CHECK17-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP29]]
11636 // CHECK17-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP29]]
11637 // CHECK17-NEXT: [[CMP10:%.*]] = icmp sgt i32 [[TMP27]], [[TMP28]]
11638 // CHECK17-NEXT: br i1 [[CMP10]], label [[COND_TRUE11:%.*]], label [[COND_FALSE12:%.*]]
11639 // CHECK17: cond.true11:
11640 // CHECK17-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP29]]
11641 // CHECK17-NEXT: br label [[COND_END13:%.*]]
11642 // CHECK17: cond.false12:
11643 // CHECK17-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP29]]
11644 // CHECK17-NEXT: br label [[COND_END13]]
11645 // CHECK17: cond.end13:
11646 // CHECK17-NEXT: [[COND14:%.*]] = phi i32 [ [[TMP29]], [[COND_TRUE11]] ], [ [[TMP30]], [[COND_FALSE12]] ]
11647 // CHECK17-NEXT: store i32 [[COND14]], ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP29]]
11648 // CHECK17-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP29]]
11649 // CHECK17-NEXT: store i32 [[TMP31]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]]
11650 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]]
11651 // CHECK17: omp.inner.for.end:
11652 // CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
11653 // CHECK17: omp.loop.exit:
11654 // CHECK17-NEXT: [[TMP32:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
11655 // CHECK17-NEXT: [[TMP33:%.*]] = load i32, ptr [[TMP32]], align 4
11656 // CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP33]])
11657 // CHECK17-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
11658 // CHECK17-NEXT: [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
11659 // CHECK17-NEXT: br i1 [[TMP35]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
11660 // CHECK17: .omp.final.then:
11661 // CHECK17-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
11662 // CHECK17-NEXT: [[SUB15:%.*]] = sub nsw i32 [[TMP36]], 0
11663 // CHECK17-NEXT: [[DIV16:%.*]] = sdiv i32 [[SUB15]], 1
11664 // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV16]], 1
11665 // CHECK17-NEXT: [[ADD17:%.*]] = add nsw i32 0, [[MUL]]
11666 // CHECK17-NEXT: store i32 [[ADD17]], ptr [[I4]], align 4
11667 // CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]]
11668 // CHECK17: .omp.final.done:
11669 // CHECK17-NEXT: br label [[OMP_PRECOND_END]]
11670 // CHECK17: omp.precond.end:
11671 // CHECK17-NEXT: ret void
11674 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.omp_outlined.omp_outlined
11675 // CHECK17-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
11676 // CHECK17-NEXT: entry:
11677 // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
11678 // CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
11679 // CHECK17-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
11680 // CHECK17-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
11681 // CHECK17-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 4
11682 // CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
11683 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
11684 // CHECK17-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
11685 // CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
11686 // CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
11687 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
11688 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
11689 // CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
11690 // CHECK17-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
11691 // CHECK17-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
11692 // CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11693 // CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11694 // CHECK17-NEXT: [[I4:%.*]] = alloca i32, align 4
11695 // CHECK17-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
11696 // CHECK17-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
11697 // CHECK17-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
11698 // CHECK17-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
11699 // CHECK17-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 4
11700 // CHECK17-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
11701 // CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
11702 // CHECK17-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
11703 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 4
11704 // CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
11705 // CHECK17-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 4
11706 // CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
11707 // CHECK17-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_1]], align 4
11708 // CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
11709 // CHECK17-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
11710 // CHECK17-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
11711 // CHECK17-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
11712 // CHECK17-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_2]], align 4
11713 // CHECK17-NEXT: store i32 0, ptr [[I]], align 4
11714 // CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
11715 // CHECK17-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
11716 // CHECK17-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
11717 // CHECK17: omp.precond.then:
11718 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
11719 // CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
11720 // CHECK17-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4
11721 // CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
11722 // CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
11723 // CHECK17-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_LB]], align 4
11724 // CHECK17-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_UB]], align 4
11725 // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
11726 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
11727 // CHECK17-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
11728 // CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4
11729 // CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP10]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
11730 // CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
11731 // CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
11732 // CHECK17-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP11]], [[TMP12]]
11733 // CHECK17-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
11734 // CHECK17: cond.true:
11735 // CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
11736 // CHECK17-NEXT: br label [[COND_END:%.*]]
11737 // CHECK17: cond.false:
11738 // CHECK17-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
11739 // CHECK17-NEXT: br label [[COND_END]]
11740 // CHECK17: cond.end:
11741 // CHECK17-NEXT: [[COND:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ]
11742 // CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
11743 // CHECK17-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
11744 // CHECK17-NEXT: store i32 [[TMP15]], ptr [[DOTOMP_IV]], align 4
11745 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
11746 // CHECK17: omp.inner.for.cond:
11747 // CHECK17-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32:![0-9]+]]
11748 // CHECK17-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP32]]
11749 // CHECK17-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
11750 // CHECK17-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11751 // CHECK17: omp.inner.for.body:
11752 // CHECK17-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]]
11753 // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
11754 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
11755 // CHECK17-NEXT: store i32 [[ADD]], ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP32]]
11756 // CHECK17-NEXT: [[TMP19:%.*]] = load i32, ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP32]]
11757 // CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 [[TMP19]]
11758 // CHECK17-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP32]]
11759 // CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
11760 // CHECK17: omp.body.continue:
11761 // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
11762 // CHECK17: omp.inner.for.inc:
11763 // CHECK17-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]]
11764 // CHECK17-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP20]], 1
11765 // CHECK17-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]]
11766 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]]
11767 // CHECK17: omp.inner.for.end:
11768 // CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
11769 // CHECK17: omp.loop.exit:
11770 // CHECK17-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
11771 // CHECK17-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
11772 // CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP22]])
11773 // CHECK17-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
11774 // CHECK17-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
11775 // CHECK17-NEXT: br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
11776 // CHECK17: .omp.final.then:
11777 // CHECK17-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
11778 // CHECK17-NEXT: [[SUB8:%.*]] = sub nsw i32 [[TMP25]], 0
11779 // CHECK17-NEXT: [[DIV9:%.*]] = sdiv i32 [[SUB8]], 1
11780 // CHECK17-NEXT: [[MUL10:%.*]] = mul nsw i32 [[DIV9]], 1
11781 // CHECK17-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]]
11782 // CHECK17-NEXT: store i32 [[ADD11]], ptr [[I4]], align 4
11783 // CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]]
11784 // CHECK17: .omp.final.done:
11785 // CHECK17-NEXT: br label [[OMP_PRECOND_END]]
11786 // CHECK17: omp.precond.end:
11787 // CHECK17-NEXT: ret void
11790 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169
11791 // CHECK17-SAME: (i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
11792 // CHECK17-NEXT: entry:
11793 // CHECK17-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
11794 // CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
11795 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
11796 // CHECK17-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
11797 // CHECK17-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
11798 // CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
11799 // CHECK17-NEXT: [[TMP0:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
11800 // CHECK17-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4
11801 // CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.omp_outlined, ptr [[N_ADDR]], i32 [[TMP0]], ptr [[TMP1]])
11802 // CHECK17-NEXT: ret void
11805 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.omp_outlined
11806 // CHECK17-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR3]] {
11807 // CHECK17-NEXT: entry:
11808 // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
11809 // CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
11810 // CHECK17-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 4
11811 // CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
11812 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
11813 // CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
11814 // CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
11815 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
11816 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
11817 // CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
11818 // CHECK17-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
11819 // CHECK17-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
11820 // CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11821 // CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11822 // CHECK17-NEXT: [[I3:%.*]] = alloca i32, align 4
11823 // CHECK17-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
11824 // CHECK17-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
11825 // CHECK17-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 4
11826 // CHECK17-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
11827 // CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
11828 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 4
11829 // CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
11830 // CHECK17-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 4
11831 // CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
11832 // CHECK17-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4
11833 // CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
11834 // CHECK17-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
11835 // CHECK17-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
11836 // CHECK17-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
11837 // CHECK17-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
11838 // CHECK17-NEXT: store i32 0, ptr [[I]], align 4
11839 // CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
11840 // CHECK17-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
11841 // CHECK17-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
11842 // CHECK17: omp.precond.then:
11843 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
11844 // CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
11845 // CHECK17-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_COMB_UB]], align 4
11846 // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
11847 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
11848 // CHECK17-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
11849 // CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
11850 // CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
11851 // CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
11852 // CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
11853 // CHECK17-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
11854 // CHECK17-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
11855 // CHECK17: cond.true:
11856 // CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
11857 // CHECK17-NEXT: br label [[COND_END:%.*]]
11858 // CHECK17: cond.false:
11859 // CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
11860 // CHECK17-NEXT: br label [[COND_END]]
11861 // CHECK17: cond.end:
11862 // CHECK17-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
11863 // CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
11864 // CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
11865 // CHECK17-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4
11866 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
11867 // CHECK17: omp.inner.for.cond:
11868 // CHECK17-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35:![0-9]+]]
11869 // CHECK17-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP35]]
11870 // CHECK17-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
11871 // CHECK17-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11872 // CHECK17: omp.inner.for.body:
11873 // CHECK17-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP35]]
11874 // CHECK17-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP35]]
11875 // CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 5, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.omp_outlined.omp_outlined, i32 [[TMP16]], i32 [[TMP17]], ptr [[TMP0]], i32 [[TMP1]], ptr [[TMP2]]), !llvm.access.group [[ACC_GRP35]]
11876 // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
11877 // CHECK17: omp.inner.for.inc:
11878 // CHECK17-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]]
11879 // CHECK17-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP35]]
11880 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
11881 // CHECK17-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]]
11882 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]]
11883 // CHECK17: omp.inner.for.end:
11884 // CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
11885 // CHECK17: omp.loop.exit:
11886 // CHECK17-NEXT: [[TMP20:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
11887 // CHECK17-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP20]], align 4
11888 // CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP21]])
11889 // CHECK17-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
11890 // CHECK17-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
11891 // CHECK17-NEXT: br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
11892 // CHECK17: .omp.final.then:
11893 // CHECK17-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
11894 // CHECK17-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP24]], 0
11895 // CHECK17-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
11896 // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
11897 // CHECK17-NEXT: [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
11898 // CHECK17-NEXT: store i32 [[ADD8]], ptr [[I3]], align 4
11899 // CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]]
11900 // CHECK17: .omp.final.done:
11901 // CHECK17-NEXT: br label [[OMP_PRECOND_END]]
11902 // CHECK17: omp.precond.end:
11903 // CHECK17-NEXT: ret void
11906 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.omp_outlined.omp_outlined
11907 // CHECK17-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR3]] {
11908 // CHECK17-NEXT: entry:
11909 // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
11910 // CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
11911 // CHECK17-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
11912 // CHECK17-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
11913 // CHECK17-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 4
11914 // CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
11915 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
11916 // CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
11917 // CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
11918 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
11919 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
11920 // CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
11921 // CHECK17-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
11922 // CHECK17-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
11923 // CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
11924 // CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
11925 // CHECK17-NEXT: [[I3:%.*]] = alloca i32, align 4
11926 // CHECK17-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
11927 // CHECK17-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
11928 // CHECK17-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
11929 // CHECK17-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
11930 // CHECK17-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 4
11931 // CHECK17-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
11932 // CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
11933 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 4
11934 // CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
11935 // CHECK17-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 4
11936 // CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
11937 // CHECK17-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4
11938 // CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
11939 // CHECK17-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
11940 // CHECK17-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
11941 // CHECK17-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
11942 // CHECK17-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
11943 // CHECK17-NEXT: store i32 0, ptr [[I]], align 4
11944 // CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
11945 // CHECK17-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
11946 // CHECK17-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
11947 // CHECK17: omp.precond.then:
11948 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
11949 // CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
11950 // CHECK17-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4
11951 // CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
11952 // CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
11953 // CHECK17-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_LB]], align 4
11954 // CHECK17-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_UB]], align 4
11955 // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
11956 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
11957 // CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
11958 // CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
11959 // CHECK17-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
11960 // CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP11]], align 4
11961 // CHECK17-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB3]], i32 [[TMP12]], i32 35, i32 [[TMP9]], i32 [[TMP10]], i32 1, i32 1)
11962 // CHECK17-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
11963 // CHECK17: omp.dispatch.cond:
11964 // CHECK17-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
11965 // CHECK17-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 4
11966 // CHECK17-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB3]], i32 [[TMP14]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
11967 // CHECK17-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP15]], 0
11968 // CHECK17-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
11969 // CHECK17: omp.dispatch.body:
11970 // CHECK17-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
11971 // CHECK17-NEXT: store i32 [[TMP16]], ptr [[DOTOMP_IV]], align 4
11972 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
11973 // CHECK17: omp.inner.for.cond:
11974 // CHECK17-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38:![0-9]+]]
11975 // CHECK17-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP38]]
11976 // CHECK17-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
11977 // CHECK17-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
11978 // CHECK17: omp.inner.for.body:
11979 // CHECK17-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38]]
11980 // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
11981 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
11982 // CHECK17-NEXT: store i32 [[ADD]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP38]]
11983 // CHECK17-NEXT: [[TMP20:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP38]]
11984 // CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 [[TMP20]]
11985 // CHECK17-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP38]]
11986 // CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
11987 // CHECK17: omp.body.continue:
11988 // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
11989 // CHECK17: omp.inner.for.inc:
11990 // CHECK17-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38]]
11991 // CHECK17-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP21]], 1
11992 // CHECK17-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38]]
11993 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP39:![0-9]+]]
11994 // CHECK17: omp.inner.for.end:
11995 // CHECK17-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
11996 // CHECK17: omp.dispatch.inc:
11997 // CHECK17-NEXT: br label [[OMP_DISPATCH_COND]]
11998 // CHECK17: omp.dispatch.end:
11999 // CHECK17-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
12000 // CHECK17-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
12001 // CHECK17-NEXT: br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
12002 // CHECK17: .omp.final.then:
12003 // CHECK17-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
12004 // CHECK17-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP24]], 0
12005 // CHECK17-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
12006 // CHECK17-NEXT: [[MUL8:%.*]] = mul nsw i32 [[DIV7]], 1
12007 // CHECK17-NEXT: [[ADD9:%.*]] = add nsw i32 0, [[MUL8]]
12008 // CHECK17-NEXT: store i32 [[ADD9]], ptr [[I3]], align 4
12009 // CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]]
12010 // CHECK17: .omp.final.done:
12011 // CHECK17-NEXT: br label [[OMP_PRECOND_END]]
12012 // CHECK17: omp.precond.end:
12013 // CHECK17-NEXT: ret void
12016 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174
12017 // CHECK17-SAME: (i32 noundef [[M:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
12018 // CHECK17-NEXT: entry:
12019 // CHECK17-NEXT: [[M_ADDR:%.*]] = alloca i32, align 4
12020 // CHECK17-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
12021 // CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
12022 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
12023 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
12024 // CHECK17-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
12025 // CHECK17-NEXT: store i32 [[M]], ptr [[M_ADDR]], align 4
12026 // CHECK17-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
12027 // CHECK17-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
12028 // CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
12029 // CHECK17-NEXT: [[TMP0:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
12030 // CHECK17-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4
12031 // CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[M_ADDR]], align 4
12032 // CHECK17-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 4
12033 // CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
12034 // CHECK17-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
12035 // CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
12036 // CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.omp_outlined, ptr [[N_ADDR]], i32 [[TMP0]], ptr [[TMP1]], i32 [[TMP4]])
12037 // CHECK17-NEXT: ret void
12040 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.omp_outlined
12041 // CHECK17-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
12042 // CHECK17-NEXT: entry:
12043 // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
12044 // CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
12045 // CHECK17-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 4
12046 // CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
12047 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
12048 // CHECK17-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
12049 // CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
12050 // CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
12051 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
12052 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
12053 // CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
12054 // CHECK17-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
12055 // CHECK17-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
12056 // CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
12057 // CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12058 // CHECK17-NEXT: [[I4:%.*]] = alloca i32, align 4
12059 // CHECK17-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
12060 // CHECK17-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
12061 // CHECK17-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
12062 // CHECK17-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 4
12063 // CHECK17-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
12064 // CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
12065 // CHECK17-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
12066 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 4
12067 // CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
12068 // CHECK17-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 4
12069 // CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
12070 // CHECK17-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_1]], align 4
12071 // CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
12072 // CHECK17-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
12073 // CHECK17-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
12074 // CHECK17-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
12075 // CHECK17-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_2]], align 4
12076 // CHECK17-NEXT: store i32 0, ptr [[I]], align 4
12077 // CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
12078 // CHECK17-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
12079 // CHECK17-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
12080 // CHECK17: omp.precond.then:
12081 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
12082 // CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
12083 // CHECK17-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_COMB_UB]], align 4
12084 // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
12085 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
12086 // CHECK17-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
12087 // CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
12088 // CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
12089 // CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
12090 // CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
12091 // CHECK17-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
12092 // CHECK17-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
12093 // CHECK17: cond.true:
12094 // CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
12095 // CHECK17-NEXT: br label [[COND_END:%.*]]
12096 // CHECK17: cond.false:
12097 // CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
12098 // CHECK17-NEXT: br label [[COND_END]]
12099 // CHECK17: cond.end:
12100 // CHECK17-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
12101 // CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
12102 // CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
12103 // CHECK17-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4
12104 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
12105 // CHECK17: omp.inner.for.cond:
12106 // CHECK17-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP41:![0-9]+]]
12107 // CHECK17-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP41]]
12108 // CHECK17-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
12109 // CHECK17-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12110 // CHECK17: omp.inner.for.body:
12111 // CHECK17-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP41]]
12112 // CHECK17-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP41]]
12113 // CHECK17-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group [[ACC_GRP41]]
12114 // CHECK17-NEXT: store i32 [[TMP18]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group [[ACC_GRP41]]
12115 // CHECK17-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group [[ACC_GRP41]]
12116 // CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 6, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.omp_outlined.omp_outlined, i32 [[TMP16]], i32 [[TMP17]], ptr [[TMP0]], i32 [[TMP1]], ptr [[TMP2]], i32 [[TMP19]]), !llvm.access.group [[ACC_GRP41]]
12117 // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
12118 // CHECK17: omp.inner.for.inc:
12119 // CHECK17-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP41]]
12120 // CHECK17-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP41]]
12121 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
12122 // CHECK17-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP41]]
12123 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP42:![0-9]+]]
12124 // CHECK17: omp.inner.for.end:
12125 // CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
12126 // CHECK17: omp.loop.exit:
12127 // CHECK17-NEXT: [[TMP22:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
12128 // CHECK17-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4
12129 // CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP23]])
12130 // CHECK17-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
12131 // CHECK17-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
12132 // CHECK17-NEXT: br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
12133 // CHECK17: .omp.final.then:
12134 // CHECK17-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
12135 // CHECK17-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP26]], 0
12136 // CHECK17-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
12137 // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
12138 // CHECK17-NEXT: [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
12139 // CHECK17-NEXT: store i32 [[ADD9]], ptr [[I4]], align 4
12140 // CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]]
12141 // CHECK17: .omp.final.done:
12142 // CHECK17-NEXT: br label [[OMP_PRECOND_END]]
12143 // CHECK17: omp.precond.end:
12144 // CHECK17-NEXT: ret void
12147 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.omp_outlined.omp_outlined
12148 // CHECK17-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
12149 // CHECK17-NEXT: entry:
12150 // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
12151 // CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
12152 // CHECK17-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
12153 // CHECK17-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
12154 // CHECK17-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 4
12155 // CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
12156 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
12157 // CHECK17-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
12158 // CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
12159 // CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
12160 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
12161 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
12162 // CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
12163 // CHECK17-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
12164 // CHECK17-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
12165 // CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
12166 // CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12167 // CHECK17-NEXT: [[I4:%.*]] = alloca i32, align 4
12168 // CHECK17-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
12169 // CHECK17-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
12170 // CHECK17-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
12171 // CHECK17-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
12172 // CHECK17-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 4
12173 // CHECK17-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
12174 // CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
12175 // CHECK17-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
12176 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 4
12177 // CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
12178 // CHECK17-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 4
12179 // CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
12180 // CHECK17-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_1]], align 4
12181 // CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
12182 // CHECK17-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
12183 // CHECK17-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
12184 // CHECK17-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
12185 // CHECK17-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_2]], align 4
12186 // CHECK17-NEXT: store i32 0, ptr [[I]], align 4
12187 // CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
12188 // CHECK17-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
12189 // CHECK17-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
12190 // CHECK17: omp.precond.then:
12191 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
12192 // CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
12193 // CHECK17-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4
12194 // CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
12195 // CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
12196 // CHECK17-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_LB]], align 4
12197 // CHECK17-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_UB]], align 4
12198 // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
12199 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
12200 // CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
12201 // CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
12202 // CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
12203 // CHECK17-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
12204 // CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4
12205 // CHECK17-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB3]], i32 [[TMP13]], i32 35, i32 [[TMP10]], i32 [[TMP11]], i32 1, i32 [[TMP9]])
12206 // CHECK17-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
12207 // CHECK17: omp.dispatch.cond:
12208 // CHECK17-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
12209 // CHECK17-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 4
12210 // CHECK17-NEXT: [[TMP16:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB3]], i32 [[TMP15]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
12211 // CHECK17-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP16]], 0
12212 // CHECK17-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
12213 // CHECK17: omp.dispatch.body:
12214 // CHECK17-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
12215 // CHECK17-NEXT: store i32 [[TMP17]], ptr [[DOTOMP_IV]], align 4
12216 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
12217 // CHECK17: omp.inner.for.cond:
12218 // CHECK17-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP44:![0-9]+]]
12219 // CHECK17-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP44]]
12220 // CHECK17-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
12221 // CHECK17-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12222 // CHECK17: omp.inner.for.body:
12223 // CHECK17-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP44]]
12224 // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP20]], 1
12225 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
12226 // CHECK17-NEXT: store i32 [[ADD]], ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP44]]
12227 // CHECK17-NEXT: [[TMP21:%.*]] = load i32, ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP44]]
12228 // CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 [[TMP21]]
12229 // CHECK17-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP44]]
12230 // CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
12231 // CHECK17: omp.body.continue:
12232 // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
12233 // CHECK17: omp.inner.for.inc:
12234 // CHECK17-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP44]]
12235 // CHECK17-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP22]], 1
12236 // CHECK17-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP44]]
12237 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP45:![0-9]+]]
12238 // CHECK17: omp.inner.for.end:
12239 // CHECK17-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
12240 // CHECK17: omp.dispatch.inc:
12241 // CHECK17-NEXT: br label [[OMP_DISPATCH_COND]]
12242 // CHECK17: omp.dispatch.end:
12243 // CHECK17-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
12244 // CHECK17-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
12245 // CHECK17-NEXT: br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
12246 // CHECK17: .omp.final.then:
12247 // CHECK17-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
12248 // CHECK17-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP25]], 0
12249 // CHECK17-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
12250 // CHECK17-NEXT: [[MUL9:%.*]] = mul nsw i32 [[DIV8]], 1
12251 // CHECK17-NEXT: [[ADD10:%.*]] = add nsw i32 0, [[MUL9]]
12252 // CHECK17-NEXT: store i32 [[ADD10]], ptr [[I4]], align 4
12253 // CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]]
12254 // CHECK17: .omp.final.done:
12255 // CHECK17-NEXT: br label [[OMP_PRECOND_END]]
12256 // CHECK17: omp.precond.end:
12257 // CHECK17-NEXT: ret void
12260 // CHECK17-LABEL: define {{[^@]+}}@_Z5tmainIiLi10EEiT_
12261 // CHECK17-SAME: (i32 noundef [[ARGC:%.*]]) #[[ATTR6:[0-9]+]] comdat {
12262 // CHECK17-NEXT: entry:
12263 // CHECK17-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
12264 // CHECK17-NEXT: [[A:%.*]] = alloca [10 x i32], align 4
12265 // CHECK17-NEXT: [[M:%.*]] = alloca i32, align 4
12266 // CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 4
12267 // CHECK17-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 4
12268 // CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 4
12269 // CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
12270 // CHECK17-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
12271 // CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS1:%.*]] = alloca [1 x ptr], align 4
12272 // CHECK17-NEXT: [[DOTOFFLOAD_PTRS2:%.*]] = alloca [1 x ptr], align 4
12273 // CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS3:%.*]] = alloca [1 x ptr], align 4
12274 // CHECK17-NEXT: [[_TMP4:%.*]] = alloca i32, align 4
12275 // CHECK17-NEXT: [[KERNEL_ARGS5:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
12276 // CHECK17-NEXT: [[M_CASTED:%.*]] = alloca i32, align 4
12277 // CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS8:%.*]] = alloca [2 x ptr], align 4
12278 // CHECK17-NEXT: [[DOTOFFLOAD_PTRS9:%.*]] = alloca [2 x ptr], align 4
12279 // CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS10:%.*]] = alloca [2 x ptr], align 4
12280 // CHECK17-NEXT: [[_TMP11:%.*]] = alloca i32, align 4
12281 // CHECK17-NEXT: [[KERNEL_ARGS12:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
12282 // CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS15:%.*]] = alloca [1 x ptr], align 4
12283 // CHECK17-NEXT: [[DOTOFFLOAD_PTRS16:%.*]] = alloca [1 x ptr], align 4
12284 // CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS17:%.*]] = alloca [1 x ptr], align 4
12285 // CHECK17-NEXT: [[_TMP18:%.*]] = alloca i32, align 4
12286 // CHECK17-NEXT: [[KERNEL_ARGS19:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
12287 // CHECK17-NEXT: [[M_CASTED22:%.*]] = alloca i32, align 4
12288 // CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS23:%.*]] = alloca [2 x ptr], align 4
12289 // CHECK17-NEXT: [[DOTOFFLOAD_PTRS24:%.*]] = alloca [2 x ptr], align 4
12290 // CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS25:%.*]] = alloca [2 x ptr], align 4
12291 // CHECK17-NEXT: [[_TMP26:%.*]] = alloca i32, align 4
12292 // CHECK17-NEXT: [[KERNEL_ARGS27:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
12293 // CHECK17-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4
12294 // CHECK17-NEXT: store i32 10, ptr [[M]], align 4
12295 // CHECK17-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
12296 // CHECK17-NEXT: store ptr [[A]], ptr [[TMP0]], align 4
12297 // CHECK17-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
12298 // CHECK17-NEXT: store ptr [[A]], ptr [[TMP1]], align 4
12299 // CHECK17-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
12300 // CHECK17-NEXT: store ptr null, ptr [[TMP2]], align 4
12301 // CHECK17-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
12302 // CHECK17-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
12303 // CHECK17-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
12304 // CHECK17-NEXT: store i32 2, ptr [[TMP5]], align 4
12305 // CHECK17-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
12306 // CHECK17-NEXT: store i32 1, ptr [[TMP6]], align 4
12307 // CHECK17-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
12308 // CHECK17-NEXT: store ptr [[TMP3]], ptr [[TMP7]], align 4
12309 // CHECK17-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
12310 // CHECK17-NEXT: store ptr [[TMP4]], ptr [[TMP8]], align 4
12311 // CHECK17-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
12312 // CHECK17-NEXT: store ptr @.offload_sizes.9, ptr [[TMP9]], align 4
12313 // CHECK17-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
12314 // CHECK17-NEXT: store ptr @.offload_maptypes.10, ptr [[TMP10]], align 4
12315 // CHECK17-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
12316 // CHECK17-NEXT: store ptr null, ptr [[TMP11]], align 4
12317 // CHECK17-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
12318 // CHECK17-NEXT: store ptr null, ptr [[TMP12]], align 4
12319 // CHECK17-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
12320 // CHECK17-NEXT: store i64 10, ptr [[TMP13]], align 8
12321 // CHECK17-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
12322 // CHECK17-NEXT: store i64 0, ptr [[TMP14]], align 8
12323 // CHECK17-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
12324 // CHECK17-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP15]], align 4
12325 // CHECK17-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
12326 // CHECK17-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP16]], align 4
12327 // CHECK17-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
12328 // CHECK17-NEXT: store i32 0, ptr [[TMP17]], align 4
12329 // CHECK17-NEXT: [[TMP18:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.region_id, ptr [[KERNEL_ARGS]])
12330 // CHECK17-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
12331 // CHECK17-NEXT: br i1 [[TMP19]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
12332 // CHECK17: omp_offload.failed:
12333 // CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122(ptr [[A]]) #[[ATTR4]]
12334 // CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT]]
12335 // CHECK17: omp_offload.cont:
12336 // CHECK17-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
12337 // CHECK17-NEXT: store ptr [[A]], ptr [[TMP20]], align 4
12338 // CHECK17-NEXT: [[TMP21:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
12339 // CHECK17-NEXT: store ptr [[A]], ptr [[TMP21]], align 4
12340 // CHECK17-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS3]], i32 0, i32 0
12341 // CHECK17-NEXT: store ptr null, ptr [[TMP22]], align 4
12342 // CHECK17-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
12343 // CHECK17-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
12344 // CHECK17-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 0
12345 // CHECK17-NEXT: store i32 2, ptr [[TMP25]], align 4
12346 // CHECK17-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 1
12347 // CHECK17-NEXT: store i32 1, ptr [[TMP26]], align 4
12348 // CHECK17-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 2
12349 // CHECK17-NEXT: store ptr [[TMP23]], ptr [[TMP27]], align 4
12350 // CHECK17-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 3
12351 // CHECK17-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 4
12352 // CHECK17-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 4
12353 // CHECK17-NEXT: store ptr @.offload_sizes.11, ptr [[TMP29]], align 4
12354 // CHECK17-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 5
12355 // CHECK17-NEXT: store ptr @.offload_maptypes.12, ptr [[TMP30]], align 4
12356 // CHECK17-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 6
12357 // CHECK17-NEXT: store ptr null, ptr [[TMP31]], align 4
12358 // CHECK17-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 7
12359 // CHECK17-NEXT: store ptr null, ptr [[TMP32]], align 4
12360 // CHECK17-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 8
12361 // CHECK17-NEXT: store i64 10, ptr [[TMP33]], align 8
12362 // CHECK17-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 9
12363 // CHECK17-NEXT: store i64 0, ptr [[TMP34]], align 8
12364 // CHECK17-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 10
12365 // CHECK17-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP35]], align 4
12366 // CHECK17-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 11
12367 // CHECK17-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP36]], align 4
12368 // CHECK17-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 12
12369 // CHECK17-NEXT: store i32 0, ptr [[TMP37]], align 4
12370 // CHECK17-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.region_id, ptr [[KERNEL_ARGS5]])
12371 // CHECK17-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
12372 // CHECK17-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED6:%.*]], label [[OMP_OFFLOAD_CONT7:%.*]]
12373 // CHECK17: omp_offload.failed6:
12374 // CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127(ptr [[A]]) #[[ATTR4]]
12375 // CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT7]]
12376 // CHECK17: omp_offload.cont7:
12377 // CHECK17-NEXT: [[TMP40:%.*]] = load i32, ptr [[M]], align 4
12378 // CHECK17-NEXT: store i32 [[TMP40]], ptr [[M_CASTED]], align 4
12379 // CHECK17-NEXT: [[TMP41:%.*]] = load i32, ptr [[M_CASTED]], align 4
12380 // CHECK17-NEXT: [[TMP42:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS8]], i32 0, i32 0
12381 // CHECK17-NEXT: store i32 [[TMP41]], ptr [[TMP42]], align 4
12382 // CHECK17-NEXT: [[TMP43:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS9]], i32 0, i32 0
12383 // CHECK17-NEXT: store i32 [[TMP41]], ptr [[TMP43]], align 4
12384 // CHECK17-NEXT: [[TMP44:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS10]], i32 0, i32 0
12385 // CHECK17-NEXT: store ptr null, ptr [[TMP44]], align 4
12386 // CHECK17-NEXT: [[TMP45:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS8]], i32 0, i32 1
12387 // CHECK17-NEXT: store ptr [[A]], ptr [[TMP45]], align 4
12388 // CHECK17-NEXT: [[TMP46:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS9]], i32 0, i32 1
12389 // CHECK17-NEXT: store ptr [[A]], ptr [[TMP46]], align 4
12390 // CHECK17-NEXT: [[TMP47:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS10]], i32 0, i32 1
12391 // CHECK17-NEXT: store ptr null, ptr [[TMP47]], align 4
12392 // CHECK17-NEXT: [[TMP48:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS8]], i32 0, i32 0
12393 // CHECK17-NEXT: [[TMP49:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS9]], i32 0, i32 0
12394 // CHECK17-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 0
12395 // CHECK17-NEXT: store i32 2, ptr [[TMP50]], align 4
12396 // CHECK17-NEXT: [[TMP51:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 1
12397 // CHECK17-NEXT: store i32 2, ptr [[TMP51]], align 4
12398 // CHECK17-NEXT: [[TMP52:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 2
12399 // CHECK17-NEXT: store ptr [[TMP48]], ptr [[TMP52]], align 4
12400 // CHECK17-NEXT: [[TMP53:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 3
12401 // CHECK17-NEXT: store ptr [[TMP49]], ptr [[TMP53]], align 4
12402 // CHECK17-NEXT: [[TMP54:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 4
12403 // CHECK17-NEXT: store ptr @.offload_sizes.13, ptr [[TMP54]], align 4
12404 // CHECK17-NEXT: [[TMP55:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 5
12405 // CHECK17-NEXT: store ptr @.offload_maptypes.14, ptr [[TMP55]], align 4
12406 // CHECK17-NEXT: [[TMP56:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 6
12407 // CHECK17-NEXT: store ptr null, ptr [[TMP56]], align 4
12408 // CHECK17-NEXT: [[TMP57:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 7
12409 // CHECK17-NEXT: store ptr null, ptr [[TMP57]], align 4
12410 // CHECK17-NEXT: [[TMP58:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 8
12411 // CHECK17-NEXT: store i64 10, ptr [[TMP58]], align 8
12412 // CHECK17-NEXT: [[TMP59:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 9
12413 // CHECK17-NEXT: store i64 0, ptr [[TMP59]], align 8
12414 // CHECK17-NEXT: [[TMP60:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 10
12415 // CHECK17-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP60]], align 4
12416 // CHECK17-NEXT: [[TMP61:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 11
12417 // CHECK17-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP61]], align 4
12418 // CHECK17-NEXT: [[TMP62:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 12
12419 // CHECK17-NEXT: store i32 0, ptr [[TMP62]], align 4
12420 // CHECK17-NEXT: [[TMP63:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.region_id, ptr [[KERNEL_ARGS12]])
12421 // CHECK17-NEXT: [[TMP64:%.*]] = icmp ne i32 [[TMP63]], 0
12422 // CHECK17-NEXT: br i1 [[TMP64]], label [[OMP_OFFLOAD_FAILED13:%.*]], label [[OMP_OFFLOAD_CONT14:%.*]]
12423 // CHECK17: omp_offload.failed13:
12424 // CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132(i32 [[TMP41]], ptr [[A]]) #[[ATTR4]]
12425 // CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT14]]
12426 // CHECK17: omp_offload.cont14:
12427 // CHECK17-NEXT: [[TMP65:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS15]], i32 0, i32 0
12428 // CHECK17-NEXT: store ptr [[A]], ptr [[TMP65]], align 4
12429 // CHECK17-NEXT: [[TMP66:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS16]], i32 0, i32 0
12430 // CHECK17-NEXT: store ptr [[A]], ptr [[TMP66]], align 4
12431 // CHECK17-NEXT: [[TMP67:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS17]], i32 0, i32 0
12432 // CHECK17-NEXT: store ptr null, ptr [[TMP67]], align 4
12433 // CHECK17-NEXT: [[TMP68:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS15]], i32 0, i32 0
12434 // CHECK17-NEXT: [[TMP69:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS16]], i32 0, i32 0
12435 // CHECK17-NEXT: [[TMP70:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 0
12436 // CHECK17-NEXT: store i32 2, ptr [[TMP70]], align 4
12437 // CHECK17-NEXT: [[TMP71:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 1
12438 // CHECK17-NEXT: store i32 1, ptr [[TMP71]], align 4
12439 // CHECK17-NEXT: [[TMP72:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 2
12440 // CHECK17-NEXT: store ptr [[TMP68]], ptr [[TMP72]], align 4
12441 // CHECK17-NEXT: [[TMP73:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 3
12442 // CHECK17-NEXT: store ptr [[TMP69]], ptr [[TMP73]], align 4
12443 // CHECK17-NEXT: [[TMP74:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 4
12444 // CHECK17-NEXT: store ptr @.offload_sizes.15, ptr [[TMP74]], align 4
12445 // CHECK17-NEXT: [[TMP75:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 5
12446 // CHECK17-NEXT: store ptr @.offload_maptypes.16, ptr [[TMP75]], align 4
12447 // CHECK17-NEXT: [[TMP76:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 6
12448 // CHECK17-NEXT: store ptr null, ptr [[TMP76]], align 4
12449 // CHECK17-NEXT: [[TMP77:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 7
12450 // CHECK17-NEXT: store ptr null, ptr [[TMP77]], align 4
12451 // CHECK17-NEXT: [[TMP78:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 8
12452 // CHECK17-NEXT: store i64 10, ptr [[TMP78]], align 8
12453 // CHECK17-NEXT: [[TMP79:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 9
12454 // CHECK17-NEXT: store i64 0, ptr [[TMP79]], align 8
12455 // CHECK17-NEXT: [[TMP80:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 10
12456 // CHECK17-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP80]], align 4
12457 // CHECK17-NEXT: [[TMP81:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 11
12458 // CHECK17-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP81]], align 4
12459 // CHECK17-NEXT: [[TMP82:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 12
12460 // CHECK17-NEXT: store i32 0, ptr [[TMP82]], align 4
12461 // CHECK17-NEXT: [[TMP83:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.region_id, ptr [[KERNEL_ARGS19]])
12462 // CHECK17-NEXT: [[TMP84:%.*]] = icmp ne i32 [[TMP83]], 0
12463 // CHECK17-NEXT: br i1 [[TMP84]], label [[OMP_OFFLOAD_FAILED20:%.*]], label [[OMP_OFFLOAD_CONT21:%.*]]
12464 // CHECK17: omp_offload.failed20:
12465 // CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137(ptr [[A]]) #[[ATTR4]]
12466 // CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT21]]
12467 // CHECK17: omp_offload.cont21:
12468 // CHECK17-NEXT: [[TMP85:%.*]] = load i32, ptr [[M]], align 4
12469 // CHECK17-NEXT: store i32 [[TMP85]], ptr [[M_CASTED22]], align 4
12470 // CHECK17-NEXT: [[TMP86:%.*]] = load i32, ptr [[M_CASTED22]], align 4
12471 // CHECK17-NEXT: [[TMP87:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 0
12472 // CHECK17-NEXT: store i32 [[TMP86]], ptr [[TMP87]], align 4
12473 // CHECK17-NEXT: [[TMP88:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS24]], i32 0, i32 0
12474 // CHECK17-NEXT: store i32 [[TMP86]], ptr [[TMP88]], align 4
12475 // CHECK17-NEXT: [[TMP89:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 0
12476 // CHECK17-NEXT: store ptr null, ptr [[TMP89]], align 4
12477 // CHECK17-NEXT: [[TMP90:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 1
12478 // CHECK17-NEXT: store ptr [[A]], ptr [[TMP90]], align 4
12479 // CHECK17-NEXT: [[TMP91:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS24]], i32 0, i32 1
12480 // CHECK17-NEXT: store ptr [[A]], ptr [[TMP91]], align 4
12481 // CHECK17-NEXT: [[TMP92:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 1
12482 // CHECK17-NEXT: store ptr null, ptr [[TMP92]], align 4
12483 // CHECK17-NEXT: [[TMP93:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 0
12484 // CHECK17-NEXT: [[TMP94:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS24]], i32 0, i32 0
12485 // CHECK17-NEXT: [[TMP95:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 0
12486 // CHECK17-NEXT: store i32 2, ptr [[TMP95]], align 4
12487 // CHECK17-NEXT: [[TMP96:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 1
12488 // CHECK17-NEXT: store i32 2, ptr [[TMP96]], align 4
12489 // CHECK17-NEXT: [[TMP97:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 2
12490 // CHECK17-NEXT: store ptr [[TMP93]], ptr [[TMP97]], align 4
12491 // CHECK17-NEXT: [[TMP98:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 3
12492 // CHECK17-NEXT: store ptr [[TMP94]], ptr [[TMP98]], align 4
12493 // CHECK17-NEXT: [[TMP99:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 4
12494 // CHECK17-NEXT: store ptr @.offload_sizes.17, ptr [[TMP99]], align 4
12495 // CHECK17-NEXT: [[TMP100:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 5
12496 // CHECK17-NEXT: store ptr @.offload_maptypes.18, ptr [[TMP100]], align 4
12497 // CHECK17-NEXT: [[TMP101:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 6
12498 // CHECK17-NEXT: store ptr null, ptr [[TMP101]], align 4
12499 // CHECK17-NEXT: [[TMP102:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 7
12500 // CHECK17-NEXT: store ptr null, ptr [[TMP102]], align 4
12501 // CHECK17-NEXT: [[TMP103:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 8
12502 // CHECK17-NEXT: store i64 10, ptr [[TMP103]], align 8
12503 // CHECK17-NEXT: [[TMP104:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 9
12504 // CHECK17-NEXT: store i64 0, ptr [[TMP104]], align 8
12505 // CHECK17-NEXT: [[TMP105:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 10
12506 // CHECK17-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP105]], align 4
12507 // CHECK17-NEXT: [[TMP106:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 11
12508 // CHECK17-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP106]], align 4
12509 // CHECK17-NEXT: [[TMP107:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 12
12510 // CHECK17-NEXT: store i32 0, ptr [[TMP107]], align 4
12511 // CHECK17-NEXT: [[TMP108:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.region_id, ptr [[KERNEL_ARGS27]])
12512 // CHECK17-NEXT: [[TMP109:%.*]] = icmp ne i32 [[TMP108]], 0
12513 // CHECK17-NEXT: br i1 [[TMP109]], label [[OMP_OFFLOAD_FAILED28:%.*]], label [[OMP_OFFLOAD_CONT29:%.*]]
12514 // CHECK17: omp_offload.failed28:
12515 // CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142(i32 [[TMP86]], ptr [[A]]) #[[ATTR4]]
12516 // CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT29]]
12517 // CHECK17: omp_offload.cont29:
12518 // CHECK17-NEXT: ret i32 0
12521 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122
12522 // CHECK17-SAME: (ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
12523 // CHECK17-NEXT: entry:
12524 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
12525 // CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
12526 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
12527 // CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.omp_outlined, ptr [[TMP0]])
12528 // CHECK17-NEXT: ret void
12531 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.omp_outlined
12532 // CHECK17-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR3]] {
12533 // CHECK17-NEXT: entry:
12534 // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
12535 // CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
12536 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
12537 // CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
12538 // CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
12539 // CHECK17-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
12540 // CHECK17-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
12541 // CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
12542 // CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12543 // CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
12544 // CHECK17-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
12545 // CHECK17-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
12546 // CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
12547 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
12548 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
12549 // CHECK17-NEXT: store i32 9, ptr [[DOTOMP_COMB_UB]], align 4
12550 // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
12551 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
12552 // CHECK17-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
12553 // CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
12554 // CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
12555 // CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
12556 // CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
12557 // CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
12558 // CHECK17: cond.true:
12559 // CHECK17-NEXT: br label [[COND_END:%.*]]
12560 // CHECK17: cond.false:
12561 // CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
12562 // CHECK17-NEXT: br label [[COND_END]]
12563 // CHECK17: cond.end:
12564 // CHECK17-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
12565 // CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
12566 // CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
12567 // CHECK17-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
12568 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
12569 // CHECK17: omp.inner.for.cond:
12570 // CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP47:![0-9]+]]
12571 // CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP47]]
12572 // CHECK17-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
12573 // CHECK17-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12574 // CHECK17: omp.inner.for.body:
12575 // CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP47]]
12576 // CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP47]]
12577 // CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.omp_outlined.omp_outlined, i32 [[TMP8]], i32 [[TMP9]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP47]]
12578 // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
12579 // CHECK17: omp.inner.for.inc:
12580 // CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP47]]
12581 // CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP47]]
12582 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
12583 // CHECK17-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP47]]
12584 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP48:![0-9]+]]
12585 // CHECK17: omp.inner.for.end:
12586 // CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
12587 // CHECK17: omp.loop.exit:
12588 // CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
12589 // CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
12590 // CHECK17-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
12591 // CHECK17-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
12592 // CHECK17: .omp.final.then:
12593 // CHECK17-NEXT: store i32 10, ptr [[I]], align 4
12594 // CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]]
12595 // CHECK17: .omp.final.done:
12596 // CHECK17-NEXT: ret void
12599 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.omp_outlined.omp_outlined
12600 // CHECK17-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR3]] {
12601 // CHECK17-NEXT: entry:
12602 // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
12603 // CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
12604 // CHECK17-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
12605 // CHECK17-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
12606 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
12607 // CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
12608 // CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
12609 // CHECK17-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
12610 // CHECK17-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
12611 // CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
12612 // CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12613 // CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
12614 // CHECK17-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
12615 // CHECK17-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
12616 // CHECK17-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
12617 // CHECK17-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
12618 // CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
12619 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
12620 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
12621 // CHECK17-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
12622 // CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
12623 // CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
12624 // CHECK17-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_LB]], align 4
12625 // CHECK17-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4
12626 // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
12627 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
12628 // CHECK17-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
12629 // CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
12630 // CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
12631 // CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
12632 // CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 9
12633 // CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
12634 // CHECK17: cond.true:
12635 // CHECK17-NEXT: br label [[COND_END:%.*]]
12636 // CHECK17: cond.false:
12637 // CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
12638 // CHECK17-NEXT: br label [[COND_END]]
12639 // CHECK17: cond.end:
12640 // CHECK17-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
12641 // CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
12642 // CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
12643 // CHECK17-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4
12644 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
12645 // CHECK17: omp.inner.for.cond:
12646 // CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP50:![0-9]+]]
12647 // CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP50]]
12648 // CHECK17-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
12649 // CHECK17-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12650 // CHECK17: omp.inner.for.body:
12651 // CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP50]]
12652 // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
12653 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
12654 // CHECK17-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP50]]
12655 // CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP50]]
12656 // CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 [[TMP11]]
12657 // CHECK17-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP50]]
12658 // CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
12659 // CHECK17: omp.body.continue:
12660 // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
12661 // CHECK17: omp.inner.for.inc:
12662 // CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP50]]
12663 // CHECK17-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP12]], 1
12664 // CHECK17-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP50]]
12665 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP51:![0-9]+]]
12666 // CHECK17: omp.inner.for.end:
12667 // CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
12668 // CHECK17: omp.loop.exit:
12669 // CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP4]])
12670 // CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
12671 // CHECK17-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
12672 // CHECK17-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
12673 // CHECK17: .omp.final.then:
12674 // CHECK17-NEXT: store i32 10, ptr [[I]], align 4
12675 // CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]]
12676 // CHECK17: .omp.final.done:
12677 // CHECK17-NEXT: ret void
12680 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127
12681 // CHECK17-SAME: (ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
12682 // CHECK17-NEXT: entry:
12683 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
12684 // CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
12685 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
12686 // CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.omp_outlined, ptr [[TMP0]])
12687 // CHECK17-NEXT: ret void
12690 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.omp_outlined
12691 // CHECK17-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR3]] {
12692 // CHECK17-NEXT: entry:
12693 // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
12694 // CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
12695 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
12696 // CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
12697 // CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
12698 // CHECK17-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
12699 // CHECK17-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
12700 // CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
12701 // CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12702 // CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
12703 // CHECK17-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
12704 // CHECK17-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
12705 // CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
12706 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
12707 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
12708 // CHECK17-NEXT: store i32 9, ptr [[DOTOMP_COMB_UB]], align 4
12709 // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
12710 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
12711 // CHECK17-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
12712 // CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
12713 // CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
12714 // CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
12715 // CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
12716 // CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
12717 // CHECK17: cond.true:
12718 // CHECK17-NEXT: br label [[COND_END:%.*]]
12719 // CHECK17: cond.false:
12720 // CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
12721 // CHECK17-NEXT: br label [[COND_END]]
12722 // CHECK17: cond.end:
12723 // CHECK17-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
12724 // CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
12725 // CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
12726 // CHECK17-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
12727 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
12728 // CHECK17: omp.inner.for.cond:
12729 // CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP53:![0-9]+]]
12730 // CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP53]]
12731 // CHECK17-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
12732 // CHECK17-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12733 // CHECK17: omp.inner.for.body:
12734 // CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP53]]
12735 // CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP53]]
12736 // CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.omp_outlined.omp_outlined, i32 [[TMP8]], i32 [[TMP9]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP53]]
12737 // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
12738 // CHECK17: omp.inner.for.inc:
12739 // CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP53]]
12740 // CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP53]]
12741 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
12742 // CHECK17-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP53]]
12743 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP54:![0-9]+]]
12744 // CHECK17: omp.inner.for.end:
12745 // CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
12746 // CHECK17: omp.loop.exit:
12747 // CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
12748 // CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
12749 // CHECK17-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
12750 // CHECK17-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
12751 // CHECK17: .omp.final.then:
12752 // CHECK17-NEXT: store i32 10, ptr [[I]], align 4
12753 // CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]]
12754 // CHECK17: .omp.final.done:
12755 // CHECK17-NEXT: ret void
12758 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.omp_outlined.omp_outlined
12759 // CHECK17-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR3]] {
12760 // CHECK17-NEXT: entry:
12761 // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
12762 // CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
12763 // CHECK17-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
12764 // CHECK17-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
12765 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
12766 // CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
12767 // CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
12768 // CHECK17-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
12769 // CHECK17-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
12770 // CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
12771 // CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12772 // CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
12773 // CHECK17-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
12774 // CHECK17-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
12775 // CHECK17-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
12776 // CHECK17-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
12777 // CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
12778 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
12779 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
12780 // CHECK17-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
12781 // CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
12782 // CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
12783 // CHECK17-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_LB]], align 4
12784 // CHECK17-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4
12785 // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
12786 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
12787 // CHECK17-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
12788 // CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
12789 // CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
12790 // CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
12791 // CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 9
12792 // CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
12793 // CHECK17: cond.true:
12794 // CHECK17-NEXT: br label [[COND_END:%.*]]
12795 // CHECK17: cond.false:
12796 // CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
12797 // CHECK17-NEXT: br label [[COND_END]]
12798 // CHECK17: cond.end:
12799 // CHECK17-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
12800 // CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
12801 // CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
12802 // CHECK17-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4
12803 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
12804 // CHECK17: omp.inner.for.cond:
12805 // CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP56:![0-9]+]]
12806 // CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP56]]
12807 // CHECK17-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
12808 // CHECK17-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12809 // CHECK17: omp.inner.for.body:
12810 // CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP56]]
12811 // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
12812 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
12813 // CHECK17-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP56]]
12814 // CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP56]]
12815 // CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 [[TMP11]]
12816 // CHECK17-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP56]]
12817 // CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
12818 // CHECK17: omp.body.continue:
12819 // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
12820 // CHECK17: omp.inner.for.inc:
12821 // CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP56]]
12822 // CHECK17-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP12]], 1
12823 // CHECK17-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP56]]
12824 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP57:![0-9]+]]
12825 // CHECK17: omp.inner.for.end:
12826 // CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
12827 // CHECK17: omp.loop.exit:
12828 // CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP4]])
12829 // CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
12830 // CHECK17-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
12831 // CHECK17-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
12832 // CHECK17: .omp.final.then:
12833 // CHECK17-NEXT: store i32 10, ptr [[I]], align 4
12834 // CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]]
12835 // CHECK17: .omp.final.done:
12836 // CHECK17-NEXT: ret void
12839 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132
12840 // CHECK17-SAME: (i32 noundef [[M:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
12841 // CHECK17-NEXT: entry:
12842 // CHECK17-NEXT: [[M_ADDR:%.*]] = alloca i32, align 4
12843 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
12844 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
12845 // CHECK17-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
12846 // CHECK17-NEXT: store i32 [[M]], ptr [[M_ADDR]], align 4
12847 // CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
12848 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
12849 // CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[M_ADDR]], align 4
12850 // CHECK17-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
12851 // CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
12852 // CHECK17-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
12853 // CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
12854 // CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.omp_outlined, ptr [[TMP0]], i32 [[TMP3]])
12855 // CHECK17-NEXT: ret void
12858 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.omp_outlined
12859 // CHECK17-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
12860 // CHECK17-NEXT: entry:
12861 // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
12862 // CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
12863 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
12864 // CHECK17-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
12865 // CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
12866 // CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
12867 // CHECK17-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
12868 // CHECK17-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
12869 // CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
12870 // CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12871 // CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
12872 // CHECK17-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
12873 // CHECK17-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
12874 // CHECK17-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
12875 // CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
12876 // CHECK17-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
12877 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
12878 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
12879 // CHECK17-NEXT: store i32 9, ptr [[DOTOMP_COMB_UB]], align 4
12880 // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
12881 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
12882 // CHECK17-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
12883 // CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
12884 // CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
12885 // CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
12886 // CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
12887 // CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
12888 // CHECK17: cond.true:
12889 // CHECK17-NEXT: br label [[COND_END:%.*]]
12890 // CHECK17: cond.false:
12891 // CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
12892 // CHECK17-NEXT: br label [[COND_END]]
12893 // CHECK17: cond.end:
12894 // CHECK17-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
12895 // CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
12896 // CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
12897 // CHECK17-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
12898 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
12899 // CHECK17: omp.inner.for.cond:
12900 // CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP59:![0-9]+]]
12901 // CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP59]]
12902 // CHECK17-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
12903 // CHECK17-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12904 // CHECK17: omp.inner.for.body:
12905 // CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP59]]
12906 // CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP59]]
12907 // CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group [[ACC_GRP59]]
12908 // CHECK17-NEXT: store i32 [[TMP10]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group [[ACC_GRP59]]
12909 // CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group [[ACC_GRP59]]
12910 // CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.omp_outlined.omp_outlined, i32 [[TMP8]], i32 [[TMP9]], ptr [[TMP0]], i32 [[TMP11]]), !llvm.access.group [[ACC_GRP59]]
12911 // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
12912 // CHECK17: omp.inner.for.inc:
12913 // CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP59]]
12914 // CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP59]]
12915 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
12916 // CHECK17-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP59]]
12917 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP60:![0-9]+]]
12918 // CHECK17: omp.inner.for.end:
12919 // CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
12920 // CHECK17: omp.loop.exit:
12921 // CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
12922 // CHECK17-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
12923 // CHECK17-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
12924 // CHECK17-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
12925 // CHECK17: .omp.final.then:
12926 // CHECK17-NEXT: store i32 10, ptr [[I]], align 4
12927 // CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]]
12928 // CHECK17: .omp.final.done:
12929 // CHECK17-NEXT: ret void
12932 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.omp_outlined.omp_outlined
12933 // CHECK17-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
12934 // CHECK17-NEXT: entry:
12935 // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
12936 // CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
12937 // CHECK17-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
12938 // CHECK17-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
12939 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
12940 // CHECK17-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
12941 // CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
12942 // CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
12943 // CHECK17-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
12944 // CHECK17-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
12945 // CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
12946 // CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
12947 // CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
12948 // CHECK17-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
12949 // CHECK17-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
12950 // CHECK17-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
12951 // CHECK17-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
12952 // CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
12953 // CHECK17-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
12954 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
12955 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
12956 // CHECK17-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
12957 // CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
12958 // CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
12959 // CHECK17-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_LB]], align 4
12960 // CHECK17-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4
12961 // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
12962 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
12963 // CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
12964 // CHECK17-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
12965 // CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4
12966 // CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP5]], i32 33, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[TMP3]])
12967 // CHECK17-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
12968 // CHECK17: omp.dispatch.cond:
12969 // CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
12970 // CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
12971 // CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], [[TMP7]]
12972 // CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
12973 // CHECK17: cond.true:
12974 // CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
12975 // CHECK17-NEXT: br label [[COND_END:%.*]]
12976 // CHECK17: cond.false:
12977 // CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
12978 // CHECK17-NEXT: br label [[COND_END]]
12979 // CHECK17: cond.end:
12980 // CHECK17-NEXT: [[COND:%.*]] = phi i32 [ [[TMP8]], [[COND_TRUE]] ], [ [[TMP9]], [[COND_FALSE]] ]
12981 // CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
12982 // CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
12983 // CHECK17-NEXT: store i32 [[TMP10]], ptr [[DOTOMP_IV]], align 4
12984 // CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
12985 // CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
12986 // CHECK17-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]]
12987 // CHECK17-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
12988 // CHECK17: omp.dispatch.body:
12989 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
12990 // CHECK17: omp.inner.for.cond:
12991 // CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP62:![0-9]+]]
12992 // CHECK17-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP62]]
12993 // CHECK17-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
12994 // CHECK17-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
12995 // CHECK17: omp.inner.for.body:
12996 // CHECK17-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP62]]
12997 // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1
12998 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
12999 // CHECK17-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP62]]
13000 // CHECK17-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP62]]
13001 // CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 [[TMP16]]
13002 // CHECK17-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP62]]
13003 // CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
13004 // CHECK17: omp.body.continue:
13005 // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
13006 // CHECK17: omp.inner.for.inc:
13007 // CHECK17-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP62]]
13008 // CHECK17-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP17]], 1
13009 // CHECK17-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP62]]
13010 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP63:![0-9]+]]
13011 // CHECK17: omp.inner.for.end:
13012 // CHECK17-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
13013 // CHECK17: omp.dispatch.inc:
13014 // CHECK17-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
13015 // CHECK17-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
13016 // CHECK17-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
13017 // CHECK17-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_LB]], align 4
13018 // CHECK17-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
13019 // CHECK17-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
13020 // CHECK17-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
13021 // CHECK17-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_UB]], align 4
13022 // CHECK17-NEXT: br label [[OMP_DISPATCH_COND]]
13023 // CHECK17: omp.dispatch.end:
13024 // CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP5]])
13025 // CHECK17-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
13026 // CHECK17-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
13027 // CHECK17-NEXT: br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
13028 // CHECK17: .omp.final.then:
13029 // CHECK17-NEXT: store i32 10, ptr [[I]], align 4
13030 // CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]]
13031 // CHECK17: .omp.final.done:
13032 // CHECK17-NEXT: ret void
13035 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137
13036 // CHECK17-SAME: (ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
13037 // CHECK17-NEXT: entry:
13038 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
13039 // CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
13040 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
13041 // CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.omp_outlined, ptr [[TMP0]])
13042 // CHECK17-NEXT: ret void
13045 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.omp_outlined
13046 // CHECK17-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR3]] {
13047 // CHECK17-NEXT: entry:
13048 // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
13049 // CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
13050 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
13051 // CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
13052 // CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
13053 // CHECK17-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
13054 // CHECK17-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
13055 // CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13056 // CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13057 // CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
13058 // CHECK17-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
13059 // CHECK17-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
13060 // CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
13061 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
13062 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
13063 // CHECK17-NEXT: store i32 9, ptr [[DOTOMP_COMB_UB]], align 4
13064 // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
13065 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
13066 // CHECK17-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
13067 // CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
13068 // CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
13069 // CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
13070 // CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
13071 // CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
13072 // CHECK17: cond.true:
13073 // CHECK17-NEXT: br label [[COND_END:%.*]]
13074 // CHECK17: cond.false:
13075 // CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
13076 // CHECK17-NEXT: br label [[COND_END]]
13077 // CHECK17: cond.end:
13078 // CHECK17-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
13079 // CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
13080 // CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
13081 // CHECK17-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
13082 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
13083 // CHECK17: omp.inner.for.cond:
13084 // CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP65:![0-9]+]]
13085 // CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP65]]
13086 // CHECK17-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
13087 // CHECK17-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13088 // CHECK17: omp.inner.for.body:
13089 // CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP65]]
13090 // CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP65]]
13091 // CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.omp_outlined.omp_outlined, i32 [[TMP8]], i32 [[TMP9]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP65]]
13092 // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
13093 // CHECK17: omp.inner.for.inc:
13094 // CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP65]]
13095 // CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP65]]
13096 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
13097 // CHECK17-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP65]]
13098 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP66:![0-9]+]]
13099 // CHECK17: omp.inner.for.end:
13100 // CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
13101 // CHECK17: omp.loop.exit:
13102 // CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
13103 // CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
13104 // CHECK17-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
13105 // CHECK17-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
13106 // CHECK17: .omp.final.then:
13107 // CHECK17-NEXT: store i32 10, ptr [[I]], align 4
13108 // CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]]
13109 // CHECK17: .omp.final.done:
13110 // CHECK17-NEXT: ret void
13113 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.omp_outlined.omp_outlined
13114 // CHECK17-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR3]] {
13115 // CHECK17-NEXT: entry:
13116 // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
13117 // CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
13118 // CHECK17-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
13119 // CHECK17-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
13120 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
13121 // CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
13122 // CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
13123 // CHECK17-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
13124 // CHECK17-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
13125 // CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13126 // CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13127 // CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
13128 // CHECK17-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
13129 // CHECK17-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
13130 // CHECK17-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
13131 // CHECK17-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
13132 // CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
13133 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
13134 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
13135 // CHECK17-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
13136 // CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
13137 // CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
13138 // CHECK17-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_LB]], align 4
13139 // CHECK17-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4
13140 // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
13141 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
13142 // CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
13143 // CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
13144 // CHECK17-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
13145 // CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
13146 // CHECK17-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB3]], i32 [[TMP6]], i32 35, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 1)
13147 // CHECK17-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
13148 // CHECK17: omp.dispatch.cond:
13149 // CHECK17-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB3]], i32 [[TMP6]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
13150 // CHECK17-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
13151 // CHECK17-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
13152 // CHECK17: omp.dispatch.body:
13153 // CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
13154 // CHECK17-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4
13155 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
13156 // CHECK17: omp.inner.for.cond:
13157 // CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP68:![0-9]+]]
13158 // CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP68]]
13159 // CHECK17-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
13160 // CHECK17-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13161 // CHECK17: omp.inner.for.body:
13162 // CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP68]]
13163 // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
13164 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
13165 // CHECK17-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP68]]
13166 // CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP68]]
13167 // CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 [[TMP12]]
13168 // CHECK17-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP68]]
13169 // CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
13170 // CHECK17: omp.body.continue:
13171 // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
13172 // CHECK17: omp.inner.for.inc:
13173 // CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP68]]
13174 // CHECK17-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP13]], 1
13175 // CHECK17-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP68]]
13176 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP69:![0-9]+]]
13177 // CHECK17: omp.inner.for.end:
13178 // CHECK17-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
13179 // CHECK17: omp.dispatch.inc:
13180 // CHECK17-NEXT: br label [[OMP_DISPATCH_COND]]
13181 // CHECK17: omp.dispatch.end:
13182 // CHECK17-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
13183 // CHECK17-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
13184 // CHECK17-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
13185 // CHECK17: .omp.final.then:
13186 // CHECK17-NEXT: store i32 10, ptr [[I]], align 4
13187 // CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]]
13188 // CHECK17: .omp.final.done:
13189 // CHECK17-NEXT: ret void
13192 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142
13193 // CHECK17-SAME: (i32 noundef [[M:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
13194 // CHECK17-NEXT: entry:
13195 // CHECK17-NEXT: [[M_ADDR:%.*]] = alloca i32, align 4
13196 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
13197 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
13198 // CHECK17-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
13199 // CHECK17-NEXT: store i32 [[M]], ptr [[M_ADDR]], align 4
13200 // CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
13201 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
13202 // CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[M_ADDR]], align 4
13203 // CHECK17-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
13204 // CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
13205 // CHECK17-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
13206 // CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
13207 // CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.omp_outlined, ptr [[TMP0]], i32 [[TMP3]])
13208 // CHECK17-NEXT: ret void
13211 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.omp_outlined
13212 // CHECK17-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
13213 // CHECK17-NEXT: entry:
13214 // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
13215 // CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
13216 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
13217 // CHECK17-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
13218 // CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
13219 // CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
13220 // CHECK17-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
13221 // CHECK17-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
13222 // CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13223 // CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13224 // CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
13225 // CHECK17-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
13226 // CHECK17-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
13227 // CHECK17-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
13228 // CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
13229 // CHECK17-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
13230 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
13231 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
13232 // CHECK17-NEXT: store i32 9, ptr [[DOTOMP_COMB_UB]], align 4
13233 // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
13234 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
13235 // CHECK17-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
13236 // CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
13237 // CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
13238 // CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
13239 // CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
13240 // CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
13241 // CHECK17: cond.true:
13242 // CHECK17-NEXT: br label [[COND_END:%.*]]
13243 // CHECK17: cond.false:
13244 // CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
13245 // CHECK17-NEXT: br label [[COND_END]]
13246 // CHECK17: cond.end:
13247 // CHECK17-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
13248 // CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
13249 // CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
13250 // CHECK17-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
13251 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
13252 // CHECK17: omp.inner.for.cond:
13253 // CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP71:![0-9]+]]
13254 // CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP71]]
13255 // CHECK17-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
13256 // CHECK17-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13257 // CHECK17: omp.inner.for.body:
13258 // CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP71]]
13259 // CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP71]]
13260 // CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group [[ACC_GRP71]]
13261 // CHECK17-NEXT: store i32 [[TMP10]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group [[ACC_GRP71]]
13262 // CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group [[ACC_GRP71]]
13263 // CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.omp_outlined.omp_outlined, i32 [[TMP8]], i32 [[TMP9]], ptr [[TMP0]], i32 [[TMP11]]), !llvm.access.group [[ACC_GRP71]]
13264 // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
13265 // CHECK17: omp.inner.for.inc:
13266 // CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP71]]
13267 // CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP71]]
13268 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
13269 // CHECK17-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP71]]
13270 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP72:![0-9]+]]
13271 // CHECK17: omp.inner.for.end:
13272 // CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
13273 // CHECK17: omp.loop.exit:
13274 // CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
13275 // CHECK17-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
13276 // CHECK17-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
13277 // CHECK17-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
13278 // CHECK17: .omp.final.then:
13279 // CHECK17-NEXT: store i32 10, ptr [[I]], align 4
13280 // CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]]
13281 // CHECK17: .omp.final.done:
13282 // CHECK17-NEXT: ret void
13285 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.omp_outlined.omp_outlined
13286 // CHECK17-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
13287 // CHECK17-NEXT: entry:
13288 // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
13289 // CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
13290 // CHECK17-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
13291 // CHECK17-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
13292 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
13293 // CHECK17-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
13294 // CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
13295 // CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
13296 // CHECK17-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
13297 // CHECK17-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
13298 // CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13299 // CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13300 // CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
13301 // CHECK17-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
13302 // CHECK17-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
13303 // CHECK17-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
13304 // CHECK17-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
13305 // CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
13306 // CHECK17-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
13307 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
13308 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
13309 // CHECK17-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
13310 // CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
13311 // CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
13312 // CHECK17-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_LB]], align 4
13313 // CHECK17-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4
13314 // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
13315 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
13316 // CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
13317 // CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
13318 // CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
13319 // CHECK17-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
13320 // CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 4
13321 // CHECK17-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB3]], i32 [[TMP7]], i32 35, i32 [[TMP4]], i32 [[TMP5]], i32 1, i32 [[TMP3]])
13322 // CHECK17-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
13323 // CHECK17: omp.dispatch.cond:
13324 // CHECK17-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB3]], i32 [[TMP7]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
13325 // CHECK17-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP8]], 0
13326 // CHECK17-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
13327 // CHECK17: omp.dispatch.body:
13328 // CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
13329 // CHECK17-NEXT: store i32 [[TMP9]], ptr [[DOTOMP_IV]], align 4
13330 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
13331 // CHECK17: omp.inner.for.cond:
13332 // CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP74:![0-9]+]]
13333 // CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP74]]
13334 // CHECK17-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
13335 // CHECK17-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13336 // CHECK17: omp.inner.for.body:
13337 // CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP74]]
13338 // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
13339 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
13340 // CHECK17-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP74]]
13341 // CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP74]]
13342 // CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 [[TMP13]]
13343 // CHECK17-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP74]]
13344 // CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
13345 // CHECK17: omp.body.continue:
13346 // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
13347 // CHECK17: omp.inner.for.inc:
13348 // CHECK17-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP74]]
13349 // CHECK17-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP14]], 1
13350 // CHECK17-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP74]]
13351 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP75:![0-9]+]]
13352 // CHECK17: omp.inner.for.end:
13353 // CHECK17-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
13354 // CHECK17: omp.dispatch.inc:
13355 // CHECK17-NEXT: br label [[OMP_DISPATCH_COND]]
13356 // CHECK17: omp.dispatch.end:
13357 // CHECK17-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
13358 // CHECK17-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0
13359 // CHECK17-NEXT: br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
13360 // CHECK17: .omp.final.then:
13361 // CHECK17-NEXT: store i32 10, ptr [[I]], align 4
13362 // CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]]
13363 // CHECK17: .omp.final.done:
13364 // CHECK17-NEXT: ret void
13367 // CHECK17-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
13368 // CHECK17-SAME: () #[[ATTR7:[0-9]+]] {
13369 // CHECK17-NEXT: entry:
13370 // CHECK17-NEXT: call void @__tgt_register_requires(i64 1)
13371 // CHECK17-NEXT: ret void
13374 // CHECK19-LABEL: define {{[^@]+}}@main
13375 // CHECK19-SAME: (i32 noundef [[ARGC:%.*]], ptr noundef [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] {
13376 // CHECK19-NEXT: entry:
13377 // CHECK19-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
13378 // CHECK19-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
13379 // CHECK19-NEXT: [[ARGV_ADDR:%.*]] = alloca ptr, align 4
13380 // CHECK19-NEXT: [[N:%.*]] = alloca i32, align 4
13381 // CHECK19-NEXT: [[SAVED_STACK:%.*]] = alloca ptr, align 4
13382 // CHECK19-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4
13383 // CHECK19-NEXT: [[M:%.*]] = alloca i32, align 4
13384 // CHECK19-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
13385 // CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x ptr], align 4
13386 // CHECK19-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x ptr], align 4
13387 // CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x ptr], align 4
13388 // CHECK19-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4
13389 // CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
13390 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
13391 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
13392 // CHECK19-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
13393 // CHECK19-NEXT: [[N_CASTED3:%.*]] = alloca i32, align 4
13394 // CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [3 x ptr], align 4
13395 // CHECK19-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [3 x ptr], align 4
13396 // CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [3 x ptr], align 4
13397 // CHECK19-NEXT: [[DOTOFFLOAD_SIZES7:%.*]] = alloca [3 x i64], align 4
13398 // CHECK19-NEXT: [[_TMP8:%.*]] = alloca i32, align 4
13399 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4
13400 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4
13401 // CHECK19-NEXT: [[KERNEL_ARGS15:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
13402 // CHECK19-NEXT: [[M_CASTED:%.*]] = alloca i32, align 4
13403 // CHECK19-NEXT: [[N_CASTED18:%.*]] = alloca i32, align 4
13404 // CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS19:%.*]] = alloca [4 x ptr], align 4
13405 // CHECK19-NEXT: [[DOTOFFLOAD_PTRS20:%.*]] = alloca [4 x ptr], align 4
13406 // CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS21:%.*]] = alloca [4 x ptr], align 4
13407 // CHECK19-NEXT: [[DOTOFFLOAD_SIZES22:%.*]] = alloca [4 x i64], align 4
13408 // CHECK19-NEXT: [[_TMP23:%.*]] = alloca i32, align 4
13409 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4
13410 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4
13411 // CHECK19-NEXT: [[KERNEL_ARGS30:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
13412 // CHECK19-NEXT: [[N_CASTED33:%.*]] = alloca i32, align 4
13413 // CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS34:%.*]] = alloca [3 x ptr], align 4
13414 // CHECK19-NEXT: [[DOTOFFLOAD_PTRS35:%.*]] = alloca [3 x ptr], align 4
13415 // CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS36:%.*]] = alloca [3 x ptr], align 4
13416 // CHECK19-NEXT: [[DOTOFFLOAD_SIZES37:%.*]] = alloca [3 x i64], align 4
13417 // CHECK19-NEXT: [[_TMP38:%.*]] = alloca i32, align 4
13418 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4
13419 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_40:%.*]] = alloca i32, align 4
13420 // CHECK19-NEXT: [[KERNEL_ARGS45:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
13421 // CHECK19-NEXT: [[M_CASTED48:%.*]] = alloca i32, align 4
13422 // CHECK19-NEXT: [[N_CASTED49:%.*]] = alloca i32, align 4
13423 // CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS50:%.*]] = alloca [4 x ptr], align 4
13424 // CHECK19-NEXT: [[DOTOFFLOAD_PTRS51:%.*]] = alloca [4 x ptr], align 4
13425 // CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS52:%.*]] = alloca [4 x ptr], align 4
13426 // CHECK19-NEXT: [[DOTOFFLOAD_SIZES53:%.*]] = alloca [4 x i64], align 4
13427 // CHECK19-NEXT: [[_TMP54:%.*]] = alloca i32, align 4
13428 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_55:%.*]] = alloca i32, align 4
13429 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_56:%.*]] = alloca i32, align 4
13430 // CHECK19-NEXT: [[KERNEL_ARGS61:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
13431 // CHECK19-NEXT: store i32 0, ptr [[RETVAL]], align 4
13432 // CHECK19-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4
13433 // CHECK19-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 4
13434 // CHECK19-NEXT: store i32 100, ptr [[N]], align 4
13435 // CHECK19-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4
13436 // CHECK19-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0()
13437 // CHECK19-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4
13438 // CHECK19-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4
13439 // CHECK19-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4
13440 // CHECK19-NEXT: store i32 10, ptr [[M]], align 4
13441 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[N]], align 4
13442 // CHECK19-NEXT: store i32 [[TMP2]], ptr [[N_CASTED]], align 4
13443 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_CASTED]], align 4
13444 // CHECK19-NEXT: [[TMP4:%.*]] = mul nuw i32 [[TMP0]], 4
13445 // CHECK19-NEXT: [[TMP5:%.*]] = sext i32 [[TMP4]] to i64
13446 // CHECK19-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[DOTOFFLOAD_SIZES]], ptr align 4 @.offload_sizes, i32 24, i1 false)
13447 // CHECK19-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
13448 // CHECK19-NEXT: store i32 [[TMP3]], ptr [[TMP6]], align 4
13449 // CHECK19-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
13450 // CHECK19-NEXT: store i32 [[TMP3]], ptr [[TMP7]], align 4
13451 // CHECK19-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
13452 // CHECK19-NEXT: store ptr null, ptr [[TMP8]], align 4
13453 // CHECK19-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
13454 // CHECK19-NEXT: store i32 [[TMP0]], ptr [[TMP9]], align 4
13455 // CHECK19-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1
13456 // CHECK19-NEXT: store i32 [[TMP0]], ptr [[TMP10]], align 4
13457 // CHECK19-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
13458 // CHECK19-NEXT: store ptr null, ptr [[TMP11]], align 4
13459 // CHECK19-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
13460 // CHECK19-NEXT: store ptr [[VLA]], ptr [[TMP12]], align 4
13461 // CHECK19-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2
13462 // CHECK19-NEXT: store ptr [[VLA]], ptr [[TMP13]], align 4
13463 // CHECK19-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 2
13464 // CHECK19-NEXT: store i64 [[TMP5]], ptr [[TMP14]], align 4
13465 // CHECK19-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
13466 // CHECK19-NEXT: store ptr null, ptr [[TMP15]], align 4
13467 // CHECK19-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
13468 // CHECK19-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
13469 // CHECK19-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 0
13470 // CHECK19-NEXT: [[TMP19:%.*]] = load i32, ptr [[N]], align 4
13471 // CHECK19-NEXT: store i32 [[TMP19]], ptr [[DOTCAPTURE_EXPR_]], align 4
13472 // CHECK19-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
13473 // CHECK19-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP20]], 0
13474 // CHECK19-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
13475 // CHECK19-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
13476 // CHECK19-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
13477 // CHECK19-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
13478 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP21]], 1
13479 // CHECK19-NEXT: [[TMP22:%.*]] = zext i32 [[ADD]] to i64
13480 // CHECK19-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
13481 // CHECK19-NEXT: store i32 2, ptr [[TMP23]], align 4
13482 // CHECK19-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
13483 // CHECK19-NEXT: store i32 3, ptr [[TMP24]], align 4
13484 // CHECK19-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
13485 // CHECK19-NEXT: store ptr [[TMP16]], ptr [[TMP25]], align 4
13486 // CHECK19-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
13487 // CHECK19-NEXT: store ptr [[TMP17]], ptr [[TMP26]], align 4
13488 // CHECK19-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
13489 // CHECK19-NEXT: store ptr [[TMP18]], ptr [[TMP27]], align 4
13490 // CHECK19-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
13491 // CHECK19-NEXT: store ptr @.offload_maptypes, ptr [[TMP28]], align 4
13492 // CHECK19-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
13493 // CHECK19-NEXT: store ptr null, ptr [[TMP29]], align 4
13494 // CHECK19-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
13495 // CHECK19-NEXT: store ptr null, ptr [[TMP30]], align 4
13496 // CHECK19-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
13497 // CHECK19-NEXT: store i64 [[TMP22]], ptr [[TMP31]], align 8
13498 // CHECK19-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
13499 // CHECK19-NEXT: store i64 0, ptr [[TMP32]], align 8
13500 // CHECK19-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
13501 // CHECK19-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP33]], align 4
13502 // CHECK19-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
13503 // CHECK19-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP34]], align 4
13504 // CHECK19-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
13505 // CHECK19-NEXT: store i32 0, ptr [[TMP35]], align 4
13506 // CHECK19-NEXT: [[TMP36:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.region_id, ptr [[KERNEL_ARGS]])
13507 // CHECK19-NEXT: [[TMP37:%.*]] = icmp ne i32 [[TMP36]], 0
13508 // CHECK19-NEXT: br i1 [[TMP37]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
13509 // CHECK19: omp_offload.failed:
13510 // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154(i32 [[TMP3]], i32 [[TMP0]], ptr [[VLA]]) #[[ATTR4:[0-9]+]]
13511 // CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT]]
13512 // CHECK19: omp_offload.cont:
13513 // CHECK19-NEXT: [[TMP38:%.*]] = load i32, ptr [[N]], align 4
13514 // CHECK19-NEXT: store i32 [[TMP38]], ptr [[N_CASTED3]], align 4
13515 // CHECK19-NEXT: [[TMP39:%.*]] = load i32, ptr [[N_CASTED3]], align 4
13516 // CHECK19-NEXT: [[TMP40:%.*]] = mul nuw i32 [[TMP0]], 4
13517 // CHECK19-NEXT: [[TMP41:%.*]] = sext i32 [[TMP40]] to i64
13518 // CHECK19-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[DOTOFFLOAD_SIZES7]], ptr align 4 @.offload_sizes.1, i32 24, i1 false)
13519 // CHECK19-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
13520 // CHECK19-NEXT: store i32 [[TMP39]], ptr [[TMP42]], align 4
13521 // CHECK19-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
13522 // CHECK19-NEXT: store i32 [[TMP39]], ptr [[TMP43]], align 4
13523 // CHECK19-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0
13524 // CHECK19-NEXT: store ptr null, ptr [[TMP44]], align 4
13525 // CHECK19-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1
13526 // CHECK19-NEXT: store i32 [[TMP0]], ptr [[TMP45]], align 4
13527 // CHECK19-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS5]], i32 0, i32 1
13528 // CHECK19-NEXT: store i32 [[TMP0]], ptr [[TMP46]], align 4
13529 // CHECK19-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1
13530 // CHECK19-NEXT: store ptr null, ptr [[TMP47]], align 4
13531 // CHECK19-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2
13532 // CHECK19-NEXT: store ptr [[VLA]], ptr [[TMP48]], align 4
13533 // CHECK19-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS5]], i32 0, i32 2
13534 // CHECK19-NEXT: store ptr [[VLA]], ptr [[TMP49]], align 4
13535 // CHECK19-NEXT: [[TMP50:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES7]], i32 0, i32 2
13536 // CHECK19-NEXT: store i64 [[TMP41]], ptr [[TMP50]], align 4
13537 // CHECK19-NEXT: [[TMP51:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2
13538 // CHECK19-NEXT: store ptr null, ptr [[TMP51]], align 4
13539 // CHECK19-NEXT: [[TMP52:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0
13540 // CHECK19-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS5]], i32 0, i32 0
13541 // CHECK19-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES7]], i32 0, i32 0
13542 // CHECK19-NEXT: [[TMP55:%.*]] = load i32, ptr [[N]], align 4
13543 // CHECK19-NEXT: store i32 [[TMP55]], ptr [[DOTCAPTURE_EXPR_9]], align 4
13544 // CHECK19-NEXT: [[TMP56:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_9]], align 4
13545 // CHECK19-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP56]], 0
13546 // CHECK19-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1
13547 // CHECK19-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1
13548 // CHECK19-NEXT: store i32 [[SUB13]], ptr [[DOTCAPTURE_EXPR_10]], align 4
13549 // CHECK19-NEXT: [[TMP57:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_10]], align 4
13550 // CHECK19-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP57]], 1
13551 // CHECK19-NEXT: [[TMP58:%.*]] = zext i32 [[ADD14]] to i64
13552 // CHECK19-NEXT: [[TMP59:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 0
13553 // CHECK19-NEXT: store i32 2, ptr [[TMP59]], align 4
13554 // CHECK19-NEXT: [[TMP60:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 1
13555 // CHECK19-NEXT: store i32 3, ptr [[TMP60]], align 4
13556 // CHECK19-NEXT: [[TMP61:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 2
13557 // CHECK19-NEXT: store ptr [[TMP52]], ptr [[TMP61]], align 4
13558 // CHECK19-NEXT: [[TMP62:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 3
13559 // CHECK19-NEXT: store ptr [[TMP53]], ptr [[TMP62]], align 4
13560 // CHECK19-NEXT: [[TMP63:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 4
13561 // CHECK19-NEXT: store ptr [[TMP54]], ptr [[TMP63]], align 4
13562 // CHECK19-NEXT: [[TMP64:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 5
13563 // CHECK19-NEXT: store ptr @.offload_maptypes.2, ptr [[TMP64]], align 4
13564 // CHECK19-NEXT: [[TMP65:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 6
13565 // CHECK19-NEXT: store ptr null, ptr [[TMP65]], align 4
13566 // CHECK19-NEXT: [[TMP66:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 7
13567 // CHECK19-NEXT: store ptr null, ptr [[TMP66]], align 4
13568 // CHECK19-NEXT: [[TMP67:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 8
13569 // CHECK19-NEXT: store i64 [[TMP58]], ptr [[TMP67]], align 8
13570 // CHECK19-NEXT: [[TMP68:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 9
13571 // CHECK19-NEXT: store i64 0, ptr [[TMP68]], align 8
13572 // CHECK19-NEXT: [[TMP69:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 10
13573 // CHECK19-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP69]], align 4
13574 // CHECK19-NEXT: [[TMP70:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 11
13575 // CHECK19-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP70]], align 4
13576 // CHECK19-NEXT: [[TMP71:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS15]], i32 0, i32 12
13577 // CHECK19-NEXT: store i32 0, ptr [[TMP71]], align 4
13578 // CHECK19-NEXT: [[TMP72:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.region_id, ptr [[KERNEL_ARGS15]])
13579 // CHECK19-NEXT: [[TMP73:%.*]] = icmp ne i32 [[TMP72]], 0
13580 // CHECK19-NEXT: br i1 [[TMP73]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]]
13581 // CHECK19: omp_offload.failed16:
13582 // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159(i32 [[TMP39]], i32 [[TMP0]], ptr [[VLA]]) #[[ATTR4]]
13583 // CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT17]]
13584 // CHECK19: omp_offload.cont17:
13585 // CHECK19-NEXT: [[TMP74:%.*]] = load i32, ptr [[M]], align 4
13586 // CHECK19-NEXT: store i32 [[TMP74]], ptr [[M_CASTED]], align 4
13587 // CHECK19-NEXT: [[TMP75:%.*]] = load i32, ptr [[M_CASTED]], align 4
13588 // CHECK19-NEXT: [[TMP76:%.*]] = load i32, ptr [[N]], align 4
13589 // CHECK19-NEXT: store i32 [[TMP76]], ptr [[N_CASTED18]], align 4
13590 // CHECK19-NEXT: [[TMP77:%.*]] = load i32, ptr [[N_CASTED18]], align 4
13591 // CHECK19-NEXT: [[TMP78:%.*]] = mul nuw i32 [[TMP0]], 4
13592 // CHECK19-NEXT: [[TMP79:%.*]] = sext i32 [[TMP78]] to i64
13593 // CHECK19-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[DOTOFFLOAD_SIZES22]], ptr align 4 @.offload_sizes.3, i32 32, i1 false)
13594 // CHECK19-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0
13595 // CHECK19-NEXT: store i32 [[TMP75]], ptr [[TMP80]], align 4
13596 // CHECK19-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 0
13597 // CHECK19-NEXT: store i32 [[TMP75]], ptr [[TMP81]], align 4
13598 // CHECK19-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 0
13599 // CHECK19-NEXT: store ptr null, ptr [[TMP82]], align 4
13600 // CHECK19-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 1
13601 // CHECK19-NEXT: store i32 [[TMP77]], ptr [[TMP83]], align 4
13602 // CHECK19-NEXT: [[TMP84:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 1
13603 // CHECK19-NEXT: store i32 [[TMP77]], ptr [[TMP84]], align 4
13604 // CHECK19-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 1
13605 // CHECK19-NEXT: store ptr null, ptr [[TMP85]], align 4
13606 // CHECK19-NEXT: [[TMP86:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 2
13607 // CHECK19-NEXT: store i32 [[TMP0]], ptr [[TMP86]], align 4
13608 // CHECK19-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 2
13609 // CHECK19-NEXT: store i32 [[TMP0]], ptr [[TMP87]], align 4
13610 // CHECK19-NEXT: [[TMP88:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 2
13611 // CHECK19-NEXT: store ptr null, ptr [[TMP88]], align 4
13612 // CHECK19-NEXT: [[TMP89:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 3
13613 // CHECK19-NEXT: store ptr [[VLA]], ptr [[TMP89]], align 4
13614 // CHECK19-NEXT: [[TMP90:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 3
13615 // CHECK19-NEXT: store ptr [[VLA]], ptr [[TMP90]], align 4
13616 // CHECK19-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i64], ptr [[DOTOFFLOAD_SIZES22]], i32 0, i32 3
13617 // CHECK19-NEXT: store i64 [[TMP79]], ptr [[TMP91]], align 4
13618 // CHECK19-NEXT: [[TMP92:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 3
13619 // CHECK19-NEXT: store ptr null, ptr [[TMP92]], align 4
13620 // CHECK19-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0
13621 // CHECK19-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS20]], i32 0, i32 0
13622 // CHECK19-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i64], ptr [[DOTOFFLOAD_SIZES22]], i32 0, i32 0
13623 // CHECK19-NEXT: [[TMP96:%.*]] = load i32, ptr [[N]], align 4
13624 // CHECK19-NEXT: store i32 [[TMP96]], ptr [[DOTCAPTURE_EXPR_24]], align 4
13625 // CHECK19-NEXT: [[TMP97:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_24]], align 4
13626 // CHECK19-NEXT: [[SUB26:%.*]] = sub nsw i32 [[TMP97]], 0
13627 // CHECK19-NEXT: [[DIV27:%.*]] = sdiv i32 [[SUB26]], 1
13628 // CHECK19-NEXT: [[SUB28:%.*]] = sub nsw i32 [[DIV27]], 1
13629 // CHECK19-NEXT: store i32 [[SUB28]], ptr [[DOTCAPTURE_EXPR_25]], align 4
13630 // CHECK19-NEXT: [[TMP98:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_25]], align 4
13631 // CHECK19-NEXT: [[ADD29:%.*]] = add nsw i32 [[TMP98]], 1
13632 // CHECK19-NEXT: [[TMP99:%.*]] = zext i32 [[ADD29]] to i64
13633 // CHECK19-NEXT: [[TMP100:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 0
13634 // CHECK19-NEXT: store i32 2, ptr [[TMP100]], align 4
13635 // CHECK19-NEXT: [[TMP101:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 1
13636 // CHECK19-NEXT: store i32 4, ptr [[TMP101]], align 4
13637 // CHECK19-NEXT: [[TMP102:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 2
13638 // CHECK19-NEXT: store ptr [[TMP93]], ptr [[TMP102]], align 4
13639 // CHECK19-NEXT: [[TMP103:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 3
13640 // CHECK19-NEXT: store ptr [[TMP94]], ptr [[TMP103]], align 4
13641 // CHECK19-NEXT: [[TMP104:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 4
13642 // CHECK19-NEXT: store ptr [[TMP95]], ptr [[TMP104]], align 4
13643 // CHECK19-NEXT: [[TMP105:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 5
13644 // CHECK19-NEXT: store ptr @.offload_maptypes.4, ptr [[TMP105]], align 4
13645 // CHECK19-NEXT: [[TMP106:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 6
13646 // CHECK19-NEXT: store ptr null, ptr [[TMP106]], align 4
13647 // CHECK19-NEXT: [[TMP107:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 7
13648 // CHECK19-NEXT: store ptr null, ptr [[TMP107]], align 4
13649 // CHECK19-NEXT: [[TMP108:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 8
13650 // CHECK19-NEXT: store i64 [[TMP99]], ptr [[TMP108]], align 8
13651 // CHECK19-NEXT: [[TMP109:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 9
13652 // CHECK19-NEXT: store i64 0, ptr [[TMP109]], align 8
13653 // CHECK19-NEXT: [[TMP110:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 10
13654 // CHECK19-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP110]], align 4
13655 // CHECK19-NEXT: [[TMP111:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 11
13656 // CHECK19-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP111]], align 4
13657 // CHECK19-NEXT: [[TMP112:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS30]], i32 0, i32 12
13658 // CHECK19-NEXT: store i32 0, ptr [[TMP112]], align 4
13659 // CHECK19-NEXT: [[TMP113:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.region_id, ptr [[KERNEL_ARGS30]])
13660 // CHECK19-NEXT: [[TMP114:%.*]] = icmp ne i32 [[TMP113]], 0
13661 // CHECK19-NEXT: br i1 [[TMP114]], label [[OMP_OFFLOAD_FAILED31:%.*]], label [[OMP_OFFLOAD_CONT32:%.*]]
13662 // CHECK19: omp_offload.failed31:
13663 // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164(i32 [[TMP75]], i32 [[TMP77]], i32 [[TMP0]], ptr [[VLA]]) #[[ATTR4]]
13664 // CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT32]]
13665 // CHECK19: omp_offload.cont32:
13666 // CHECK19-NEXT: [[TMP115:%.*]] = load i32, ptr [[N]], align 4
13667 // CHECK19-NEXT: store i32 [[TMP115]], ptr [[N_CASTED33]], align 4
13668 // CHECK19-NEXT: [[TMP116:%.*]] = load i32, ptr [[N_CASTED33]], align 4
13669 // CHECK19-NEXT: [[TMP117:%.*]] = mul nuw i32 [[TMP0]], 4
13670 // CHECK19-NEXT: [[TMP118:%.*]] = sext i32 [[TMP117]] to i64
13671 // CHECK19-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[DOTOFFLOAD_SIZES37]], ptr align 4 @.offload_sizes.5, i32 24, i1 false)
13672 // CHECK19-NEXT: [[TMP119:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 0
13673 // CHECK19-NEXT: store i32 [[TMP116]], ptr [[TMP119]], align 4
13674 // CHECK19-NEXT: [[TMP120:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS35]], i32 0, i32 0
13675 // CHECK19-NEXT: store i32 [[TMP116]], ptr [[TMP120]], align 4
13676 // CHECK19-NEXT: [[TMP121:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS36]], i32 0, i32 0
13677 // CHECK19-NEXT: store ptr null, ptr [[TMP121]], align 4
13678 // CHECK19-NEXT: [[TMP122:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 1
13679 // CHECK19-NEXT: store i32 [[TMP0]], ptr [[TMP122]], align 4
13680 // CHECK19-NEXT: [[TMP123:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS35]], i32 0, i32 1
13681 // CHECK19-NEXT: store i32 [[TMP0]], ptr [[TMP123]], align 4
13682 // CHECK19-NEXT: [[TMP124:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS36]], i32 0, i32 1
13683 // CHECK19-NEXT: store ptr null, ptr [[TMP124]], align 4
13684 // CHECK19-NEXT: [[TMP125:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 2
13685 // CHECK19-NEXT: store ptr [[VLA]], ptr [[TMP125]], align 4
13686 // CHECK19-NEXT: [[TMP126:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS35]], i32 0, i32 2
13687 // CHECK19-NEXT: store ptr [[VLA]], ptr [[TMP126]], align 4
13688 // CHECK19-NEXT: [[TMP127:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES37]], i32 0, i32 2
13689 // CHECK19-NEXT: store i64 [[TMP118]], ptr [[TMP127]], align 4
13690 // CHECK19-NEXT: [[TMP128:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS36]], i32 0, i32 2
13691 // CHECK19-NEXT: store ptr null, ptr [[TMP128]], align 4
13692 // CHECK19-NEXT: [[TMP129:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 0
13693 // CHECK19-NEXT: [[TMP130:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS35]], i32 0, i32 0
13694 // CHECK19-NEXT: [[TMP131:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES37]], i32 0, i32 0
13695 // CHECK19-NEXT: [[TMP132:%.*]] = load i32, ptr [[N]], align 4
13696 // CHECK19-NEXT: store i32 [[TMP132]], ptr [[DOTCAPTURE_EXPR_39]], align 4
13697 // CHECK19-NEXT: [[TMP133:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_39]], align 4
13698 // CHECK19-NEXT: [[SUB41:%.*]] = sub nsw i32 [[TMP133]], 0
13699 // CHECK19-NEXT: [[DIV42:%.*]] = sdiv i32 [[SUB41]], 1
13700 // CHECK19-NEXT: [[SUB43:%.*]] = sub nsw i32 [[DIV42]], 1
13701 // CHECK19-NEXT: store i32 [[SUB43]], ptr [[DOTCAPTURE_EXPR_40]], align 4
13702 // CHECK19-NEXT: [[TMP134:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_40]], align 4
13703 // CHECK19-NEXT: [[ADD44:%.*]] = add nsw i32 [[TMP134]], 1
13704 // CHECK19-NEXT: [[TMP135:%.*]] = zext i32 [[ADD44]] to i64
13705 // CHECK19-NEXT: [[TMP136:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 0
13706 // CHECK19-NEXT: store i32 2, ptr [[TMP136]], align 4
13707 // CHECK19-NEXT: [[TMP137:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 1
13708 // CHECK19-NEXT: store i32 3, ptr [[TMP137]], align 4
13709 // CHECK19-NEXT: [[TMP138:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 2
13710 // CHECK19-NEXT: store ptr [[TMP129]], ptr [[TMP138]], align 4
13711 // CHECK19-NEXT: [[TMP139:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 3
13712 // CHECK19-NEXT: store ptr [[TMP130]], ptr [[TMP139]], align 4
13713 // CHECK19-NEXT: [[TMP140:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 4
13714 // CHECK19-NEXT: store ptr [[TMP131]], ptr [[TMP140]], align 4
13715 // CHECK19-NEXT: [[TMP141:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 5
13716 // CHECK19-NEXT: store ptr @.offload_maptypes.6, ptr [[TMP141]], align 4
13717 // CHECK19-NEXT: [[TMP142:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 6
13718 // CHECK19-NEXT: store ptr null, ptr [[TMP142]], align 4
13719 // CHECK19-NEXT: [[TMP143:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 7
13720 // CHECK19-NEXT: store ptr null, ptr [[TMP143]], align 4
13721 // CHECK19-NEXT: [[TMP144:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 8
13722 // CHECK19-NEXT: store i64 [[TMP135]], ptr [[TMP144]], align 8
13723 // CHECK19-NEXT: [[TMP145:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 9
13724 // CHECK19-NEXT: store i64 0, ptr [[TMP145]], align 8
13725 // CHECK19-NEXT: [[TMP146:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 10
13726 // CHECK19-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP146]], align 4
13727 // CHECK19-NEXT: [[TMP147:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 11
13728 // CHECK19-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP147]], align 4
13729 // CHECK19-NEXT: [[TMP148:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS45]], i32 0, i32 12
13730 // CHECK19-NEXT: store i32 0, ptr [[TMP148]], align 4
13731 // CHECK19-NEXT: [[TMP149:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.region_id, ptr [[KERNEL_ARGS45]])
13732 // CHECK19-NEXT: [[TMP150:%.*]] = icmp ne i32 [[TMP149]], 0
13733 // CHECK19-NEXT: br i1 [[TMP150]], label [[OMP_OFFLOAD_FAILED46:%.*]], label [[OMP_OFFLOAD_CONT47:%.*]]
13734 // CHECK19: omp_offload.failed46:
13735 // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169(i32 [[TMP116]], i32 [[TMP0]], ptr [[VLA]]) #[[ATTR4]]
13736 // CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT47]]
13737 // CHECK19: omp_offload.cont47:
13738 // CHECK19-NEXT: [[TMP151:%.*]] = load i32, ptr [[M]], align 4
13739 // CHECK19-NEXT: store i32 [[TMP151]], ptr [[M_CASTED48]], align 4
13740 // CHECK19-NEXT: [[TMP152:%.*]] = load i32, ptr [[M_CASTED48]], align 4
13741 // CHECK19-NEXT: [[TMP153:%.*]] = load i32, ptr [[N]], align 4
13742 // CHECK19-NEXT: store i32 [[TMP153]], ptr [[N_CASTED49]], align 4
13743 // CHECK19-NEXT: [[TMP154:%.*]] = load i32, ptr [[N_CASTED49]], align 4
13744 // CHECK19-NEXT: [[TMP155:%.*]] = mul nuw i32 [[TMP0]], 4
13745 // CHECK19-NEXT: [[TMP156:%.*]] = sext i32 [[TMP155]] to i64
13746 // CHECK19-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[DOTOFFLOAD_SIZES53]], ptr align 4 @.offload_sizes.7, i32 32, i1 false)
13747 // CHECK19-NEXT: [[TMP157:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0
13748 // CHECK19-NEXT: store i32 [[TMP152]], ptr [[TMP157]], align 4
13749 // CHECK19-NEXT: [[TMP158:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS51]], i32 0, i32 0
13750 // CHECK19-NEXT: store i32 [[TMP152]], ptr [[TMP158]], align 4
13751 // CHECK19-NEXT: [[TMP159:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS52]], i32 0, i32 0
13752 // CHECK19-NEXT: store ptr null, ptr [[TMP159]], align 4
13753 // CHECK19-NEXT: [[TMP160:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 1
13754 // CHECK19-NEXT: store i32 [[TMP154]], ptr [[TMP160]], align 4
13755 // CHECK19-NEXT: [[TMP161:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS51]], i32 0, i32 1
13756 // CHECK19-NEXT: store i32 [[TMP154]], ptr [[TMP161]], align 4
13757 // CHECK19-NEXT: [[TMP162:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS52]], i32 0, i32 1
13758 // CHECK19-NEXT: store ptr null, ptr [[TMP162]], align 4
13759 // CHECK19-NEXT: [[TMP163:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 2
13760 // CHECK19-NEXT: store i32 [[TMP0]], ptr [[TMP163]], align 4
13761 // CHECK19-NEXT: [[TMP164:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS51]], i32 0, i32 2
13762 // CHECK19-NEXT: store i32 [[TMP0]], ptr [[TMP164]], align 4
13763 // CHECK19-NEXT: [[TMP165:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS52]], i32 0, i32 2
13764 // CHECK19-NEXT: store ptr null, ptr [[TMP165]], align 4
13765 // CHECK19-NEXT: [[TMP166:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 3
13766 // CHECK19-NEXT: store ptr [[VLA]], ptr [[TMP166]], align 4
13767 // CHECK19-NEXT: [[TMP167:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS51]], i32 0, i32 3
13768 // CHECK19-NEXT: store ptr [[VLA]], ptr [[TMP167]], align 4
13769 // CHECK19-NEXT: [[TMP168:%.*]] = getelementptr inbounds [4 x i64], ptr [[DOTOFFLOAD_SIZES53]], i32 0, i32 3
13770 // CHECK19-NEXT: store i64 [[TMP156]], ptr [[TMP168]], align 4
13771 // CHECK19-NEXT: [[TMP169:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS52]], i32 0, i32 3
13772 // CHECK19-NEXT: store ptr null, ptr [[TMP169]], align 4
13773 // CHECK19-NEXT: [[TMP170:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0
13774 // CHECK19-NEXT: [[TMP171:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS51]], i32 0, i32 0
13775 // CHECK19-NEXT: [[TMP172:%.*]] = getelementptr inbounds [4 x i64], ptr [[DOTOFFLOAD_SIZES53]], i32 0, i32 0
13776 // CHECK19-NEXT: [[TMP173:%.*]] = load i32, ptr [[N]], align 4
13777 // CHECK19-NEXT: store i32 [[TMP173]], ptr [[DOTCAPTURE_EXPR_55]], align 4
13778 // CHECK19-NEXT: [[TMP174:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_55]], align 4
13779 // CHECK19-NEXT: [[SUB57:%.*]] = sub nsw i32 [[TMP174]], 0
13780 // CHECK19-NEXT: [[DIV58:%.*]] = sdiv i32 [[SUB57]], 1
13781 // CHECK19-NEXT: [[SUB59:%.*]] = sub nsw i32 [[DIV58]], 1
13782 // CHECK19-NEXT: store i32 [[SUB59]], ptr [[DOTCAPTURE_EXPR_56]], align 4
13783 // CHECK19-NEXT: [[TMP175:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_56]], align 4
13784 // CHECK19-NEXT: [[ADD60:%.*]] = add nsw i32 [[TMP175]], 1
13785 // CHECK19-NEXT: [[TMP176:%.*]] = zext i32 [[ADD60]] to i64
13786 // CHECK19-NEXT: [[TMP177:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 0
13787 // CHECK19-NEXT: store i32 2, ptr [[TMP177]], align 4
13788 // CHECK19-NEXT: [[TMP178:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 1
13789 // CHECK19-NEXT: store i32 4, ptr [[TMP178]], align 4
13790 // CHECK19-NEXT: [[TMP179:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 2
13791 // CHECK19-NEXT: store ptr [[TMP170]], ptr [[TMP179]], align 4
13792 // CHECK19-NEXT: [[TMP180:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 3
13793 // CHECK19-NEXT: store ptr [[TMP171]], ptr [[TMP180]], align 4
13794 // CHECK19-NEXT: [[TMP181:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 4
13795 // CHECK19-NEXT: store ptr [[TMP172]], ptr [[TMP181]], align 4
13796 // CHECK19-NEXT: [[TMP182:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 5
13797 // CHECK19-NEXT: store ptr @.offload_maptypes.8, ptr [[TMP182]], align 4
13798 // CHECK19-NEXT: [[TMP183:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 6
13799 // CHECK19-NEXT: store ptr null, ptr [[TMP183]], align 4
13800 // CHECK19-NEXT: [[TMP184:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 7
13801 // CHECK19-NEXT: store ptr null, ptr [[TMP184]], align 4
13802 // CHECK19-NEXT: [[TMP185:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 8
13803 // CHECK19-NEXT: store i64 [[TMP176]], ptr [[TMP185]], align 8
13804 // CHECK19-NEXT: [[TMP186:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 9
13805 // CHECK19-NEXT: store i64 0, ptr [[TMP186]], align 8
13806 // CHECK19-NEXT: [[TMP187:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 10
13807 // CHECK19-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP187]], align 4
13808 // CHECK19-NEXT: [[TMP188:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 11
13809 // CHECK19-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP188]], align 4
13810 // CHECK19-NEXT: [[TMP189:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 12
13811 // CHECK19-NEXT: store i32 0, ptr [[TMP189]], align 4
13812 // CHECK19-NEXT: [[TMP190:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.region_id, ptr [[KERNEL_ARGS61]])
13813 // CHECK19-NEXT: [[TMP191:%.*]] = icmp ne i32 [[TMP190]], 0
13814 // CHECK19-NEXT: br i1 [[TMP191]], label [[OMP_OFFLOAD_FAILED62:%.*]], label [[OMP_OFFLOAD_CONT63:%.*]]
13815 // CHECK19: omp_offload.failed62:
13816 // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174(i32 [[TMP152]], i32 [[TMP154]], i32 [[TMP0]], ptr [[VLA]]) #[[ATTR4]]
13817 // CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT63]]
13818 // CHECK19: omp_offload.cont63:
13819 // CHECK19-NEXT: [[TMP192:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4
13820 // CHECK19-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP192]])
13821 // CHECK19-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4
13822 // CHECK19-NEXT: [[TMP193:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4
13823 // CHECK19-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP193]])
13824 // CHECK19-NEXT: [[TMP194:%.*]] = load i32, ptr [[RETVAL]], align 4
13825 // CHECK19-NEXT: ret i32 [[TMP194]]
13828 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154
13829 // CHECK19-SAME: (i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2:[0-9]+]] {
13830 // CHECK19-NEXT: entry:
13831 // CHECK19-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
13832 // CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
13833 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
13834 // CHECK19-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
13835 // CHECK19-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
13836 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
13837 // CHECK19-NEXT: [[TMP0:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
13838 // CHECK19-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4
13839 // CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.omp_outlined, ptr [[N_ADDR]], i32 [[TMP0]], ptr [[TMP1]])
13840 // CHECK19-NEXT: ret void
13843 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.omp_outlined
13844 // CHECK19-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR3:[0-9]+]] {
13845 // CHECK19-NEXT: entry:
13846 // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
13847 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
13848 // CHECK19-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 4
13849 // CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
13850 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
13851 // CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
13852 // CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
13853 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
13854 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
13855 // CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
13856 // CHECK19-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
13857 // CHECK19-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
13858 // CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13859 // CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13860 // CHECK19-NEXT: [[I3:%.*]] = alloca i32, align 4
13861 // CHECK19-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
13862 // CHECK19-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
13863 // CHECK19-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 4
13864 // CHECK19-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
13865 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
13866 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 4
13867 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
13868 // CHECK19-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 4
13869 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
13870 // CHECK19-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4
13871 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
13872 // CHECK19-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
13873 // CHECK19-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
13874 // CHECK19-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
13875 // CHECK19-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
13876 // CHECK19-NEXT: store i32 0, ptr [[I]], align 4
13877 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
13878 // CHECK19-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
13879 // CHECK19-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
13880 // CHECK19: omp.precond.then:
13881 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
13882 // CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
13883 // CHECK19-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_COMB_UB]], align 4
13884 // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
13885 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
13886 // CHECK19-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
13887 // CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
13888 // CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
13889 // CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
13890 // CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
13891 // CHECK19-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
13892 // CHECK19-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
13893 // CHECK19: cond.true:
13894 // CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
13895 // CHECK19-NEXT: br label [[COND_END:%.*]]
13896 // CHECK19: cond.false:
13897 // CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
13898 // CHECK19-NEXT: br label [[COND_END]]
13899 // CHECK19: cond.end:
13900 // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
13901 // CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
13902 // CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
13903 // CHECK19-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4
13904 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
13905 // CHECK19: omp.inner.for.cond:
13906 // CHECK19-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14:![0-9]+]]
13907 // CHECK19-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP14]]
13908 // CHECK19-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
13909 // CHECK19-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
13910 // CHECK19: omp.inner.for.body:
13911 // CHECK19-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP14]]
13912 // CHECK19-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP14]]
13913 // CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 5, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.omp_outlined.omp_outlined, i32 [[TMP16]], i32 [[TMP17]], ptr [[TMP0]], i32 [[TMP1]], ptr [[TMP2]]), !llvm.access.group [[ACC_GRP14]]
13914 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
13915 // CHECK19: omp.inner.for.inc:
13916 // CHECK19-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]]
13917 // CHECK19-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP14]]
13918 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
13919 // CHECK19-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]]
13920 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP15:![0-9]+]]
13921 // CHECK19: omp.inner.for.end:
13922 // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
13923 // CHECK19: omp.loop.exit:
13924 // CHECK19-NEXT: [[TMP20:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
13925 // CHECK19-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP20]], align 4
13926 // CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP21]])
13927 // CHECK19-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
13928 // CHECK19-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
13929 // CHECK19-NEXT: br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
13930 // CHECK19: .omp.final.then:
13931 // CHECK19-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
13932 // CHECK19-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP24]], 0
13933 // CHECK19-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
13934 // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
13935 // CHECK19-NEXT: [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
13936 // CHECK19-NEXT: store i32 [[ADD8]], ptr [[I3]], align 4
13937 // CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]]
13938 // CHECK19: .omp.final.done:
13939 // CHECK19-NEXT: br label [[OMP_PRECOND_END]]
13940 // CHECK19: omp.precond.end:
13941 // CHECK19-NEXT: ret void
13944 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.omp_outlined.omp_outlined
13945 // CHECK19-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR3]] {
13946 // CHECK19-NEXT: entry:
13947 // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
13948 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
13949 // CHECK19-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
13950 // CHECK19-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
13951 // CHECK19-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 4
13952 // CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
13953 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
13954 // CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
13955 // CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
13956 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
13957 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
13958 // CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
13959 // CHECK19-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
13960 // CHECK19-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
13961 // CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
13962 // CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
13963 // CHECK19-NEXT: [[I3:%.*]] = alloca i32, align 4
13964 // CHECK19-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
13965 // CHECK19-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
13966 // CHECK19-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
13967 // CHECK19-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
13968 // CHECK19-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 4
13969 // CHECK19-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
13970 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
13971 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 4
13972 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
13973 // CHECK19-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 4
13974 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
13975 // CHECK19-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4
13976 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
13977 // CHECK19-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
13978 // CHECK19-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
13979 // CHECK19-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
13980 // CHECK19-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
13981 // CHECK19-NEXT: store i32 0, ptr [[I]], align 4
13982 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
13983 // CHECK19-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
13984 // CHECK19-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
13985 // CHECK19: omp.precond.then:
13986 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
13987 // CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
13988 // CHECK19-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4
13989 // CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
13990 // CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
13991 // CHECK19-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_LB]], align 4
13992 // CHECK19-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_UB]], align 4
13993 // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
13994 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
13995 // CHECK19-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
13996 // CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4
13997 // CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP10]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
13998 // CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
13999 // CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
14000 // CHECK19-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP11]], [[TMP12]]
14001 // CHECK19-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
14002 // CHECK19: cond.true:
14003 // CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
14004 // CHECK19-NEXT: br label [[COND_END:%.*]]
14005 // CHECK19: cond.false:
14006 // CHECK19-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
14007 // CHECK19-NEXT: br label [[COND_END]]
14008 // CHECK19: cond.end:
14009 // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ]
14010 // CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
14011 // CHECK19-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
14012 // CHECK19-NEXT: store i32 [[TMP15]], ptr [[DOTOMP_IV]], align 4
14013 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
14014 // CHECK19: omp.inner.for.cond:
14015 // CHECK19-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18:![0-9]+]]
14016 // CHECK19-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP18]]
14017 // CHECK19-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
14018 // CHECK19-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14019 // CHECK19: omp.inner.for.body:
14020 // CHECK19-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
14021 // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
14022 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
14023 // CHECK19-NEXT: store i32 [[ADD]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP18]]
14024 // CHECK19-NEXT: [[TMP19:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP18]]
14025 // CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 [[TMP19]]
14026 // CHECK19-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP18]]
14027 // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
14028 // CHECK19: omp.body.continue:
14029 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
14030 // CHECK19: omp.inner.for.inc:
14031 // CHECK19-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
14032 // CHECK19-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP20]], 1
14033 // CHECK19-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
14034 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
14035 // CHECK19: omp.inner.for.end:
14036 // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
14037 // CHECK19: omp.loop.exit:
14038 // CHECK19-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
14039 // CHECK19-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
14040 // CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP22]])
14041 // CHECK19-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
14042 // CHECK19-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
14043 // CHECK19-NEXT: br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
14044 // CHECK19: .omp.final.then:
14045 // CHECK19-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
14046 // CHECK19-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP25]], 0
14047 // CHECK19-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
14048 // CHECK19-NEXT: [[MUL9:%.*]] = mul nsw i32 [[DIV8]], 1
14049 // CHECK19-NEXT: [[ADD10:%.*]] = add nsw i32 0, [[MUL9]]
14050 // CHECK19-NEXT: store i32 [[ADD10]], ptr [[I3]], align 4
14051 // CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]]
14052 // CHECK19: .omp.final.done:
14053 // CHECK19-NEXT: br label [[OMP_PRECOND_END]]
14054 // CHECK19: omp.precond.end:
14055 // CHECK19-NEXT: ret void
14058 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159
14059 // CHECK19-SAME: (i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
14060 // CHECK19-NEXT: entry:
14061 // CHECK19-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
14062 // CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
14063 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
14064 // CHECK19-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
14065 // CHECK19-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
14066 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
14067 // CHECK19-NEXT: [[TMP0:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
14068 // CHECK19-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4
14069 // CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.omp_outlined, ptr [[N_ADDR]], i32 [[TMP0]], ptr [[TMP1]])
14070 // CHECK19-NEXT: ret void
14073 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.omp_outlined
14074 // CHECK19-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR3]] {
14075 // CHECK19-NEXT: entry:
14076 // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
14077 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
14078 // CHECK19-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 4
14079 // CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
14080 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
14081 // CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
14082 // CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
14083 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
14084 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
14085 // CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
14086 // CHECK19-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
14087 // CHECK19-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
14088 // CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
14089 // CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
14090 // CHECK19-NEXT: [[I3:%.*]] = alloca i32, align 4
14091 // CHECK19-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
14092 // CHECK19-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
14093 // CHECK19-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 4
14094 // CHECK19-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
14095 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
14096 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 4
14097 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
14098 // CHECK19-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 4
14099 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
14100 // CHECK19-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4
14101 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
14102 // CHECK19-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
14103 // CHECK19-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
14104 // CHECK19-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
14105 // CHECK19-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
14106 // CHECK19-NEXT: store i32 0, ptr [[I]], align 4
14107 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
14108 // CHECK19-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
14109 // CHECK19-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
14110 // CHECK19: omp.precond.then:
14111 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
14112 // CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
14113 // CHECK19-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_COMB_UB]], align 4
14114 // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
14115 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
14116 // CHECK19-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
14117 // CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
14118 // CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
14119 // CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
14120 // CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
14121 // CHECK19-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
14122 // CHECK19-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
14123 // CHECK19: cond.true:
14124 // CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
14125 // CHECK19-NEXT: br label [[COND_END:%.*]]
14126 // CHECK19: cond.false:
14127 // CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
14128 // CHECK19-NEXT: br label [[COND_END]]
14129 // CHECK19: cond.end:
14130 // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
14131 // CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
14132 // CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
14133 // CHECK19-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4
14134 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
14135 // CHECK19: omp.inner.for.cond:
14136 // CHECK19-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23:![0-9]+]]
14137 // CHECK19-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP23]]
14138 // CHECK19-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
14139 // CHECK19-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14140 // CHECK19: omp.inner.for.body:
14141 // CHECK19-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP23]]
14142 // CHECK19-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP23]]
14143 // CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 5, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.omp_outlined.omp_outlined, i32 [[TMP16]], i32 [[TMP17]], ptr [[TMP0]], i32 [[TMP1]], ptr [[TMP2]]), !llvm.access.group [[ACC_GRP23]]
14144 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
14145 // CHECK19: omp.inner.for.inc:
14146 // CHECK19-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]]
14147 // CHECK19-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP23]]
14148 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
14149 // CHECK19-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]]
14150 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]]
14151 // CHECK19: omp.inner.for.end:
14152 // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
14153 // CHECK19: omp.loop.exit:
14154 // CHECK19-NEXT: [[TMP20:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
14155 // CHECK19-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP20]], align 4
14156 // CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP21]])
14157 // CHECK19-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
14158 // CHECK19-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
14159 // CHECK19-NEXT: br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
14160 // CHECK19: .omp.final.then:
14161 // CHECK19-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
14162 // CHECK19-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP24]], 0
14163 // CHECK19-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
14164 // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
14165 // CHECK19-NEXT: [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
14166 // CHECK19-NEXT: store i32 [[ADD8]], ptr [[I3]], align 4
14167 // CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]]
14168 // CHECK19: .omp.final.done:
14169 // CHECK19-NEXT: br label [[OMP_PRECOND_END]]
14170 // CHECK19: omp.precond.end:
14171 // CHECK19-NEXT: ret void
14174 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.omp_outlined.omp_outlined
14175 // CHECK19-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR3]] {
14176 // CHECK19-NEXT: entry:
14177 // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
14178 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
14179 // CHECK19-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
14180 // CHECK19-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
14181 // CHECK19-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 4
14182 // CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
14183 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
14184 // CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
14185 // CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
14186 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
14187 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
14188 // CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
14189 // CHECK19-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
14190 // CHECK19-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
14191 // CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
14192 // CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
14193 // CHECK19-NEXT: [[I3:%.*]] = alloca i32, align 4
14194 // CHECK19-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
14195 // CHECK19-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
14196 // CHECK19-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
14197 // CHECK19-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
14198 // CHECK19-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 4
14199 // CHECK19-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
14200 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
14201 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 4
14202 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
14203 // CHECK19-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 4
14204 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
14205 // CHECK19-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4
14206 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
14207 // CHECK19-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
14208 // CHECK19-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
14209 // CHECK19-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
14210 // CHECK19-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
14211 // CHECK19-NEXT: store i32 0, ptr [[I]], align 4
14212 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
14213 // CHECK19-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
14214 // CHECK19-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
14215 // CHECK19: omp.precond.then:
14216 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
14217 // CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
14218 // CHECK19-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4
14219 // CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
14220 // CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
14221 // CHECK19-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_LB]], align 4
14222 // CHECK19-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_UB]], align 4
14223 // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
14224 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
14225 // CHECK19-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
14226 // CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4
14227 // CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP10]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
14228 // CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
14229 // CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
14230 // CHECK19-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP11]], [[TMP12]]
14231 // CHECK19-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
14232 // CHECK19: cond.true:
14233 // CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
14234 // CHECK19-NEXT: br label [[COND_END:%.*]]
14235 // CHECK19: cond.false:
14236 // CHECK19-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
14237 // CHECK19-NEXT: br label [[COND_END]]
14238 // CHECK19: cond.end:
14239 // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ]
14240 // CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
14241 // CHECK19-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
14242 // CHECK19-NEXT: store i32 [[TMP15]], ptr [[DOTOMP_IV]], align 4
14243 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
14244 // CHECK19: omp.inner.for.cond:
14245 // CHECK19-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26:![0-9]+]]
14246 // CHECK19-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP26]]
14247 // CHECK19-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
14248 // CHECK19-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14249 // CHECK19: omp.inner.for.body:
14250 // CHECK19-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]]
14251 // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
14252 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
14253 // CHECK19-NEXT: store i32 [[ADD]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP26]]
14254 // CHECK19-NEXT: [[TMP19:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP26]]
14255 // CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 [[TMP19]]
14256 // CHECK19-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP26]]
14257 // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
14258 // CHECK19: omp.body.continue:
14259 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
14260 // CHECK19: omp.inner.for.inc:
14261 // CHECK19-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]]
14262 // CHECK19-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP20]], 1
14263 // CHECK19-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]]
14264 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]]
14265 // CHECK19: omp.inner.for.end:
14266 // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
14267 // CHECK19: omp.loop.exit:
14268 // CHECK19-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
14269 // CHECK19-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
14270 // CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP22]])
14271 // CHECK19-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
14272 // CHECK19-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
14273 // CHECK19-NEXT: br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
14274 // CHECK19: .omp.final.then:
14275 // CHECK19-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
14276 // CHECK19-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP25]], 0
14277 // CHECK19-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
14278 // CHECK19-NEXT: [[MUL9:%.*]] = mul nsw i32 [[DIV8]], 1
14279 // CHECK19-NEXT: [[ADD10:%.*]] = add nsw i32 0, [[MUL9]]
14280 // CHECK19-NEXT: store i32 [[ADD10]], ptr [[I3]], align 4
14281 // CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]]
14282 // CHECK19: .omp.final.done:
14283 // CHECK19-NEXT: br label [[OMP_PRECOND_END]]
14284 // CHECK19: omp.precond.end:
14285 // CHECK19-NEXT: ret void
14288 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164
14289 // CHECK19-SAME: (i32 noundef [[M:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
14290 // CHECK19-NEXT: entry:
14291 // CHECK19-NEXT: [[M_ADDR:%.*]] = alloca i32, align 4
14292 // CHECK19-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
14293 // CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
14294 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
14295 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
14296 // CHECK19-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
14297 // CHECK19-NEXT: store i32 [[M]], ptr [[M_ADDR]], align 4
14298 // CHECK19-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
14299 // CHECK19-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
14300 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
14301 // CHECK19-NEXT: [[TMP0:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
14302 // CHECK19-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4
14303 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[M_ADDR]], align 4
14304 // CHECK19-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 4
14305 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
14306 // CHECK19-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
14307 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
14308 // CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.omp_outlined, ptr [[N_ADDR]], i32 [[TMP0]], ptr [[TMP1]], i32 [[TMP4]])
14309 // CHECK19-NEXT: ret void
14312 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.omp_outlined
14313 // CHECK19-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
14314 // CHECK19-NEXT: entry:
14315 // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
14316 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
14317 // CHECK19-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 4
14318 // CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
14319 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
14320 // CHECK19-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
14321 // CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
14322 // CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
14323 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
14324 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
14325 // CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
14326 // CHECK19-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
14327 // CHECK19-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
14328 // CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
14329 // CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
14330 // CHECK19-NEXT: [[I4:%.*]] = alloca i32, align 4
14331 // CHECK19-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
14332 // CHECK19-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
14333 // CHECK19-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
14334 // CHECK19-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 4
14335 // CHECK19-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
14336 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
14337 // CHECK19-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
14338 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 4
14339 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
14340 // CHECK19-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 4
14341 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
14342 // CHECK19-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_1]], align 4
14343 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
14344 // CHECK19-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
14345 // CHECK19-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
14346 // CHECK19-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
14347 // CHECK19-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_2]], align 4
14348 // CHECK19-NEXT: store i32 0, ptr [[I]], align 4
14349 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
14350 // CHECK19-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
14351 // CHECK19-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
14352 // CHECK19: omp.precond.then:
14353 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
14354 // CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
14355 // CHECK19-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_COMB_UB]], align 4
14356 // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
14357 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
14358 // CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
14359 // CHECK19-NEXT: [[TMP8:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
14360 // CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4
14361 // CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP9]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[TMP7]])
14362 // CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
14363 // CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
14364 // CHECK19-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
14365 // CHECK19-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
14366 // CHECK19: cond.true:
14367 // CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
14368 // CHECK19-NEXT: br label [[COND_END:%.*]]
14369 // CHECK19: cond.false:
14370 // CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
14371 // CHECK19-NEXT: br label [[COND_END]]
14372 // CHECK19: cond.end:
14373 // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
14374 // CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
14375 // CHECK19-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
14376 // CHECK19-NEXT: store i32 [[TMP14]], ptr [[DOTOMP_IV]], align 4
14377 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
14378 // CHECK19: omp.inner.for.cond:
14379 // CHECK19-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29:![0-9]+]]
14380 // CHECK19-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP29]]
14381 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP16]], 1
14382 // CHECK19-NEXT: [[CMP6:%.*]] = icmp slt i32 [[TMP15]], [[ADD]]
14383 // CHECK19-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14384 // CHECK19: omp.inner.for.body:
14385 // CHECK19-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP29]]
14386 // CHECK19-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP29]]
14387 // CHECK19-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group [[ACC_GRP29]]
14388 // CHECK19-NEXT: store i32 [[TMP19]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group [[ACC_GRP29]]
14389 // CHECK19-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group [[ACC_GRP29]]
14390 // CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 6, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.omp_outlined.omp_outlined, i32 [[TMP17]], i32 [[TMP18]], ptr [[TMP0]], i32 [[TMP1]], ptr [[TMP2]], i32 [[TMP20]]), !llvm.access.group [[ACC_GRP29]]
14391 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
14392 // CHECK19: omp.inner.for.inc:
14393 // CHECK19-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]]
14394 // CHECK19-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP29]]
14395 // CHECK19-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
14396 // CHECK19-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]]
14397 // CHECK19-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP29]]
14398 // CHECK19-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP29]]
14399 // CHECK19-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP23]], [[TMP24]]
14400 // CHECK19-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP29]]
14401 // CHECK19-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP29]]
14402 // CHECK19-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP29]]
14403 // CHECK19-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
14404 // CHECK19-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP29]]
14405 // CHECK19-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP29]]
14406 // CHECK19-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP29]]
14407 // CHECK19-NEXT: [[CMP10:%.*]] = icmp sgt i32 [[TMP27]], [[TMP28]]
14408 // CHECK19-NEXT: br i1 [[CMP10]], label [[COND_TRUE11:%.*]], label [[COND_FALSE12:%.*]]
14409 // CHECK19: cond.true11:
14410 // CHECK19-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP29]]
14411 // CHECK19-NEXT: br label [[COND_END13:%.*]]
14412 // CHECK19: cond.false12:
14413 // CHECK19-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP29]]
14414 // CHECK19-NEXT: br label [[COND_END13]]
14415 // CHECK19: cond.end13:
14416 // CHECK19-NEXT: [[COND14:%.*]] = phi i32 [ [[TMP29]], [[COND_TRUE11]] ], [ [[TMP30]], [[COND_FALSE12]] ]
14417 // CHECK19-NEXT: store i32 [[COND14]], ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP29]]
14418 // CHECK19-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP29]]
14419 // CHECK19-NEXT: store i32 [[TMP31]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]]
14420 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]]
14421 // CHECK19: omp.inner.for.end:
14422 // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
14423 // CHECK19: omp.loop.exit:
14424 // CHECK19-NEXT: [[TMP32:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
14425 // CHECK19-NEXT: [[TMP33:%.*]] = load i32, ptr [[TMP32]], align 4
14426 // CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP33]])
14427 // CHECK19-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
14428 // CHECK19-NEXT: [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
14429 // CHECK19-NEXT: br i1 [[TMP35]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
14430 // CHECK19: .omp.final.then:
14431 // CHECK19-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
14432 // CHECK19-NEXT: [[SUB15:%.*]] = sub nsw i32 [[TMP36]], 0
14433 // CHECK19-NEXT: [[DIV16:%.*]] = sdiv i32 [[SUB15]], 1
14434 // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV16]], 1
14435 // CHECK19-NEXT: [[ADD17:%.*]] = add nsw i32 0, [[MUL]]
14436 // CHECK19-NEXT: store i32 [[ADD17]], ptr [[I4]], align 4
14437 // CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]]
14438 // CHECK19: .omp.final.done:
14439 // CHECK19-NEXT: br label [[OMP_PRECOND_END]]
14440 // CHECK19: omp.precond.end:
14441 // CHECK19-NEXT: ret void
14444 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.omp_outlined.omp_outlined
14445 // CHECK19-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
14446 // CHECK19-NEXT: entry:
14447 // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
14448 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
14449 // CHECK19-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
14450 // CHECK19-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
14451 // CHECK19-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 4
14452 // CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
14453 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
14454 // CHECK19-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
14455 // CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
14456 // CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
14457 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
14458 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
14459 // CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
14460 // CHECK19-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
14461 // CHECK19-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
14462 // CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
14463 // CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
14464 // CHECK19-NEXT: [[I4:%.*]] = alloca i32, align 4
14465 // CHECK19-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
14466 // CHECK19-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
14467 // CHECK19-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
14468 // CHECK19-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
14469 // CHECK19-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 4
14470 // CHECK19-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
14471 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
14472 // CHECK19-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
14473 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 4
14474 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
14475 // CHECK19-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 4
14476 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
14477 // CHECK19-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_1]], align 4
14478 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
14479 // CHECK19-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
14480 // CHECK19-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
14481 // CHECK19-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
14482 // CHECK19-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_2]], align 4
14483 // CHECK19-NEXT: store i32 0, ptr [[I]], align 4
14484 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
14485 // CHECK19-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
14486 // CHECK19-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
14487 // CHECK19: omp.precond.then:
14488 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
14489 // CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
14490 // CHECK19-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4
14491 // CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
14492 // CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
14493 // CHECK19-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_LB]], align 4
14494 // CHECK19-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_UB]], align 4
14495 // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
14496 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
14497 // CHECK19-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
14498 // CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4
14499 // CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP10]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
14500 // CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
14501 // CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
14502 // CHECK19-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP11]], [[TMP12]]
14503 // CHECK19-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
14504 // CHECK19: cond.true:
14505 // CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
14506 // CHECK19-NEXT: br label [[COND_END:%.*]]
14507 // CHECK19: cond.false:
14508 // CHECK19-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
14509 // CHECK19-NEXT: br label [[COND_END]]
14510 // CHECK19: cond.end:
14511 // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ]
14512 // CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
14513 // CHECK19-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
14514 // CHECK19-NEXT: store i32 [[TMP15]], ptr [[DOTOMP_IV]], align 4
14515 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
14516 // CHECK19: omp.inner.for.cond:
14517 // CHECK19-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32:![0-9]+]]
14518 // CHECK19-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP32]]
14519 // CHECK19-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
14520 // CHECK19-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14521 // CHECK19: omp.inner.for.body:
14522 // CHECK19-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]]
14523 // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
14524 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
14525 // CHECK19-NEXT: store i32 [[ADD]], ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP32]]
14526 // CHECK19-NEXT: [[TMP19:%.*]] = load i32, ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP32]]
14527 // CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 [[TMP19]]
14528 // CHECK19-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP32]]
14529 // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
14530 // CHECK19: omp.body.continue:
14531 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
14532 // CHECK19: omp.inner.for.inc:
14533 // CHECK19-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]]
14534 // CHECK19-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP20]], 1
14535 // CHECK19-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]]
14536 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]]
14537 // CHECK19: omp.inner.for.end:
14538 // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
14539 // CHECK19: omp.loop.exit:
14540 // CHECK19-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
14541 // CHECK19-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
14542 // CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP22]])
14543 // CHECK19-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
14544 // CHECK19-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
14545 // CHECK19-NEXT: br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
14546 // CHECK19: .omp.final.then:
14547 // CHECK19-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
14548 // CHECK19-NEXT: [[SUB8:%.*]] = sub nsw i32 [[TMP25]], 0
14549 // CHECK19-NEXT: [[DIV9:%.*]] = sdiv i32 [[SUB8]], 1
14550 // CHECK19-NEXT: [[MUL10:%.*]] = mul nsw i32 [[DIV9]], 1
14551 // CHECK19-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]]
14552 // CHECK19-NEXT: store i32 [[ADD11]], ptr [[I4]], align 4
14553 // CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]]
14554 // CHECK19: .omp.final.done:
14555 // CHECK19-NEXT: br label [[OMP_PRECOND_END]]
14556 // CHECK19: omp.precond.end:
14557 // CHECK19-NEXT: ret void
14560 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169
14561 // CHECK19-SAME: (i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
14562 // CHECK19-NEXT: entry:
14563 // CHECK19-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
14564 // CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
14565 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
14566 // CHECK19-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
14567 // CHECK19-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
14568 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
14569 // CHECK19-NEXT: [[TMP0:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
14570 // CHECK19-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4
14571 // CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.omp_outlined, ptr [[N_ADDR]], i32 [[TMP0]], ptr [[TMP1]])
14572 // CHECK19-NEXT: ret void
14575 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.omp_outlined
14576 // CHECK19-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR3]] {
14577 // CHECK19-NEXT: entry:
14578 // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
14579 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
14580 // CHECK19-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 4
14581 // CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
14582 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
14583 // CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
14584 // CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
14585 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
14586 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
14587 // CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
14588 // CHECK19-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
14589 // CHECK19-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
14590 // CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
14591 // CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
14592 // CHECK19-NEXT: [[I3:%.*]] = alloca i32, align 4
14593 // CHECK19-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
14594 // CHECK19-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
14595 // CHECK19-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 4
14596 // CHECK19-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
14597 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
14598 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 4
14599 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
14600 // CHECK19-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 4
14601 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
14602 // CHECK19-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4
14603 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
14604 // CHECK19-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
14605 // CHECK19-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
14606 // CHECK19-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
14607 // CHECK19-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
14608 // CHECK19-NEXT: store i32 0, ptr [[I]], align 4
14609 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
14610 // CHECK19-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
14611 // CHECK19-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
14612 // CHECK19: omp.precond.then:
14613 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
14614 // CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
14615 // CHECK19-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_COMB_UB]], align 4
14616 // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
14617 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
14618 // CHECK19-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
14619 // CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
14620 // CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
14621 // CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
14622 // CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
14623 // CHECK19-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
14624 // CHECK19-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
14625 // CHECK19: cond.true:
14626 // CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
14627 // CHECK19-NEXT: br label [[COND_END:%.*]]
14628 // CHECK19: cond.false:
14629 // CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
14630 // CHECK19-NEXT: br label [[COND_END]]
14631 // CHECK19: cond.end:
14632 // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
14633 // CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
14634 // CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
14635 // CHECK19-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4
14636 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
14637 // CHECK19: omp.inner.for.cond:
14638 // CHECK19-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35:![0-9]+]]
14639 // CHECK19-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP35]]
14640 // CHECK19-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
14641 // CHECK19-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14642 // CHECK19: omp.inner.for.body:
14643 // CHECK19-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP35]]
14644 // CHECK19-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP35]]
14645 // CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 5, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.omp_outlined.omp_outlined, i32 [[TMP16]], i32 [[TMP17]], ptr [[TMP0]], i32 [[TMP1]], ptr [[TMP2]]), !llvm.access.group [[ACC_GRP35]]
14646 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
14647 // CHECK19: omp.inner.for.inc:
14648 // CHECK19-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]]
14649 // CHECK19-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP35]]
14650 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
14651 // CHECK19-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]]
14652 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]]
14653 // CHECK19: omp.inner.for.end:
14654 // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
14655 // CHECK19: omp.loop.exit:
14656 // CHECK19-NEXT: [[TMP20:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
14657 // CHECK19-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP20]], align 4
14658 // CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP21]])
14659 // CHECK19-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
14660 // CHECK19-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
14661 // CHECK19-NEXT: br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
14662 // CHECK19: .omp.final.then:
14663 // CHECK19-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
14664 // CHECK19-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP24]], 0
14665 // CHECK19-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
14666 // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV7]], 1
14667 // CHECK19-NEXT: [[ADD8:%.*]] = add nsw i32 0, [[MUL]]
14668 // CHECK19-NEXT: store i32 [[ADD8]], ptr [[I3]], align 4
14669 // CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]]
14670 // CHECK19: .omp.final.done:
14671 // CHECK19-NEXT: br label [[OMP_PRECOND_END]]
14672 // CHECK19: omp.precond.end:
14673 // CHECK19-NEXT: ret void
14676 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.omp_outlined.omp_outlined
14677 // CHECK19-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR3]] {
14678 // CHECK19-NEXT: entry:
14679 // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
14680 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
14681 // CHECK19-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
14682 // CHECK19-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
14683 // CHECK19-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 4
14684 // CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
14685 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
14686 // CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
14687 // CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
14688 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
14689 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
14690 // CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
14691 // CHECK19-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
14692 // CHECK19-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
14693 // CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
14694 // CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
14695 // CHECK19-NEXT: [[I3:%.*]] = alloca i32, align 4
14696 // CHECK19-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
14697 // CHECK19-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
14698 // CHECK19-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
14699 // CHECK19-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
14700 // CHECK19-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 4
14701 // CHECK19-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
14702 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
14703 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 4
14704 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
14705 // CHECK19-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 4
14706 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
14707 // CHECK19-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4
14708 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
14709 // CHECK19-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
14710 // CHECK19-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
14711 // CHECK19-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
14712 // CHECK19-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
14713 // CHECK19-NEXT: store i32 0, ptr [[I]], align 4
14714 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
14715 // CHECK19-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
14716 // CHECK19-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
14717 // CHECK19: omp.precond.then:
14718 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
14719 // CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
14720 // CHECK19-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4
14721 // CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
14722 // CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
14723 // CHECK19-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_LB]], align 4
14724 // CHECK19-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_UB]], align 4
14725 // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
14726 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
14727 // CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
14728 // CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
14729 // CHECK19-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
14730 // CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP11]], align 4
14731 // CHECK19-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB3]], i32 [[TMP12]], i32 1073741859, i32 [[TMP9]], i32 [[TMP10]], i32 1, i32 1)
14732 // CHECK19-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
14733 // CHECK19: omp.dispatch.cond:
14734 // CHECK19-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
14735 // CHECK19-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 4
14736 // CHECK19-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB3]], i32 [[TMP14]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
14737 // CHECK19-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP15]], 0
14738 // CHECK19-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
14739 // CHECK19: omp.dispatch.body:
14740 // CHECK19-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
14741 // CHECK19-NEXT: store i32 [[TMP16]], ptr [[DOTOMP_IV]], align 4
14742 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
14743 // CHECK19: omp.inner.for.cond:
14744 // CHECK19-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38:![0-9]+]]
14745 // CHECK19-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP38]]
14746 // CHECK19-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]]
14747 // CHECK19-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14748 // CHECK19: omp.inner.for.body:
14749 // CHECK19-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38]]
14750 // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1
14751 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
14752 // CHECK19-NEXT: store i32 [[ADD]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP38]]
14753 // CHECK19-NEXT: [[TMP20:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP38]]
14754 // CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 [[TMP20]]
14755 // CHECK19-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP38]]
14756 // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
14757 // CHECK19: omp.body.continue:
14758 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
14759 // CHECK19: omp.inner.for.inc:
14760 // CHECK19-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38]]
14761 // CHECK19-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP21]], 1
14762 // CHECK19-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38]]
14763 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP39:![0-9]+]]
14764 // CHECK19: omp.inner.for.end:
14765 // CHECK19-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
14766 // CHECK19: omp.dispatch.inc:
14767 // CHECK19-NEXT: br label [[OMP_DISPATCH_COND]]
14768 // CHECK19: omp.dispatch.end:
14769 // CHECK19-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
14770 // CHECK19-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
14771 // CHECK19-NEXT: br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
14772 // CHECK19: .omp.final.then:
14773 // CHECK19-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
14774 // CHECK19-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP24]], 0
14775 // CHECK19-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
14776 // CHECK19-NEXT: [[MUL8:%.*]] = mul nsw i32 [[DIV7]], 1
14777 // CHECK19-NEXT: [[ADD9:%.*]] = add nsw i32 0, [[MUL8]]
14778 // CHECK19-NEXT: store i32 [[ADD9]], ptr [[I3]], align 4
14779 // CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]]
14780 // CHECK19: .omp.final.done:
14781 // CHECK19-NEXT: br label [[OMP_PRECOND_END]]
14782 // CHECK19: omp.precond.end:
14783 // CHECK19-NEXT: ret void
14786 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174
14787 // CHECK19-SAME: (i32 noundef [[M:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
14788 // CHECK19-NEXT: entry:
14789 // CHECK19-NEXT: [[M_ADDR:%.*]] = alloca i32, align 4
14790 // CHECK19-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
14791 // CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
14792 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
14793 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
14794 // CHECK19-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
14795 // CHECK19-NEXT: store i32 [[M]], ptr [[M_ADDR]], align 4
14796 // CHECK19-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
14797 // CHECK19-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
14798 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
14799 // CHECK19-NEXT: [[TMP0:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
14800 // CHECK19-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4
14801 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[M_ADDR]], align 4
14802 // CHECK19-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 4
14803 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
14804 // CHECK19-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
14805 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
14806 // CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.omp_outlined, ptr [[N_ADDR]], i32 [[TMP0]], ptr [[TMP1]], i32 [[TMP4]])
14807 // CHECK19-NEXT: ret void
14810 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.omp_outlined
14811 // CHECK19-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
14812 // CHECK19-NEXT: entry:
14813 // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
14814 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
14815 // CHECK19-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 4
14816 // CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
14817 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
14818 // CHECK19-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
14819 // CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
14820 // CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
14821 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
14822 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
14823 // CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
14824 // CHECK19-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
14825 // CHECK19-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
14826 // CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
14827 // CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
14828 // CHECK19-NEXT: [[I4:%.*]] = alloca i32, align 4
14829 // CHECK19-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
14830 // CHECK19-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
14831 // CHECK19-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
14832 // CHECK19-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 4
14833 // CHECK19-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
14834 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
14835 // CHECK19-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
14836 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 4
14837 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
14838 // CHECK19-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 4
14839 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
14840 // CHECK19-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_1]], align 4
14841 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
14842 // CHECK19-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
14843 // CHECK19-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
14844 // CHECK19-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
14845 // CHECK19-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_2]], align 4
14846 // CHECK19-NEXT: store i32 0, ptr [[I]], align 4
14847 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
14848 // CHECK19-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
14849 // CHECK19-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
14850 // CHECK19: omp.precond.then:
14851 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
14852 // CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
14853 // CHECK19-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_COMB_UB]], align 4
14854 // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
14855 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
14856 // CHECK19-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
14857 // CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
14858 // CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
14859 // CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
14860 // CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
14861 // CHECK19-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
14862 // CHECK19-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
14863 // CHECK19: cond.true:
14864 // CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
14865 // CHECK19-NEXT: br label [[COND_END:%.*]]
14866 // CHECK19: cond.false:
14867 // CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
14868 // CHECK19-NEXT: br label [[COND_END]]
14869 // CHECK19: cond.end:
14870 // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
14871 // CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
14872 // CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
14873 // CHECK19-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4
14874 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
14875 // CHECK19: omp.inner.for.cond:
14876 // CHECK19-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP41:![0-9]+]]
14877 // CHECK19-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP41]]
14878 // CHECK19-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
14879 // CHECK19-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14880 // CHECK19: omp.inner.for.body:
14881 // CHECK19-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP41]]
14882 // CHECK19-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP41]]
14883 // CHECK19-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group [[ACC_GRP41]]
14884 // CHECK19-NEXT: store i32 [[TMP18]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group [[ACC_GRP41]]
14885 // CHECK19-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group [[ACC_GRP41]]
14886 // CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 6, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.omp_outlined.omp_outlined, i32 [[TMP16]], i32 [[TMP17]], ptr [[TMP0]], i32 [[TMP1]], ptr [[TMP2]], i32 [[TMP19]]), !llvm.access.group [[ACC_GRP41]]
14887 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
14888 // CHECK19: omp.inner.for.inc:
14889 // CHECK19-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP41]]
14890 // CHECK19-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP41]]
14891 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
14892 // CHECK19-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP41]]
14893 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP42:![0-9]+]]
14894 // CHECK19: omp.inner.for.end:
14895 // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
14896 // CHECK19: omp.loop.exit:
14897 // CHECK19-NEXT: [[TMP22:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
14898 // CHECK19-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4
14899 // CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP23]])
14900 // CHECK19-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
14901 // CHECK19-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
14902 // CHECK19-NEXT: br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
14903 // CHECK19: .omp.final.then:
14904 // CHECK19-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
14905 // CHECK19-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP26]], 0
14906 // CHECK19-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
14907 // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV8]], 1
14908 // CHECK19-NEXT: [[ADD9:%.*]] = add nsw i32 0, [[MUL]]
14909 // CHECK19-NEXT: store i32 [[ADD9]], ptr [[I4]], align 4
14910 // CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]]
14911 // CHECK19: .omp.final.done:
14912 // CHECK19-NEXT: br label [[OMP_PRECOND_END]]
14913 // CHECK19: omp.precond.end:
14914 // CHECK19-NEXT: ret void
14917 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.omp_outlined.omp_outlined
14918 // CHECK19-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
14919 // CHECK19-NEXT: entry:
14920 // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
14921 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
14922 // CHECK19-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
14923 // CHECK19-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
14924 // CHECK19-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 4
14925 // CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
14926 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
14927 // CHECK19-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
14928 // CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
14929 // CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
14930 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
14931 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
14932 // CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
14933 // CHECK19-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
14934 // CHECK19-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
14935 // CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
14936 // CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
14937 // CHECK19-NEXT: [[I4:%.*]] = alloca i32, align 4
14938 // CHECK19-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
14939 // CHECK19-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
14940 // CHECK19-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
14941 // CHECK19-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
14942 // CHECK19-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 4
14943 // CHECK19-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4
14944 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
14945 // CHECK19-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
14946 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 4
14947 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4
14948 // CHECK19-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 4
14949 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
14950 // CHECK19-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_1]], align 4
14951 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
14952 // CHECK19-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
14953 // CHECK19-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
14954 // CHECK19-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
14955 // CHECK19-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_2]], align 4
14956 // CHECK19-NEXT: store i32 0, ptr [[I]], align 4
14957 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
14958 // CHECK19-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
14959 // CHECK19-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
14960 // CHECK19: omp.precond.then:
14961 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
14962 // CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
14963 // CHECK19-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4
14964 // CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
14965 // CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
14966 // CHECK19-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_LB]], align 4
14967 // CHECK19-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_UB]], align 4
14968 // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
14969 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
14970 // CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
14971 // CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
14972 // CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
14973 // CHECK19-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
14974 // CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4
14975 // CHECK19-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB3]], i32 [[TMP13]], i32 1073741859, i32 [[TMP10]], i32 [[TMP11]], i32 1, i32 [[TMP9]])
14976 // CHECK19-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
14977 // CHECK19: omp.dispatch.cond:
14978 // CHECK19-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
14979 // CHECK19-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 4
14980 // CHECK19-NEXT: [[TMP16:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB3]], i32 [[TMP15]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
14981 // CHECK19-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP16]], 0
14982 // CHECK19-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
14983 // CHECK19: omp.dispatch.body:
14984 // CHECK19-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
14985 // CHECK19-NEXT: store i32 [[TMP17]], ptr [[DOTOMP_IV]], align 4
14986 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
14987 // CHECK19: omp.inner.for.cond:
14988 // CHECK19-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP44:![0-9]+]]
14989 // CHECK19-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP44]]
14990 // CHECK19-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
14991 // CHECK19-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
14992 // CHECK19: omp.inner.for.body:
14993 // CHECK19-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP44]]
14994 // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP20]], 1
14995 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
14996 // CHECK19-NEXT: store i32 [[ADD]], ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP44]]
14997 // CHECK19-NEXT: [[TMP21:%.*]] = load i32, ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP44]]
14998 // CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 [[TMP21]]
14999 // CHECK19-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP44]]
15000 // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
15001 // CHECK19: omp.body.continue:
15002 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
15003 // CHECK19: omp.inner.for.inc:
15004 // CHECK19-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP44]]
15005 // CHECK19-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP22]], 1
15006 // CHECK19-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP44]]
15007 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP45:![0-9]+]]
15008 // CHECK19: omp.inner.for.end:
15009 // CHECK19-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
15010 // CHECK19: omp.dispatch.inc:
15011 // CHECK19-NEXT: br label [[OMP_DISPATCH_COND]]
15012 // CHECK19: omp.dispatch.end:
15013 // CHECK19-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
15014 // CHECK19-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
15015 // CHECK19-NEXT: br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
15016 // CHECK19: .omp.final.then:
15017 // CHECK19-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
15018 // CHECK19-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP25]], 0
15019 // CHECK19-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
15020 // CHECK19-NEXT: [[MUL9:%.*]] = mul nsw i32 [[DIV8]], 1
15021 // CHECK19-NEXT: [[ADD10:%.*]] = add nsw i32 0, [[MUL9]]
15022 // CHECK19-NEXT: store i32 [[ADD10]], ptr [[I4]], align 4
15023 // CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]]
15024 // CHECK19: .omp.final.done:
15025 // CHECK19-NEXT: br label [[OMP_PRECOND_END]]
15026 // CHECK19: omp.precond.end:
15027 // CHECK19-NEXT: ret void
15030 // CHECK19-LABEL: define {{[^@]+}}@_Z5tmainIiLi10EEiT_
15031 // CHECK19-SAME: (i32 noundef [[ARGC:%.*]]) #[[ATTR6:[0-9]+]] comdat {
15032 // CHECK19-NEXT: entry:
15033 // CHECK19-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
15034 // CHECK19-NEXT: [[A:%.*]] = alloca [10 x i32], align 4
15035 // CHECK19-NEXT: [[M:%.*]] = alloca i32, align 4
15036 // CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 4
15037 // CHECK19-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 4
15038 // CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 4
15039 // CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
15040 // CHECK19-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
15041 // CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS1:%.*]] = alloca [1 x ptr], align 4
15042 // CHECK19-NEXT: [[DOTOFFLOAD_PTRS2:%.*]] = alloca [1 x ptr], align 4
15043 // CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS3:%.*]] = alloca [1 x ptr], align 4
15044 // CHECK19-NEXT: [[_TMP4:%.*]] = alloca i32, align 4
15045 // CHECK19-NEXT: [[KERNEL_ARGS5:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
15046 // CHECK19-NEXT: [[M_CASTED:%.*]] = alloca i32, align 4
15047 // CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS8:%.*]] = alloca [2 x ptr], align 4
15048 // CHECK19-NEXT: [[DOTOFFLOAD_PTRS9:%.*]] = alloca [2 x ptr], align 4
15049 // CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS10:%.*]] = alloca [2 x ptr], align 4
15050 // CHECK19-NEXT: [[_TMP11:%.*]] = alloca i32, align 4
15051 // CHECK19-NEXT: [[KERNEL_ARGS12:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
15052 // CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS15:%.*]] = alloca [1 x ptr], align 4
15053 // CHECK19-NEXT: [[DOTOFFLOAD_PTRS16:%.*]] = alloca [1 x ptr], align 4
15054 // CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS17:%.*]] = alloca [1 x ptr], align 4
15055 // CHECK19-NEXT: [[_TMP18:%.*]] = alloca i32, align 4
15056 // CHECK19-NEXT: [[KERNEL_ARGS19:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
15057 // CHECK19-NEXT: [[M_CASTED22:%.*]] = alloca i32, align 4
15058 // CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS23:%.*]] = alloca [2 x ptr], align 4
15059 // CHECK19-NEXT: [[DOTOFFLOAD_PTRS24:%.*]] = alloca [2 x ptr], align 4
15060 // CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS25:%.*]] = alloca [2 x ptr], align 4
15061 // CHECK19-NEXT: [[_TMP26:%.*]] = alloca i32, align 4
15062 // CHECK19-NEXT: [[KERNEL_ARGS27:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
15063 // CHECK19-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4
15064 // CHECK19-NEXT: store i32 10, ptr [[M]], align 4
15065 // CHECK19-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
15066 // CHECK19-NEXT: store ptr [[A]], ptr [[TMP0]], align 4
15067 // CHECK19-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
15068 // CHECK19-NEXT: store ptr [[A]], ptr [[TMP1]], align 4
15069 // CHECK19-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
15070 // CHECK19-NEXT: store ptr null, ptr [[TMP2]], align 4
15071 // CHECK19-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
15072 // CHECK19-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
15073 // CHECK19-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
15074 // CHECK19-NEXT: store i32 2, ptr [[TMP5]], align 4
15075 // CHECK19-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
15076 // CHECK19-NEXT: store i32 1, ptr [[TMP6]], align 4
15077 // CHECK19-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
15078 // CHECK19-NEXT: store ptr [[TMP3]], ptr [[TMP7]], align 4
15079 // CHECK19-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
15080 // CHECK19-NEXT: store ptr [[TMP4]], ptr [[TMP8]], align 4
15081 // CHECK19-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
15082 // CHECK19-NEXT: store ptr @.offload_sizes.9, ptr [[TMP9]], align 4
15083 // CHECK19-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
15084 // CHECK19-NEXT: store ptr @.offload_maptypes.10, ptr [[TMP10]], align 4
15085 // CHECK19-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
15086 // CHECK19-NEXT: store ptr null, ptr [[TMP11]], align 4
15087 // CHECK19-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
15088 // CHECK19-NEXT: store ptr null, ptr [[TMP12]], align 4
15089 // CHECK19-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
15090 // CHECK19-NEXT: store i64 10, ptr [[TMP13]], align 8
15091 // CHECK19-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
15092 // CHECK19-NEXT: store i64 0, ptr [[TMP14]], align 8
15093 // CHECK19-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
15094 // CHECK19-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP15]], align 4
15095 // CHECK19-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
15096 // CHECK19-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP16]], align 4
15097 // CHECK19-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
15098 // CHECK19-NEXT: store i32 0, ptr [[TMP17]], align 4
15099 // CHECK19-NEXT: [[TMP18:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.region_id, ptr [[KERNEL_ARGS]])
15100 // CHECK19-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
15101 // CHECK19-NEXT: br i1 [[TMP19]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
15102 // CHECK19: omp_offload.failed:
15103 // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122(ptr [[A]]) #[[ATTR4]]
15104 // CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT]]
15105 // CHECK19: omp_offload.cont:
15106 // CHECK19-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
15107 // CHECK19-NEXT: store ptr [[A]], ptr [[TMP20]], align 4
15108 // CHECK19-NEXT: [[TMP21:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
15109 // CHECK19-NEXT: store ptr [[A]], ptr [[TMP21]], align 4
15110 // CHECK19-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS3]], i32 0, i32 0
15111 // CHECK19-NEXT: store ptr null, ptr [[TMP22]], align 4
15112 // CHECK19-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
15113 // CHECK19-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
15114 // CHECK19-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 0
15115 // CHECK19-NEXT: store i32 2, ptr [[TMP25]], align 4
15116 // CHECK19-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 1
15117 // CHECK19-NEXT: store i32 1, ptr [[TMP26]], align 4
15118 // CHECK19-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 2
15119 // CHECK19-NEXT: store ptr [[TMP23]], ptr [[TMP27]], align 4
15120 // CHECK19-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 3
15121 // CHECK19-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 4
15122 // CHECK19-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 4
15123 // CHECK19-NEXT: store ptr @.offload_sizes.11, ptr [[TMP29]], align 4
15124 // CHECK19-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 5
15125 // CHECK19-NEXT: store ptr @.offload_maptypes.12, ptr [[TMP30]], align 4
15126 // CHECK19-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 6
15127 // CHECK19-NEXT: store ptr null, ptr [[TMP31]], align 4
15128 // CHECK19-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 7
15129 // CHECK19-NEXT: store ptr null, ptr [[TMP32]], align 4
15130 // CHECK19-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 8
15131 // CHECK19-NEXT: store i64 10, ptr [[TMP33]], align 8
15132 // CHECK19-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 9
15133 // CHECK19-NEXT: store i64 0, ptr [[TMP34]], align 8
15134 // CHECK19-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 10
15135 // CHECK19-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP35]], align 4
15136 // CHECK19-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 11
15137 // CHECK19-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP36]], align 4
15138 // CHECK19-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 12
15139 // CHECK19-NEXT: store i32 0, ptr [[TMP37]], align 4
15140 // CHECK19-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.region_id, ptr [[KERNEL_ARGS5]])
15141 // CHECK19-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
15142 // CHECK19-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED6:%.*]], label [[OMP_OFFLOAD_CONT7:%.*]]
15143 // CHECK19: omp_offload.failed6:
15144 // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127(ptr [[A]]) #[[ATTR4]]
15145 // CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT7]]
15146 // CHECK19: omp_offload.cont7:
15147 // CHECK19-NEXT: [[TMP40:%.*]] = load i32, ptr [[M]], align 4
15148 // CHECK19-NEXT: store i32 [[TMP40]], ptr [[M_CASTED]], align 4
15149 // CHECK19-NEXT: [[TMP41:%.*]] = load i32, ptr [[M_CASTED]], align 4
15150 // CHECK19-NEXT: [[TMP42:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS8]], i32 0, i32 0
15151 // CHECK19-NEXT: store i32 [[TMP41]], ptr [[TMP42]], align 4
15152 // CHECK19-NEXT: [[TMP43:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS9]], i32 0, i32 0
15153 // CHECK19-NEXT: store i32 [[TMP41]], ptr [[TMP43]], align 4
15154 // CHECK19-NEXT: [[TMP44:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS10]], i32 0, i32 0
15155 // CHECK19-NEXT: store ptr null, ptr [[TMP44]], align 4
15156 // CHECK19-NEXT: [[TMP45:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS8]], i32 0, i32 1
15157 // CHECK19-NEXT: store ptr [[A]], ptr [[TMP45]], align 4
15158 // CHECK19-NEXT: [[TMP46:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS9]], i32 0, i32 1
15159 // CHECK19-NEXT: store ptr [[A]], ptr [[TMP46]], align 4
15160 // CHECK19-NEXT: [[TMP47:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS10]], i32 0, i32 1
15161 // CHECK19-NEXT: store ptr null, ptr [[TMP47]], align 4
15162 // CHECK19-NEXT: [[TMP48:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS8]], i32 0, i32 0
15163 // CHECK19-NEXT: [[TMP49:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS9]], i32 0, i32 0
15164 // CHECK19-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 0
15165 // CHECK19-NEXT: store i32 2, ptr [[TMP50]], align 4
15166 // CHECK19-NEXT: [[TMP51:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 1
15167 // CHECK19-NEXT: store i32 2, ptr [[TMP51]], align 4
15168 // CHECK19-NEXT: [[TMP52:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 2
15169 // CHECK19-NEXT: store ptr [[TMP48]], ptr [[TMP52]], align 4
15170 // CHECK19-NEXT: [[TMP53:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 3
15171 // CHECK19-NEXT: store ptr [[TMP49]], ptr [[TMP53]], align 4
15172 // CHECK19-NEXT: [[TMP54:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 4
15173 // CHECK19-NEXT: store ptr @.offload_sizes.13, ptr [[TMP54]], align 4
15174 // CHECK19-NEXT: [[TMP55:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 5
15175 // CHECK19-NEXT: store ptr @.offload_maptypes.14, ptr [[TMP55]], align 4
15176 // CHECK19-NEXT: [[TMP56:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 6
15177 // CHECK19-NEXT: store ptr null, ptr [[TMP56]], align 4
15178 // CHECK19-NEXT: [[TMP57:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 7
15179 // CHECK19-NEXT: store ptr null, ptr [[TMP57]], align 4
15180 // CHECK19-NEXT: [[TMP58:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 8
15181 // CHECK19-NEXT: store i64 10, ptr [[TMP58]], align 8
15182 // CHECK19-NEXT: [[TMP59:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 9
15183 // CHECK19-NEXT: store i64 0, ptr [[TMP59]], align 8
15184 // CHECK19-NEXT: [[TMP60:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 10
15185 // CHECK19-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP60]], align 4
15186 // CHECK19-NEXT: [[TMP61:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 11
15187 // CHECK19-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP61]], align 4
15188 // CHECK19-NEXT: [[TMP62:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 12
15189 // CHECK19-NEXT: store i32 0, ptr [[TMP62]], align 4
15190 // CHECK19-NEXT: [[TMP63:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.region_id, ptr [[KERNEL_ARGS12]])
15191 // CHECK19-NEXT: [[TMP64:%.*]] = icmp ne i32 [[TMP63]], 0
15192 // CHECK19-NEXT: br i1 [[TMP64]], label [[OMP_OFFLOAD_FAILED13:%.*]], label [[OMP_OFFLOAD_CONT14:%.*]]
15193 // CHECK19: omp_offload.failed13:
15194 // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132(i32 [[TMP41]], ptr [[A]]) #[[ATTR4]]
15195 // CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT14]]
15196 // CHECK19: omp_offload.cont14:
15197 // CHECK19-NEXT: [[TMP65:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS15]], i32 0, i32 0
15198 // CHECK19-NEXT: store ptr [[A]], ptr [[TMP65]], align 4
15199 // CHECK19-NEXT: [[TMP66:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS16]], i32 0, i32 0
15200 // CHECK19-NEXT: store ptr [[A]], ptr [[TMP66]], align 4
15201 // CHECK19-NEXT: [[TMP67:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS17]], i32 0, i32 0
15202 // CHECK19-NEXT: store ptr null, ptr [[TMP67]], align 4
15203 // CHECK19-NEXT: [[TMP68:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS15]], i32 0, i32 0
15204 // CHECK19-NEXT: [[TMP69:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS16]], i32 0, i32 0
15205 // CHECK19-NEXT: [[TMP70:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 0
15206 // CHECK19-NEXT: store i32 2, ptr [[TMP70]], align 4
15207 // CHECK19-NEXT: [[TMP71:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 1
15208 // CHECK19-NEXT: store i32 1, ptr [[TMP71]], align 4
15209 // CHECK19-NEXT: [[TMP72:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 2
15210 // CHECK19-NEXT: store ptr [[TMP68]], ptr [[TMP72]], align 4
15211 // CHECK19-NEXT: [[TMP73:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 3
15212 // CHECK19-NEXT: store ptr [[TMP69]], ptr [[TMP73]], align 4
15213 // CHECK19-NEXT: [[TMP74:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 4
15214 // CHECK19-NEXT: store ptr @.offload_sizes.15, ptr [[TMP74]], align 4
15215 // CHECK19-NEXT: [[TMP75:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 5
15216 // CHECK19-NEXT: store ptr @.offload_maptypes.16, ptr [[TMP75]], align 4
15217 // CHECK19-NEXT: [[TMP76:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 6
15218 // CHECK19-NEXT: store ptr null, ptr [[TMP76]], align 4
15219 // CHECK19-NEXT: [[TMP77:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 7
15220 // CHECK19-NEXT: store ptr null, ptr [[TMP77]], align 4
15221 // CHECK19-NEXT: [[TMP78:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 8
15222 // CHECK19-NEXT: store i64 10, ptr [[TMP78]], align 8
15223 // CHECK19-NEXT: [[TMP79:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 9
15224 // CHECK19-NEXT: store i64 0, ptr [[TMP79]], align 8
15225 // CHECK19-NEXT: [[TMP80:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 10
15226 // CHECK19-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP80]], align 4
15227 // CHECK19-NEXT: [[TMP81:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 11
15228 // CHECK19-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP81]], align 4
15229 // CHECK19-NEXT: [[TMP82:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 12
15230 // CHECK19-NEXT: store i32 0, ptr [[TMP82]], align 4
15231 // CHECK19-NEXT: [[TMP83:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.region_id, ptr [[KERNEL_ARGS19]])
15232 // CHECK19-NEXT: [[TMP84:%.*]] = icmp ne i32 [[TMP83]], 0
15233 // CHECK19-NEXT: br i1 [[TMP84]], label [[OMP_OFFLOAD_FAILED20:%.*]], label [[OMP_OFFLOAD_CONT21:%.*]]
15234 // CHECK19: omp_offload.failed20:
15235 // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137(ptr [[A]]) #[[ATTR4]]
15236 // CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT21]]
15237 // CHECK19: omp_offload.cont21:
15238 // CHECK19-NEXT: [[TMP85:%.*]] = load i32, ptr [[M]], align 4
15239 // CHECK19-NEXT: store i32 [[TMP85]], ptr [[M_CASTED22]], align 4
15240 // CHECK19-NEXT: [[TMP86:%.*]] = load i32, ptr [[M_CASTED22]], align 4
15241 // CHECK19-NEXT: [[TMP87:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 0
15242 // CHECK19-NEXT: store i32 [[TMP86]], ptr [[TMP87]], align 4
15243 // CHECK19-NEXT: [[TMP88:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS24]], i32 0, i32 0
15244 // CHECK19-NEXT: store i32 [[TMP86]], ptr [[TMP88]], align 4
15245 // CHECK19-NEXT: [[TMP89:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 0
15246 // CHECK19-NEXT: store ptr null, ptr [[TMP89]], align 4
15247 // CHECK19-NEXT: [[TMP90:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 1
15248 // CHECK19-NEXT: store ptr [[A]], ptr [[TMP90]], align 4
15249 // CHECK19-NEXT: [[TMP91:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS24]], i32 0, i32 1
15250 // CHECK19-NEXT: store ptr [[A]], ptr [[TMP91]], align 4
15251 // CHECK19-NEXT: [[TMP92:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 1
15252 // CHECK19-NEXT: store ptr null, ptr [[TMP92]], align 4
15253 // CHECK19-NEXT: [[TMP93:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 0
15254 // CHECK19-NEXT: [[TMP94:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS24]], i32 0, i32 0
15255 // CHECK19-NEXT: [[TMP95:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 0
15256 // CHECK19-NEXT: store i32 2, ptr [[TMP95]], align 4
15257 // CHECK19-NEXT: [[TMP96:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 1
15258 // CHECK19-NEXT: store i32 2, ptr [[TMP96]], align 4
15259 // CHECK19-NEXT: [[TMP97:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 2
15260 // CHECK19-NEXT: store ptr [[TMP93]], ptr [[TMP97]], align 4
15261 // CHECK19-NEXT: [[TMP98:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 3
15262 // CHECK19-NEXT: store ptr [[TMP94]], ptr [[TMP98]], align 4
15263 // CHECK19-NEXT: [[TMP99:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 4
15264 // CHECK19-NEXT: store ptr @.offload_sizes.17, ptr [[TMP99]], align 4
15265 // CHECK19-NEXT: [[TMP100:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 5
15266 // CHECK19-NEXT: store ptr @.offload_maptypes.18, ptr [[TMP100]], align 4
15267 // CHECK19-NEXT: [[TMP101:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 6
15268 // CHECK19-NEXT: store ptr null, ptr [[TMP101]], align 4
15269 // CHECK19-NEXT: [[TMP102:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 7
15270 // CHECK19-NEXT: store ptr null, ptr [[TMP102]], align 4
15271 // CHECK19-NEXT: [[TMP103:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 8
15272 // CHECK19-NEXT: store i64 10, ptr [[TMP103]], align 8
15273 // CHECK19-NEXT: [[TMP104:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 9
15274 // CHECK19-NEXT: store i64 0, ptr [[TMP104]], align 8
15275 // CHECK19-NEXT: [[TMP105:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 10
15276 // CHECK19-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP105]], align 4
15277 // CHECK19-NEXT: [[TMP106:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 11
15278 // CHECK19-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP106]], align 4
15279 // CHECK19-NEXT: [[TMP107:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS27]], i32 0, i32 12
15280 // CHECK19-NEXT: store i32 0, ptr [[TMP107]], align 4
15281 // CHECK19-NEXT: [[TMP108:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.region_id, ptr [[KERNEL_ARGS27]])
15282 // CHECK19-NEXT: [[TMP109:%.*]] = icmp ne i32 [[TMP108]], 0
15283 // CHECK19-NEXT: br i1 [[TMP109]], label [[OMP_OFFLOAD_FAILED28:%.*]], label [[OMP_OFFLOAD_CONT29:%.*]]
15284 // CHECK19: omp_offload.failed28:
15285 // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142(i32 [[TMP86]], ptr [[A]]) #[[ATTR4]]
15286 // CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT29]]
15287 // CHECK19: omp_offload.cont29:
15288 // CHECK19-NEXT: ret i32 0
15291 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122
15292 // CHECK19-SAME: (ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
15293 // CHECK19-NEXT: entry:
15294 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
15295 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
15296 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
15297 // CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.omp_outlined, ptr [[TMP0]])
15298 // CHECK19-NEXT: ret void
15301 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.omp_outlined
15302 // CHECK19-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR3]] {
15303 // CHECK19-NEXT: entry:
15304 // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
15305 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
15306 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
15307 // CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
15308 // CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
15309 // CHECK19-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
15310 // CHECK19-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
15311 // CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
15312 // CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
15313 // CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
15314 // CHECK19-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
15315 // CHECK19-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
15316 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
15317 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
15318 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
15319 // CHECK19-NEXT: store i32 9, ptr [[DOTOMP_COMB_UB]], align 4
15320 // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
15321 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
15322 // CHECK19-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
15323 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
15324 // CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
15325 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
15326 // CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
15327 // CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
15328 // CHECK19: cond.true:
15329 // CHECK19-NEXT: br label [[COND_END:%.*]]
15330 // CHECK19: cond.false:
15331 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
15332 // CHECK19-NEXT: br label [[COND_END]]
15333 // CHECK19: cond.end:
15334 // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
15335 // CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
15336 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
15337 // CHECK19-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
15338 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
15339 // CHECK19: omp.inner.for.cond:
15340 // CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP47:![0-9]+]]
15341 // CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP47]]
15342 // CHECK19-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
15343 // CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
15344 // CHECK19: omp.inner.for.body:
15345 // CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP47]]
15346 // CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP47]]
15347 // CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.omp_outlined.omp_outlined, i32 [[TMP8]], i32 [[TMP9]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP47]]
15348 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
15349 // CHECK19: omp.inner.for.inc:
15350 // CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP47]]
15351 // CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP47]]
15352 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
15353 // CHECK19-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP47]]
15354 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP48:![0-9]+]]
15355 // CHECK19: omp.inner.for.end:
15356 // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
15357 // CHECK19: omp.loop.exit:
15358 // CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
15359 // CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
15360 // CHECK19-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
15361 // CHECK19-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
15362 // CHECK19: .omp.final.then:
15363 // CHECK19-NEXT: store i32 10, ptr [[I]], align 4
15364 // CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]]
15365 // CHECK19: .omp.final.done:
15366 // CHECK19-NEXT: ret void
15369 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.omp_outlined.omp_outlined
15370 // CHECK19-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR3]] {
15371 // CHECK19-NEXT: entry:
15372 // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
15373 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
15374 // CHECK19-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
15375 // CHECK19-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
15376 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
15377 // CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
15378 // CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
15379 // CHECK19-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
15380 // CHECK19-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
15381 // CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
15382 // CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
15383 // CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
15384 // CHECK19-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
15385 // CHECK19-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
15386 // CHECK19-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
15387 // CHECK19-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
15388 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
15389 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
15390 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
15391 // CHECK19-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
15392 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
15393 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
15394 // CHECK19-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_LB]], align 4
15395 // CHECK19-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4
15396 // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
15397 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
15398 // CHECK19-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
15399 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
15400 // CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
15401 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
15402 // CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 9
15403 // CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
15404 // CHECK19: cond.true:
15405 // CHECK19-NEXT: br label [[COND_END:%.*]]
15406 // CHECK19: cond.false:
15407 // CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
15408 // CHECK19-NEXT: br label [[COND_END]]
15409 // CHECK19: cond.end:
15410 // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
15411 // CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
15412 // CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
15413 // CHECK19-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4
15414 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
15415 // CHECK19: omp.inner.for.cond:
15416 // CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP50:![0-9]+]]
15417 // CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP50]]
15418 // CHECK19-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
15419 // CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
15420 // CHECK19: omp.inner.for.body:
15421 // CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP50]]
15422 // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
15423 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
15424 // CHECK19-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP50]]
15425 // CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP50]]
15426 // CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 [[TMP11]]
15427 // CHECK19-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP50]]
15428 // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
15429 // CHECK19: omp.body.continue:
15430 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
15431 // CHECK19: omp.inner.for.inc:
15432 // CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP50]]
15433 // CHECK19-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP12]], 1
15434 // CHECK19-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP50]]
15435 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP51:![0-9]+]]
15436 // CHECK19: omp.inner.for.end:
15437 // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
15438 // CHECK19: omp.loop.exit:
15439 // CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP4]])
15440 // CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
15441 // CHECK19-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
15442 // CHECK19-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
15443 // CHECK19: .omp.final.then:
15444 // CHECK19-NEXT: store i32 10, ptr [[I]], align 4
15445 // CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]]
15446 // CHECK19: .omp.final.done:
15447 // CHECK19-NEXT: ret void
15450 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127
15451 // CHECK19-SAME: (ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
15452 // CHECK19-NEXT: entry:
15453 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
15454 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
15455 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
15456 // CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.omp_outlined, ptr [[TMP0]])
15457 // CHECK19-NEXT: ret void
15460 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.omp_outlined
15461 // CHECK19-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR3]] {
15462 // CHECK19-NEXT: entry:
15463 // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
15464 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
15465 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
15466 // CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
15467 // CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
15468 // CHECK19-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
15469 // CHECK19-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
15470 // CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
15471 // CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
15472 // CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
15473 // CHECK19-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
15474 // CHECK19-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
15475 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
15476 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
15477 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
15478 // CHECK19-NEXT: store i32 9, ptr [[DOTOMP_COMB_UB]], align 4
15479 // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
15480 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
15481 // CHECK19-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
15482 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
15483 // CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
15484 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
15485 // CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
15486 // CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
15487 // CHECK19: cond.true:
15488 // CHECK19-NEXT: br label [[COND_END:%.*]]
15489 // CHECK19: cond.false:
15490 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
15491 // CHECK19-NEXT: br label [[COND_END]]
15492 // CHECK19: cond.end:
15493 // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
15494 // CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
15495 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
15496 // CHECK19-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
15497 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
15498 // CHECK19: omp.inner.for.cond:
15499 // CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP53:![0-9]+]]
15500 // CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP53]]
15501 // CHECK19-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
15502 // CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
15503 // CHECK19: omp.inner.for.body:
15504 // CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP53]]
15505 // CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP53]]
15506 // CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.omp_outlined.omp_outlined, i32 [[TMP8]], i32 [[TMP9]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP53]]
15507 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
15508 // CHECK19: omp.inner.for.inc:
15509 // CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP53]]
15510 // CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP53]]
15511 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
15512 // CHECK19-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP53]]
15513 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP54:![0-9]+]]
15514 // CHECK19: omp.inner.for.end:
15515 // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
15516 // CHECK19: omp.loop.exit:
15517 // CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
15518 // CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
15519 // CHECK19-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
15520 // CHECK19-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
15521 // CHECK19: .omp.final.then:
15522 // CHECK19-NEXT: store i32 10, ptr [[I]], align 4
15523 // CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]]
15524 // CHECK19: .omp.final.done:
15525 // CHECK19-NEXT: ret void
15528 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.omp_outlined.omp_outlined
15529 // CHECK19-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR3]] {
15530 // CHECK19-NEXT: entry:
15531 // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
15532 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
15533 // CHECK19-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
15534 // CHECK19-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
15535 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
15536 // CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
15537 // CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
15538 // CHECK19-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
15539 // CHECK19-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
15540 // CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
15541 // CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
15542 // CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
15543 // CHECK19-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
15544 // CHECK19-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
15545 // CHECK19-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
15546 // CHECK19-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
15547 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
15548 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
15549 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
15550 // CHECK19-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
15551 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
15552 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
15553 // CHECK19-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_LB]], align 4
15554 // CHECK19-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4
15555 // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
15556 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
15557 // CHECK19-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
15558 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
15559 // CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
15560 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
15561 // CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 9
15562 // CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
15563 // CHECK19: cond.true:
15564 // CHECK19-NEXT: br label [[COND_END:%.*]]
15565 // CHECK19: cond.false:
15566 // CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
15567 // CHECK19-NEXT: br label [[COND_END]]
15568 // CHECK19: cond.end:
15569 // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ]
15570 // CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
15571 // CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
15572 // CHECK19-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4
15573 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
15574 // CHECK19: omp.inner.for.cond:
15575 // CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP56:![0-9]+]]
15576 // CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP56]]
15577 // CHECK19-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
15578 // CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
15579 // CHECK19: omp.inner.for.body:
15580 // CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP56]]
15581 // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
15582 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
15583 // CHECK19-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP56]]
15584 // CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP56]]
15585 // CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 [[TMP11]]
15586 // CHECK19-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP56]]
15587 // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
15588 // CHECK19: omp.body.continue:
15589 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
15590 // CHECK19: omp.inner.for.inc:
15591 // CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP56]]
15592 // CHECK19-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP12]], 1
15593 // CHECK19-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP56]]
15594 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP57:![0-9]+]]
15595 // CHECK19: omp.inner.for.end:
15596 // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
15597 // CHECK19: omp.loop.exit:
15598 // CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP4]])
15599 // CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
15600 // CHECK19-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
15601 // CHECK19-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
15602 // CHECK19: .omp.final.then:
15603 // CHECK19-NEXT: store i32 10, ptr [[I]], align 4
15604 // CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]]
15605 // CHECK19: .omp.final.done:
15606 // CHECK19-NEXT: ret void
15609 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132
15610 // CHECK19-SAME: (i32 noundef [[M:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
15611 // CHECK19-NEXT: entry:
15612 // CHECK19-NEXT: [[M_ADDR:%.*]] = alloca i32, align 4
15613 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
15614 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
15615 // CHECK19-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
15616 // CHECK19-NEXT: store i32 [[M]], ptr [[M_ADDR]], align 4
15617 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
15618 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
15619 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[M_ADDR]], align 4
15620 // CHECK19-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
15621 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
15622 // CHECK19-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
15623 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
15624 // CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.omp_outlined, ptr [[TMP0]], i32 [[TMP3]])
15625 // CHECK19-NEXT: ret void
15628 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.omp_outlined
15629 // CHECK19-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
15630 // CHECK19-NEXT: entry:
15631 // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
15632 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
15633 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
15634 // CHECK19-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
15635 // CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
15636 // CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
15637 // CHECK19-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
15638 // CHECK19-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
15639 // CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
15640 // CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
15641 // CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
15642 // CHECK19-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
15643 // CHECK19-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
15644 // CHECK19-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
15645 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
15646 // CHECK19-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
15647 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
15648 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
15649 // CHECK19-NEXT: store i32 9, ptr [[DOTOMP_COMB_UB]], align 4
15650 // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
15651 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
15652 // CHECK19-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
15653 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
15654 // CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
15655 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
15656 // CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
15657 // CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
15658 // CHECK19: cond.true:
15659 // CHECK19-NEXT: br label [[COND_END:%.*]]
15660 // CHECK19: cond.false:
15661 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
15662 // CHECK19-NEXT: br label [[COND_END]]
15663 // CHECK19: cond.end:
15664 // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
15665 // CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
15666 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
15667 // CHECK19-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
15668 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
15669 // CHECK19: omp.inner.for.cond:
15670 // CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP59:![0-9]+]]
15671 // CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP59]]
15672 // CHECK19-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
15673 // CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
15674 // CHECK19: omp.inner.for.body:
15675 // CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP59]]
15676 // CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP59]]
15677 // CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group [[ACC_GRP59]]
15678 // CHECK19-NEXT: store i32 [[TMP10]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group [[ACC_GRP59]]
15679 // CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group [[ACC_GRP59]]
15680 // CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.omp_outlined.omp_outlined, i32 [[TMP8]], i32 [[TMP9]], ptr [[TMP0]], i32 [[TMP11]]), !llvm.access.group [[ACC_GRP59]]
15681 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
15682 // CHECK19: omp.inner.for.inc:
15683 // CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP59]]
15684 // CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP59]]
15685 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
15686 // CHECK19-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP59]]
15687 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP60:![0-9]+]]
15688 // CHECK19: omp.inner.for.end:
15689 // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
15690 // CHECK19: omp.loop.exit:
15691 // CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
15692 // CHECK19-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
15693 // CHECK19-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
15694 // CHECK19-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
15695 // CHECK19: .omp.final.then:
15696 // CHECK19-NEXT: store i32 10, ptr [[I]], align 4
15697 // CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]]
15698 // CHECK19: .omp.final.done:
15699 // CHECK19-NEXT: ret void
15702 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.omp_outlined.omp_outlined
15703 // CHECK19-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
15704 // CHECK19-NEXT: entry:
15705 // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
15706 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
15707 // CHECK19-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
15708 // CHECK19-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
15709 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
15710 // CHECK19-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
15711 // CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
15712 // CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
15713 // CHECK19-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
15714 // CHECK19-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
15715 // CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
15716 // CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
15717 // CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
15718 // CHECK19-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
15719 // CHECK19-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
15720 // CHECK19-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
15721 // CHECK19-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
15722 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
15723 // CHECK19-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
15724 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
15725 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
15726 // CHECK19-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
15727 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
15728 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
15729 // CHECK19-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_LB]], align 4
15730 // CHECK19-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4
15731 // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
15732 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
15733 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
15734 // CHECK19-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
15735 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4
15736 // CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP5]], i32 33, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[TMP3]])
15737 // CHECK19-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
15738 // CHECK19: omp.dispatch.cond:
15739 // CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
15740 // CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
15741 // CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], [[TMP7]]
15742 // CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
15743 // CHECK19: cond.true:
15744 // CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
15745 // CHECK19-NEXT: br label [[COND_END:%.*]]
15746 // CHECK19: cond.false:
15747 // CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
15748 // CHECK19-NEXT: br label [[COND_END]]
15749 // CHECK19: cond.end:
15750 // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ [[TMP8]], [[COND_TRUE]] ], [ [[TMP9]], [[COND_FALSE]] ]
15751 // CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
15752 // CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
15753 // CHECK19-NEXT: store i32 [[TMP10]], ptr [[DOTOMP_IV]], align 4
15754 // CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
15755 // CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
15756 // CHECK19-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]]
15757 // CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
15758 // CHECK19: omp.dispatch.body:
15759 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
15760 // CHECK19: omp.inner.for.cond:
15761 // CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP62:![0-9]+]]
15762 // CHECK19-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP62]]
15763 // CHECK19-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
15764 // CHECK19-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
15765 // CHECK19: omp.inner.for.body:
15766 // CHECK19-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP62]]
15767 // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1
15768 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
15769 // CHECK19-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP62]]
15770 // CHECK19-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP62]]
15771 // CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 [[TMP16]]
15772 // CHECK19-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP62]]
15773 // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
15774 // CHECK19: omp.body.continue:
15775 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
15776 // CHECK19: omp.inner.for.inc:
15777 // CHECK19-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP62]]
15778 // CHECK19-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP17]], 1
15779 // CHECK19-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP62]]
15780 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP63:![0-9]+]]
15781 // CHECK19: omp.inner.for.end:
15782 // CHECK19-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
15783 // CHECK19: omp.dispatch.inc:
15784 // CHECK19-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
15785 // CHECK19-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
15786 // CHECK19-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
15787 // CHECK19-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_LB]], align 4
15788 // CHECK19-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
15789 // CHECK19-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
15790 // CHECK19-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
15791 // CHECK19-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_UB]], align 4
15792 // CHECK19-NEXT: br label [[OMP_DISPATCH_COND]]
15793 // CHECK19: omp.dispatch.end:
15794 // CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP5]])
15795 // CHECK19-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
15796 // CHECK19-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
15797 // CHECK19-NEXT: br i1 [[TMP23]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
15798 // CHECK19: .omp.final.then:
15799 // CHECK19-NEXT: store i32 10, ptr [[I]], align 4
15800 // CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]]
15801 // CHECK19: .omp.final.done:
15802 // CHECK19-NEXT: ret void
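//
// The routine above is the innermost outlined body of a combined target
// construct whose chunk size comes from a captured expression:
// __kmpc_for_static_init_4 is invoked with schedule kind 33 (static, chunked)
// and the chunk value loaded from .capture_expr., and the omp.dispatch.*
// blocks keep re-running the simd body, bumping .omp.lb/.omp.ub by the stride,
// while the bounds stay inside the distribute chunk handed down through
// .previous.lb_/.previous.ub_. A minimal source-level sketch that produces this
// shape; the pragma spelling and the names sketch/m are illustrative
// assumptions, not taken from this excerpt:
//
//   template <typename T, int N> void sketch(T (&a)[N]) {
//     int m = 10;
//   #pragma omp target teams distribute parallel for simd schedule(static, m)
//     for (int i = 0; i < N; ++i)
//       a[i] = 0;
//   }
//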
15805 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137
15806 // CHECK19-SAME: (ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
15807 // CHECK19-NEXT: entry:
15808 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
15809 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
15810 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
15811 // CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.omp_outlined, ptr [[TMP0]])
15812 // CHECK19-NEXT: ret void
15815 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.omp_outlined
15816 // CHECK19-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR3]] {
15817 // CHECK19-NEXT: entry:
15818 // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
15819 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
15820 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
15821 // CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
15822 // CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
15823 // CHECK19-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
15824 // CHECK19-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
15825 // CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
15826 // CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
15827 // CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
15828 // CHECK19-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
15829 // CHECK19-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
15830 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
15831 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
15832 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
15833 // CHECK19-NEXT: store i32 9, ptr [[DOTOMP_COMB_UB]], align 4
15834 // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
15835 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
15836 // CHECK19-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
15837 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
15838 // CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
15839 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
15840 // CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
15841 // CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
15842 // CHECK19: cond.true:
15843 // CHECK19-NEXT: br label [[COND_END:%.*]]
15844 // CHECK19: cond.false:
15845 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
15846 // CHECK19-NEXT: br label [[COND_END]]
15847 // CHECK19: cond.end:
15848 // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
15849 // CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
15850 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
15851 // CHECK19-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
15852 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
15853 // CHECK19: omp.inner.for.cond:
15854 // CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP65:![0-9]+]]
15855 // CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP65]]
15856 // CHECK19-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
15857 // CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
15858 // CHECK19: omp.inner.for.body:
15859 // CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP65]]
15860 // CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP65]]
15861 // CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.omp_outlined.omp_outlined, i32 [[TMP8]], i32 [[TMP9]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP65]]
15862 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
15863 // CHECK19: omp.inner.for.inc:
15864 // CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP65]]
15865 // CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP65]]
15866 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
15867 // CHECK19-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP65]]
15868 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP66:![0-9]+]]
15869 // CHECK19: omp.inner.for.end:
15870 // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
15871 // CHECK19: omp.loop.exit:
15872 // CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
15873 // CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
15874 // CHECK19-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
15875 // CHECK19-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
15876 // CHECK19: .omp.final.then:
15877 // CHECK19-NEXT: store i32 10, ptr [[I]], align 4
15878 // CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]]
15879 // CHECK19: .omp.final.done:
15880 // CHECK19-NEXT: ret void
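//
// This omp_outlined routine is the middle of the three layers clang emits for
// the target region that the _l137 suffix refers to: the kernel wrapper calls
// __kmpc_fork_teams, this distribute loop carves the 10 iterations into
// per-team chunks with __kmpc_for_static_init_4 and schedule kind 92
// (distribute static), and __kmpc_fork_call then hands each chunk's
// .omp.comb.lb/.omp.comb.ub pair to the per-thread omp_outlined.omp_outlined
// body. Sketch of the mapping, assuming a directive of roughly this shape (the
// exact clause list is not visible in this excerpt):
//
//   #pragma omp target teams distribute parallel for simd   // one construct...
//   for (int i = 0; i < 10; ++i)
//     a[i] = 0;
//
//   // ...lowered to three nested functions:
//   //   kernel wrapper              -> __kmpc_fork_teams(..., omp_outlined, a)
//   //   omp_outlined (this one)     -> distribute loop + __kmpc_fork_call
//   //   omp_outlined.omp_outlined   -> worksharing/simd loop over the chunk
//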
15883 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.omp_outlined.omp_outlined
15884 // CHECK19-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR3]] {
15885 // CHECK19-NEXT: entry:
15886 // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
15887 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
15888 // CHECK19-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
15889 // CHECK19-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
15890 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
15891 // CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
15892 // CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
15893 // CHECK19-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
15894 // CHECK19-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
15895 // CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
15896 // CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
15897 // CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
15898 // CHECK19-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
15899 // CHECK19-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
15900 // CHECK19-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
15901 // CHECK19-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
15902 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
15903 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
15904 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
15905 // CHECK19-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
15906 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
15907 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
15908 // CHECK19-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_LB]], align 4
15909 // CHECK19-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4
15910 // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
15911 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
15912 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
15913 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
15914 // CHECK19-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
15915 // CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
15916 // CHECK19-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB3]], i32 [[TMP6]], i32 1073741859, i32 [[TMP3]], i32 [[TMP4]], i32 1, i32 1)
15917 // CHECK19-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
15918 // CHECK19: omp.dispatch.cond:
15919 // CHECK19-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB3]], i32 [[TMP6]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
15920 // CHECK19-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0
15921 // CHECK19-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
15922 // CHECK19: omp.dispatch.body:
15923 // CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
15924 // CHECK19-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4
15925 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
15926 // CHECK19: omp.inner.for.cond:
15927 // CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP68:![0-9]+]]
15928 // CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP68]]
15929 // CHECK19-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
15930 // CHECK19-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
15931 // CHECK19: omp.inner.for.body:
15932 // CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP68]]
15933 // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
15934 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
15935 // CHECK19-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP68]]
15936 // CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP68]]
15937 // CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 [[TMP12]]
15938 // CHECK19-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP68]]
15939 // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
15940 // CHECK19: omp.body.continue:
15941 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
15942 // CHECK19: omp.inner.for.inc:
15943 // CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP68]]
15944 // CHECK19-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP13]], 1
15945 // CHECK19-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP68]]
15946 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP69:![0-9]+]]
15947 // CHECK19: omp.inner.for.end:
15948 // CHECK19-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
15949 // CHECK19: omp.dispatch.inc:
15950 // CHECK19-NEXT: br label [[OMP_DISPATCH_COND]]
15951 // CHECK19: omp.dispatch.end:
15952 // CHECK19-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
15953 // CHECK19-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
15954 // CHECK19-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
15955 // CHECK19: .omp.final.then:
15956 // CHECK19-NEXT: store i32 10, ptr [[I]], align 4
15957 // CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]]
15958 // CHECK19: .omp.final.done:
15959 // CHECK19-NEXT: ret void
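//
// The per-thread body for the _l137 region uses the dynamic dispatch protocol
// instead of a static init/fini pair: __kmpc_dispatch_init_4 is called with
// schedule value 1073741859 (0x40000023, i.e. dynamic chunked, kind 35, with
// the non-monotonic modifier bit 0x40000000) and a chunk of 1, and the
// omp.dispatch.cond block keeps requesting fresh .omp.lb/.omp.ub pairs from
// __kmpc_dispatch_next_4 until it returns zero, so no __kmpc_for_static_fini
// appears on this path. That is consistent with a schedule(dynamic) clause on
// the combined directive, e.g. (illustrative sketch, names assumed):
//
//   #pragma omp target teams distribute parallel for simd schedule(dynamic)
//   for (int i = 0; i < 10; ++i)
//     a[i] = 0;
//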
15962 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142
15963 // CHECK19-SAME: (i32 noundef [[M:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
15964 // CHECK19-NEXT: entry:
15965 // CHECK19-NEXT: [[M_ADDR:%.*]] = alloca i32, align 4
15966 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
15967 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
15968 // CHECK19-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
15969 // CHECK19-NEXT: store i32 [[M]], ptr [[M_ADDR]], align 4
15970 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
15971 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
15972 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[M_ADDR]], align 4
15973 // CHECK19-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
15974 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
15975 // CHECK19-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
15976 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
15977 // CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.omp_outlined, ptr [[TMP0]], i32 [[TMP3]])
15978 // CHECK19-NEXT: ret void
15981 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.omp_outlined
15982 // CHECK19-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
15983 // CHECK19-NEXT: entry:
15984 // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
15985 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
15986 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
15987 // CHECK19-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
15988 // CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
15989 // CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
15990 // CHECK19-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
15991 // CHECK19-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
15992 // CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
15993 // CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
15994 // CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
15995 // CHECK19-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
15996 // CHECK19-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
15997 // CHECK19-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
15998 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
15999 // CHECK19-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
16000 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
16001 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
16002 // CHECK19-NEXT: store i32 9, ptr [[DOTOMP_COMB_UB]], align 4
16003 // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
16004 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
16005 // CHECK19-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
16006 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
16007 // CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
16008 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
16009 // CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
16010 // CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
16011 // CHECK19: cond.true:
16012 // CHECK19-NEXT: br label [[COND_END:%.*]]
16013 // CHECK19: cond.false:
16014 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
16015 // CHECK19-NEXT: br label [[COND_END]]
16016 // CHECK19: cond.end:
16017 // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
16018 // CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
16019 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
16020 // CHECK19-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
16021 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
16022 // CHECK19: omp.inner.for.cond:
16023 // CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP71:![0-9]+]]
16024 // CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP71]]
16025 // CHECK19-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
16026 // CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
16027 // CHECK19: omp.inner.for.body:
16028 // CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP71]]
16029 // CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP71]]
16030 // CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group [[ACC_GRP71]]
16031 // CHECK19-NEXT: store i32 [[TMP10]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group [[ACC_GRP71]]
16032 // CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group [[ACC_GRP71]]
16033 // CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.omp_outlined.omp_outlined, i32 [[TMP8]], i32 [[TMP9]], ptr [[TMP0]], i32 [[TMP11]]), !llvm.access.group [[ACC_GRP71]]
16034 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
16035 // CHECK19: omp.inner.for.inc:
16036 // CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP71]]
16037 // CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP71]]
16038 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
16039 // CHECK19-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP71]]
16040 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP72:![0-9]+]]
16041 // CHECK19: omp.inner.for.end:
16042 // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
16043 // CHECK19: omp.loop.exit:
16044 // CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
16045 // CHECK19-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
16046 // CHECK19-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
16047 // CHECK19-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
16048 // CHECK19: .omp.final.then:
16049 // CHECK19-NEXT: store i32 10, ptr [[I]], align 4
16050 // CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]]
16051 // CHECK19: .omp.final.done:
16052 // CHECK19-NEXT: ret void
16055 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.omp_outlined.omp_outlined
16056 // CHECK19-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
16057 // CHECK19-NEXT: entry:
16058 // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
16059 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
16060 // CHECK19-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
16061 // CHECK19-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
16062 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
16063 // CHECK19-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
16064 // CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
16065 // CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
16066 // CHECK19-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
16067 // CHECK19-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
16068 // CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
16069 // CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
16070 // CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
16071 // CHECK19-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
16072 // CHECK19-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
16073 // CHECK19-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
16074 // CHECK19-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
16075 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
16076 // CHECK19-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
16077 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
16078 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
16079 // CHECK19-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
16080 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
16081 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
16082 // CHECK19-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_LB]], align 4
16083 // CHECK19-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4
16084 // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
16085 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
16086 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
16087 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
16088 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
16089 // CHECK19-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
16090 // CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 4
16091 // CHECK19-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB3]], i32 [[TMP7]], i32 1073741859, i32 [[TMP4]], i32 [[TMP5]], i32 1, i32 [[TMP3]])
16092 // CHECK19-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
16093 // CHECK19: omp.dispatch.cond:
16094 // CHECK19-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_dispatch_next_4(ptr @[[GLOB3]], i32 [[TMP7]], ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]])
16095 // CHECK19-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP8]], 0
16096 // CHECK19-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
16097 // CHECK19: omp.dispatch.body:
16098 // CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
16099 // CHECK19-NEXT: store i32 [[TMP9]], ptr [[DOTOMP_IV]], align 4
16100 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
16101 // CHECK19: omp.inner.for.cond:
16102 // CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP74:![0-9]+]]
16103 // CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP74]]
16104 // CHECK19-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]]
16105 // CHECK19-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
16106 // CHECK19: omp.inner.for.body:
16107 // CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP74]]
16108 // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
16109 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
16110 // CHECK19-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP74]]
16111 // CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP74]]
16112 // CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 [[TMP13]]
16113 // CHECK19-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP74]]
16114 // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
16115 // CHECK19: omp.body.continue:
16116 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
16117 // CHECK19: omp.inner.for.inc:
16118 // CHECK19-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP74]]
16119 // CHECK19-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP14]], 1
16120 // CHECK19-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP74]]
16121 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP75:![0-9]+]]
16122 // CHECK19: omp.inner.for.end:
16123 // CHECK19-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
16124 // CHECK19: omp.dispatch.inc:
16125 // CHECK19-NEXT: br label [[OMP_DISPATCH_COND]]
16126 // CHECK19: omp.dispatch.end:
16127 // CHECK19-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
16128 // CHECK19-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0
16129 // CHECK19-NEXT: br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
16130 // CHECK19: .omp.final.then:
16131 // CHECK19-NEXT: store i32 10, ptr [[I]], align 4
16132 // CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]]
16133 // CHECK19: .omp.final.done:
16134 // CHECK19-NEXT: ret void
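//
// The _l142 per-thread body follows the same __kmpc_dispatch_init_4 /
// __kmpc_dispatch_next_4 pattern as the _l137 one above, except that the chunk
// argument is no longer the literal 1: it is the value loaded from
// .capture_expr., i.e. the chunk expression evaluated once at the target
// boundary and forwarded by value through every layer. Roughly (sketch; the
// clause spelling and the name m are assumptions):
//
//   #pragma omp ... schedule(dynamic, m)
//   //   kernel wrapper:  captures m, __kmpc_fork_teams(..., a, m)
//   //   omp_outlined:    __kmpc_fork_call(..., lb, ub, a, m)
//   //   inner body:      __kmpc_dispatch_init_4(..., /*chunk=*/m)
//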
16137 // CHECK19-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
16138 // CHECK19-SAME: () #[[ATTR7:[0-9]+]] {
16139 // CHECK19-NEXT: entry:
16140 // CHECK19-NEXT: call void @__tgt_register_requires(i64 1)
16141 // CHECK19-NEXT: ret void
16144 // CHECK21-LABEL: define {{[^@]+}}@main
16145 // CHECK21-SAME: (i32 noundef signext [[ARGC:%.*]], ptr noundef [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] {
16146 // CHECK21-NEXT: entry:
16147 // CHECK21-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
16148 // CHECK21-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
16149 // CHECK21-NEXT: [[ARGV_ADDR:%.*]] = alloca ptr, align 8
16150 // CHECK21-NEXT: [[N:%.*]] = alloca i32, align 4
16151 // CHECK21-NEXT: [[SAVED_STACK:%.*]] = alloca ptr, align 8
16152 // CHECK21-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
16153 // CHECK21-NEXT: [[M:%.*]] = alloca i32, align 4
16154 // CHECK21-NEXT: [[TMP:%.*]] = alloca i32, align 4
16155 // CHECK21-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
16156 // CHECK21-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
16157 // CHECK21-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
16158 // CHECK21-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
16159 // CHECK21-NEXT: [[I:%.*]] = alloca i32, align 4
16160 // CHECK21-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
16161 // CHECK21-NEXT: [[I3:%.*]] = alloca i32, align 4
16162 // CHECK21-NEXT: [[_TMP10:%.*]] = alloca i32, align 4
16163 // CHECK21-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4
16164 // CHECK21-NEXT: [[DOTCAPTURE_EXPR_12:%.*]] = alloca i32, align 4
16165 // CHECK21-NEXT: [[DOTOMP_LB16:%.*]] = alloca i32, align 4
16166 // CHECK21-NEXT: [[DOTOMP_UB17:%.*]] = alloca i32, align 4
16167 // CHECK21-NEXT: [[I18:%.*]] = alloca i32, align 4
16168 // CHECK21-NEXT: [[DOTOMP_IV21:%.*]] = alloca i32, align 4
16169 // CHECK21-NEXT: [[I22:%.*]] = alloca i32, align 4
16170 // CHECK21-NEXT: [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4
16171 // CHECK21-NEXT: [[_TMP40:%.*]] = alloca i32, align 4
16172 // CHECK21-NEXT: [[DOTCAPTURE_EXPR_41:%.*]] = alloca i32, align 4
16173 // CHECK21-NEXT: [[DOTCAPTURE_EXPR_42:%.*]] = alloca i32, align 4
16174 // CHECK21-NEXT: [[DOTOMP_LB46:%.*]] = alloca i32, align 4
16175 // CHECK21-NEXT: [[DOTOMP_UB47:%.*]] = alloca i32, align 4
16176 // CHECK21-NEXT: [[I48:%.*]] = alloca i32, align 4
16177 // CHECK21-NEXT: [[DOTOMP_IV51:%.*]] = alloca i32, align 4
16178 // CHECK21-NEXT: [[I52:%.*]] = alloca i32, align 4
16179 // CHECK21-NEXT: [[_TMP69:%.*]] = alloca i32, align 4
16180 // CHECK21-NEXT: [[DOTCAPTURE_EXPR_70:%.*]] = alloca i32, align 4
16181 // CHECK21-NEXT: [[DOTCAPTURE_EXPR_71:%.*]] = alloca i32, align 4
16182 // CHECK21-NEXT: [[DOTOMP_LB75:%.*]] = alloca i32, align 4
16183 // CHECK21-NEXT: [[DOTOMP_UB76:%.*]] = alloca i32, align 4
16184 // CHECK21-NEXT: [[I77:%.*]] = alloca i32, align 4
16185 // CHECK21-NEXT: [[DOTOMP_IV80:%.*]] = alloca i32, align 4
16186 // CHECK21-NEXT: [[I81:%.*]] = alloca i32, align 4
16187 // CHECK21-NEXT: [[DOTCAPTURE_EXPR_98:%.*]] = alloca i32, align 4
16188 // CHECK21-NEXT: [[_TMP99:%.*]] = alloca i32, align 4
16189 // CHECK21-NEXT: [[DOTCAPTURE_EXPR_100:%.*]] = alloca i32, align 4
16190 // CHECK21-NEXT: [[DOTCAPTURE_EXPR_101:%.*]] = alloca i32, align 4
16191 // CHECK21-NEXT: [[DOTOMP_LB105:%.*]] = alloca i32, align 4
16192 // CHECK21-NEXT: [[DOTOMP_UB106:%.*]] = alloca i32, align 4
16193 // CHECK21-NEXT: [[I107:%.*]] = alloca i32, align 4
16194 // CHECK21-NEXT: [[DOTOMP_IV110:%.*]] = alloca i32, align 4
16195 // CHECK21-NEXT: [[I111:%.*]] = alloca i32, align 4
16196 // CHECK21-NEXT: store i32 0, ptr [[RETVAL]], align 4
16197 // CHECK21-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4
16198 // CHECK21-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8
16199 // CHECK21-NEXT: store i32 100, ptr [[N]], align 4
16200 // CHECK21-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4
16201 // CHECK21-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
16202 // CHECK21-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0()
16203 // CHECK21-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8
16204 // CHECK21-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4
16205 // CHECK21-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8
16206 // CHECK21-NEXT: store i32 10, ptr [[M]], align 4
16207 // CHECK21-NEXT: [[TMP3:%.*]] = load i32, ptr [[N]], align 4
16208 // CHECK21-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4
16209 // CHECK21-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
16210 // CHECK21-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
16211 // CHECK21-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
16212 // CHECK21-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
16213 // CHECK21-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
16214 // CHECK21-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
16215 // CHECK21-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
16216 // CHECK21-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_UB]], align 4
16217 // CHECK21-NEXT: store i32 0, ptr [[I]], align 4
16218 // CHECK21-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
16219 // CHECK21-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
16220 // CHECK21-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
16221 // CHECK21: simd.if.then:
16222 // CHECK21-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
16223 // CHECK21-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4
16224 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
16225 // CHECK21: omp.inner.for.cond:
16226 // CHECK21-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2:![0-9]+]]
16227 // CHECK21-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP2]]
16228 // CHECK21-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
16229 // CHECK21-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
16230 // CHECK21: omp.inner.for.body:
16231 // CHECK21-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]]
16232 // CHECK21-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
16233 // CHECK21-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
16234 // CHECK21-NEXT: store i32 [[ADD]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP2]]
16235 // CHECK21-NEXT: [[TMP11:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP2]]
16236 // CHECK21-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
16237 // CHECK21-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 [[IDXPROM]]
16238 // CHECK21-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP2]]
16239 // CHECK21-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
16240 // CHECK21: omp.body.continue:
16241 // CHECK21-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
16242 // CHECK21: omp.inner.for.inc:
16243 // CHECK21-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]]
16244 // CHECK21-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP12]], 1
16245 // CHECK21-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]]
16246 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
16247 // CHECK21: omp.inner.for.end:
16248 // CHECK21-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
16249 // CHECK21-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP13]], 0
16250 // CHECK21-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
16251 // CHECK21-NEXT: [[MUL8:%.*]] = mul nsw i32 [[DIV7]], 1
16252 // CHECK21-NEXT: [[ADD9:%.*]] = add nsw i32 0, [[MUL8]]
16253 // CHECK21-NEXT: store i32 [[ADD9]], ptr [[I3]], align 4
16254 // CHECK21-NEXT: br label [[SIMD_IF_END]]
16255 // CHECK21: simd.if.end:
16256 // CHECK21-NEXT: [[TMP14:%.*]] = load i32, ptr [[N]], align 4
16257 // CHECK21-NEXT: store i32 [[TMP14]], ptr [[DOTCAPTURE_EXPR_11]], align 4
16258 // CHECK21-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_11]], align 4
16259 // CHECK21-NEXT: [[SUB13:%.*]] = sub nsw i32 [[TMP15]], 0
16260 // CHECK21-NEXT: [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
16261 // CHECK21-NEXT: [[SUB15:%.*]] = sub nsw i32 [[DIV14]], 1
16262 // CHECK21-NEXT: store i32 [[SUB15]], ptr [[DOTCAPTURE_EXPR_12]], align 4
16263 // CHECK21-NEXT: store i32 0, ptr [[DOTOMP_LB16]], align 4
16264 // CHECK21-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_12]], align 4
16265 // CHECK21-NEXT: store i32 [[TMP16]], ptr [[DOTOMP_UB17]], align 4
16266 // CHECK21-NEXT: store i32 0, ptr [[I18]], align 4
16267 // CHECK21-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_11]], align 4
16268 // CHECK21-NEXT: [[CMP19:%.*]] = icmp slt i32 0, [[TMP17]]
16269 // CHECK21-NEXT: br i1 [[CMP19]], label [[SIMD_IF_THEN20:%.*]], label [[SIMD_IF_END38:%.*]]
16270 // CHECK21: simd.if.then20:
16271 // CHECK21-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_LB16]], align 4
16272 // CHECK21-NEXT: store i32 [[TMP18]], ptr [[DOTOMP_IV21]], align 4
16273 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND23:%.*]]
16274 // CHECK21: omp.inner.for.cond23:
16275 // CHECK21-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV21]], align 4, !llvm.access.group [[ACC_GRP6:![0-9]+]]
16276 // CHECK21-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_UB17]], align 4, !llvm.access.group [[ACC_GRP6]]
16277 // CHECK21-NEXT: [[CMP24:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
16278 // CHECK21-NEXT: br i1 [[CMP24]], label [[OMP_INNER_FOR_BODY25:%.*]], label [[OMP_INNER_FOR_END33:%.*]]
16279 // CHECK21: omp.inner.for.body25:
16280 // CHECK21-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IV21]], align 4, !llvm.access.group [[ACC_GRP6]]
16281 // CHECK21-NEXT: [[MUL26:%.*]] = mul nsw i32 [[TMP21]], 1
16282 // CHECK21-NEXT: [[ADD27:%.*]] = add nsw i32 0, [[MUL26]]
16283 // CHECK21-NEXT: store i32 [[ADD27]], ptr [[I22]], align 4, !llvm.access.group [[ACC_GRP6]]
16284 // CHECK21-NEXT: [[TMP22:%.*]] = load i32, ptr [[I22]], align 4, !llvm.access.group [[ACC_GRP6]]
16285 // CHECK21-NEXT: [[IDXPROM28:%.*]] = sext i32 [[TMP22]] to i64
16286 // CHECK21-NEXT: [[ARRAYIDX29:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 [[IDXPROM28]]
16287 // CHECK21-NEXT: store i32 0, ptr [[ARRAYIDX29]], align 4, !llvm.access.group [[ACC_GRP6]]
16288 // CHECK21-NEXT: br label [[OMP_BODY_CONTINUE30:%.*]]
16289 // CHECK21: omp.body.continue30:
16290 // CHECK21-NEXT: br label [[OMP_INNER_FOR_INC31:%.*]]
16291 // CHECK21: omp.inner.for.inc31:
16292 // CHECK21-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV21]], align 4, !llvm.access.group [[ACC_GRP6]]
16293 // CHECK21-NEXT: [[ADD32:%.*]] = add nsw i32 [[TMP23]], 1
16294 // CHECK21-NEXT: store i32 [[ADD32]], ptr [[DOTOMP_IV21]], align 4, !llvm.access.group [[ACC_GRP6]]
16295 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND23]], !llvm.loop [[LOOP7:![0-9]+]]
16296 // CHECK21: omp.inner.for.end33:
16297 // CHECK21-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_11]], align 4
16298 // CHECK21-NEXT: [[SUB34:%.*]] = sub nsw i32 [[TMP24]], 0
16299 // CHECK21-NEXT: [[DIV35:%.*]] = sdiv i32 [[SUB34]], 1
16300 // CHECK21-NEXT: [[MUL36:%.*]] = mul nsw i32 [[DIV35]], 1
16301 // CHECK21-NEXT: [[ADD37:%.*]] = add nsw i32 0, [[MUL36]]
16302 // CHECK21-NEXT: store i32 [[ADD37]], ptr [[I22]], align 4
16303 // CHECK21-NEXT: br label [[SIMD_IF_END38]]
16304 // CHECK21: simd.if.end38:
16305 // CHECK21-NEXT: [[TMP25:%.*]] = load i32, ptr [[M]], align 4
16306 // CHECK21-NEXT: store i32 [[TMP25]], ptr [[DOTCAPTURE_EXPR_39]], align 4
16307 // CHECK21-NEXT: [[TMP26:%.*]] = load i32, ptr [[N]], align 4
16308 // CHECK21-NEXT: store i32 [[TMP26]], ptr [[DOTCAPTURE_EXPR_41]], align 4
16309 // CHECK21-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_41]], align 4
16310 // CHECK21-NEXT: [[SUB43:%.*]] = sub nsw i32 [[TMP27]], 0
16311 // CHECK21-NEXT: [[DIV44:%.*]] = sdiv i32 [[SUB43]], 1
16312 // CHECK21-NEXT: [[SUB45:%.*]] = sub nsw i32 [[DIV44]], 1
16313 // CHECK21-NEXT: store i32 [[SUB45]], ptr [[DOTCAPTURE_EXPR_42]], align 4
16314 // CHECK21-NEXT: store i32 0, ptr [[DOTOMP_LB46]], align 4
16315 // CHECK21-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_42]], align 4
16316 // CHECK21-NEXT: store i32 [[TMP28]], ptr [[DOTOMP_UB47]], align 4
16317 // CHECK21-NEXT: store i32 0, ptr [[I48]], align 4
16318 // CHECK21-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_41]], align 4
16319 // CHECK21-NEXT: [[CMP49:%.*]] = icmp slt i32 0, [[TMP29]]
16320 // CHECK21-NEXT: br i1 [[CMP49]], label [[SIMD_IF_THEN50:%.*]], label [[SIMD_IF_END68:%.*]]
16321 // CHECK21: simd.if.then50:
16322 // CHECK21-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_LB46]], align 4
16323 // CHECK21-NEXT: store i32 [[TMP30]], ptr [[DOTOMP_IV51]], align 4
16324 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND53:%.*]]
16325 // CHECK21: omp.inner.for.cond53:
16326 // CHECK21-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_IV51]], align 4, !llvm.access.group [[ACC_GRP9:![0-9]+]]
16327 // CHECK21-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_UB47]], align 4, !llvm.access.group [[ACC_GRP9]]
16328 // CHECK21-NEXT: [[CMP54:%.*]] = icmp sle i32 [[TMP31]], [[TMP32]]
16329 // CHECK21-NEXT: br i1 [[CMP54]], label [[OMP_INNER_FOR_BODY55:%.*]], label [[OMP_INNER_FOR_END63:%.*]]
16330 // CHECK21: omp.inner.for.body55:
16331 // CHECK21-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_IV51]], align 4, !llvm.access.group [[ACC_GRP9]]
16332 // CHECK21-NEXT: [[MUL56:%.*]] = mul nsw i32 [[TMP33]], 1
16333 // CHECK21-NEXT: [[ADD57:%.*]] = add nsw i32 0, [[MUL56]]
16334 // CHECK21-NEXT: store i32 [[ADD57]], ptr [[I52]], align 4, !llvm.access.group [[ACC_GRP9]]
16335 // CHECK21-NEXT: [[TMP34:%.*]] = load i32, ptr [[I52]], align 4, !llvm.access.group [[ACC_GRP9]]
16336 // CHECK21-NEXT: [[IDXPROM58:%.*]] = sext i32 [[TMP34]] to i64
16337 // CHECK21-NEXT: [[ARRAYIDX59:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 [[IDXPROM58]]
16338 // CHECK21-NEXT: store i32 0, ptr [[ARRAYIDX59]], align 4, !llvm.access.group [[ACC_GRP9]]
16339 // CHECK21-NEXT: br label [[OMP_BODY_CONTINUE60:%.*]]
16340 // CHECK21: omp.body.continue60:
16341 // CHECK21-NEXT: br label [[OMP_INNER_FOR_INC61:%.*]]
16342 // CHECK21: omp.inner.for.inc61:
16343 // CHECK21-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTOMP_IV51]], align 4, !llvm.access.group [[ACC_GRP9]]
16344 // CHECK21-NEXT: [[ADD62:%.*]] = add nsw i32 [[TMP35]], 1
16345 // CHECK21-NEXT: store i32 [[ADD62]], ptr [[DOTOMP_IV51]], align 4, !llvm.access.group [[ACC_GRP9]]
16346 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND53]], !llvm.loop [[LOOP10:![0-9]+]]
16347 // CHECK21: omp.inner.for.end63:
16348 // CHECK21-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_41]], align 4
16349 // CHECK21-NEXT: [[SUB64:%.*]] = sub nsw i32 [[TMP36]], 0
16350 // CHECK21-NEXT: [[DIV65:%.*]] = sdiv i32 [[SUB64]], 1
16351 // CHECK21-NEXT: [[MUL66:%.*]] = mul nsw i32 [[DIV65]], 1
16352 // CHECK21-NEXT: [[ADD67:%.*]] = add nsw i32 0, [[MUL66]]
16353 // CHECK21-NEXT: store i32 [[ADD67]], ptr [[I52]], align 4
16354 // CHECK21-NEXT: br label [[SIMD_IF_END68]]
16355 // CHECK21: simd.if.end68:
16356 // CHECK21-NEXT: [[TMP37:%.*]] = load i32, ptr [[N]], align 4
16357 // CHECK21-NEXT: store i32 [[TMP37]], ptr [[DOTCAPTURE_EXPR_70]], align 4
16358 // CHECK21-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_70]], align 4
16359 // CHECK21-NEXT: [[SUB72:%.*]] = sub nsw i32 [[TMP38]], 0
16360 // CHECK21-NEXT: [[DIV73:%.*]] = sdiv i32 [[SUB72]], 1
16361 // CHECK21-NEXT: [[SUB74:%.*]] = sub nsw i32 [[DIV73]], 1
16362 // CHECK21-NEXT: store i32 [[SUB74]], ptr [[DOTCAPTURE_EXPR_71]], align 4
16363 // CHECK21-NEXT: store i32 0, ptr [[DOTOMP_LB75]], align 4
16364 // CHECK21-NEXT: [[TMP39:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_71]], align 4
16365 // CHECK21-NEXT: store i32 [[TMP39]], ptr [[DOTOMP_UB76]], align 4
16366 // CHECK21-NEXT: store i32 0, ptr [[I77]], align 4
16367 // CHECK21-NEXT: [[TMP40:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_70]], align 4
16368 // CHECK21-NEXT: [[CMP78:%.*]] = icmp slt i32 0, [[TMP40]]
16369 // CHECK21-NEXT: br i1 [[CMP78]], label [[SIMD_IF_THEN79:%.*]], label [[SIMD_IF_END97:%.*]]
16370 // CHECK21: simd.if.then79:
16371 // CHECK21-NEXT: [[TMP41:%.*]] = load i32, ptr [[DOTOMP_LB75]], align 4
16372 // CHECK21-NEXT: store i32 [[TMP41]], ptr [[DOTOMP_IV80]], align 4
16373 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND82:%.*]]
16374 // CHECK21: omp.inner.for.cond82:
16375 // CHECK21-NEXT: [[TMP42:%.*]] = load i32, ptr [[DOTOMP_IV80]], align 4, !llvm.access.group [[ACC_GRP12:![0-9]+]]
16376 // CHECK21-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTOMP_UB76]], align 4, !llvm.access.group [[ACC_GRP12]]
16377 // CHECK21-NEXT: [[CMP83:%.*]] = icmp sle i32 [[TMP42]], [[TMP43]]
16378 // CHECK21-NEXT: br i1 [[CMP83]], label [[OMP_INNER_FOR_BODY84:%.*]], label [[OMP_INNER_FOR_END92:%.*]]
16379 // CHECK21: omp.inner.for.body84:
16380 // CHECK21-NEXT: [[TMP44:%.*]] = load i32, ptr [[DOTOMP_IV80]], align 4, !llvm.access.group [[ACC_GRP12]]
16381 // CHECK21-NEXT: [[MUL85:%.*]] = mul nsw i32 [[TMP44]], 1
16382 // CHECK21-NEXT: [[ADD86:%.*]] = add nsw i32 0, [[MUL85]]
16383 // CHECK21-NEXT: store i32 [[ADD86]], ptr [[I81]], align 4, !llvm.access.group [[ACC_GRP12]]
16384 // CHECK21-NEXT: [[TMP45:%.*]] = load i32, ptr [[I81]], align 4, !llvm.access.group [[ACC_GRP12]]
16385 // CHECK21-NEXT: [[IDXPROM87:%.*]] = sext i32 [[TMP45]] to i64
16386 // CHECK21-NEXT: [[ARRAYIDX88:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 [[IDXPROM87]]
16387 // CHECK21-NEXT: store i32 0, ptr [[ARRAYIDX88]], align 4, !llvm.access.group [[ACC_GRP12]]
16388 // CHECK21-NEXT: br label [[OMP_BODY_CONTINUE89:%.*]]
16389 // CHECK21: omp.body.continue89:
16390 // CHECK21-NEXT: br label [[OMP_INNER_FOR_INC90:%.*]]
16391 // CHECK21: omp.inner.for.inc90:
16392 // CHECK21-NEXT: [[TMP46:%.*]] = load i32, ptr [[DOTOMP_IV80]], align 4, !llvm.access.group [[ACC_GRP12]]
16393 // CHECK21-NEXT: [[ADD91:%.*]] = add nsw i32 [[TMP46]], 1
16394 // CHECK21-NEXT: store i32 [[ADD91]], ptr [[DOTOMP_IV80]], align 4, !llvm.access.group [[ACC_GRP12]]
16395 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND82]], !llvm.loop [[LOOP13:![0-9]+]]
16396 // CHECK21: omp.inner.for.end92:
16397 // CHECK21-NEXT: [[TMP47:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_70]], align 4
16398 // CHECK21-NEXT: [[SUB93:%.*]] = sub nsw i32 [[TMP47]], 0
16399 // CHECK21-NEXT: [[DIV94:%.*]] = sdiv i32 [[SUB93]], 1
16400 // CHECK21-NEXT: [[MUL95:%.*]] = mul nsw i32 [[DIV94]], 1
16401 // CHECK21-NEXT: [[ADD96:%.*]] = add nsw i32 0, [[MUL95]]
16402 // CHECK21-NEXT: store i32 [[ADD96]], ptr [[I81]], align 4
16403 // CHECK21-NEXT: br label [[SIMD_IF_END97]]
16404 // CHECK21: simd.if.end97:
16405 // CHECK21-NEXT: [[TMP48:%.*]] = load i32, ptr [[M]], align 4
16406 // CHECK21-NEXT: store i32 [[TMP48]], ptr [[DOTCAPTURE_EXPR_98]], align 4
16407 // CHECK21-NEXT: [[TMP49:%.*]] = load i32, ptr [[N]], align 4
16408 // CHECK21-NEXT: store i32 [[TMP49]], ptr [[DOTCAPTURE_EXPR_100]], align 4
16409 // CHECK21-NEXT: [[TMP50:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_100]], align 4
16410 // CHECK21-NEXT: [[SUB102:%.*]] = sub nsw i32 [[TMP50]], 0
16411 // CHECK21-NEXT: [[DIV103:%.*]] = sdiv i32 [[SUB102]], 1
16412 // CHECK21-NEXT: [[SUB104:%.*]] = sub nsw i32 [[DIV103]], 1
16413 // CHECK21-NEXT: store i32 [[SUB104]], ptr [[DOTCAPTURE_EXPR_101]], align 4
16414 // CHECK21-NEXT: store i32 0, ptr [[DOTOMP_LB105]], align 4
16415 // CHECK21-NEXT: [[TMP51:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_101]], align 4
16416 // CHECK21-NEXT: store i32 [[TMP51]], ptr [[DOTOMP_UB106]], align 4
16417 // CHECK21-NEXT: store i32 0, ptr [[I107]], align 4
16418 // CHECK21-NEXT: [[TMP52:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_100]], align 4
16419 // CHECK21-NEXT: [[CMP108:%.*]] = icmp slt i32 0, [[TMP52]]
16420 // CHECK21-NEXT: br i1 [[CMP108]], label [[SIMD_IF_THEN109:%.*]], label [[SIMD_IF_END127:%.*]]
16421 // CHECK21: simd.if.then109:
16422 // CHECK21-NEXT: [[TMP53:%.*]] = load i32, ptr [[DOTOMP_LB105]], align 4
16423 // CHECK21-NEXT: store i32 [[TMP53]], ptr [[DOTOMP_IV110]], align 4
16424 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND112:%.*]]
16425 // CHECK21: omp.inner.for.cond112:
16426 // CHECK21-NEXT: [[TMP54:%.*]] = load i32, ptr [[DOTOMP_IV110]], align 4, !llvm.access.group [[ACC_GRP15:![0-9]+]]
16427 // CHECK21-NEXT: [[TMP55:%.*]] = load i32, ptr [[DOTOMP_UB106]], align 4, !llvm.access.group [[ACC_GRP15]]
16428 // CHECK21-NEXT: [[CMP113:%.*]] = icmp sle i32 [[TMP54]], [[TMP55]]
16429 // CHECK21-NEXT: br i1 [[CMP113]], label [[OMP_INNER_FOR_BODY114:%.*]], label [[OMP_INNER_FOR_END122:%.*]]
16430 // CHECK21: omp.inner.for.body114:
16431 // CHECK21-NEXT: [[TMP56:%.*]] = load i32, ptr [[DOTOMP_IV110]], align 4, !llvm.access.group [[ACC_GRP15]]
16432 // CHECK21-NEXT: [[MUL115:%.*]] = mul nsw i32 [[TMP56]], 1
16433 // CHECK21-NEXT: [[ADD116:%.*]] = add nsw i32 0, [[MUL115]]
16434 // CHECK21-NEXT: store i32 [[ADD116]], ptr [[I111]], align 4, !llvm.access.group [[ACC_GRP15]]
16435 // CHECK21-NEXT: [[TMP57:%.*]] = load i32, ptr [[I111]], align 4, !llvm.access.group [[ACC_GRP15]]
16436 // CHECK21-NEXT: [[IDXPROM117:%.*]] = sext i32 [[TMP57]] to i64
16437 // CHECK21-NEXT: [[ARRAYIDX118:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 [[IDXPROM117]]
16438 // CHECK21-NEXT: store i32 0, ptr [[ARRAYIDX118]], align 4, !llvm.access.group [[ACC_GRP15]]
16439 // CHECK21-NEXT: br label [[OMP_BODY_CONTINUE119:%.*]]
16440 // CHECK21: omp.body.continue119:
16441 // CHECK21-NEXT: br label [[OMP_INNER_FOR_INC120:%.*]]
16442 // CHECK21: omp.inner.for.inc120:
16443 // CHECK21-NEXT: [[TMP58:%.*]] = load i32, ptr [[DOTOMP_IV110]], align 4, !llvm.access.group [[ACC_GRP15]]
16444 // CHECK21-NEXT: [[ADD121:%.*]] = add nsw i32 [[TMP58]], 1
16445 // CHECK21-NEXT: store i32 [[ADD121]], ptr [[DOTOMP_IV110]], align 4, !llvm.access.group [[ACC_GRP15]]
16446 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND112]], !llvm.loop [[LOOP16:![0-9]+]]
16447 // CHECK21: omp.inner.for.end122:
16448 // CHECK21-NEXT: [[TMP59:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_100]], align 4
16449 // CHECK21-NEXT: [[SUB123:%.*]] = sub nsw i32 [[TMP59]], 0
16450 // CHECK21-NEXT: [[DIV124:%.*]] = sdiv i32 [[SUB123]], 1
16451 // CHECK21-NEXT: [[MUL125:%.*]] = mul nsw i32 [[DIV124]], 1
16452 // CHECK21-NEXT: [[ADD126:%.*]] = add nsw i32 0, [[MUL125]]
16453 // CHECK21-NEXT: store i32 [[ADD126]], ptr [[I111]], align 4
16454 // CHECK21-NEXT: br label [[SIMD_IF_END127]]
16455 // CHECK21: simd.if.end127:
16456 // CHECK21-NEXT: [[TMP60:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4
16457 // CHECK21-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP60]])
16458 // CHECK21-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4
16459 // CHECK21-NEXT: [[TMP61:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8
16460 // CHECK21-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP61]])
16461 // CHECK21-NEXT: [[TMP62:%.*]] = load i32, ptr [[RETVAL]], align 4
16462 // CHECK21-NEXT: ret i32 [[TMP62]]
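//
// The CHECK21 main above matches a simd-only build (an -fopenmp-simd style RUN
// line outside this excerpt): no offloading entry points and no __kmpc_* calls
// are emitted, and each of the five combined constructs collapses to a guarded
// serial loop whose accesses carry !llvm.access.group metadata and whose
// back-edge carries !llvm.loop metadata for the vectorizer. A sketch of the
// shape of main() that these checks are consistent with; the schedule clauses
// on the individual pragmas are not recoverable from this excerpt:
//
//   int main(int argc, char **argv) {
//     int n = 100;
//     int a[n];            // the VLA guarded by stacksave/stackrestore above
//     int m = 10;
//     // five loops of this shape, with different schedule/dist_schedule
//     // variants on the directive:
//   #pragma omp target teams distribute parallel for simd
//     for (int i = 0; i < n; ++i)
//       a[i] = 0;
//     return tmain<int, 10>(argc);
//   }
//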
16465 // CHECK21-LABEL: define {{[^@]+}}@_Z5tmainIiLi10EEiT_
16466 // CHECK21-SAME: (i32 noundef signext [[ARGC:%.*]]) #[[ATTR2:[0-9]+]] comdat {
16467 // CHECK21-NEXT: entry:
16468 // CHECK21-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
16469 // CHECK21-NEXT: [[A:%.*]] = alloca [10 x i32], align 4
16470 // CHECK21-NEXT: [[M:%.*]] = alloca i32, align 4
16471 // CHECK21-NEXT: [[TMP:%.*]] = alloca i32, align 4
16472 // CHECK21-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
16473 // CHECK21-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
16474 // CHECK21-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
16475 // CHECK21-NEXT: [[I:%.*]] = alloca i32, align 4
16476 // CHECK21-NEXT: [[_TMP2:%.*]] = alloca i32, align 4
16477 // CHECK21-NEXT: [[DOTOMP_LB3:%.*]] = alloca i32, align 4
16478 // CHECK21-NEXT: [[DOTOMP_UB4:%.*]] = alloca i32, align 4
16479 // CHECK21-NEXT: [[DOTOMP_IV5:%.*]] = alloca i32, align 4
16480 // CHECK21-NEXT: [[I6:%.*]] = alloca i32, align 4
16481 // CHECK21-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
16482 // CHECK21-NEXT: [[_TMP18:%.*]] = alloca i32, align 4
16483 // CHECK21-NEXT: [[DOTOMP_LB19:%.*]] = alloca i32, align 4
16484 // CHECK21-NEXT: [[DOTOMP_UB20:%.*]] = alloca i32, align 4
16485 // CHECK21-NEXT: [[DOTOMP_IV21:%.*]] = alloca i32, align 4
16486 // CHECK21-NEXT: [[I22:%.*]] = alloca i32, align 4
16487 // CHECK21-NEXT: [[_TMP34:%.*]] = alloca i32, align 4
16488 // CHECK21-NEXT: [[DOTOMP_LB35:%.*]] = alloca i32, align 4
16489 // CHECK21-NEXT: [[DOTOMP_UB36:%.*]] = alloca i32, align 4
16490 // CHECK21-NEXT: [[DOTOMP_IV37:%.*]] = alloca i32, align 4
16491 // CHECK21-NEXT: [[I38:%.*]] = alloca i32, align 4
16492 // CHECK21-NEXT: [[DOTCAPTURE_EXPR_50:%.*]] = alloca i32, align 4
16493 // CHECK21-NEXT: [[_TMP51:%.*]] = alloca i32, align 4
16494 // CHECK21-NEXT: [[DOTOMP_LB52:%.*]] = alloca i32, align 4
16495 // CHECK21-NEXT: [[DOTOMP_UB53:%.*]] = alloca i32, align 4
16496 // CHECK21-NEXT: [[DOTOMP_IV54:%.*]] = alloca i32, align 4
16497 // CHECK21-NEXT: [[I55:%.*]] = alloca i32, align 4
16498 // CHECK21-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4
16499 // CHECK21-NEXT: store i32 10, ptr [[M]], align 4
16500 // CHECK21-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
16501 // CHECK21-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
16502 // CHECK21-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
16503 // CHECK21-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4
16504 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
16505 // CHECK21: omp.inner.for.cond:
16506 // CHECK21-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18:![0-9]+]]
16507 // CHECK21-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP18]]
16508 // CHECK21-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
16509 // CHECK21-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
16510 // CHECK21: omp.inner.for.body:
16511 // CHECK21-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
16512 // CHECK21-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
16513 // CHECK21-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
16514 // CHECK21-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP18]]
16515 // CHECK21-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP18]]
16516 // CHECK21-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP4]] to i64
16517 // CHECK21-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[A]], i64 0, i64 [[IDXPROM]]
16518 // CHECK21-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP18]]
16519 // CHECK21-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
16520 // CHECK21: omp.body.continue:
16521 // CHECK21-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
16522 // CHECK21: omp.inner.for.inc:
16523 // CHECK21-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
16524 // CHECK21-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP5]], 1
16525 // CHECK21-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
16526 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
16527 // CHECK21: omp.inner.for.end:
16528 // CHECK21-NEXT: store i32 10, ptr [[I]], align 4
16529 // CHECK21-NEXT: store i32 0, ptr [[DOTOMP_LB3]], align 4
16530 // CHECK21-NEXT: store i32 9, ptr [[DOTOMP_UB4]], align 4
16531 // CHECK21-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB3]], align 4
16532 // CHECK21-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV5]], align 4
16533 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]]
16534 // CHECK21: omp.inner.for.cond7:
16535 // CHECK21-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP21:![0-9]+]]
16536 // CHECK21-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB4]], align 4, !llvm.access.group [[ACC_GRP21]]
16537 // CHECK21-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
16538 // CHECK21-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END17:%.*]]
16539 // CHECK21: omp.inner.for.body9:
16540 // CHECK21-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP21]]
16541 // CHECK21-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP9]], 1
16542 // CHECK21-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]]
16543 // CHECK21-NEXT: store i32 [[ADD11]], ptr [[I6]], align 4, !llvm.access.group [[ACC_GRP21]]
16544 // CHECK21-NEXT: [[TMP10:%.*]] = load i32, ptr [[I6]], align 4, !llvm.access.group [[ACC_GRP21]]
16545 // CHECK21-NEXT: [[IDXPROM12:%.*]] = sext i32 [[TMP10]] to i64
16546 // CHECK21-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds [10 x i32], ptr [[A]], i64 0, i64 [[IDXPROM12]]
16547 // CHECK21-NEXT: store i32 0, ptr [[ARRAYIDX13]], align 4, !llvm.access.group [[ACC_GRP21]]
16548 // CHECK21-NEXT: br label [[OMP_BODY_CONTINUE14:%.*]]
16549 // CHECK21: omp.body.continue14:
16550 // CHECK21-NEXT: br label [[OMP_INNER_FOR_INC15:%.*]]
16551 // CHECK21: omp.inner.for.inc15:
16552 // CHECK21-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP21]]
16553 // CHECK21-NEXT: [[ADD16:%.*]] = add nsw i32 [[TMP11]], 1
16554 // CHECK21-NEXT: store i32 [[ADD16]], ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP21]]
16555 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP22:![0-9]+]]
16556 // CHECK21: omp.inner.for.end17:
16557 // CHECK21-NEXT: store i32 10, ptr [[I6]], align 4
16558 // CHECK21-NEXT: [[TMP12:%.*]] = load i32, ptr [[M]], align 4
16559 // CHECK21-NEXT: store i32 [[TMP12]], ptr [[DOTCAPTURE_EXPR_]], align 4
16560 // CHECK21-NEXT: store i32 0, ptr [[DOTOMP_LB19]], align 4
16561 // CHECK21-NEXT: store i32 9, ptr [[DOTOMP_UB20]], align 4
16562 // CHECK21-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB19]], align 4
16563 // CHECK21-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV21]], align 4
16564 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND23:%.*]]
16565 // CHECK21: omp.inner.for.cond23:
16566 // CHECK21-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV21]], align 4, !llvm.access.group [[ACC_GRP24:![0-9]+]]
16567 // CHECK21-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB20]], align 4, !llvm.access.group [[ACC_GRP24]]
16568 // CHECK21-NEXT: [[CMP24:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
16569 // CHECK21-NEXT: br i1 [[CMP24]], label [[OMP_INNER_FOR_BODY25:%.*]], label [[OMP_INNER_FOR_END33:%.*]]
16570 // CHECK21: omp.inner.for.body25:
16571 // CHECK21-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV21]], align 4, !llvm.access.group [[ACC_GRP24]]
16572 // CHECK21-NEXT: [[MUL26:%.*]] = mul nsw i32 [[TMP16]], 1
16573 // CHECK21-NEXT: [[ADD27:%.*]] = add nsw i32 0, [[MUL26]]
16574 // CHECK21-NEXT: store i32 [[ADD27]], ptr [[I22]], align 4, !llvm.access.group [[ACC_GRP24]]
16575 // CHECK21-NEXT: [[TMP17:%.*]] = load i32, ptr [[I22]], align 4, !llvm.access.group [[ACC_GRP24]]
16576 // CHECK21-NEXT: [[IDXPROM28:%.*]] = sext i32 [[TMP17]] to i64
16577 // CHECK21-NEXT: [[ARRAYIDX29:%.*]] = getelementptr inbounds [10 x i32], ptr [[A]], i64 0, i64 [[IDXPROM28]]
16578 // CHECK21-NEXT: store i32 0, ptr [[ARRAYIDX29]], align 4, !llvm.access.group [[ACC_GRP24]]
16579 // CHECK21-NEXT: br label [[OMP_BODY_CONTINUE30:%.*]]
16580 // CHECK21: omp.body.continue30:
16581 // CHECK21-NEXT: br label [[OMP_INNER_FOR_INC31:%.*]]
16582 // CHECK21: omp.inner.for.inc31:
16583 // CHECK21-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV21]], align 4, !llvm.access.group [[ACC_GRP24]]
16584 // CHECK21-NEXT: [[ADD32:%.*]] = add nsw i32 [[TMP18]], 1
16585 // CHECK21-NEXT: store i32 [[ADD32]], ptr [[DOTOMP_IV21]], align 4, !llvm.access.group [[ACC_GRP24]]
16586 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND23]], !llvm.loop [[LOOP25:![0-9]+]]
16587 // CHECK21: omp.inner.for.end33:
16588 // CHECK21-NEXT: store i32 10, ptr [[I22]], align 4
16589 // CHECK21-NEXT: store i32 0, ptr [[DOTOMP_LB35]], align 4
16590 // CHECK21-NEXT: store i32 9, ptr [[DOTOMP_UB36]], align 4
16591 // CHECK21-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_LB35]], align 4
16592 // CHECK21-NEXT: store i32 [[TMP19]], ptr [[DOTOMP_IV37]], align 4
16593 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND39:%.*]]
16594 // CHECK21: omp.inner.for.cond39:
16595 // CHECK21-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV37]], align 4, !llvm.access.group [[ACC_GRP27:![0-9]+]]
16596 // CHECK21-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_UB36]], align 4, !llvm.access.group [[ACC_GRP27]]
16597 // CHECK21-NEXT: [[CMP40:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]]
16598 // CHECK21-NEXT: br i1 [[CMP40]], label [[OMP_INNER_FOR_BODY41:%.*]], label [[OMP_INNER_FOR_END49:%.*]]
16599 // CHECK21: omp.inner.for.body41:
16600 // CHECK21-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IV37]], align 4, !llvm.access.group [[ACC_GRP27]]
16601 // CHECK21-NEXT: [[MUL42:%.*]] = mul nsw i32 [[TMP22]], 1
16602 // CHECK21-NEXT: [[ADD43:%.*]] = add nsw i32 0, [[MUL42]]
16603 // CHECK21-NEXT: store i32 [[ADD43]], ptr [[I38]], align 4, !llvm.access.group [[ACC_GRP27]]
16604 // CHECK21-NEXT: [[TMP23:%.*]] = load i32, ptr [[I38]], align 4, !llvm.access.group [[ACC_GRP27]]
16605 // CHECK21-NEXT: [[IDXPROM44:%.*]] = sext i32 [[TMP23]] to i64
16606 // CHECK21-NEXT: [[ARRAYIDX45:%.*]] = getelementptr inbounds [10 x i32], ptr [[A]], i64 0, i64 [[IDXPROM44]]
16607 // CHECK21-NEXT: store i32 0, ptr [[ARRAYIDX45]], align 4, !llvm.access.group [[ACC_GRP27]]
16608 // CHECK21-NEXT: br label [[OMP_BODY_CONTINUE46:%.*]]
16609 // CHECK21: omp.body.continue46:
16610 // CHECK21-NEXT: br label [[OMP_INNER_FOR_INC47:%.*]]
16611 // CHECK21: omp.inner.for.inc47:
16612 // CHECK21-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV37]], align 4, !llvm.access.group [[ACC_GRP27]]
16613 // CHECK21-NEXT: [[ADD48:%.*]] = add nsw i32 [[TMP24]], 1
16614 // CHECK21-NEXT: store i32 [[ADD48]], ptr [[DOTOMP_IV37]], align 4, !llvm.access.group [[ACC_GRP27]]
16615 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND39]], !llvm.loop [[LOOP28:![0-9]+]]
16616 // CHECK21: omp.inner.for.end49:
16617 // CHECK21-NEXT: store i32 10, ptr [[I38]], align 4
16618 // CHECK21-NEXT: [[TMP25:%.*]] = load i32, ptr [[M]], align 4
16619 // CHECK21-NEXT: store i32 [[TMP25]], ptr [[DOTCAPTURE_EXPR_50]], align 4
16620 // CHECK21-NEXT: store i32 0, ptr [[DOTOMP_LB52]], align 4
16621 // CHECK21-NEXT: store i32 9, ptr [[DOTOMP_UB53]], align 4
16622 // CHECK21-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_LB52]], align 4
16623 // CHECK21-NEXT: store i32 [[TMP26]], ptr [[DOTOMP_IV54]], align 4
16624 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND56:%.*]]
16625 // CHECK21: omp.inner.for.cond56:
16626 // CHECK21-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV54]], align 4, !llvm.access.group [[ACC_GRP30:![0-9]+]]
16627 // CHECK21-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_UB53]], align 4, !llvm.access.group [[ACC_GRP30]]
16628 // CHECK21-NEXT: [[CMP57:%.*]] = icmp sle i32 [[TMP27]], [[TMP28]]
16629 // CHECK21-NEXT: br i1 [[CMP57]], label [[OMP_INNER_FOR_BODY58:%.*]], label [[OMP_INNER_FOR_END66:%.*]]
16630 // CHECK21: omp.inner.for.body58:
16631 // CHECK21-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_IV54]], align 4, !llvm.access.group [[ACC_GRP30]]
16632 // CHECK21-NEXT: [[MUL59:%.*]] = mul nsw i32 [[TMP29]], 1
16633 // CHECK21-NEXT: [[ADD60:%.*]] = add nsw i32 0, [[MUL59]]
16634 // CHECK21-NEXT: store i32 [[ADD60]], ptr [[I55]], align 4, !llvm.access.group [[ACC_GRP30]]
16635 // CHECK21-NEXT: [[TMP30:%.*]] = load i32, ptr [[I55]], align 4, !llvm.access.group [[ACC_GRP30]]
16636 // CHECK21-NEXT: [[IDXPROM61:%.*]] = sext i32 [[TMP30]] to i64
16637 // CHECK21-NEXT: [[ARRAYIDX62:%.*]] = getelementptr inbounds [10 x i32], ptr [[A]], i64 0, i64 [[IDXPROM61]]
16638 // CHECK21-NEXT: store i32 0, ptr [[ARRAYIDX62]], align 4, !llvm.access.group [[ACC_GRP30]]
16639 // CHECK21-NEXT: br label [[OMP_BODY_CONTINUE63:%.*]]
16640 // CHECK21: omp.body.continue63:
16641 // CHECK21-NEXT: br label [[OMP_INNER_FOR_INC64:%.*]]
16642 // CHECK21: omp.inner.for.inc64:
16643 // CHECK21-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_IV54]], align 4, !llvm.access.group [[ACC_GRP30]]
16644 // CHECK21-NEXT: [[ADD65:%.*]] = add nsw i32 [[TMP31]], 1
16645 // CHECK21-NEXT: store i32 [[ADD65]], ptr [[DOTOMP_IV54]], align 4, !llvm.access.group [[ACC_GRP30]]
16646 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND56]], !llvm.loop [[LOOP31:![0-9]+]]
16647 // CHECK21: omp.inner.for.end66:
16648 // CHECK21-NEXT: store i32 10, ptr [[I55]], align 4
16649 // CHECK21-NEXT: ret i32 0
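// The CHECK23 assertions below appear to cover the 32-bit simd-only
// configuration (-fopenmp-simd): no OpenMP runtime calls are emitted, and each
// combined target construct in main lowers to a plain host simd loop guarded
// by simd.if.then/simd.if.end, with the VLA indexed through 32-bit GEPs.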
16652 // CHECK23-LABEL: define {{[^@]+}}@main
16653 // CHECK23-SAME: (i32 noundef [[ARGC:%.*]], ptr noundef [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] {
16654 // CHECK23-NEXT: entry:
16655 // CHECK23-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
16656 // CHECK23-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
16657 // CHECK23-NEXT: [[ARGV_ADDR:%.*]] = alloca ptr, align 4
16658 // CHECK23-NEXT: [[N:%.*]] = alloca i32, align 4
16659 // CHECK23-NEXT: [[SAVED_STACK:%.*]] = alloca ptr, align 4
16660 // CHECK23-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4
16661 // CHECK23-NEXT: [[M:%.*]] = alloca i32, align 4
16662 // CHECK23-NEXT: [[TMP:%.*]] = alloca i32, align 4
16663 // CHECK23-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
16664 // CHECK23-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
16665 // CHECK23-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
16666 // CHECK23-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
16667 // CHECK23-NEXT: [[I:%.*]] = alloca i32, align 4
16668 // CHECK23-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
16669 // CHECK23-NEXT: [[I3:%.*]] = alloca i32, align 4
16670 // CHECK23-NEXT: [[_TMP10:%.*]] = alloca i32, align 4
16671 // CHECK23-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4
16672 // CHECK23-NEXT: [[DOTCAPTURE_EXPR_12:%.*]] = alloca i32, align 4
16673 // CHECK23-NEXT: [[DOTOMP_LB16:%.*]] = alloca i32, align 4
16674 // CHECK23-NEXT: [[DOTOMP_UB17:%.*]] = alloca i32, align 4
16675 // CHECK23-NEXT: [[I18:%.*]] = alloca i32, align 4
16676 // CHECK23-NEXT: [[DOTOMP_IV21:%.*]] = alloca i32, align 4
16677 // CHECK23-NEXT: [[I22:%.*]] = alloca i32, align 4
16678 // CHECK23-NEXT: [[DOTCAPTURE_EXPR_38:%.*]] = alloca i32, align 4
16679 // CHECK23-NEXT: [[_TMP39:%.*]] = alloca i32, align 4
16680 // CHECK23-NEXT: [[DOTCAPTURE_EXPR_40:%.*]] = alloca i32, align 4
16681 // CHECK23-NEXT: [[DOTCAPTURE_EXPR_41:%.*]] = alloca i32, align 4
16682 // CHECK23-NEXT: [[DOTOMP_LB45:%.*]] = alloca i32, align 4
16683 // CHECK23-NEXT: [[DOTOMP_UB46:%.*]] = alloca i32, align 4
16684 // CHECK23-NEXT: [[I47:%.*]] = alloca i32, align 4
16685 // CHECK23-NEXT: [[DOTOMP_IV50:%.*]] = alloca i32, align 4
16686 // CHECK23-NEXT: [[I51:%.*]] = alloca i32, align 4
16687 // CHECK23-NEXT: [[_TMP67:%.*]] = alloca i32, align 4
16688 // CHECK23-NEXT: [[DOTCAPTURE_EXPR_68:%.*]] = alloca i32, align 4
16689 // CHECK23-NEXT: [[DOTCAPTURE_EXPR_69:%.*]] = alloca i32, align 4
16690 // CHECK23-NEXT: [[DOTOMP_LB73:%.*]] = alloca i32, align 4
16691 // CHECK23-NEXT: [[DOTOMP_UB74:%.*]] = alloca i32, align 4
16692 // CHECK23-NEXT: [[I75:%.*]] = alloca i32, align 4
16693 // CHECK23-NEXT: [[DOTOMP_IV78:%.*]] = alloca i32, align 4
16694 // CHECK23-NEXT: [[I79:%.*]] = alloca i32, align 4
16695 // CHECK23-NEXT: [[DOTCAPTURE_EXPR_95:%.*]] = alloca i32, align 4
16696 // CHECK23-NEXT: [[_TMP96:%.*]] = alloca i32, align 4
16697 // CHECK23-NEXT: [[DOTCAPTURE_EXPR_97:%.*]] = alloca i32, align 4
16698 // CHECK23-NEXT: [[DOTCAPTURE_EXPR_98:%.*]] = alloca i32, align 4
16699 // CHECK23-NEXT: [[DOTOMP_LB102:%.*]] = alloca i32, align 4
16700 // CHECK23-NEXT: [[DOTOMP_UB103:%.*]] = alloca i32, align 4
16701 // CHECK23-NEXT: [[I104:%.*]] = alloca i32, align 4
16702 // CHECK23-NEXT: [[DOTOMP_IV107:%.*]] = alloca i32, align 4
16703 // CHECK23-NEXT: [[I108:%.*]] = alloca i32, align 4
16704 // CHECK23-NEXT: store i32 0, ptr [[RETVAL]], align 4
16705 // CHECK23-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4
16706 // CHECK23-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 4
16707 // CHECK23-NEXT: store i32 100, ptr [[N]], align 4
16708 // CHECK23-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4
16709 // CHECK23-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave.p0()
16710 // CHECK23-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4
16711 // CHECK23-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4
16712 // CHECK23-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4
16713 // CHECK23-NEXT: store i32 10, ptr [[M]], align 4
16714 // CHECK23-NEXT: [[TMP2:%.*]] = load i32, ptr [[N]], align 4
16715 // CHECK23-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 4
16716 // CHECK23-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
16717 // CHECK23-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
16718 // CHECK23-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
16719 // CHECK23-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
16720 // CHECK23-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
16721 // CHECK23-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
16722 // CHECK23-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
16723 // CHECK23-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_UB]], align 4
16724 // CHECK23-NEXT: store i32 0, ptr [[I]], align 4
16725 // CHECK23-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
16726 // CHECK23-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
16727 // CHECK23-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
16728 // CHECK23: simd.if.then:
16729 // CHECK23-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
16730 // CHECK23-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
16731 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
16732 // CHECK23: omp.inner.for.cond:
16733 // CHECK23-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]]
16734 // CHECK23-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP3]]
16735 // CHECK23-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
16736 // CHECK23-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
16737 // CHECK23: omp.inner.for.body:
16738 // CHECK23-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]]
16739 // CHECK23-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
16740 // CHECK23-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
16741 // CHECK23-NEXT: store i32 [[ADD]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP3]]
16742 // CHECK23-NEXT: [[TMP10:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP3]]
16743 // CHECK23-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i32 [[TMP10]]
16744 // CHECK23-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP3]]
16745 // CHECK23-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
16746 // CHECK23: omp.body.continue:
16747 // CHECK23-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
16748 // CHECK23: omp.inner.for.inc:
16749 // CHECK23-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]]
16750 // CHECK23-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP11]], 1
16751 // CHECK23-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]]
16752 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
16753 // CHECK23: omp.inner.for.end:
16754 // CHECK23-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
16755 // CHECK23-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP12]], 0
16756 // CHECK23-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1
16757 // CHECK23-NEXT: [[MUL8:%.*]] = mul nsw i32 [[DIV7]], 1
16758 // CHECK23-NEXT: [[ADD9:%.*]] = add nsw i32 0, [[MUL8]]
16759 // CHECK23-NEXT: store i32 [[ADD9]], ptr [[I3]], align 4
16760 // CHECK23-NEXT: br label [[SIMD_IF_END]]
16761 // CHECK23: simd.if.end:
16762 // CHECK23-NEXT: [[TMP13:%.*]] = load i32, ptr [[N]], align 4
16763 // CHECK23-NEXT: store i32 [[TMP13]], ptr [[DOTCAPTURE_EXPR_11]], align 4
16764 // CHECK23-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_11]], align 4
16765 // CHECK23-NEXT: [[SUB13:%.*]] = sub nsw i32 [[TMP14]], 0
16766 // CHECK23-NEXT: [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
16767 // CHECK23-NEXT: [[SUB15:%.*]] = sub nsw i32 [[DIV14]], 1
16768 // CHECK23-NEXT: store i32 [[SUB15]], ptr [[DOTCAPTURE_EXPR_12]], align 4
16769 // CHECK23-NEXT: store i32 0, ptr [[DOTOMP_LB16]], align 4
16770 // CHECK23-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_12]], align 4
16771 // CHECK23-NEXT: store i32 [[TMP15]], ptr [[DOTOMP_UB17]], align 4
16772 // CHECK23-NEXT: store i32 0, ptr [[I18]], align 4
16773 // CHECK23-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_11]], align 4
16774 // CHECK23-NEXT: [[CMP19:%.*]] = icmp slt i32 0, [[TMP16]]
16775 // CHECK23-NEXT: br i1 [[CMP19]], label [[SIMD_IF_THEN20:%.*]], label [[SIMD_IF_END37:%.*]]
16776 // CHECK23: simd.if.then20:
16777 // CHECK23-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_LB16]], align 4
16778 // CHECK23-NEXT: store i32 [[TMP17]], ptr [[DOTOMP_IV21]], align 4
16779 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND23:%.*]]
16780 // CHECK23: omp.inner.for.cond23:
16781 // CHECK23-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV21]], align 4, !llvm.access.group [[ACC_GRP7:![0-9]+]]
16782 // CHECK23-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_UB17]], align 4, !llvm.access.group [[ACC_GRP7]]
16783 // CHECK23-NEXT: [[CMP24:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]]
16784 // CHECK23-NEXT: br i1 [[CMP24]], label [[OMP_INNER_FOR_BODY25:%.*]], label [[OMP_INNER_FOR_END32:%.*]]
16785 // CHECK23: omp.inner.for.body25:
16786 // CHECK23-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV21]], align 4, !llvm.access.group [[ACC_GRP7]]
16787 // CHECK23-NEXT: [[MUL26:%.*]] = mul nsw i32 [[TMP20]], 1
16788 // CHECK23-NEXT: [[ADD27:%.*]] = add nsw i32 0, [[MUL26]]
16789 // CHECK23-NEXT: store i32 [[ADD27]], ptr [[I22]], align 4, !llvm.access.group [[ACC_GRP7]]
16790 // CHECK23-NEXT: [[TMP21:%.*]] = load i32, ptr [[I22]], align 4, !llvm.access.group [[ACC_GRP7]]
16791 // CHECK23-NEXT: [[ARRAYIDX28:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i32 [[TMP21]]
16792 // CHECK23-NEXT: store i32 0, ptr [[ARRAYIDX28]], align 4, !llvm.access.group [[ACC_GRP7]]
16793 // CHECK23-NEXT: br label [[OMP_BODY_CONTINUE29:%.*]]
16794 // CHECK23: omp.body.continue29:
16795 // CHECK23-NEXT: br label [[OMP_INNER_FOR_INC30:%.*]]
16796 // CHECK23: omp.inner.for.inc30:
16797 // CHECK23-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IV21]], align 4, !llvm.access.group [[ACC_GRP7]]
16798 // CHECK23-NEXT: [[ADD31:%.*]] = add nsw i32 [[TMP22]], 1
16799 // CHECK23-NEXT: store i32 [[ADD31]], ptr [[DOTOMP_IV21]], align 4, !llvm.access.group [[ACC_GRP7]]
16800 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND23]], !llvm.loop [[LOOP8:![0-9]+]]
16801 // CHECK23: omp.inner.for.end32:
16802 // CHECK23-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_11]], align 4
16803 // CHECK23-NEXT: [[SUB33:%.*]] = sub nsw i32 [[TMP23]], 0
16804 // CHECK23-NEXT: [[DIV34:%.*]] = sdiv i32 [[SUB33]], 1
16805 // CHECK23-NEXT: [[MUL35:%.*]] = mul nsw i32 [[DIV34]], 1
16806 // CHECK23-NEXT: [[ADD36:%.*]] = add nsw i32 0, [[MUL35]]
16807 // CHECK23-NEXT: store i32 [[ADD36]], ptr [[I22]], align 4
16808 // CHECK23-NEXT: br label [[SIMD_IF_END37]]
16809 // CHECK23: simd.if.end37:
16810 // CHECK23-NEXT: [[TMP24:%.*]] = load i32, ptr [[M]], align 4
16811 // CHECK23-NEXT: store i32 [[TMP24]], ptr [[DOTCAPTURE_EXPR_38]], align 4
16812 // CHECK23-NEXT: [[TMP25:%.*]] = load i32, ptr [[N]], align 4
16813 // CHECK23-NEXT: store i32 [[TMP25]], ptr [[DOTCAPTURE_EXPR_40]], align 4
16814 // CHECK23-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_40]], align 4
16815 // CHECK23-NEXT: [[SUB42:%.*]] = sub nsw i32 [[TMP26]], 0
16816 // CHECK23-NEXT: [[DIV43:%.*]] = sdiv i32 [[SUB42]], 1
16817 // CHECK23-NEXT: [[SUB44:%.*]] = sub nsw i32 [[DIV43]], 1
16818 // CHECK23-NEXT: store i32 [[SUB44]], ptr [[DOTCAPTURE_EXPR_41]], align 4
16819 // CHECK23-NEXT: store i32 0, ptr [[DOTOMP_LB45]], align 4
16820 // CHECK23-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_41]], align 4
16821 // CHECK23-NEXT: store i32 [[TMP27]], ptr [[DOTOMP_UB46]], align 4
16822 // CHECK23-NEXT: store i32 0, ptr [[I47]], align 4
16823 // CHECK23-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_40]], align 4
16824 // CHECK23-NEXT: [[CMP48:%.*]] = icmp slt i32 0, [[TMP28]]
16825 // CHECK23-NEXT: br i1 [[CMP48]], label [[SIMD_IF_THEN49:%.*]], label [[SIMD_IF_END66:%.*]]
16826 // CHECK23: simd.if.then49:
16827 // CHECK23-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_LB45]], align 4
16828 // CHECK23-NEXT: store i32 [[TMP29]], ptr [[DOTOMP_IV50]], align 4
16829 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND52:%.*]]
16830 // CHECK23: omp.inner.for.cond52:
16831 // CHECK23-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IV50]], align 4, !llvm.access.group [[ACC_GRP10:![0-9]+]]
16832 // CHECK23-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_UB46]], align 4, !llvm.access.group [[ACC_GRP10]]
16833 // CHECK23-NEXT: [[CMP53:%.*]] = icmp sle i32 [[TMP30]], [[TMP31]]
16834 // CHECK23-NEXT: br i1 [[CMP53]], label [[OMP_INNER_FOR_BODY54:%.*]], label [[OMP_INNER_FOR_END61:%.*]]
16835 // CHECK23: omp.inner.for.body54:
16836 // CHECK23-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_IV50]], align 4, !llvm.access.group [[ACC_GRP10]]
16837 // CHECK23-NEXT: [[MUL55:%.*]] = mul nsw i32 [[TMP32]], 1
16838 // CHECK23-NEXT: [[ADD56:%.*]] = add nsw i32 0, [[MUL55]]
16839 // CHECK23-NEXT: store i32 [[ADD56]], ptr [[I51]], align 4, !llvm.access.group [[ACC_GRP10]]
16840 // CHECK23-NEXT: [[TMP33:%.*]] = load i32, ptr [[I51]], align 4, !llvm.access.group [[ACC_GRP10]]
16841 // CHECK23-NEXT: [[ARRAYIDX57:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i32 [[TMP33]]
16842 // CHECK23-NEXT: store i32 0, ptr [[ARRAYIDX57]], align 4, !llvm.access.group [[ACC_GRP10]]
16843 // CHECK23-NEXT: br label [[OMP_BODY_CONTINUE58:%.*]]
16844 // CHECK23: omp.body.continue58:
16845 // CHECK23-NEXT: br label [[OMP_INNER_FOR_INC59:%.*]]
16846 // CHECK23: omp.inner.for.inc59:
16847 // CHECK23-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTOMP_IV50]], align 4, !llvm.access.group [[ACC_GRP10]]
16848 // CHECK23-NEXT: [[ADD60:%.*]] = add nsw i32 [[TMP34]], 1
16849 // CHECK23-NEXT: store i32 [[ADD60]], ptr [[DOTOMP_IV50]], align 4, !llvm.access.group [[ACC_GRP10]]
16850 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND52]], !llvm.loop [[LOOP11:![0-9]+]]
16851 // CHECK23: omp.inner.for.end61:
16852 // CHECK23-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_40]], align 4
16853 // CHECK23-NEXT: [[SUB62:%.*]] = sub nsw i32 [[TMP35]], 0
16854 // CHECK23-NEXT: [[DIV63:%.*]] = sdiv i32 [[SUB62]], 1
16855 // CHECK23-NEXT: [[MUL64:%.*]] = mul nsw i32 [[DIV63]], 1
16856 // CHECK23-NEXT: [[ADD65:%.*]] = add nsw i32 0, [[MUL64]]
16857 // CHECK23-NEXT: store i32 [[ADD65]], ptr [[I51]], align 4
16858 // CHECK23-NEXT: br label [[SIMD_IF_END66]]
16859 // CHECK23: simd.if.end66:
16860 // CHECK23-NEXT: [[TMP36:%.*]] = load i32, ptr [[N]], align 4
16861 // CHECK23-NEXT: store i32 [[TMP36]], ptr [[DOTCAPTURE_EXPR_68]], align 4
16862 // CHECK23-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_68]], align 4
16863 // CHECK23-NEXT: [[SUB70:%.*]] = sub nsw i32 [[TMP37]], 0
16864 // CHECK23-NEXT: [[DIV71:%.*]] = sdiv i32 [[SUB70]], 1
16865 // CHECK23-NEXT: [[SUB72:%.*]] = sub nsw i32 [[DIV71]], 1
16866 // CHECK23-NEXT: store i32 [[SUB72]], ptr [[DOTCAPTURE_EXPR_69]], align 4
16867 // CHECK23-NEXT: store i32 0, ptr [[DOTOMP_LB73]], align 4
16868 // CHECK23-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_69]], align 4
16869 // CHECK23-NEXT: store i32 [[TMP38]], ptr [[DOTOMP_UB74]], align 4
16870 // CHECK23-NEXT: store i32 0, ptr [[I75]], align 4
16871 // CHECK23-NEXT: [[TMP39:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_68]], align 4
16872 // CHECK23-NEXT: [[CMP76:%.*]] = icmp slt i32 0, [[TMP39]]
16873 // CHECK23-NEXT: br i1 [[CMP76]], label [[SIMD_IF_THEN77:%.*]], label [[SIMD_IF_END94:%.*]]
16874 // CHECK23: simd.if.then77:
16875 // CHECK23-NEXT: [[TMP40:%.*]] = load i32, ptr [[DOTOMP_LB73]], align 4
16876 // CHECK23-NEXT: store i32 [[TMP40]], ptr [[DOTOMP_IV78]], align 4
16877 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND80:%.*]]
16878 // CHECK23: omp.inner.for.cond80:
16879 // CHECK23-NEXT: [[TMP41:%.*]] = load i32, ptr [[DOTOMP_IV78]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]]
16880 // CHECK23-NEXT: [[TMP42:%.*]] = load i32, ptr [[DOTOMP_UB74]], align 4, !llvm.access.group [[ACC_GRP13]]
16881 // CHECK23-NEXT: [[CMP81:%.*]] = icmp sle i32 [[TMP41]], [[TMP42]]
16882 // CHECK23-NEXT: br i1 [[CMP81]], label [[OMP_INNER_FOR_BODY82:%.*]], label [[OMP_INNER_FOR_END89:%.*]]
16883 // CHECK23: omp.inner.for.body82:
16884 // CHECK23-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTOMP_IV78]], align 4, !llvm.access.group [[ACC_GRP13]]
16885 // CHECK23-NEXT: [[MUL83:%.*]] = mul nsw i32 [[TMP43]], 1
16886 // CHECK23-NEXT: [[ADD84:%.*]] = add nsw i32 0, [[MUL83]]
16887 // CHECK23-NEXT: store i32 [[ADD84]], ptr [[I79]], align 4, !llvm.access.group [[ACC_GRP13]]
16888 // CHECK23-NEXT: [[TMP44:%.*]] = load i32, ptr [[I79]], align 4, !llvm.access.group [[ACC_GRP13]]
16889 // CHECK23-NEXT: [[ARRAYIDX85:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i32 [[TMP44]]
16890 // CHECK23-NEXT: store i32 0, ptr [[ARRAYIDX85]], align 4, !llvm.access.group [[ACC_GRP13]]
16891 // CHECK23-NEXT: br label [[OMP_BODY_CONTINUE86:%.*]]
16892 // CHECK23: omp.body.continue86:
16893 // CHECK23-NEXT: br label [[OMP_INNER_FOR_INC87:%.*]]
16894 // CHECK23: omp.inner.for.inc87:
16895 // CHECK23-NEXT: [[TMP45:%.*]] = load i32, ptr [[DOTOMP_IV78]], align 4, !llvm.access.group [[ACC_GRP13]]
16896 // CHECK23-NEXT: [[ADD88:%.*]] = add nsw i32 [[TMP45]], 1
16897 // CHECK23-NEXT: store i32 [[ADD88]], ptr [[DOTOMP_IV78]], align 4, !llvm.access.group [[ACC_GRP13]]
16898 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND80]], !llvm.loop [[LOOP14:![0-9]+]]
16899 // CHECK23: omp.inner.for.end89:
16900 // CHECK23-NEXT: [[TMP46:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_68]], align 4
16901 // CHECK23-NEXT: [[SUB90:%.*]] = sub nsw i32 [[TMP46]], 0
16902 // CHECK23-NEXT: [[DIV91:%.*]] = sdiv i32 [[SUB90]], 1
16903 // CHECK23-NEXT: [[MUL92:%.*]] = mul nsw i32 [[DIV91]], 1
16904 // CHECK23-NEXT: [[ADD93:%.*]] = add nsw i32 0, [[MUL92]]
16905 // CHECK23-NEXT: store i32 [[ADD93]], ptr [[I79]], align 4
16906 // CHECK23-NEXT: br label [[SIMD_IF_END94]]
16907 // CHECK23: simd.if.end94:
16908 // CHECK23-NEXT: [[TMP47:%.*]] = load i32, ptr [[M]], align 4
16909 // CHECK23-NEXT: store i32 [[TMP47]], ptr [[DOTCAPTURE_EXPR_95]], align 4
16910 // CHECK23-NEXT: [[TMP48:%.*]] = load i32, ptr [[N]], align 4
16911 // CHECK23-NEXT: store i32 [[TMP48]], ptr [[DOTCAPTURE_EXPR_97]], align 4
16912 // CHECK23-NEXT: [[TMP49:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_97]], align 4
16913 // CHECK23-NEXT: [[SUB99:%.*]] = sub nsw i32 [[TMP49]], 0
16914 // CHECK23-NEXT: [[DIV100:%.*]] = sdiv i32 [[SUB99]], 1
16915 // CHECK23-NEXT: [[SUB101:%.*]] = sub nsw i32 [[DIV100]], 1
16916 // CHECK23-NEXT: store i32 [[SUB101]], ptr [[DOTCAPTURE_EXPR_98]], align 4
16917 // CHECK23-NEXT: store i32 0, ptr [[DOTOMP_LB102]], align 4
16918 // CHECK23-NEXT: [[TMP50:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_98]], align 4
16919 // CHECK23-NEXT: store i32 [[TMP50]], ptr [[DOTOMP_UB103]], align 4
16920 // CHECK23-NEXT: store i32 0, ptr [[I104]], align 4
16921 // CHECK23-NEXT: [[TMP51:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_97]], align 4
16922 // CHECK23-NEXT: [[CMP105:%.*]] = icmp slt i32 0, [[TMP51]]
16923 // CHECK23-NEXT: br i1 [[CMP105]], label [[SIMD_IF_THEN106:%.*]], label [[SIMD_IF_END123:%.*]]
16924 // CHECK23: simd.if.then106:
16925 // CHECK23-NEXT: [[TMP52:%.*]] = load i32, ptr [[DOTOMP_LB102]], align 4
16926 // CHECK23-NEXT: store i32 [[TMP52]], ptr [[DOTOMP_IV107]], align 4
16927 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND109:%.*]]
16928 // CHECK23: omp.inner.for.cond109:
16929 // CHECK23-NEXT: [[TMP53:%.*]] = load i32, ptr [[DOTOMP_IV107]], align 4, !llvm.access.group [[ACC_GRP16:![0-9]+]]
16930 // CHECK23-NEXT: [[TMP54:%.*]] = load i32, ptr [[DOTOMP_UB103]], align 4, !llvm.access.group [[ACC_GRP16]]
16931 // CHECK23-NEXT: [[CMP110:%.*]] = icmp sle i32 [[TMP53]], [[TMP54]]
16932 // CHECK23-NEXT: br i1 [[CMP110]], label [[OMP_INNER_FOR_BODY111:%.*]], label [[OMP_INNER_FOR_END118:%.*]]
16933 // CHECK23: omp.inner.for.body111:
16934 // CHECK23-NEXT: [[TMP55:%.*]] = load i32, ptr [[DOTOMP_IV107]], align 4, !llvm.access.group [[ACC_GRP16]]
16935 // CHECK23-NEXT: [[MUL112:%.*]] = mul nsw i32 [[TMP55]], 1
16936 // CHECK23-NEXT: [[ADD113:%.*]] = add nsw i32 0, [[MUL112]]
16937 // CHECK23-NEXT: store i32 [[ADD113]], ptr [[I108]], align 4, !llvm.access.group [[ACC_GRP16]]
16938 // CHECK23-NEXT: [[TMP56:%.*]] = load i32, ptr [[I108]], align 4, !llvm.access.group [[ACC_GRP16]]
16939 // CHECK23-NEXT: [[ARRAYIDX114:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i32 [[TMP56]]
16940 // CHECK23-NEXT: store i32 0, ptr [[ARRAYIDX114]], align 4, !llvm.access.group [[ACC_GRP16]]
16941 // CHECK23-NEXT: br label [[OMP_BODY_CONTINUE115:%.*]]
16942 // CHECK23: omp.body.continue115:
16943 // CHECK23-NEXT: br label [[OMP_INNER_FOR_INC116:%.*]]
16944 // CHECK23: omp.inner.for.inc116:
16945 // CHECK23-NEXT: [[TMP57:%.*]] = load i32, ptr [[DOTOMP_IV107]], align 4, !llvm.access.group [[ACC_GRP16]]
16946 // CHECK23-NEXT: [[ADD117:%.*]] = add nsw i32 [[TMP57]], 1
16947 // CHECK23-NEXT: store i32 [[ADD117]], ptr [[DOTOMP_IV107]], align 4, !llvm.access.group [[ACC_GRP16]]
16948 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND109]], !llvm.loop [[LOOP17:![0-9]+]]
16949 // CHECK23: omp.inner.for.end118:
16950 // CHECK23-NEXT: [[TMP58:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_97]], align 4
16951 // CHECK23-NEXT: [[SUB119:%.*]] = sub nsw i32 [[TMP58]], 0
16952 // CHECK23-NEXT: [[DIV120:%.*]] = sdiv i32 [[SUB119]], 1
16953 // CHECK23-NEXT: [[MUL121:%.*]] = mul nsw i32 [[DIV120]], 1
16954 // CHECK23-NEXT: [[ADD122:%.*]] = add nsw i32 0, [[MUL121]]
16955 // CHECK23-NEXT: store i32 [[ADD122]], ptr [[I108]], align 4
16956 // CHECK23-NEXT: br label [[SIMD_IF_END123]]
16957 // CHECK23: simd.if.end123:
16958 // CHECK23-NEXT: [[TMP59:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4
16959 // CHECK23-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP59]])
16960 // CHECK23-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4
16961 // CHECK23-NEXT: [[TMP60:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4
16962 // CHECK23-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP60]])
16963 // CHECK23-NEXT: [[TMP61:%.*]] = load i32, ptr [[RETVAL]], align 4
16964 // CHECK23-NEXT: ret i32 [[TMP61]]
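// The remaining CHECK23 assertions cover the tmain<int, 10> instantiation
// (_Z5tmainIiLi10EEiT_). Its five fixed-trip-count loops over a[10] each
// collapse to a sequential simd loop (no simd.if guard is needed because the
// trip count is a known-positive constant), and the loads of 'm' into
// .capture_expr. show that the operand of a clause on the discarded outer
// constructs is still evaluated even though it no longer influences code
// generation in simd-only mode.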
16967 // CHECK23-LABEL: define {{[^@]+}}@_Z5tmainIiLi10EEiT_
16968 // CHECK23-SAME: (i32 noundef [[ARGC:%.*]]) #[[ATTR2:[0-9]+]] comdat {
16969 // CHECK23-NEXT: entry:
16970 // CHECK23-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4
16971 // CHECK23-NEXT: [[A:%.*]] = alloca [10 x i32], align 4
16972 // CHECK23-NEXT: [[M:%.*]] = alloca i32, align 4
16973 // CHECK23-NEXT: [[TMP:%.*]] = alloca i32, align 4
16974 // CHECK23-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
16975 // CHECK23-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
16976 // CHECK23-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
16977 // CHECK23-NEXT: [[I:%.*]] = alloca i32, align 4
16978 // CHECK23-NEXT: [[_TMP2:%.*]] = alloca i32, align 4
16979 // CHECK23-NEXT: [[DOTOMP_LB3:%.*]] = alloca i32, align 4
16980 // CHECK23-NEXT: [[DOTOMP_UB4:%.*]] = alloca i32, align 4
16981 // CHECK23-NEXT: [[DOTOMP_IV5:%.*]] = alloca i32, align 4
16982 // CHECK23-NEXT: [[I6:%.*]] = alloca i32, align 4
16983 // CHECK23-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
16984 // CHECK23-NEXT: [[_TMP17:%.*]] = alloca i32, align 4
16985 // CHECK23-NEXT: [[DOTOMP_LB18:%.*]] = alloca i32, align 4
16986 // CHECK23-NEXT: [[DOTOMP_UB19:%.*]] = alloca i32, align 4
16987 // CHECK23-NEXT: [[DOTOMP_IV20:%.*]] = alloca i32, align 4
16988 // CHECK23-NEXT: [[I21:%.*]] = alloca i32, align 4
16989 // CHECK23-NEXT: [[_TMP32:%.*]] = alloca i32, align 4
16990 // CHECK23-NEXT: [[DOTOMP_LB33:%.*]] = alloca i32, align 4
16991 // CHECK23-NEXT: [[DOTOMP_UB34:%.*]] = alloca i32, align 4
16992 // CHECK23-NEXT: [[DOTOMP_IV35:%.*]] = alloca i32, align 4
16993 // CHECK23-NEXT: [[I36:%.*]] = alloca i32, align 4
16994 // CHECK23-NEXT: [[DOTCAPTURE_EXPR_47:%.*]] = alloca i32, align 4
16995 // CHECK23-NEXT: [[_TMP48:%.*]] = alloca i32, align 4
16996 // CHECK23-NEXT: [[DOTOMP_LB49:%.*]] = alloca i32, align 4
16997 // CHECK23-NEXT: [[DOTOMP_UB50:%.*]] = alloca i32, align 4
16998 // CHECK23-NEXT: [[DOTOMP_IV51:%.*]] = alloca i32, align 4
16999 // CHECK23-NEXT: [[I52:%.*]] = alloca i32, align 4
17000 // CHECK23-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4
17001 // CHECK23-NEXT: store i32 10, ptr [[M]], align 4
17002 // CHECK23-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
17003 // CHECK23-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
17004 // CHECK23-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
17005 // CHECK23-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4
17006 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
17007 // CHECK23: omp.inner.for.cond:
17008 // CHECK23-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19:![0-9]+]]
17009 // CHECK23-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP19]]
17010 // CHECK23-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
17011 // CHECK23-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
17012 // CHECK23: omp.inner.for.body:
17013 // CHECK23-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]]
17014 // CHECK23-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
17015 // CHECK23-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
17016 // CHECK23-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]]
17017 // CHECK23-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]]
17018 // CHECK23-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[A]], i32 0, i32 [[TMP4]]
17019 // CHECK23-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP19]]
17020 // CHECK23-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
17021 // CHECK23: omp.body.continue:
17022 // CHECK23-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
17023 // CHECK23: omp.inner.for.inc:
17024 // CHECK23-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]]
17025 // CHECK23-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP5]], 1
17026 // CHECK23-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]]
17027 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]]
17028 // CHECK23: omp.inner.for.end:
17029 // CHECK23-NEXT: store i32 10, ptr [[I]], align 4
17030 // CHECK23-NEXT: store i32 0, ptr [[DOTOMP_LB3]], align 4
17031 // CHECK23-NEXT: store i32 9, ptr [[DOTOMP_UB4]], align 4
17032 // CHECK23-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB3]], align 4
17033 // CHECK23-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV5]], align 4
17034 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]]
17035 // CHECK23: omp.inner.for.cond7:
17036 // CHECK23-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP22:![0-9]+]]
17037 // CHECK23-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB4]], align 4, !llvm.access.group [[ACC_GRP22]]
17038 // CHECK23-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
17039 // CHECK23-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END16:%.*]]
17040 // CHECK23: omp.inner.for.body9:
17041 // CHECK23-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP22]]
17042 // CHECK23-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP9]], 1
17043 // CHECK23-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]]
17044 // CHECK23-NEXT: store i32 [[ADD11]], ptr [[I6]], align 4, !llvm.access.group [[ACC_GRP22]]
17045 // CHECK23-NEXT: [[TMP10:%.*]] = load i32, ptr [[I6]], align 4, !llvm.access.group [[ACC_GRP22]]
17046 // CHECK23-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds [10 x i32], ptr [[A]], i32 0, i32 [[TMP10]]
17047 // CHECK23-NEXT: store i32 0, ptr [[ARRAYIDX12]], align 4, !llvm.access.group [[ACC_GRP22]]
17048 // CHECK23-NEXT: br label [[OMP_BODY_CONTINUE13:%.*]]
17049 // CHECK23: omp.body.continue13:
17050 // CHECK23-NEXT: br label [[OMP_INNER_FOR_INC14:%.*]]
17051 // CHECK23: omp.inner.for.inc14:
17052 // CHECK23-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP22]]
17053 // CHECK23-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP11]], 1
17054 // CHECK23-NEXT: store i32 [[ADD15]], ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP22]]
17055 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP23:![0-9]+]]
17056 // CHECK23: omp.inner.for.end16:
17057 // CHECK23-NEXT: store i32 10, ptr [[I6]], align 4
17058 // CHECK23-NEXT: [[TMP12:%.*]] = load i32, ptr [[M]], align 4
17059 // CHECK23-NEXT: store i32 [[TMP12]], ptr [[DOTCAPTURE_EXPR_]], align 4
17060 // CHECK23-NEXT: store i32 0, ptr [[DOTOMP_LB18]], align 4
17061 // CHECK23-NEXT: store i32 9, ptr [[DOTOMP_UB19]], align 4
17062 // CHECK23-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB18]], align 4
17063 // CHECK23-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV20]], align 4
17064 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND22:%.*]]
17065 // CHECK23: omp.inner.for.cond22:
17066 // CHECK23-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV20]], align 4, !llvm.access.group [[ACC_GRP25:![0-9]+]]
17067 // CHECK23-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB19]], align 4, !llvm.access.group [[ACC_GRP25]]
17068 // CHECK23-NEXT: [[CMP23:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
17069 // CHECK23-NEXT: br i1 [[CMP23]], label [[OMP_INNER_FOR_BODY24:%.*]], label [[OMP_INNER_FOR_END31:%.*]]
17070 // CHECK23: omp.inner.for.body24:
17071 // CHECK23-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV20]], align 4, !llvm.access.group [[ACC_GRP25]]
17072 // CHECK23-NEXT: [[MUL25:%.*]] = mul nsw i32 [[TMP16]], 1
17073 // CHECK23-NEXT: [[ADD26:%.*]] = add nsw i32 0, [[MUL25]]
17074 // CHECK23-NEXT: store i32 [[ADD26]], ptr [[I21]], align 4, !llvm.access.group [[ACC_GRP25]]
17075 // CHECK23-NEXT: [[TMP17:%.*]] = load i32, ptr [[I21]], align 4, !llvm.access.group [[ACC_GRP25]]
17076 // CHECK23-NEXT: [[ARRAYIDX27:%.*]] = getelementptr inbounds [10 x i32], ptr [[A]], i32 0, i32 [[TMP17]]
17077 // CHECK23-NEXT: store i32 0, ptr [[ARRAYIDX27]], align 4, !llvm.access.group [[ACC_GRP25]]
17078 // CHECK23-NEXT: br label [[OMP_BODY_CONTINUE28:%.*]]
17079 // CHECK23: omp.body.continue28:
17080 // CHECK23-NEXT: br label [[OMP_INNER_FOR_INC29:%.*]]
17081 // CHECK23: omp.inner.for.inc29:
17082 // CHECK23-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV20]], align 4, !llvm.access.group [[ACC_GRP25]]
17083 // CHECK23-NEXT: [[ADD30:%.*]] = add nsw i32 [[TMP18]], 1
17084 // CHECK23-NEXT: store i32 [[ADD30]], ptr [[DOTOMP_IV20]], align 4, !llvm.access.group [[ACC_GRP25]]
17085 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND22]], !llvm.loop [[LOOP26:![0-9]+]]
17086 // CHECK23: omp.inner.for.end31:
17087 // CHECK23-NEXT: store i32 10, ptr [[I21]], align 4
17088 // CHECK23-NEXT: store i32 0, ptr [[DOTOMP_LB33]], align 4
17089 // CHECK23-NEXT: store i32 9, ptr [[DOTOMP_UB34]], align 4
17090 // CHECK23-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_LB33]], align 4
17091 // CHECK23-NEXT: store i32 [[TMP19]], ptr [[DOTOMP_IV35]], align 4
17092 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND37:%.*]]
17093 // CHECK23: omp.inner.for.cond37:
17094 // CHECK23-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV35]], align 4, !llvm.access.group [[ACC_GRP28:![0-9]+]]
17095 // CHECK23-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_UB34]], align 4, !llvm.access.group [[ACC_GRP28]]
17096 // CHECK23-NEXT: [[CMP38:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]]
17097 // CHECK23-NEXT: br i1 [[CMP38]], label [[OMP_INNER_FOR_BODY39:%.*]], label [[OMP_INNER_FOR_END46:%.*]]
17098 // CHECK23: omp.inner.for.body39:
17099 // CHECK23-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IV35]], align 4, !llvm.access.group [[ACC_GRP28]]
17100 // CHECK23-NEXT: [[MUL40:%.*]] = mul nsw i32 [[TMP22]], 1
17101 // CHECK23-NEXT: [[ADD41:%.*]] = add nsw i32 0, [[MUL40]]
17102 // CHECK23-NEXT: store i32 [[ADD41]], ptr [[I36]], align 4, !llvm.access.group [[ACC_GRP28]]
17103 // CHECK23-NEXT: [[TMP23:%.*]] = load i32, ptr [[I36]], align 4, !llvm.access.group [[ACC_GRP28]]
17104 // CHECK23-NEXT: [[ARRAYIDX42:%.*]] = getelementptr inbounds [10 x i32], ptr [[A]], i32 0, i32 [[TMP23]]
17105 // CHECK23-NEXT: store i32 0, ptr [[ARRAYIDX42]], align 4, !llvm.access.group [[ACC_GRP28]]
17106 // CHECK23-NEXT: br label [[OMP_BODY_CONTINUE43:%.*]]
17107 // CHECK23: omp.body.continue43:
17108 // CHECK23-NEXT: br label [[OMP_INNER_FOR_INC44:%.*]]
17109 // CHECK23: omp.inner.for.inc44:
17110 // CHECK23-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV35]], align 4, !llvm.access.group [[ACC_GRP28]]
17111 // CHECK23-NEXT: [[ADD45:%.*]] = add nsw i32 [[TMP24]], 1
17112 // CHECK23-NEXT: store i32 [[ADD45]], ptr [[DOTOMP_IV35]], align 4, !llvm.access.group [[ACC_GRP28]]
17113 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND37]], !llvm.loop [[LOOP29:![0-9]+]]
17114 // CHECK23: omp.inner.for.end46:
17115 // CHECK23-NEXT: store i32 10, ptr [[I36]], align 4
17116 // CHECK23-NEXT: [[TMP25:%.*]] = load i32, ptr [[M]], align 4
17117 // CHECK23-NEXT: store i32 [[TMP25]], ptr [[DOTCAPTURE_EXPR_47]], align 4
17118 // CHECK23-NEXT: store i32 0, ptr [[DOTOMP_LB49]], align 4
17119 // CHECK23-NEXT: store i32 9, ptr [[DOTOMP_UB50]], align 4
17120 // CHECK23-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_LB49]], align 4
17121 // CHECK23-NEXT: store i32 [[TMP26]], ptr [[DOTOMP_IV51]], align 4
17122 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND53:%.*]]
17123 // CHECK23: omp.inner.for.cond53:
17124 // CHECK23-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV51]], align 4, !llvm.access.group [[ACC_GRP31:![0-9]+]]
17125 // CHECK23-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_UB50]], align 4, !llvm.access.group [[ACC_GRP31]]
17126 // CHECK23-NEXT: [[CMP54:%.*]] = icmp sle i32 [[TMP27]], [[TMP28]]
17127 // CHECK23-NEXT: br i1 [[CMP54]], label [[OMP_INNER_FOR_BODY55:%.*]], label [[OMP_INNER_FOR_END62:%.*]]
17128 // CHECK23: omp.inner.for.body55:
17129 // CHECK23-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_IV51]], align 4, !llvm.access.group [[ACC_GRP31]]
17130 // CHECK23-NEXT: [[MUL56:%.*]] = mul nsw i32 [[TMP29]], 1
17131 // CHECK23-NEXT: [[ADD57:%.*]] = add nsw i32 0, [[MUL56]]
17132 // CHECK23-NEXT: store i32 [[ADD57]], ptr [[I52]], align 4, !llvm.access.group [[ACC_GRP31]]
17133 // CHECK23-NEXT: [[TMP30:%.*]] = load i32, ptr [[I52]], align 4, !llvm.access.group [[ACC_GRP31]]
17134 // CHECK23-NEXT: [[ARRAYIDX58:%.*]] = getelementptr inbounds [10 x i32], ptr [[A]], i32 0, i32 [[TMP30]]
17135 // CHECK23-NEXT: store i32 0, ptr [[ARRAYIDX58]], align 4, !llvm.access.group [[ACC_GRP31]]
17136 // CHECK23-NEXT: br label [[OMP_BODY_CONTINUE59:%.*]]
17137 // CHECK23: omp.body.continue59:
17138 // CHECK23-NEXT: br label [[OMP_INNER_FOR_INC60:%.*]]
17139 // CHECK23: omp.inner.for.inc60:
17140 // CHECK23-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_IV51]], align 4, !llvm.access.group [[ACC_GRP31]]
17141 // CHECK23-NEXT: [[ADD61:%.*]] = add nsw i32 [[TMP31]], 1
17142 // CHECK23-NEXT: store i32 [[ADD61]], ptr [[DOTOMP_IV51]], align 4, !llvm.access.group [[ACC_GRP31]]
17143 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND53]], !llvm.loop [[LOOP32:![0-9]+]]
17144 // CHECK23: omp.inner.for.end62:
17145 // CHECK23-NEXT: store i32 10, ptr [[I52]], align 4
17146 // CHECK23-NEXT: ret i32 0
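// For orientation only (not part of the test source): a hypothetical loop of
// the shape whose simd-only lowering matches the assertions above. With
// -fopenmp-simd, only the simd level of a combined construct is honored, so a
// directive such as
//
//   #pragma omp target teams distribute parallel for simd
//   for (int i = 0; i < 10; ++i)
//     a[i] = 0;
//
// is emitted as the plain sequential loop checked above: an
// omp.inner.for.cond/body/inc ring whose memory accesses carry
// !llvm.access.group metadata and whose back edge carries an !llvm.loop
// annotation, with no calls into the OpenMP runtime.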