// Source: llvm-project — clang/test/OpenMP/nvptx_target_simd_codegen.cpp
// (blob 9f98c18ab1dcf830143d963dd42670c1594e7372; commit "[clang] Implement
// lifetime analysis for lifetime_capture_by(X) (#115921)")
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// Test target codegen - host bc file has to be created first.
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -fopenmp-cuda-mode -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -fopenmp-cuda-mode -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefix=CHECK45-64
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -fopenmp-cuda-mode -x c++ -triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm-bc %s -o %t-x86-host.bc
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -fopenmp-cuda-mode -x c++ -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix=CHECK45-32
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -fopenmp-cuda-mode -fexceptions -fcxx-exceptions -x c++ -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix=CHECK45-32-EX

// RUN: %clang_cc1 -verify -fopenmp -fopenmp-cuda-mode -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-cuda-mode -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefix=CHECK-64
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-cuda-mode -x c++ -triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm-bc %s -o %t-x86-host.bc
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-cuda-mode -x c++ -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix=CHECK-32
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-cuda-mode -fexceptions -fcxx-exceptions -x c++ -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix=CHECK-32-EX

// expected-no-diagnostics
16 #ifndef HEADER
17 #define HEADER
19 // Check that the execution mode of all 2 target regions on the gpu is set to NonSPMD Mode.
21 #define N 1000
23 template<typename tx>
24 tx ftemplate(int n) {
25 tx a[N];
26 short aa[N];
27 tx b[10];
29 #pragma omp target simd
30 for(int i = 0; i < n; i++) {
31 a[i] = 1;
34 #pragma omp target simd
35 for (int i = 0; i < n; i++) {
36 aa[i] += 1;
39 #pragma omp target simd
40 for(int i = 0; i < 10; i++) {
41 b[i] += 1;
44 #pragma omp target simd reduction(+:n)
45 for(int i = 0; i < 10; i++) {
46 b[i] += 1;
49 return a[0];
52 int bar(int n){
53 int a = 0;
55 a += ftemplate<int>(n);
57 return a;
60 #endif
61 // CHECK45-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l29
62 // CHECK45-64-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i64 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(4000) [[A:%.*]]) #[[ATTR0:[0-9]+]] {
63 // CHECK45-64-NEXT: entry:
64 // CHECK45-64-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
65 // CHECK45-64-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
66 // CHECK45-64-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
67 // CHECK45-64-NEXT: [[TMP:%.*]] = alloca i32, align 4
68 // CHECK45-64-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
69 // CHECK45-64-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
70 // CHECK45-64-NEXT: [[I:%.*]] = alloca i32, align 4
71 // CHECK45-64-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
72 // CHECK45-64-NEXT: [[I3:%.*]] = alloca i32, align 4
73 // CHECK45-64-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
74 // CHECK45-64-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
75 // CHECK45-64-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
76 // CHECK45-64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
77 // CHECK45-64-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l29_kernel_environment, ptr [[DYN_PTR]])
78 // CHECK45-64-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
79 // CHECK45-64-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
80 // CHECK45-64: user_code.entry:
81 // CHECK45-64-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4
82 // CHECK45-64-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 4
83 // CHECK45-64-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
84 // CHECK45-64-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
85 // CHECK45-64-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
86 // CHECK45-64-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
87 // CHECK45-64-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
88 // CHECK45-64-NEXT: store i32 0, ptr [[I]], align 4
89 // CHECK45-64-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
90 // CHECK45-64-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
91 // CHECK45-64-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
92 // CHECK45-64: simd.if.then:
93 // CHECK45-64-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4
94 // CHECK45-64-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
95 // CHECK45-64: omp.inner.for.cond:
96 // CHECK45-64-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24:![0-9]+]]
97 // CHECK45-64-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group [[ACC_GRP24]]
98 // CHECK45-64-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], 1
99 // CHECK45-64-NEXT: [[CMP4:%.*]] = icmp slt i32 [[TMP5]], [[ADD]]
100 // CHECK45-64-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
101 // CHECK45-64: omp.inner.for.body:
102 // CHECK45-64-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
103 // CHECK45-64-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
104 // CHECK45-64-NEXT: [[ADD5:%.*]] = add nsw i32 0, [[MUL]]
105 // CHECK45-64-NEXT: store i32 [[ADD5]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP24]]
106 // CHECK45-64-NEXT: [[TMP8:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP24]]
107 // CHECK45-64-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP8]] to i64
108 // CHECK45-64-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
109 // CHECK45-64-NEXT: store i32 1, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP24]]
110 // CHECK45-64-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
111 // CHECK45-64: omp.body.continue:
112 // CHECK45-64-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
113 // CHECK45-64: omp.inner.for.inc:
114 // CHECK45-64-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
115 // CHECK45-64-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP9]], 1
116 // CHECK45-64-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
117 // CHECK45-64-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
118 // CHECK45-64: worker.exit:
119 // CHECK45-64-NEXT: ret void
120 // CHECK45-64: omp.inner.for.end:
121 // CHECK45-64-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
122 // CHECK45-64-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP10]], 0
123 // CHECK45-64-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
124 // CHECK45-64-NEXT: [[MUL9:%.*]] = mul nsw i32 [[DIV8]], 1
125 // CHECK45-64-NEXT: [[ADD10:%.*]] = add nsw i32 0, [[MUL9]]
126 // CHECK45-64-NEXT: store i32 [[ADD10]], ptr [[I3]], align 4
127 // CHECK45-64-NEXT: br label [[SIMD_IF_END]]
128 // CHECK45-64: simd.if.end:
129 // CHECK45-64-NEXT: call void @__kmpc_target_deinit()
130 // CHECK45-64-NEXT: ret void
133 // CHECK45-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l34
134 // CHECK45-64-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i64 noundef [[N:%.*]], ptr noundef nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
135 // CHECK45-64-NEXT: entry:
136 // CHECK45-64-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
137 // CHECK45-64-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
138 // CHECK45-64-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8
139 // CHECK45-64-NEXT: [[TMP:%.*]] = alloca i32, align 4
140 // CHECK45-64-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
141 // CHECK45-64-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
142 // CHECK45-64-NEXT: [[I:%.*]] = alloca i32, align 4
143 // CHECK45-64-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
144 // CHECK45-64-NEXT: [[I3:%.*]] = alloca i32, align 4
145 // CHECK45-64-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
146 // CHECK45-64-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
147 // CHECK45-64-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 8
148 // CHECK45-64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 8
149 // CHECK45-64-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l34_kernel_environment, ptr [[DYN_PTR]])
150 // CHECK45-64-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
151 // CHECK45-64-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
152 // CHECK45-64: user_code.entry:
153 // CHECK45-64-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4
154 // CHECK45-64-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 4
155 // CHECK45-64-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
156 // CHECK45-64-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
157 // CHECK45-64-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
158 // CHECK45-64-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
159 // CHECK45-64-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
160 // CHECK45-64-NEXT: store i32 0, ptr [[I]], align 4
161 // CHECK45-64-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
162 // CHECK45-64-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
163 // CHECK45-64-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
164 // CHECK45-64: simd.if.then:
165 // CHECK45-64-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4
166 // CHECK45-64-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
167 // CHECK45-64: omp.inner.for.cond:
168 // CHECK45-64-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28:![0-9]+]]
169 // CHECK45-64-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group [[ACC_GRP28]]
170 // CHECK45-64-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], 1
171 // CHECK45-64-NEXT: [[CMP4:%.*]] = icmp slt i32 [[TMP5]], [[ADD]]
172 // CHECK45-64-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
173 // CHECK45-64: omp.inner.for.body:
174 // CHECK45-64-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
175 // CHECK45-64-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
176 // CHECK45-64-NEXT: [[ADD5:%.*]] = add nsw i32 0, [[MUL]]
177 // CHECK45-64-NEXT: store i32 [[ADD5]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP28]]
178 // CHECK45-64-NEXT: [[TMP8:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP28]]
179 // CHECK45-64-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP8]] to i64
180 // CHECK45-64-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i16], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
181 // CHECK45-64-NEXT: [[TMP9:%.*]] = load i16, ptr [[ARRAYIDX]], align 2, !llvm.access.group [[ACC_GRP28]]
182 // CHECK45-64-NEXT: [[CONV:%.*]] = sext i16 [[TMP9]] to i32
183 // CHECK45-64-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV]], 1
184 // CHECK45-64-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16
185 // CHECK45-64-NEXT: store i16 [[CONV7]], ptr [[ARRAYIDX]], align 2, !llvm.access.group [[ACC_GRP28]]
186 // CHECK45-64-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
187 // CHECK45-64: omp.body.continue:
188 // CHECK45-64-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
189 // CHECK45-64: omp.inner.for.inc:
190 // CHECK45-64-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
191 // CHECK45-64-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP10]], 1
192 // CHECK45-64-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
193 // CHECK45-64-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]]
194 // CHECK45-64: worker.exit:
195 // CHECK45-64-NEXT: ret void
196 // CHECK45-64: omp.inner.for.end:
197 // CHECK45-64-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
198 // CHECK45-64-NEXT: [[SUB9:%.*]] = sub nsw i32 [[TMP11]], 0
199 // CHECK45-64-NEXT: [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1
200 // CHECK45-64-NEXT: [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1
201 // CHECK45-64-NEXT: [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
202 // CHECK45-64-NEXT: store i32 [[ADD12]], ptr [[I3]], align 4
203 // CHECK45-64-NEXT: br label [[SIMD_IF_END]]
204 // CHECK45-64: simd.if.end:
205 // CHECK45-64-NEXT: call void @__kmpc_target_deinit()
206 // CHECK45-64-NEXT: ret void
209 // CHECK45-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l39
210 // CHECK45-64-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
211 // CHECK45-64-NEXT: entry:
212 // CHECK45-64-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
213 // CHECK45-64-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
214 // CHECK45-64-NEXT: [[TMP:%.*]] = alloca i32, align 4
215 // CHECK45-64-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
216 // CHECK45-64-NEXT: [[I:%.*]] = alloca i32, align 4
217 // CHECK45-64-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
218 // CHECK45-64-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
219 // CHECK45-64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8
220 // CHECK45-64-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l39_kernel_environment, ptr [[DYN_PTR]])
221 // CHECK45-64-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
222 // CHECK45-64-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
223 // CHECK45-64: user_code.entry:
224 // CHECK45-64-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4
225 // CHECK45-64-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
226 // CHECK45-64: omp.inner.for.cond:
227 // CHECK45-64-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31:![0-9]+]]
228 // CHECK45-64-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP2]], 10
229 // CHECK45-64-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
230 // CHECK45-64: omp.inner.for.body:
231 // CHECK45-64-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]]
232 // CHECK45-64-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
233 // CHECK45-64-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
234 // CHECK45-64-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP31]]
235 // CHECK45-64-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP31]]
236 // CHECK45-64-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP4]] to i64
237 // CHECK45-64-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
238 // CHECK45-64-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP31]]
239 // CHECK45-64-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP5]], 1
240 // CHECK45-64-NEXT: store i32 [[ADD1]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP31]]
241 // CHECK45-64-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
242 // CHECK45-64: omp.body.continue:
243 // CHECK45-64-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
244 // CHECK45-64: omp.inner.for.inc:
245 // CHECK45-64-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]]
246 // CHECK45-64-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], 1
247 // CHECK45-64-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]]
248 // CHECK45-64-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP32:![0-9]+]]
249 // CHECK45-64: worker.exit:
250 // CHECK45-64-NEXT: ret void
251 // CHECK45-64: omp.inner.for.end:
252 // CHECK45-64-NEXT: store i32 10, ptr [[I]], align 4
253 // CHECK45-64-NEXT: call void @__kmpc_target_deinit()
254 // CHECK45-64-NEXT: ret void
257 // CHECK45-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l44
258 // CHECK45-64-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]]) #[[ATTR0]] {
259 // CHECK45-64-NEXT: entry:
260 // CHECK45-64-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
261 // CHECK45-64-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
262 // CHECK45-64-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 8
263 // CHECK45-64-NEXT: [[TMP:%.*]] = alloca i32, align 4
264 // CHECK45-64-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
265 // CHECK45-64-NEXT: [[I:%.*]] = alloca i32, align 4
266 // CHECK45-64-NEXT: [[N1:%.*]] = alloca i32, align 4
267 // CHECK45-64-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
268 // CHECK45-64-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
269 // CHECK45-64-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 8
270 // CHECK45-64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8
271 // CHECK45-64-NEXT: [[TMP1:%.*]] = load ptr, ptr [[N_ADDR]], align 8
272 // CHECK45-64-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l44_kernel_environment, ptr [[DYN_PTR]])
273 // CHECK45-64-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP2]], -1
274 // CHECK45-64-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
275 // CHECK45-64: user_code.entry:
276 // CHECK45-64-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4
277 // CHECK45-64-NEXT: store i32 0, ptr [[N1]], align 4
278 // CHECK45-64-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
279 // CHECK45-64: omp.inner.for.cond:
280 // CHECK45-64-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34:![0-9]+]]
281 // CHECK45-64-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP3]], 10
282 // CHECK45-64-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
283 // CHECK45-64: omp.inner.for.body:
284 // CHECK45-64-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]]
285 // CHECK45-64-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1
286 // CHECK45-64-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
287 // CHECK45-64-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP34]]
288 // CHECK45-64-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP34]]
289 // CHECK45-64-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP5]] to i64
290 // CHECK45-64-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
291 // CHECK45-64-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP34]]
292 // CHECK45-64-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], 1
293 // CHECK45-64-NEXT: store i32 [[ADD2]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP34]]
294 // CHECK45-64-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
295 // CHECK45-64: omp.body.continue:
296 // CHECK45-64-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
297 // CHECK45-64: omp.inner.for.inc:
298 // CHECK45-64-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]]
299 // CHECK45-64-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP7]], 1
300 // CHECK45-64-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]]
301 // CHECK45-64-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP35:![0-9]+]]
302 // CHECK45-64: worker.exit:
303 // CHECK45-64-NEXT: ret void
304 // CHECK45-64: omp.inner.for.end:
305 // CHECK45-64-NEXT: store i32 10, ptr [[I]], align 4
306 // CHECK45-64-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP1]], align 4
307 // CHECK45-64-NEXT: [[TMP9:%.*]] = load i32, ptr [[N1]], align 4
308 // CHECK45-64-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], [[TMP9]]
309 // CHECK45-64-NEXT: store i32 [[ADD4]], ptr [[TMP1]], align 4
310 // CHECK45-64-NEXT: call void @__kmpc_target_deinit()
311 // CHECK45-64-NEXT: ret void
314 // CHECK45-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l29
315 // CHECK45-32-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i32 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(4000) [[A:%.*]]) #[[ATTR0:[0-9]+]] {
316 // CHECK45-32-NEXT: entry:
317 // CHECK45-32-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
318 // CHECK45-32-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
319 // CHECK45-32-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
320 // CHECK45-32-NEXT: [[TMP:%.*]] = alloca i32, align 4
321 // CHECK45-32-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
322 // CHECK45-32-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
323 // CHECK45-32-NEXT: [[I:%.*]] = alloca i32, align 4
324 // CHECK45-32-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
325 // CHECK45-32-NEXT: [[I3:%.*]] = alloca i32, align 4
326 // CHECK45-32-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
327 // CHECK45-32-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
328 // CHECK45-32-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
329 // CHECK45-32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
330 // CHECK45-32-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l29_kernel_environment, ptr [[DYN_PTR]])
331 // CHECK45-32-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
332 // CHECK45-32-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
333 // CHECK45-32: user_code.entry:
334 // CHECK45-32-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4
335 // CHECK45-32-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 4
336 // CHECK45-32-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
337 // CHECK45-32-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
338 // CHECK45-32-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
339 // CHECK45-32-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
340 // CHECK45-32-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
341 // CHECK45-32-NEXT: store i32 0, ptr [[I]], align 4
342 // CHECK45-32-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
343 // CHECK45-32-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
344 // CHECK45-32-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
345 // CHECK45-32: simd.if.then:
346 // CHECK45-32-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4
347 // CHECK45-32-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
348 // CHECK45-32: omp.inner.for.cond:
349 // CHECK45-32-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24:![0-9]+]]
350 // CHECK45-32-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group [[ACC_GRP24]]
351 // CHECK45-32-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], 1
352 // CHECK45-32-NEXT: [[CMP4:%.*]] = icmp slt i32 [[TMP5]], [[ADD]]
353 // CHECK45-32-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
354 // CHECK45-32: omp.inner.for.body:
355 // CHECK45-32-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
356 // CHECK45-32-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
357 // CHECK45-32-NEXT: [[ADD5:%.*]] = add nsw i32 0, [[MUL]]
358 // CHECK45-32-NEXT: store i32 [[ADD5]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP24]]
359 // CHECK45-32-NEXT: [[TMP8:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP24]]
360 // CHECK45-32-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i32], ptr [[TMP0]], i32 0, i32 [[TMP8]]
361 // CHECK45-32-NEXT: store i32 1, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP24]]
362 // CHECK45-32-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
363 // CHECK45-32: omp.body.continue:
364 // CHECK45-32-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
365 // CHECK45-32: omp.inner.for.inc:
366 // CHECK45-32-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
367 // CHECK45-32-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP9]], 1
368 // CHECK45-32-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
369 // CHECK45-32-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
370 // CHECK45-32: worker.exit:
371 // CHECK45-32-NEXT: ret void
372 // CHECK45-32: omp.inner.for.end:
373 // CHECK45-32-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
374 // CHECK45-32-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP10]], 0
375 // CHECK45-32-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
376 // CHECK45-32-NEXT: [[MUL9:%.*]] = mul nsw i32 [[DIV8]], 1
377 // CHECK45-32-NEXT: [[ADD10:%.*]] = add nsw i32 0, [[MUL9]]
378 // CHECK45-32-NEXT: store i32 [[ADD10]], ptr [[I3]], align 4
379 // CHECK45-32-NEXT: br label [[SIMD_IF_END]]
380 // CHECK45-32: simd.if.end:
381 // CHECK45-32-NEXT: call void @__kmpc_target_deinit()
382 // CHECK45-32-NEXT: ret void
385 // CHECK45-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l34
386 // CHECK45-32-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i32 noundef [[N:%.*]], ptr noundef nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
387 // CHECK45-32-NEXT: entry:
388 // CHECK45-32-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
389 // CHECK45-32-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
390 // CHECK45-32-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 4
391 // CHECK45-32-NEXT: [[TMP:%.*]] = alloca i32, align 4
392 // CHECK45-32-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
393 // CHECK45-32-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
394 // CHECK45-32-NEXT: [[I:%.*]] = alloca i32, align 4
395 // CHECK45-32-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
396 // CHECK45-32-NEXT: [[I3:%.*]] = alloca i32, align 4
397 // CHECK45-32-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
398 // CHECK45-32-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
399 // CHECK45-32-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 4
400 // CHECK45-32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 4
401 // CHECK45-32-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l34_kernel_environment, ptr [[DYN_PTR]])
402 // CHECK45-32-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
403 // CHECK45-32-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
404 // CHECK45-32: user_code.entry:
405 // CHECK45-32-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4
406 // CHECK45-32-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 4
407 // CHECK45-32-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
408 // CHECK45-32-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
409 // CHECK45-32-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
410 // CHECK45-32-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
411 // CHECK45-32-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
412 // CHECK45-32-NEXT: store i32 0, ptr [[I]], align 4
413 // CHECK45-32-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
414 // CHECK45-32-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
415 // CHECK45-32-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
416 // CHECK45-32: simd.if.then:
417 // CHECK45-32-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4
418 // CHECK45-32-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
419 // CHECK45-32: omp.inner.for.cond:
420 // CHECK45-32-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28:![0-9]+]]
421 // CHECK45-32-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group [[ACC_GRP28]]
422 // CHECK45-32-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], 1
423 // CHECK45-32-NEXT: [[CMP4:%.*]] = icmp slt i32 [[TMP5]], [[ADD]]
424 // CHECK45-32-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
425 // CHECK45-32: omp.inner.for.body:
426 // CHECK45-32-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
427 // CHECK45-32-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
428 // CHECK45-32-NEXT: [[ADD5:%.*]] = add nsw i32 0, [[MUL]]
429 // CHECK45-32-NEXT: store i32 [[ADD5]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP28]]
430 // CHECK45-32-NEXT: [[TMP8:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP28]]
431 // CHECK45-32-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i16], ptr [[TMP0]], i32 0, i32 [[TMP8]]
432 // CHECK45-32-NEXT: [[TMP9:%.*]] = load i16, ptr [[ARRAYIDX]], align 2, !llvm.access.group [[ACC_GRP28]]
433 // CHECK45-32-NEXT: [[CONV:%.*]] = sext i16 [[TMP9]] to i32
434 // CHECK45-32-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV]], 1
435 // CHECK45-32-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16
436 // CHECK45-32-NEXT: store i16 [[CONV7]], ptr [[ARRAYIDX]], align 2, !llvm.access.group [[ACC_GRP28]]
437 // CHECK45-32-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
438 // CHECK45-32: omp.body.continue:
439 // CHECK45-32-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
440 // CHECK45-32: omp.inner.for.inc:
441 // CHECK45-32-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
442 // CHECK45-32-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP10]], 1
443 // CHECK45-32-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
444 // CHECK45-32-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]]
445 // CHECK45-32: worker.exit:
446 // CHECK45-32-NEXT: ret void
447 // CHECK45-32: omp.inner.for.end:
448 // CHECK45-32-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
449 // CHECK45-32-NEXT: [[SUB9:%.*]] = sub nsw i32 [[TMP11]], 0
450 // CHECK45-32-NEXT: [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1
451 // CHECK45-32-NEXT: [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1
452 // CHECK45-32-NEXT: [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
453 // CHECK45-32-NEXT: store i32 [[ADD12]], ptr [[I3]], align 4
454 // CHECK45-32-NEXT: br label [[SIMD_IF_END]]
455 // CHECK45-32: simd.if.end:
456 // CHECK45-32-NEXT: call void @__kmpc_target_deinit()
457 // CHECK45-32-NEXT: ret void
460 // CHECK45-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l39
461 // CHECK45-32-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
462 // CHECK45-32-NEXT: entry:
463 // CHECK45-32-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
464 // CHECK45-32-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
465 // CHECK45-32-NEXT: [[TMP:%.*]] = alloca i32, align 4
466 // CHECK45-32-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
467 // CHECK45-32-NEXT: [[I:%.*]] = alloca i32, align 4
468 // CHECK45-32-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
469 // CHECK45-32-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
470 // CHECK45-32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4
471 // CHECK45-32-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l39_kernel_environment, ptr [[DYN_PTR]])
472 // CHECK45-32-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
473 // CHECK45-32-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
474 // CHECK45-32: user_code.entry:
475 // CHECK45-32-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4
476 // CHECK45-32-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
477 // CHECK45-32: omp.inner.for.cond:
478 // CHECK45-32-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31:![0-9]+]]
479 // CHECK45-32-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP2]], 10
480 // CHECK45-32-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
481 // CHECK45-32: omp.inner.for.body:
482 // CHECK45-32-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]]
483 // CHECK45-32-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
484 // CHECK45-32-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
485 // CHECK45-32-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP31]]
486 // CHECK45-32-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP31]]
487 // CHECK45-32-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 [[TMP4]]
488 // CHECK45-32-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP31]]
489 // CHECK45-32-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP5]], 1
490 // CHECK45-32-NEXT: store i32 [[ADD1]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP31]]
491 // CHECK45-32-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
492 // CHECK45-32: omp.body.continue:
493 // CHECK45-32-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
494 // CHECK45-32: omp.inner.for.inc:
495 // CHECK45-32-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]]
496 // CHECK45-32-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], 1
497 // CHECK45-32-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]]
498 // CHECK45-32-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP32:![0-9]+]]
499 // CHECK45-32: worker.exit:
500 // CHECK45-32-NEXT: ret void
501 // CHECK45-32: omp.inner.for.end:
502 // CHECK45-32-NEXT: store i32 10, ptr [[I]], align 4
503 // CHECK45-32-NEXT: call void @__kmpc_target_deinit()
504 // CHECK45-32-NEXT: ret void
507 // CHECK45-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l44
508 // CHECK45-32-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]]) #[[ATTR0]] {
509 // CHECK45-32-NEXT: entry:
510 // CHECK45-32-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
511 // CHECK45-32-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
512 // CHECK45-32-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 4
513 // CHECK45-32-NEXT: [[TMP:%.*]] = alloca i32, align 4
514 // CHECK45-32-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
515 // CHECK45-32-NEXT: [[I:%.*]] = alloca i32, align 4
516 // CHECK45-32-NEXT: [[N1:%.*]] = alloca i32, align 4
517 // CHECK45-32-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
518 // CHECK45-32-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
519 // CHECK45-32-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 4
520 // CHECK45-32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4
521 // CHECK45-32-NEXT: [[TMP1:%.*]] = load ptr, ptr [[N_ADDR]], align 4
522 // CHECK45-32-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l44_kernel_environment, ptr [[DYN_PTR]])
523 // CHECK45-32-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP2]], -1
524 // CHECK45-32-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
525 // CHECK45-32: user_code.entry:
526 // CHECK45-32-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4
527 // CHECK45-32-NEXT: store i32 0, ptr [[N1]], align 4
528 // CHECK45-32-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
529 // CHECK45-32: omp.inner.for.cond:
530 // CHECK45-32-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34:![0-9]+]]
531 // CHECK45-32-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP3]], 10
532 // CHECK45-32-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
533 // CHECK45-32: omp.inner.for.body:
534 // CHECK45-32-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]]
535 // CHECK45-32-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1
536 // CHECK45-32-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
537 // CHECK45-32-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP34]]
538 // CHECK45-32-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP34]]
539 // CHECK45-32-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 [[TMP5]]
540 // CHECK45-32-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP34]]
541 // CHECK45-32-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], 1
542 // CHECK45-32-NEXT: store i32 [[ADD2]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP34]]
543 // CHECK45-32-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
544 // CHECK45-32: omp.body.continue:
545 // CHECK45-32-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
546 // CHECK45-32: omp.inner.for.inc:
547 // CHECK45-32-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]]
548 // CHECK45-32-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP7]], 1
549 // CHECK45-32-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]]
550 // CHECK45-32-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP35:![0-9]+]]
551 // CHECK45-32: worker.exit:
552 // CHECK45-32-NEXT: ret void
553 // CHECK45-32: omp.inner.for.end:
554 // CHECK45-32-NEXT: store i32 10, ptr [[I]], align 4
555 // CHECK45-32-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP1]], align 4
556 // CHECK45-32-NEXT: [[TMP9:%.*]] = load i32, ptr [[N1]], align 4
557 // CHECK45-32-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], [[TMP9]]
558 // CHECK45-32-NEXT: store i32 [[ADD4]], ptr [[TMP1]], align 4
559 // CHECK45-32-NEXT: call void @__kmpc_target_deinit()
560 // CHECK45-32-NEXT: ret void
563 // CHECK45-32-EX-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l29
564 // CHECK45-32-EX-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i32 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(4000) [[A:%.*]]) #[[ATTR0:[0-9]+]] {
565 // CHECK45-32-EX-NEXT: entry:
566 // CHECK45-32-EX-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
567 // CHECK45-32-EX-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
568 // CHECK45-32-EX-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
569 // CHECK45-32-EX-NEXT: [[TMP:%.*]] = alloca i32, align 4
570 // CHECK45-32-EX-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
571 // CHECK45-32-EX-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
572 // CHECK45-32-EX-NEXT: [[I:%.*]] = alloca i32, align 4
573 // CHECK45-32-EX-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
574 // CHECK45-32-EX-NEXT: [[I3:%.*]] = alloca i32, align 4
575 // CHECK45-32-EX-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
576 // CHECK45-32-EX-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
577 // CHECK45-32-EX-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
578 // CHECK45-32-EX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
579 // CHECK45-32-EX-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l29_kernel_environment, ptr [[DYN_PTR]])
580 // CHECK45-32-EX-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
581 // CHECK45-32-EX-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
582 // CHECK45-32-EX: user_code.entry:
583 // CHECK45-32-EX-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4
584 // CHECK45-32-EX-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 4
585 // CHECK45-32-EX-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
586 // CHECK45-32-EX-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
587 // CHECK45-32-EX-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
588 // CHECK45-32-EX-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
589 // CHECK45-32-EX-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
590 // CHECK45-32-EX-NEXT: store i32 0, ptr [[I]], align 4
591 // CHECK45-32-EX-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
592 // CHECK45-32-EX-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
593 // CHECK45-32-EX-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
594 // CHECK45-32-EX: simd.if.then:
595 // CHECK45-32-EX-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4
596 // CHECK45-32-EX-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
597 // CHECK45-32-EX: omp.inner.for.cond:
598 // CHECK45-32-EX-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24:![0-9]+]]
599 // CHECK45-32-EX-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group [[ACC_GRP24]]
600 // CHECK45-32-EX-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], 1
601 // CHECK45-32-EX-NEXT: [[CMP4:%.*]] = icmp slt i32 [[TMP5]], [[ADD]]
602 // CHECK45-32-EX-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
603 // CHECK45-32-EX: omp.inner.for.body:
604 // CHECK45-32-EX-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
605 // CHECK45-32-EX-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
606 // CHECK45-32-EX-NEXT: [[ADD5:%.*]] = add nsw i32 0, [[MUL]]
607 // CHECK45-32-EX-NEXT: store i32 [[ADD5]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP24]]
608 // CHECK45-32-EX-NEXT: [[TMP8:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP24]]
609 // CHECK45-32-EX-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i32], ptr [[TMP0]], i32 0, i32 [[TMP8]]
610 // CHECK45-32-EX-NEXT: store i32 1, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP24]]
611 // CHECK45-32-EX-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
612 // CHECK45-32-EX: omp.body.continue:
613 // CHECK45-32-EX-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
614 // CHECK45-32-EX: omp.inner.for.inc:
615 // CHECK45-32-EX-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
616 // CHECK45-32-EX-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP9]], 1
617 // CHECK45-32-EX-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
618 // CHECK45-32-EX-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
619 // CHECK45-32-EX: worker.exit:
620 // CHECK45-32-EX-NEXT: ret void
621 // CHECK45-32-EX: omp.inner.for.end:
622 // CHECK45-32-EX-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
623 // CHECK45-32-EX-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP10]], 0
624 // CHECK45-32-EX-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
625 // CHECK45-32-EX-NEXT: [[MUL9:%.*]] = mul nsw i32 [[DIV8]], 1
626 // CHECK45-32-EX-NEXT: [[ADD10:%.*]] = add nsw i32 0, [[MUL9]]
627 // CHECK45-32-EX-NEXT: store i32 [[ADD10]], ptr [[I3]], align 4
628 // CHECK45-32-EX-NEXT: br label [[SIMD_IF_END]]
629 // CHECK45-32-EX: simd.if.end:
630 // CHECK45-32-EX-NEXT: call void @__kmpc_target_deinit()
631 // CHECK45-32-EX-NEXT: ret void
634 // CHECK45-32-EX-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l34
635 // CHECK45-32-EX-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i32 noundef [[N:%.*]], ptr noundef nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
636 // CHECK45-32-EX-NEXT: entry:
637 // CHECK45-32-EX-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
638 // CHECK45-32-EX-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
639 // CHECK45-32-EX-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 4
640 // CHECK45-32-EX-NEXT: [[TMP:%.*]] = alloca i32, align 4
641 // CHECK45-32-EX-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
642 // CHECK45-32-EX-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
643 // CHECK45-32-EX-NEXT: [[I:%.*]] = alloca i32, align 4
644 // CHECK45-32-EX-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
645 // CHECK45-32-EX-NEXT: [[I3:%.*]] = alloca i32, align 4
646 // CHECK45-32-EX-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
647 // CHECK45-32-EX-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
648 // CHECK45-32-EX-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 4
649 // CHECK45-32-EX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 4
650 // CHECK45-32-EX-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l34_kernel_environment, ptr [[DYN_PTR]])
651 // CHECK45-32-EX-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
652 // CHECK45-32-EX-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
653 // CHECK45-32-EX: user_code.entry:
654 // CHECK45-32-EX-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4
655 // CHECK45-32-EX-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 4
656 // CHECK45-32-EX-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
657 // CHECK45-32-EX-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
658 // CHECK45-32-EX-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
659 // CHECK45-32-EX-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
660 // CHECK45-32-EX-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
661 // CHECK45-32-EX-NEXT: store i32 0, ptr [[I]], align 4
662 // CHECK45-32-EX-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
663 // CHECK45-32-EX-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
664 // CHECK45-32-EX-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
665 // CHECK45-32-EX: simd.if.then:
666 // CHECK45-32-EX-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4
667 // CHECK45-32-EX-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
668 // CHECK45-32-EX: omp.inner.for.cond:
669 // CHECK45-32-EX-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28:![0-9]+]]
670 // CHECK45-32-EX-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group [[ACC_GRP28]]
671 // CHECK45-32-EX-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], 1
672 // CHECK45-32-EX-NEXT: [[CMP4:%.*]] = icmp slt i32 [[TMP5]], [[ADD]]
673 // CHECK45-32-EX-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
674 // CHECK45-32-EX: omp.inner.for.body:
675 // CHECK45-32-EX-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
676 // CHECK45-32-EX-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
677 // CHECK45-32-EX-NEXT: [[ADD5:%.*]] = add nsw i32 0, [[MUL]]
678 // CHECK45-32-EX-NEXT: store i32 [[ADD5]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP28]]
679 // CHECK45-32-EX-NEXT: [[TMP8:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP28]]
680 // CHECK45-32-EX-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i16], ptr [[TMP0]], i32 0, i32 [[TMP8]]
681 // CHECK45-32-EX-NEXT: [[TMP9:%.*]] = load i16, ptr [[ARRAYIDX]], align 2, !llvm.access.group [[ACC_GRP28]]
682 // CHECK45-32-EX-NEXT: [[CONV:%.*]] = sext i16 [[TMP9]] to i32
683 // CHECK45-32-EX-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV]], 1
684 // CHECK45-32-EX-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16
685 // CHECK45-32-EX-NEXT: store i16 [[CONV7]], ptr [[ARRAYIDX]], align 2, !llvm.access.group [[ACC_GRP28]]
686 // CHECK45-32-EX-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
687 // CHECK45-32-EX: omp.body.continue:
688 // CHECK45-32-EX-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
689 // CHECK45-32-EX: omp.inner.for.inc:
690 // CHECK45-32-EX-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
691 // CHECK45-32-EX-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP10]], 1
692 // CHECK45-32-EX-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
693 // CHECK45-32-EX-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]]
694 // CHECK45-32-EX: worker.exit:
695 // CHECK45-32-EX-NEXT: ret void
696 // CHECK45-32-EX: omp.inner.for.end:
697 // CHECK45-32-EX-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
698 // CHECK45-32-EX-NEXT: [[SUB9:%.*]] = sub nsw i32 [[TMP11]], 0
699 // CHECK45-32-EX-NEXT: [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1
700 // CHECK45-32-EX-NEXT: [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1
701 // CHECK45-32-EX-NEXT: [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
702 // CHECK45-32-EX-NEXT: store i32 [[ADD12]], ptr [[I3]], align 4
703 // CHECK45-32-EX-NEXT: br label [[SIMD_IF_END]]
704 // CHECK45-32-EX: simd.if.end:
705 // CHECK45-32-EX-NEXT: call void @__kmpc_target_deinit()
706 // CHECK45-32-EX-NEXT: ret void
709 // CHECK45-32-EX-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l39
710 // CHECK45-32-EX-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
711 // CHECK45-32-EX-NEXT: entry:
712 // CHECK45-32-EX-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
713 // CHECK45-32-EX-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
714 // CHECK45-32-EX-NEXT: [[TMP:%.*]] = alloca i32, align 4
715 // CHECK45-32-EX-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
716 // CHECK45-32-EX-NEXT: [[I:%.*]] = alloca i32, align 4
717 // CHECK45-32-EX-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
718 // CHECK45-32-EX-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
719 // CHECK45-32-EX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4
720 // CHECK45-32-EX-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l39_kernel_environment, ptr [[DYN_PTR]])
721 // CHECK45-32-EX-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
722 // CHECK45-32-EX-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
723 // CHECK45-32-EX: user_code.entry:
724 // CHECK45-32-EX-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4
725 // CHECK45-32-EX-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
726 // CHECK45-32-EX: omp.inner.for.cond:
727 // CHECK45-32-EX-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31:![0-9]+]]
728 // CHECK45-32-EX-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP2]], 10
729 // CHECK45-32-EX-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
730 // CHECK45-32-EX: omp.inner.for.body:
731 // CHECK45-32-EX-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]]
732 // CHECK45-32-EX-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
733 // CHECK45-32-EX-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
734 // CHECK45-32-EX-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP31]]
735 // CHECK45-32-EX-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP31]]
736 // CHECK45-32-EX-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 [[TMP4]]
737 // CHECK45-32-EX-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP31]]
738 // CHECK45-32-EX-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP5]], 1
739 // CHECK45-32-EX-NEXT: store i32 [[ADD1]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP31]]
740 // CHECK45-32-EX-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
741 // CHECK45-32-EX: omp.body.continue:
742 // CHECK45-32-EX-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
743 // CHECK45-32-EX: omp.inner.for.inc:
744 // CHECK45-32-EX-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]]
745 // CHECK45-32-EX-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], 1
746 // CHECK45-32-EX-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]]
747 // CHECK45-32-EX-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP32:![0-9]+]]
748 // CHECK45-32-EX: worker.exit:
749 // CHECK45-32-EX-NEXT: ret void
750 // CHECK45-32-EX: omp.inner.for.end:
751 // CHECK45-32-EX-NEXT: store i32 10, ptr [[I]], align 4
752 // CHECK45-32-EX-NEXT: call void @__kmpc_target_deinit()
753 // CHECK45-32-EX-NEXT: ret void
756 // CHECK45-32-EX-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l44
757 // CHECK45-32-EX-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]]) #[[ATTR0]] {
758 // CHECK45-32-EX-NEXT: entry:
759 // CHECK45-32-EX-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
760 // CHECK45-32-EX-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
761 // CHECK45-32-EX-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 4
762 // CHECK45-32-EX-NEXT: [[TMP:%.*]] = alloca i32, align 4
763 // CHECK45-32-EX-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
764 // CHECK45-32-EX-NEXT: [[I:%.*]] = alloca i32, align 4
765 // CHECK45-32-EX-NEXT: [[N1:%.*]] = alloca i32, align 4
766 // CHECK45-32-EX-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
767 // CHECK45-32-EX-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
768 // CHECK45-32-EX-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 4
769 // CHECK45-32-EX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4
770 // CHECK45-32-EX-NEXT: [[TMP1:%.*]] = load ptr, ptr [[N_ADDR]], align 4
771 // CHECK45-32-EX-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l44_kernel_environment, ptr [[DYN_PTR]])
772 // CHECK45-32-EX-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP2]], -1
773 // CHECK45-32-EX-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
774 // CHECK45-32-EX: user_code.entry:
775 // CHECK45-32-EX-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4
776 // CHECK45-32-EX-NEXT: store i32 0, ptr [[N1]], align 4
777 // CHECK45-32-EX-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
778 // CHECK45-32-EX: omp.inner.for.cond:
779 // CHECK45-32-EX-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34:![0-9]+]]
780 // CHECK45-32-EX-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP3]], 10
781 // CHECK45-32-EX-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
782 // CHECK45-32-EX: omp.inner.for.body:
783 // CHECK45-32-EX-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]]
784 // CHECK45-32-EX-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1
785 // CHECK45-32-EX-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
786 // CHECK45-32-EX-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP34]]
787 // CHECK45-32-EX-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP34]]
788 // CHECK45-32-EX-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 [[TMP5]]
789 // CHECK45-32-EX-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP34]]
790 // CHECK45-32-EX-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], 1
791 // CHECK45-32-EX-NEXT: store i32 [[ADD2]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP34]]
792 // CHECK45-32-EX-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
793 // CHECK45-32-EX: omp.body.continue:
794 // CHECK45-32-EX-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
795 // CHECK45-32-EX: omp.inner.for.inc:
796 // CHECK45-32-EX-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]]
797 // CHECK45-32-EX-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP7]], 1
798 // CHECK45-32-EX-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]]
799 // CHECK45-32-EX-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP35:![0-9]+]]
800 // CHECK45-32-EX: worker.exit:
801 // CHECK45-32-EX-NEXT: ret void
802 // CHECK45-32-EX: omp.inner.for.end:
803 // CHECK45-32-EX-NEXT: store i32 10, ptr [[I]], align 4
804 // CHECK45-32-EX-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP1]], align 4
805 // CHECK45-32-EX-NEXT: [[TMP9:%.*]] = load i32, ptr [[N1]], align 4
806 // CHECK45-32-EX-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], [[TMP9]]
807 // CHECK45-32-EX-NEXT: store i32 [[ADD4]], ptr [[TMP1]], align 4
808 // CHECK45-32-EX-NEXT: call void @__kmpc_target_deinit()
809 // CHECK45-32-EX-NEXT: ret void
812 // CHECK-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l29
813 // CHECK-64-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i64 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(4000) [[A:%.*]]) #[[ATTR0:[0-9]+]] {
814 // CHECK-64-NEXT: entry:
815 // CHECK-64-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
816 // CHECK-64-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
817 // CHECK-64-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
818 // CHECK-64-NEXT: [[TMP:%.*]] = alloca i32, align 4
819 // CHECK-64-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
820 // CHECK-64-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
821 // CHECK-64-NEXT: [[I:%.*]] = alloca i32, align 4
822 // CHECK-64-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
823 // CHECK-64-NEXT: [[I3:%.*]] = alloca i32, align 4
824 // CHECK-64-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
825 // CHECK-64-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
826 // CHECK-64-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
827 // CHECK-64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
828 // CHECK-64-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l29_kernel_environment, ptr [[DYN_PTR]])
829 // CHECK-64-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
830 // CHECK-64-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
831 // CHECK-64: user_code.entry:
832 // CHECK-64-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4
833 // CHECK-64-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 4
834 // CHECK-64-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
835 // CHECK-64-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
836 // CHECK-64-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
837 // CHECK-64-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
838 // CHECK-64-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
839 // CHECK-64-NEXT: store i32 0, ptr [[I]], align 4
840 // CHECK-64-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
841 // CHECK-64-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
842 // CHECK-64-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
843 // CHECK-64: simd.if.then:
844 // CHECK-64-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4
845 // CHECK-64-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
846 // CHECK-64: omp.inner.for.cond:
847 // CHECK-64-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24:![0-9]+]]
848 // CHECK-64-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group [[ACC_GRP24]]
849 // CHECK-64-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], 1
850 // CHECK-64-NEXT: [[CMP4:%.*]] = icmp slt i32 [[TMP5]], [[ADD]]
851 // CHECK-64-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
852 // CHECK-64: omp.inner.for.body:
853 // CHECK-64-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
854 // CHECK-64-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
855 // CHECK-64-NEXT: [[ADD5:%.*]] = add nsw i32 0, [[MUL]]
856 // CHECK-64-NEXT: store i32 [[ADD5]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP24]]
857 // CHECK-64-NEXT: [[TMP8:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP24]]
858 // CHECK-64-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP8]] to i64
859 // CHECK-64-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
860 // CHECK-64-NEXT: store i32 1, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP24]]
861 // CHECK-64-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
862 // CHECK-64: omp.body.continue:
863 // CHECK-64-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
864 // CHECK-64: omp.inner.for.inc:
865 // CHECK-64-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
866 // CHECK-64-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP9]], 1
867 // CHECK-64-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
868 // CHECK-64-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
869 // CHECK-64: worker.exit:
870 // CHECK-64-NEXT: ret void
871 // CHECK-64: omp.inner.for.end:
872 // CHECK-64-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
873 // CHECK-64-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP10]], 0
874 // CHECK-64-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
875 // CHECK-64-NEXT: [[MUL9:%.*]] = mul nsw i32 [[DIV8]], 1
876 // CHECK-64-NEXT: [[ADD10:%.*]] = add nsw i32 0, [[MUL9]]
877 // CHECK-64-NEXT: store i32 [[ADD10]], ptr [[I3]], align 4
878 // CHECK-64-NEXT: br label [[SIMD_IF_END]]
879 // CHECK-64: simd.if.end:
880 // CHECK-64-NEXT: call void @__kmpc_target_deinit()
881 // CHECK-64-NEXT: ret void
884 // CHECK-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l34
885 // CHECK-64-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i64 noundef [[N:%.*]], ptr noundef nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
886 // CHECK-64-NEXT: entry:
887 // CHECK-64-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
888 // CHECK-64-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
889 // CHECK-64-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8
890 // CHECK-64-NEXT: [[TMP:%.*]] = alloca i32, align 4
891 // CHECK-64-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
892 // CHECK-64-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
893 // CHECK-64-NEXT: [[I:%.*]] = alloca i32, align 4
894 // CHECK-64-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
895 // CHECK-64-NEXT: [[I3:%.*]] = alloca i32, align 4
896 // CHECK-64-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
897 // CHECK-64-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
898 // CHECK-64-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 8
899 // CHECK-64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 8
900 // CHECK-64-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l34_kernel_environment, ptr [[DYN_PTR]])
901 // CHECK-64-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
902 // CHECK-64-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
903 // CHECK-64: user_code.entry:
904 // CHECK-64-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4
905 // CHECK-64-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 4
906 // CHECK-64-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
907 // CHECK-64-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
908 // CHECK-64-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
909 // CHECK-64-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
910 // CHECK-64-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
911 // CHECK-64-NEXT: store i32 0, ptr [[I]], align 4
912 // CHECK-64-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
913 // CHECK-64-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
914 // CHECK-64-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
915 // CHECK-64: simd.if.then:
916 // CHECK-64-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4
917 // CHECK-64-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
918 // CHECK-64: omp.inner.for.cond:
919 // CHECK-64-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28:![0-9]+]]
920 // CHECK-64-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group [[ACC_GRP28]]
921 // CHECK-64-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], 1
922 // CHECK-64-NEXT: [[CMP4:%.*]] = icmp slt i32 [[TMP5]], [[ADD]]
923 // CHECK-64-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
924 // CHECK-64: omp.inner.for.body:
925 // CHECK-64-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
926 // CHECK-64-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
927 // CHECK-64-NEXT: [[ADD5:%.*]] = add nsw i32 0, [[MUL]]
928 // CHECK-64-NEXT: store i32 [[ADD5]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP28]]
929 // CHECK-64-NEXT: [[TMP8:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP28]]
930 // CHECK-64-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP8]] to i64
931 // CHECK-64-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i16], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
932 // CHECK-64-NEXT: [[TMP9:%.*]] = load i16, ptr [[ARRAYIDX]], align 2, !llvm.access.group [[ACC_GRP28]]
933 // CHECK-64-NEXT: [[CONV:%.*]] = sext i16 [[TMP9]] to i32
934 // CHECK-64-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV]], 1
935 // CHECK-64-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16
936 // CHECK-64-NEXT: store i16 [[CONV7]], ptr [[ARRAYIDX]], align 2, !llvm.access.group [[ACC_GRP28]]
937 // CHECK-64-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
938 // CHECK-64: omp.body.continue:
939 // CHECK-64-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
940 // CHECK-64: omp.inner.for.inc:
941 // CHECK-64-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
942 // CHECK-64-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP10]], 1
943 // CHECK-64-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
944 // CHECK-64-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]]
945 // CHECK-64: worker.exit:
946 // CHECK-64-NEXT: ret void
947 // CHECK-64: omp.inner.for.end:
948 // CHECK-64-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
949 // CHECK-64-NEXT: [[SUB9:%.*]] = sub nsw i32 [[TMP11]], 0
950 // CHECK-64-NEXT: [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1
951 // CHECK-64-NEXT: [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1
952 // CHECK-64-NEXT: [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
953 // CHECK-64-NEXT: store i32 [[ADD12]], ptr [[I3]], align 4
954 // CHECK-64-NEXT: br label [[SIMD_IF_END]]
955 // CHECK-64: simd.if.end:
956 // CHECK-64-NEXT: call void @__kmpc_target_deinit()
957 // CHECK-64-NEXT: ret void
960 // CHECK-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l39
961 // CHECK-64-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
962 // CHECK-64-NEXT: entry:
963 // CHECK-64-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
964 // CHECK-64-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
965 // CHECK-64-NEXT: [[TMP:%.*]] = alloca i32, align 4
966 // CHECK-64-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
967 // CHECK-64-NEXT: [[I:%.*]] = alloca i32, align 4
968 // CHECK-64-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
969 // CHECK-64-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
970 // CHECK-64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8
971 // CHECK-64-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l39_kernel_environment, ptr [[DYN_PTR]])
972 // CHECK-64-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
973 // CHECK-64-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
974 // CHECK-64: user_code.entry:
975 // CHECK-64-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4
976 // CHECK-64-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
977 // CHECK-64: omp.inner.for.cond:
978 // CHECK-64-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31:![0-9]+]]
979 // CHECK-64-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP2]], 10
980 // CHECK-64-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
981 // CHECK-64: omp.inner.for.body:
982 // CHECK-64-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]]
983 // CHECK-64-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
984 // CHECK-64-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
985 // CHECK-64-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP31]]
986 // CHECK-64-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP31]]
987 // CHECK-64-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP4]] to i64
988 // CHECK-64-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
989 // CHECK-64-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP31]]
990 // CHECK-64-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP5]], 1
991 // CHECK-64-NEXT: store i32 [[ADD1]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP31]]
992 // CHECK-64-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
993 // CHECK-64: omp.body.continue:
994 // CHECK-64-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
995 // CHECK-64: omp.inner.for.inc:
996 // CHECK-64-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]]
997 // CHECK-64-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], 1
998 // CHECK-64-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]]
999 // CHECK-64-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP32:![0-9]+]]
1000 // CHECK-64: worker.exit:
1001 // CHECK-64-NEXT: ret void
1002 // CHECK-64: omp.inner.for.end:
1003 // CHECK-64-NEXT: store i32 10, ptr [[I]], align 4
1004 // CHECK-64-NEXT: call void @__kmpc_target_deinit()
1005 // CHECK-64-NEXT: ret void
1008 // CHECK-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l44
1009 // CHECK-64-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]]) #[[ATTR0]] {
1010 // CHECK-64-NEXT: entry:
1011 // CHECK-64-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
1012 // CHECK-64-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
1013 // CHECK-64-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 8
1014 // CHECK-64-NEXT: [[TMP:%.*]] = alloca i32, align 4
1015 // CHECK-64-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
1016 // CHECK-64-NEXT: [[I:%.*]] = alloca i32, align 4
1017 // CHECK-64-NEXT: [[N1:%.*]] = alloca i32, align 4
1018 // CHECK-64-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
1019 // CHECK-64-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
1020 // CHECK-64-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 8
1021 // CHECK-64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8
1022 // CHECK-64-NEXT: [[TMP1:%.*]] = load ptr, ptr [[N_ADDR]], align 8
1023 // CHECK-64-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l44_kernel_environment, ptr [[DYN_PTR]])
1024 // CHECK-64-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP2]], -1
1025 // CHECK-64-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
1026 // CHECK-64: user_code.entry:
1027 // CHECK-64-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4
1028 // CHECK-64-NEXT: store i32 0, ptr [[N1]], align 4
1029 // CHECK-64-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
1030 // CHECK-64: omp.inner.for.cond:
1031 // CHECK-64-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34:![0-9]+]]
1032 // CHECK-64-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP3]], 10
1033 // CHECK-64-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1034 // CHECK-64: omp.inner.for.body:
1035 // CHECK-64-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]]
1036 // CHECK-64-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1
1037 // CHECK-64-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1038 // CHECK-64-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP34]]
1039 // CHECK-64-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP34]]
1040 // CHECK-64-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP5]] to i64
1041 // CHECK-64-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
1042 // CHECK-64-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP34]]
1043 // CHECK-64-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], 1
1044 // CHECK-64-NEXT: store i32 [[ADD2]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP34]]
1045 // CHECK-64-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
1046 // CHECK-64: omp.body.continue:
1047 // CHECK-64-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
1048 // CHECK-64: omp.inner.for.inc:
1049 // CHECK-64-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]]
1050 // CHECK-64-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP7]], 1
1051 // CHECK-64-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]]
1052 // CHECK-64-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP35:![0-9]+]]
1053 // CHECK-64: worker.exit:
1054 // CHECK-64-NEXT: ret void
1055 // CHECK-64: omp.inner.for.end:
1056 // CHECK-64-NEXT: store i32 10, ptr [[I]], align 4
1057 // CHECK-64-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP1]], align 4
1058 // CHECK-64-NEXT: [[TMP9:%.*]] = load i32, ptr [[N1]], align 4
1059 // CHECK-64-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], [[TMP9]]
1060 // CHECK-64-NEXT: store i32 [[ADD4]], ptr [[TMP1]], align 4
1061 // CHECK-64-NEXT: call void @__kmpc_target_deinit()
1062 // CHECK-64-NEXT: ret void
1065 // CHECK-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l29
1066 // CHECK-32-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i32 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(4000) [[A:%.*]]) #[[ATTR0:[0-9]+]] {
1067 // CHECK-32-NEXT: entry:
1068 // CHECK-32-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
1069 // CHECK-32-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
1070 // CHECK-32-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
1071 // CHECK-32-NEXT: [[TMP:%.*]] = alloca i32, align 4
1072 // CHECK-32-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
1073 // CHECK-32-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
1074 // CHECK-32-NEXT: [[I:%.*]] = alloca i32, align 4
1075 // CHECK-32-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
1076 // CHECK-32-NEXT: [[I3:%.*]] = alloca i32, align 4
1077 // CHECK-32-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
1078 // CHECK-32-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
1079 // CHECK-32-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
1080 // CHECK-32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
1081 // CHECK-32-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l29_kernel_environment, ptr [[DYN_PTR]])
1082 // CHECK-32-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
1083 // CHECK-32-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
1084 // CHECK-32: user_code.entry:
1085 // CHECK-32-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4
1086 // CHECK-32-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 4
1087 // CHECK-32-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
1088 // CHECK-32-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
1089 // CHECK-32-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
1090 // CHECK-32-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
1091 // CHECK-32-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
1092 // CHECK-32-NEXT: store i32 0, ptr [[I]], align 4
1093 // CHECK-32-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
1094 // CHECK-32-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
1095 // CHECK-32-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
1096 // CHECK-32: simd.if.then:
1097 // CHECK-32-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4
1098 // CHECK-32-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
1099 // CHECK-32: omp.inner.for.cond:
1100 // CHECK-32-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24:![0-9]+]]
1101 // CHECK-32-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group [[ACC_GRP24]]
1102 // CHECK-32-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], 1
1103 // CHECK-32-NEXT: [[CMP4:%.*]] = icmp slt i32 [[TMP5]], [[ADD]]
1104 // CHECK-32-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1105 // CHECK-32: omp.inner.for.body:
1106 // CHECK-32-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
1107 // CHECK-32-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
1108 // CHECK-32-NEXT: [[ADD5:%.*]] = add nsw i32 0, [[MUL]]
1109 // CHECK-32-NEXT: store i32 [[ADD5]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP24]]
1110 // CHECK-32-NEXT: [[TMP8:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP24]]
1111 // CHECK-32-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i32], ptr [[TMP0]], i32 0, i32 [[TMP8]]
1112 // CHECK-32-NEXT: store i32 1, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP24]]
1113 // CHECK-32-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
1114 // CHECK-32: omp.body.continue:
1115 // CHECK-32-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
1116 // CHECK-32: omp.inner.for.inc:
1117 // CHECK-32-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
1118 // CHECK-32-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP9]], 1
1119 // CHECK-32-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
1120 // CHECK-32-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
1121 // CHECK-32: worker.exit:
1122 // CHECK-32-NEXT: ret void
1123 // CHECK-32: omp.inner.for.end:
1124 // CHECK-32-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
1125 // CHECK-32-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP10]], 0
1126 // CHECK-32-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
1127 // CHECK-32-NEXT: [[MUL9:%.*]] = mul nsw i32 [[DIV8]], 1
1128 // CHECK-32-NEXT: [[ADD10:%.*]] = add nsw i32 0, [[MUL9]]
1129 // CHECK-32-NEXT: store i32 [[ADD10]], ptr [[I3]], align 4
1130 // CHECK-32-NEXT: br label [[SIMD_IF_END]]
1131 // CHECK-32: simd.if.end:
1132 // CHECK-32-NEXT: call void @__kmpc_target_deinit()
1133 // CHECK-32-NEXT: ret void
1136 // CHECK-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l34
1137 // CHECK-32-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i32 noundef [[N:%.*]], ptr noundef nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
1138 // CHECK-32-NEXT: entry:
1139 // CHECK-32-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
1140 // CHECK-32-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
1141 // CHECK-32-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 4
1142 // CHECK-32-NEXT: [[TMP:%.*]] = alloca i32, align 4
1143 // CHECK-32-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
1144 // CHECK-32-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
1145 // CHECK-32-NEXT: [[I:%.*]] = alloca i32, align 4
1146 // CHECK-32-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
1147 // CHECK-32-NEXT: [[I3:%.*]] = alloca i32, align 4
1148 // CHECK-32-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
1149 // CHECK-32-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
1150 // CHECK-32-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 4
1151 // CHECK-32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 4
1152 // CHECK-32-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l34_kernel_environment, ptr [[DYN_PTR]])
1153 // CHECK-32-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
1154 // CHECK-32-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
1155 // CHECK-32: user_code.entry:
1156 // CHECK-32-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4
1157 // CHECK-32-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 4
1158 // CHECK-32-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
1159 // CHECK-32-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
1160 // CHECK-32-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
1161 // CHECK-32-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
1162 // CHECK-32-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
1163 // CHECK-32-NEXT: store i32 0, ptr [[I]], align 4
1164 // CHECK-32-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
1165 // CHECK-32-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
1166 // CHECK-32-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
1167 // CHECK-32: simd.if.then:
1168 // CHECK-32-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4
1169 // CHECK-32-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
1170 // CHECK-32: omp.inner.for.cond:
1171 // CHECK-32-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28:![0-9]+]]
1172 // CHECK-32-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group [[ACC_GRP28]]
1173 // CHECK-32-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], 1
1174 // CHECK-32-NEXT: [[CMP4:%.*]] = icmp slt i32 [[TMP5]], [[ADD]]
1175 // CHECK-32-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1176 // CHECK-32: omp.inner.for.body:
1177 // CHECK-32-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
1178 // CHECK-32-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
1179 // CHECK-32-NEXT: [[ADD5:%.*]] = add nsw i32 0, [[MUL]]
1180 // CHECK-32-NEXT: store i32 [[ADD5]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP28]]
1181 // CHECK-32-NEXT: [[TMP8:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP28]]
1182 // CHECK-32-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i16], ptr [[TMP0]], i32 0, i32 [[TMP8]]
1183 // CHECK-32-NEXT: [[TMP9:%.*]] = load i16, ptr [[ARRAYIDX]], align 2, !llvm.access.group [[ACC_GRP28]]
1184 // CHECK-32-NEXT: [[CONV:%.*]] = sext i16 [[TMP9]] to i32
1185 // CHECK-32-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV]], 1
1186 // CHECK-32-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16
1187 // CHECK-32-NEXT: store i16 [[CONV7]], ptr [[ARRAYIDX]], align 2, !llvm.access.group [[ACC_GRP28]]
1188 // CHECK-32-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
1189 // CHECK-32: omp.body.continue:
1190 // CHECK-32-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
1191 // CHECK-32: omp.inner.for.inc:
1192 // CHECK-32-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
1193 // CHECK-32-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP10]], 1
1194 // CHECK-32-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
1195 // CHECK-32-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]]
1196 // CHECK-32: worker.exit:
1197 // CHECK-32-NEXT: ret void
1198 // CHECK-32: omp.inner.for.end:
1199 // CHECK-32-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
1200 // CHECK-32-NEXT: [[SUB9:%.*]] = sub nsw i32 [[TMP11]], 0
1201 // CHECK-32-NEXT: [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1
1202 // CHECK-32-NEXT: [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1
1203 // CHECK-32-NEXT: [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
1204 // CHECK-32-NEXT: store i32 [[ADD12]], ptr [[I3]], align 4
1205 // CHECK-32-NEXT: br label [[SIMD_IF_END]]
1206 // CHECK-32: simd.if.end:
1207 // CHECK-32-NEXT: call void @__kmpc_target_deinit()
1208 // CHECK-32-NEXT: ret void
1211 // CHECK-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l39
1212 // CHECK-32-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
1213 // CHECK-32-NEXT: entry:
1214 // CHECK-32-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
1215 // CHECK-32-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
1216 // CHECK-32-NEXT: [[TMP:%.*]] = alloca i32, align 4
1217 // CHECK-32-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
1218 // CHECK-32-NEXT: [[I:%.*]] = alloca i32, align 4
1219 // CHECK-32-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
1220 // CHECK-32-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
1221 // CHECK-32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4
1222 // CHECK-32-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l39_kernel_environment, ptr [[DYN_PTR]])
1223 // CHECK-32-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
1224 // CHECK-32-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
1225 // CHECK-32: user_code.entry:
1226 // CHECK-32-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4
1227 // CHECK-32-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
1228 // CHECK-32: omp.inner.for.cond:
1229 // CHECK-32-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31:![0-9]+]]
1230 // CHECK-32-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP2]], 10
1231 // CHECK-32-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1232 // CHECK-32: omp.inner.for.body:
1233 // CHECK-32-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]]
1234 // CHECK-32-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
1235 // CHECK-32-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1236 // CHECK-32-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP31]]
1237 // CHECK-32-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP31]]
1238 // CHECK-32-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 [[TMP4]]
1239 // CHECK-32-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP31]]
1240 // CHECK-32-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP5]], 1
1241 // CHECK-32-NEXT: store i32 [[ADD1]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP31]]
1242 // CHECK-32-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
1243 // CHECK-32: omp.body.continue:
1244 // CHECK-32-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
1245 // CHECK-32: omp.inner.for.inc:
1246 // CHECK-32-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]]
1247 // CHECK-32-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], 1
1248 // CHECK-32-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]]
1249 // CHECK-32-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP32:![0-9]+]]
1250 // CHECK-32: worker.exit:
1251 // CHECK-32-NEXT: ret void
1252 // CHECK-32: omp.inner.for.end:
1253 // CHECK-32-NEXT: store i32 10, ptr [[I]], align 4
1254 // CHECK-32-NEXT: call void @__kmpc_target_deinit()
1255 // CHECK-32-NEXT: ret void
1258 // CHECK-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l44
1259 // CHECK-32-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]]) #[[ATTR0]] {
1260 // CHECK-32-NEXT: entry:
1261 // CHECK-32-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
1262 // CHECK-32-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
1263 // CHECK-32-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 4
1264 // CHECK-32-NEXT: [[TMP:%.*]] = alloca i32, align 4
1265 // CHECK-32-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
1266 // CHECK-32-NEXT: [[I:%.*]] = alloca i32, align 4
1267 // CHECK-32-NEXT: [[N1:%.*]] = alloca i32, align 4
1268 // CHECK-32-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
1269 // CHECK-32-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
1270 // CHECK-32-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 4
1271 // CHECK-32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4
1272 // CHECK-32-NEXT: [[TMP1:%.*]] = load ptr, ptr [[N_ADDR]], align 4
1273 // CHECK-32-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l44_kernel_environment, ptr [[DYN_PTR]])
1274 // CHECK-32-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP2]], -1
1275 // CHECK-32-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
1276 // CHECK-32: user_code.entry:
1277 // CHECK-32-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4
1278 // CHECK-32-NEXT: store i32 0, ptr [[N1]], align 4
1279 // CHECK-32-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
1280 // CHECK-32: omp.inner.for.cond:
1281 // CHECK-32-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34:![0-9]+]]
1282 // CHECK-32-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP3]], 10
1283 // CHECK-32-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1284 // CHECK-32: omp.inner.for.body:
1285 // CHECK-32-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]]
1286 // CHECK-32-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1
1287 // CHECK-32-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1288 // CHECK-32-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP34]]
1289 // CHECK-32-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP34]]
1290 // CHECK-32-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 [[TMP5]]
1291 // CHECK-32-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP34]]
1292 // CHECK-32-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], 1
1293 // CHECK-32-NEXT: store i32 [[ADD2]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP34]]
1294 // CHECK-32-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
1295 // CHECK-32: omp.body.continue:
1296 // CHECK-32-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
1297 // CHECK-32: omp.inner.for.inc:
1298 // CHECK-32-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]]
1299 // CHECK-32-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP7]], 1
1300 // CHECK-32-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]]
1301 // CHECK-32-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP35:![0-9]+]]
1302 // CHECK-32: worker.exit:
1303 // CHECK-32-NEXT: ret void
1304 // CHECK-32: omp.inner.for.end:
1305 // CHECK-32-NEXT: store i32 10, ptr [[I]], align 4
1306 // CHECK-32-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP1]], align 4
1307 // CHECK-32-NEXT: [[TMP9:%.*]] = load i32, ptr [[N1]], align 4
1308 // CHECK-32-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], [[TMP9]]
1309 // CHECK-32-NEXT: store i32 [[ADD4]], ptr [[TMP1]], align 4
1310 // CHECK-32-NEXT: call void @__kmpc_target_deinit()
1311 // CHECK-32-NEXT: ret void
1314 // CHECK-32-EX-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l29
1315 // CHECK-32-EX-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i32 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(4000) [[A:%.*]]) #[[ATTR0:[0-9]+]] {
1316 // CHECK-32-EX-NEXT: entry:
1317 // CHECK-32-EX-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
1318 // CHECK-32-EX-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
1319 // CHECK-32-EX-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
1320 // CHECK-32-EX-NEXT: [[TMP:%.*]] = alloca i32, align 4
1321 // CHECK-32-EX-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
1322 // CHECK-32-EX-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
1323 // CHECK-32-EX-NEXT: [[I:%.*]] = alloca i32, align 4
1324 // CHECK-32-EX-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
1325 // CHECK-32-EX-NEXT: [[I3:%.*]] = alloca i32, align 4
1326 // CHECK-32-EX-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
1327 // CHECK-32-EX-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
1328 // CHECK-32-EX-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
1329 // CHECK-32-EX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
1330 // CHECK-32-EX-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l29_kernel_environment, ptr [[DYN_PTR]])
1331 // CHECK-32-EX-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
1332 // CHECK-32-EX-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
1333 // CHECK-32-EX: user_code.entry:
1334 // CHECK-32-EX-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4
1335 // CHECK-32-EX-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 4
1336 // CHECK-32-EX-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
1337 // CHECK-32-EX-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
1338 // CHECK-32-EX-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
1339 // CHECK-32-EX-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
1340 // CHECK-32-EX-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
1341 // CHECK-32-EX-NEXT: store i32 0, ptr [[I]], align 4
1342 // CHECK-32-EX-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
1343 // CHECK-32-EX-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
1344 // CHECK-32-EX-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
1345 // CHECK-32-EX: simd.if.then:
1346 // CHECK-32-EX-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4
1347 // CHECK-32-EX-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
1348 // CHECK-32-EX: omp.inner.for.cond:
1349 // CHECK-32-EX-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24:![0-9]+]]
1350 // CHECK-32-EX-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group [[ACC_GRP24]]
1351 // CHECK-32-EX-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], 1
1352 // CHECK-32-EX-NEXT: [[CMP4:%.*]] = icmp slt i32 [[TMP5]], [[ADD]]
1353 // CHECK-32-EX-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1354 // CHECK-32-EX: omp.inner.for.body:
1355 // CHECK-32-EX-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
1356 // CHECK-32-EX-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
1357 // CHECK-32-EX-NEXT: [[ADD5:%.*]] = add nsw i32 0, [[MUL]]
1358 // CHECK-32-EX-NEXT: store i32 [[ADD5]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP24]]
1359 // CHECK-32-EX-NEXT: [[TMP8:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP24]]
1360 // CHECK-32-EX-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i32], ptr [[TMP0]], i32 0, i32 [[TMP8]]
1361 // CHECK-32-EX-NEXT: store i32 1, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP24]]
1362 // CHECK-32-EX-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
1363 // CHECK-32-EX: omp.body.continue:
1364 // CHECK-32-EX-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
1365 // CHECK-32-EX: omp.inner.for.inc:
1366 // CHECK-32-EX-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
1367 // CHECK-32-EX-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP9]], 1
1368 // CHECK-32-EX-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
1369 // CHECK-32-EX-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
1370 // CHECK-32-EX: worker.exit:
1371 // CHECK-32-EX-NEXT: ret void
1372 // CHECK-32-EX: omp.inner.for.end:
1373 // CHECK-32-EX-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
1374 // CHECK-32-EX-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP10]], 0
1375 // CHECK-32-EX-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1
1376 // CHECK-32-EX-NEXT: [[MUL9:%.*]] = mul nsw i32 [[DIV8]], 1
1377 // CHECK-32-EX-NEXT: [[ADD10:%.*]] = add nsw i32 0, [[MUL9]]
1378 // CHECK-32-EX-NEXT: store i32 [[ADD10]], ptr [[I3]], align 4
1379 // CHECK-32-EX-NEXT: br label [[SIMD_IF_END]]
1380 // CHECK-32-EX: simd.if.end:
1381 // CHECK-32-EX-NEXT: call void @__kmpc_target_deinit()
1382 // CHECK-32-EX-NEXT: ret void
1385 // CHECK-32-EX-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l34
1386 // CHECK-32-EX-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i32 noundef [[N:%.*]], ptr noundef nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0]] {
1387 // CHECK-32-EX-NEXT: entry:
1388 // CHECK-32-EX-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
1389 // CHECK-32-EX-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
1390 // CHECK-32-EX-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 4
1391 // CHECK-32-EX-NEXT: [[TMP:%.*]] = alloca i32, align 4
1392 // CHECK-32-EX-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
1393 // CHECK-32-EX-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
1394 // CHECK-32-EX-NEXT: [[I:%.*]] = alloca i32, align 4
1395 // CHECK-32-EX-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
1396 // CHECK-32-EX-NEXT: [[I3:%.*]] = alloca i32, align 4
1397 // CHECK-32-EX-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
1398 // CHECK-32-EX-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
1399 // CHECK-32-EX-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 4
1400 // CHECK-32-EX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 4
1401 // CHECK-32-EX-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l34_kernel_environment, ptr [[DYN_PTR]])
1402 // CHECK-32-EX-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
1403 // CHECK-32-EX-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
1404 // CHECK-32-EX: user_code.entry:
1405 // CHECK-32-EX-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4
1406 // CHECK-32-EX-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 4
1407 // CHECK-32-EX-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
1408 // CHECK-32-EX-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
1409 // CHECK-32-EX-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
1410 // CHECK-32-EX-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
1411 // CHECK-32-EX-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
1412 // CHECK-32-EX-NEXT: store i32 0, ptr [[I]], align 4
1413 // CHECK-32-EX-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
1414 // CHECK-32-EX-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
1415 // CHECK-32-EX-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]]
1416 // CHECK-32-EX: simd.if.then:
1417 // CHECK-32-EX-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4
1418 // CHECK-32-EX-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
1419 // CHECK-32-EX: omp.inner.for.cond:
1420 // CHECK-32-EX-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28:![0-9]+]]
1421 // CHECK-32-EX-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !llvm.access.group [[ACC_GRP28]]
1422 // CHECK-32-EX-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], 1
1423 // CHECK-32-EX-NEXT: [[CMP4:%.*]] = icmp slt i32 [[TMP5]], [[ADD]]
1424 // CHECK-32-EX-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1425 // CHECK-32-EX: omp.inner.for.body:
1426 // CHECK-32-EX-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
1427 // CHECK-32-EX-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
1428 // CHECK-32-EX-NEXT: [[ADD5:%.*]] = add nsw i32 0, [[MUL]]
1429 // CHECK-32-EX-NEXT: store i32 [[ADD5]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP28]]
1430 // CHECK-32-EX-NEXT: [[TMP8:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP28]]
1431 // CHECK-32-EX-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i16], ptr [[TMP0]], i32 0, i32 [[TMP8]]
1432 // CHECK-32-EX-NEXT: [[TMP9:%.*]] = load i16, ptr [[ARRAYIDX]], align 2, !llvm.access.group [[ACC_GRP28]]
1433 // CHECK-32-EX-NEXT: [[CONV:%.*]] = sext i16 [[TMP9]] to i32
1434 // CHECK-32-EX-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV]], 1
1435 // CHECK-32-EX-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16
1436 // CHECK-32-EX-NEXT: store i16 [[CONV7]], ptr [[ARRAYIDX]], align 2, !llvm.access.group [[ACC_GRP28]]
1437 // CHECK-32-EX-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
1438 // CHECK-32-EX: omp.body.continue:
1439 // CHECK-32-EX-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
1440 // CHECK-32-EX: omp.inner.for.inc:
1441 // CHECK-32-EX-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
1442 // CHECK-32-EX-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP10]], 1
1443 // CHECK-32-EX-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
1444 // CHECK-32-EX-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]]
1445 // CHECK-32-EX: worker.exit:
1446 // CHECK-32-EX-NEXT: ret void
1447 // CHECK-32-EX: omp.inner.for.end:
1448 // CHECK-32-EX-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
1449 // CHECK-32-EX-NEXT: [[SUB9:%.*]] = sub nsw i32 [[TMP11]], 0
1450 // CHECK-32-EX-NEXT: [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1
1451 // CHECK-32-EX-NEXT: [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1
1452 // CHECK-32-EX-NEXT: [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
1453 // CHECK-32-EX-NEXT: store i32 [[ADD12]], ptr [[I3]], align 4
1454 // CHECK-32-EX-NEXT: br label [[SIMD_IF_END]]
1455 // CHECK-32-EX: simd.if.end:
1456 // CHECK-32-EX-NEXT: call void @__kmpc_target_deinit()
1457 // CHECK-32-EX-NEXT: ret void
1460 // CHECK-32-EX-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l39
1461 // CHECK-32-EX-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
1462 // CHECK-32-EX-NEXT: entry:
1463 // CHECK-32-EX-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
1464 // CHECK-32-EX-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
1465 // CHECK-32-EX-NEXT: [[TMP:%.*]] = alloca i32, align 4
1466 // CHECK-32-EX-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
1467 // CHECK-32-EX-NEXT: [[I:%.*]] = alloca i32, align 4
1468 // CHECK-32-EX-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
1469 // CHECK-32-EX-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
1470 // CHECK-32-EX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4
1471 // CHECK-32-EX-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l39_kernel_environment, ptr [[DYN_PTR]])
1472 // CHECK-32-EX-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
1473 // CHECK-32-EX-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
1474 // CHECK-32-EX: user_code.entry:
1475 // CHECK-32-EX-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4
1476 // CHECK-32-EX-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
1477 // CHECK-32-EX: omp.inner.for.cond:
1478 // CHECK-32-EX-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31:![0-9]+]]
1479 // CHECK-32-EX-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP2]], 10
1480 // CHECK-32-EX-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1481 // CHECK-32-EX: omp.inner.for.body:
1482 // CHECK-32-EX-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]]
1483 // CHECK-32-EX-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
1484 // CHECK-32-EX-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1485 // CHECK-32-EX-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP31]]
1486 // CHECK-32-EX-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP31]]
1487 // CHECK-32-EX-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 [[TMP4]]
1488 // CHECK-32-EX-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP31]]
1489 // CHECK-32-EX-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP5]], 1
1490 // CHECK-32-EX-NEXT: store i32 [[ADD1]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP31]]
1491 // CHECK-32-EX-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
1492 // CHECK-32-EX: omp.body.continue:
1493 // CHECK-32-EX-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
1494 // CHECK-32-EX: omp.inner.for.inc:
1495 // CHECK-32-EX-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]]
1496 // CHECK-32-EX-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], 1
1497 // CHECK-32-EX-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]]
1498 // CHECK-32-EX-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP32:![0-9]+]]
1499 // CHECK-32-EX: worker.exit:
1500 // CHECK-32-EX-NEXT: ret void
1501 // CHECK-32-EX: omp.inner.for.end:
1502 // CHECK-32-EX-NEXT: store i32 10, ptr [[I]], align 4
1503 // CHECK-32-EX-NEXT: call void @__kmpc_target_deinit()
1504 // CHECK-32-EX-NEXT: ret void
1507 // CHECK-32-EX-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l44
1508 // CHECK-32-EX-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]]) #[[ATTR0]] {
1509 // CHECK-32-EX-NEXT: entry:
1510 // CHECK-32-EX-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
1511 // CHECK-32-EX-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
1512 // CHECK-32-EX-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 4
1513 // CHECK-32-EX-NEXT: [[TMP:%.*]] = alloca i32, align 4
1514 // CHECK-32-EX-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
1515 // CHECK-32-EX-NEXT: [[I:%.*]] = alloca i32, align 4
1516 // CHECK-32-EX-NEXT: [[N1:%.*]] = alloca i32, align 4
1517 // CHECK-32-EX-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
1518 // CHECK-32-EX-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
1519 // CHECK-32-EX-NEXT: store ptr [[N]], ptr [[N_ADDR]], align 4
1520 // CHECK-32-EX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4
1521 // CHECK-32-EX-NEXT: [[TMP1:%.*]] = load ptr, ptr [[N_ADDR]], align 4
1522 // CHECK-32-EX-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l44_kernel_environment, ptr [[DYN_PTR]])
1523 // CHECK-32-EX-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP2]], -1
1524 // CHECK-32-EX-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
1525 // CHECK-32-EX: user_code.entry:
1526 // CHECK-32-EX-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4
1527 // CHECK-32-EX-NEXT: store i32 0, ptr [[N1]], align 4
1528 // CHECK-32-EX-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
1529 // CHECK-32-EX: omp.inner.for.cond:
1530 // CHECK-32-EX-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34:![0-9]+]]
1531 // CHECK-32-EX-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP3]], 10
1532 // CHECK-32-EX-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1533 // CHECK-32-EX: omp.inner.for.body:
1534 // CHECK-32-EX-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]]
1535 // CHECK-32-EX-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1
1536 // CHECK-32-EX-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1537 // CHECK-32-EX-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP34]]
1538 // CHECK-32-EX-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP34]]
1539 // CHECK-32-EX-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 [[TMP5]]
1540 // CHECK-32-EX-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP34]]
1541 // CHECK-32-EX-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], 1
1542 // CHECK-32-EX-NEXT: store i32 [[ADD2]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP34]]
1543 // CHECK-32-EX-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
1544 // CHECK-32-EX: omp.body.continue:
1545 // CHECK-32-EX-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
1546 // CHECK-32-EX: omp.inner.for.inc:
1547 // CHECK-32-EX-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]]
1548 // CHECK-32-EX-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP7]], 1
1549 // CHECK-32-EX-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]]
1550 // CHECK-32-EX-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP35:![0-9]+]]
1551 // CHECK-32-EX: worker.exit:
1552 // CHECK-32-EX-NEXT: ret void
1553 // CHECK-32-EX: omp.inner.for.end:
1554 // CHECK-32-EX-NEXT: store i32 10, ptr [[I]], align 4
1555 // CHECK-32-EX-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP1]], align 4
1556 // CHECK-32-EX-NEXT: [[TMP9:%.*]] = load i32, ptr [[N1]], align 4
1557 // CHECK-32-EX-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], [[TMP9]]
1558 // CHECK-32-EX-NEXT: store i32 [[ADD4]], ptr [[TMP1]], align 4
1559 // CHECK-32-EX-NEXT: call void @__kmpc_target_deinit()
1560 // CHECK-32-EX-NEXT: ret void