// RUN: %clang_cc1 -verify -fopenmp -x c -triple x86_64-unknown-linux -emit-llvm %s -o - | FileCheck %s
// expected-no-diagnostics

#define N 100

int x;
#pragma omp threadprivate(x)
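
// x is threadprivate, so each thread in a parallel region works on its own
// copy of it. The copyin(x) clause on the combined constructs below is
// expected to copy the primary thread's value of x into every other
// thread's copy on entry to the region; the checks below verify this as an
// address-guarded copy followed by a call to __kmpc_barrier.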
void test_omp_parallel_copyin(int *a) {
  x = 1;

#pragma omp parallel copyin(x)
#pragma omp for
  for (int i = 0; i < N; i++)
    a[i] = i + x;
}

void test_omp_parallel_for_copyin(int *a) {
  x = 2;

#pragma omp parallel for copyin(x)
  for (int i = 0; i < N; i++)
    a[i] = i + x;
}

void test_omp_parallel_for_simd_copyin(int *a) {
  x = 3;

#pragma omp parallel for simd copyin(x)
  for (int i = 0; i < N; i++)
    a[i] = i + x;
}

void test_omp_parallel_sections_copyin(int *a, int *b) {
  x = 4;

#pragma omp parallel sections copyin(x)
  {
#pragma omp section
    { *a = x; }

#pragma omp section
    { *b = x; }
  }
}

void test_omp_parallel_master_copyin(int *a) {
  x = 5;

#pragma omp parallel master copyin(x)
  for (int i = 0; i < N; i++)
    a[i] = i + x;
}

// CHECK-LABEL: define {{[^@]+}}@test_omp_parallel_copyin
// CHECK-SAME: (ptr noundef [[A:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @x)
// CHECK-NEXT: store i32 1, ptr [[TMP0]], align 4
// CHECK-NEXT: [[TMP1:%.*]] = call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @x)
// CHECK-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3:[0-9]+]], i32 2, ptr @test_omp_parallel_copyin.omp_outlined, ptr [[A_ADDR]], ptr [[TMP1]])
// CHECK-NEXT: ret void
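// Note: the wrapper writes the constant through the pointer returned by
// @llvm.threadlocal.address and passes that same address of x to
// __kmpc_fork_call, so the outlined function can compare it against each
// worker thread's own TLS address of x.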
//
// CHECK-LABEL: define {{[^@]+}}@test_omp_parallel_copyin.omp_outlined
// CHECK-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[A:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[X_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK-NEXT: store ptr [[X]], ptr [[X_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[X_ADDR]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @x)
// CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[TMP1]] to i64
// CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[TMP2]] to i64
// CHECK-NEXT: [[TMP5:%.*]] = icmp ne i64 [[TMP3]], [[TMP4]]
// CHECK-NEXT: br i1 [[TMP5]], label [[COPYIN_NOT_MASTER:%.*]], label [[COPYIN_NOT_MASTER_END:%.*]]
// CHECK: copyin.not.master:
// CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP1]], align 4
// CHECK-NEXT: store i32 [[TMP6]], ptr [[TMP2]], align 4
// CHECK-NEXT: br label [[COPYIN_NOT_MASTER_END]]
// CHECK: copyin.not.master.end:
// CHECK-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
// CHECK-NEXT: call void @__kmpc_barrier(ptr @[[GLOB1:[0-9]+]], i32 [[TMP8]])
// CHECK-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
// CHECK-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4
// CHECK-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP10]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 99
// CHECK-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK: cond.true:
// CHECK-NEXT: br label [[COND_END:%.*]]
// CHECK: cond.false:
// CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NEXT: br label [[COND_END]]
// CHECK: cond.end:
// CHECK-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4
// CHECK-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK: omp.inner.for.cond:
// CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
// CHECK-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK: omp.inner.for.body:
// CHECK-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP16]], 1
// CHECK-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// CHECK-NEXT: [[TMP17:%.*]] = load i32, ptr [[I]], align 4
// CHECK-NEXT: [[TMP18:%.*]] = call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @x)
// CHECK-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP18]], align 4
// CHECK-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP17]], [[TMP19]]
// CHECK-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP0]], align 8
// CHECK-NEXT: [[TMP21:%.*]] = load i32, ptr [[I]], align 4
// CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP20]], i64 [[IDXPROM]]
// CHECK-NEXT: store i32 [[ADD2]], ptr [[ARRAYIDX]], align 4
// CHECK-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK: omp.body.continue:
// CHECK-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK: omp.inner.for.inc:
// CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP22]], 1
// CHECK-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
// CHECK-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK: omp.inner.for.end:
// CHECK-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK: omp.loop.exit:
// CHECK-NEXT: [[TMP23:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP24:%.*]] = load i32, ptr [[TMP23]], align 4
// CHECK-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP24]])
// CHECK-NEXT: [[TMP25:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4
// CHECK-NEXT: call void @__kmpc_barrier(ptr @[[GLOB1]], i32 [[TMP26]])
// CHECK-NEXT: ret void
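// In rough pseudo-C (names are illustrative, not taken from the IR), the
// copyin prologue checked above is:
//   if ((uintptr_t)&x_primary != (uintptr_t)&x_self)  // copyin.not.master
//     x_self = x_primary;
//   __kmpc_barrier(loc, tid);  // no thread reads x before the copy lands
// The worksharing loop then runs under __kmpc_for_static_init_4; the
// schedule constant 34 appears to correspond to kmp_sch_static.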
//
// CHECK-LABEL: define {{[^@]+}}@test_omp_parallel_for_copyin
// CHECK-SAME: (ptr noundef [[A:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @x)
// CHECK-NEXT: store i32 2, ptr [[TMP0]], align 4
// CHECK-NEXT: [[TMP1:%.*]] = call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @x)
// CHECK-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3:[0-9]+]], i32 2, ptr @test_omp_parallel_for_copyin.omp_outlined, ptr [[A_ADDR]], ptr [[TMP1]])
// CHECK-NEXT: ret void
//
// CHECK-LABEL: define {{[^@]+}}@test_omp_parallel_for_copyin.omp_outlined
// CHECK-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[A:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[X_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK-NEXT: store ptr [[X]], ptr [[X_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[X_ADDR]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @x)
// CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[TMP1]] to i64
// CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[TMP2]] to i64
// CHECK-NEXT: [[TMP5:%.*]] = icmp ne i64 [[TMP3]], [[TMP4]]
// CHECK-NEXT: br i1 [[TMP5]], label [[COPYIN_NOT_MASTER:%.*]], label [[COPYIN_NOT_MASTER_END:%.*]]
// CHECK: copyin.not.master:
// CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP1]], align 4
// CHECK-NEXT: store i32 [[TMP6]], ptr [[TMP2]], align 4
// CHECK-NEXT: br label [[COPYIN_NOT_MASTER_END]]
// CHECK: copyin.not.master.end:
// CHECK-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
// CHECK-NEXT: call void @__kmpc_barrier(ptr @[[GLOB1:[0-9]+]], i32 [[TMP8]])
// CHECK-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
// CHECK-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4
// CHECK-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP10]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 99
// CHECK-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK: cond.true:
// CHECK-NEXT: br label [[COND_END:%.*]]
// CHECK: cond.false:
// CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NEXT: br label [[COND_END]]
// CHECK: cond.end:
// CHECK-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4
// CHECK-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK: omp.inner.for.cond:
// CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
// CHECK-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK: omp.inner.for.body:
// CHECK-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP16]], 1
// CHECK-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// CHECK-NEXT: [[TMP17:%.*]] = load i32, ptr [[I]], align 4
// CHECK-NEXT: [[TMP18:%.*]] = call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @x)
// CHECK-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP18]], align 4
// CHECK-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP17]], [[TMP19]]
// CHECK-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP0]], align 8
// CHECK-NEXT: [[TMP21:%.*]] = load i32, ptr [[I]], align 4
// CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP20]], i64 [[IDXPROM]]
// CHECK-NEXT: store i32 [[ADD2]], ptr [[ARRAYIDX]], align 4
// CHECK-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK: omp.body.continue:
// CHECK-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK: omp.inner.for.inc:
// CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP22]], 1
// CHECK-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
// CHECK-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK: omp.inner.for.end:
// CHECK-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK: omp.loop.exit:
// CHECK-NEXT: [[TMP23:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP24:%.*]] = load i32, ptr [[TMP23]], align 4
// CHECK-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP24]])
// CHECK-NEXT: ret void
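// Note: unlike the split 'parallel' + 'for' case above, this combined
// 'parallel for' outlined body returns right after __kmpc_for_static_fini
// with no trailing __kmpc_barrier; the loop's implicit barrier is presumably
// subsumed by the join at the end of the parallel region.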
//
// CHECK-LABEL: define {{[^@]+}}@test_omp_parallel_for_simd_copyin
// CHECK-SAME: (ptr noundef [[A:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @x)
// CHECK-NEXT: store i32 3, ptr [[TMP0]], align 4
// CHECK-NEXT: [[TMP1:%.*]] = call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @x)
// CHECK-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3:[0-9]+]], i32 2, ptr @test_omp_parallel_for_simd_copyin.omp_outlined, ptr [[A_ADDR]], ptr [[TMP1]])
// CHECK-NEXT: ret void
//
// CHECK-LABEL: define {{[^@]+}}@test_omp_parallel_for_simd_copyin.omp_outlined
// CHECK-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[A:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[X_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK-NEXT: store ptr [[X]], ptr [[X_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[X_ADDR]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @x)
// CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[TMP1]] to i64
// CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[TMP2]] to i64
// CHECK-NEXT: [[TMP5:%.*]] = icmp ne i64 [[TMP3]], [[TMP4]]
// CHECK-NEXT: br i1 [[TMP5]], label [[COPYIN_NOT_MASTER:%.*]], label [[COPYIN_NOT_MASTER_END:%.*]]
// CHECK: copyin.not.master:
// CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP1]], align 4
// CHECK-NEXT: store i32 [[TMP6]], ptr [[TMP2]], align 4
// CHECK-NEXT: br label [[COPYIN_NOT_MASTER_END]]
// CHECK: copyin.not.master.end:
// CHECK-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
// CHECK-NEXT: call void @__kmpc_barrier(ptr @[[GLOB1:[0-9]+]], i32 [[TMP8]])
// CHECK-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
// CHECK-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4
// CHECK-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP10]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 99
// CHECK-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK: cond.true:
// CHECK-NEXT: br label [[COND_END:%.*]]
// CHECK: cond.false:
// CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NEXT: br label [[COND_END]]
// CHECK: cond.end:
// CHECK-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4
// CHECK-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK: omp.inner.for.cond:
// CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group
// CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group
// CHECK-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
// CHECK-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK: omp.inner.for.body:
// CHECK-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group
// CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP16]], 1
// CHECK-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group
// CHECK-NEXT: [[TMP17:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group
// CHECK-NEXT: [[TMP18:%.*]] = call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @x)
// CHECK-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP18]], align 4, !llvm.access.group
// CHECK-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP17]], [[TMP19]]
// CHECK-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP0]], align 8, !llvm.access.group
// CHECK-NEXT: [[TMP21:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group
// CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP21]] to i64
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP20]], i64 [[IDXPROM]]
// CHECK-NEXT: store i32 [[ADD2]], ptr [[ARRAYIDX]], align 4, !llvm.access.group
// CHECK-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK: omp.body.continue:
// CHECK-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK: omp.inner.for.inc:
// CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP22]], 1
// CHECK-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
// CHECK-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK: omp.inner.for.end:
// CHECK-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK: omp.loop.exit:
// CHECK-NEXT: [[TMP23:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP24:%.*]] = load i32, ptr [[TMP23]], align 4
// CHECK-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP24]])
// CHECK-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
// CHECK-NEXT: br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK: .omp.final.then:
// CHECK-NEXT: store i32 100, ptr [[I]], align 4
// CHECK-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK: .omp.final.done:
// CHECK-NEXT: ret void
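// Note: the simd variant tags the loop-body memory accesses with
// !llvm.access.group metadata (feeding the loop's parallel_accesses info for
// vectorization) and, in .omp.final.then, stores the past-the-end value 100
// into i for the final iteration.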
//
// CHECK-LABEL: define {{[^@]+}}@test_omp_parallel_sections_copyin
// CHECK-SAME: (ptr noundef [[A:%.*]], ptr noundef [[B:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @x)
// CHECK-NEXT: store i32 4, ptr [[TMP0]], align 4
// CHECK-NEXT: [[TMP1:%.*]] = call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @x)
// CHECK-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3:[0-9]+]], i32 3, ptr @test_omp_parallel_sections_copyin.omp_outlined, ptr [[A_ADDR]], ptr [[B_ADDR]], ptr [[TMP1]])
// CHECK-NEXT: ret void
//
// CHECK-LABEL: define {{[^@]+}}@test_omp_parallel_sections_copyin.omp_outlined
// CHECK-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[A:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[B:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[X_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[DOTOMP_SECTIONS_LB_:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_SECTIONS_UB_:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_SECTIONS_ST_:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_SECTIONS_IL_:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_SECTIONS_IV_:%.*]] = alloca i32, align 4
// CHECK-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK-NEXT: store ptr [[X]], ptr [[X_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[X_ADDR]], align 8
// CHECK-NEXT: [[TMP3:%.*]] = call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @x)
// CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[TMP2]] to i64
// CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[TMP3]] to i64
// CHECK-NEXT: [[TMP6:%.*]] = icmp ne i64 [[TMP4]], [[TMP5]]
// CHECK-NEXT: br i1 [[TMP6]], label [[COPYIN_NOT_MASTER:%.*]], label [[COPYIN_NOT_MASTER_END:%.*]]
// CHECK: copyin.not.master:
// CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP2]], align 4
// CHECK-NEXT: store i32 [[TMP7]], ptr [[TMP3]], align 4
// CHECK-NEXT: br label [[COPYIN_NOT_MASTER_END]]
// CHECK: copyin.not.master.end:
// CHECK-NEXT: [[TMP8:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4
// CHECK-NEXT: call void @__kmpc_barrier(ptr @[[GLOB1:[0-9]+]], i32 [[TMP9]])
// CHECK-NEXT: store i32 0, ptr [[DOTOMP_SECTIONS_LB_]], align 4
// CHECK-NEXT: store i32 1, ptr [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK-NEXT: store i32 1, ptr [[DOTOMP_SECTIONS_ST_]], align 4
// CHECK-NEXT: store i32 0, ptr [[DOTOMP_SECTIONS_IL_]], align 4
// CHECK-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 4
// CHECK-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB4:[0-9]+]], i32 [[TMP11]], i32 34, ptr [[DOTOMP_SECTIONS_IL_]], ptr [[DOTOMP_SECTIONS_LB_]], ptr [[DOTOMP_SECTIONS_UB_]], ptr [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1)
// CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK-NEXT: [[TMP13:%.*]] = icmp slt i32 [[TMP12]], 1
// CHECK-NEXT: [[TMP14:%.*]] = select i1 [[TMP13]], i32 [[TMP12]], i32 1
// CHECK-NEXT: store i32 [[TMP14]], ptr [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_LB_]], align 4
// CHECK-NEXT: store i32 [[TMP15]], ptr [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK: omp.inner.for.cond:
// CHECK-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_]], align 4
// CHECK-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
// CHECK-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK: omp.inner.for.body:
// CHECK-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK-NEXT: switch i32 [[TMP18]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [
// CHECK-NEXT: i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]]
// CHECK-NEXT: i32 1, label [[DOTOMP_SECTIONS_CASE1:%.*]]
// CHECK-NEXT: ]
// CHECK: .omp.sections.case:
// CHECK-NEXT: [[TMP19:%.*]] = call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @x)
// CHECK-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP19]], align 4
// CHECK-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP0]], align 8
// CHECK-NEXT: store i32 [[TMP20]], ptr [[TMP21]], align 4
// CHECK-NEXT: br label [[DOTOMP_SECTIONS_EXIT]]
// CHECK: .omp.sections.case1:
// CHECK-NEXT: [[TMP22:%.*]] = call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @x)
// CHECK-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4
// CHECK-NEXT: [[TMP24:%.*]] = load ptr, ptr [[TMP1]], align 8
// CHECK-NEXT: store i32 [[TMP23]], ptr [[TMP24]], align 4
// CHECK-NEXT: br label [[DOTOMP_SECTIONS_EXIT]]
// CHECK: .omp.sections.exit:
// CHECK-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK: omp.inner.for.inc:
// CHECK-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[TMP25]], 1
// CHECK-NEXT: store i32 [[INC]], ptr [[DOTOMP_SECTIONS_IV_]], align 4
// CHECK-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK: omp.inner.for.end:
// CHECK-NEXT: [[TMP26:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP27:%.*]] = load i32, ptr [[TMP26]], align 4
// CHECK-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB4:[0-9]+]], i32 [[TMP27]])
// CHECK-NEXT: ret void
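// Note: sections lower to a static worksharing loop over section ids 0..1;
// each iteration switches on the IV into one '.omp.sections.case' block, so
// the same copyin prologue runs before either section body executes.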
//
// CHECK-LABEL: define {{[^@]+}}@test_omp_parallel_master_copyin
// CHECK-SAME: (ptr noundef [[A:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @x)
// CHECK-NEXT: store i32 5, ptr [[TMP0]], align 4
// CHECK-NEXT: [[TMP1:%.*]] = call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @x)
// CHECK-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3:[0-9]+]], i32 2, ptr @test_omp_parallel_master_copyin.omp_outlined, ptr [[A_ADDR]], ptr [[TMP1]])
// CHECK-NEXT: ret void
//
// CHECK-LABEL: define {{[^@]+}}@test_omp_parallel_master_copyin.omp_outlined
// CHECK-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[A:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[X_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK-NEXT: store ptr [[X]], ptr [[X_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[X_ADDR]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @x)
// CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[TMP1]] to i64
// CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[TMP2]] to i64
// CHECK-NEXT: [[TMP5:%.*]] = icmp ne i64 [[TMP3]], [[TMP4]]
// CHECK-NEXT: br i1 [[TMP5]], label [[COPYIN_NOT_MASTER:%.*]], label [[COPYIN_NOT_MASTER_END:%.*]]
// CHECK: copyin.not.master:
// CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP1]], align 4
// CHECK-NEXT: store i32 [[TMP6]], ptr [[TMP2]], align 4
// CHECK-NEXT: br label [[COPYIN_NOT_MASTER_END]]
// CHECK: copyin.not.master.end:
// CHECK-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
// CHECK-NEXT: call void @__kmpc_barrier(ptr @[[GLOB1:[0-9]+]], i32 [[TMP8]])
// CHECK-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4
// CHECK-NEXT: [[TMP11:%.*]] = call i32 @__kmpc_master(ptr @[[GLOB3:[0-9]+]], i32 [[TMP10]])
// CHECK-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
// CHECK-NEXT: br i1 [[TMP12]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
// CHECK: omp_if.then:
// CHECK-NEXT: store i32 0, ptr [[I]], align 4
// CHECK-NEXT: br label [[FOR_COND:%.*]]
// CHECK: for.cond:
// CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4
// CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP13]], 100
// CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
// CHECK: for.body:
// CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[I]], align 4
// CHECK-NEXT: [[TMP15:%.*]] = call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @x)
// CHECK-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 4
// CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP16]]
// CHECK-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP0]], align 8
// CHECK-NEXT: [[TMP18:%.*]] = load i32, ptr [[I]], align 4
// CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP18]] to i64
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP17]], i64 [[IDXPROM]]
// CHECK-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX]], align 4
// CHECK-NEXT: br label [[FOR_INC:%.*]]
// CHECK: for.inc:
// CHECK-NEXT: [[TMP19:%.*]] = load i32, ptr [[I]], align 4
// CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK-NEXT: store i32 [[INC]], ptr [[I]], align 4
// CHECK-NEXT: br label [[FOR_COND]]
// CHECK: for.end:
// CHECK-NEXT: call void @__kmpc_end_master(ptr @[[GLOB3:[0-9]+]], i32 [[TMP10]])
// CHECK-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK: omp_if.end:
// CHECK-NEXT: ret void
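// Note: in the master variant only the thread for which __kmpc_master
// returns nonzero runs the loop, bracketed by __kmpc_end_master; the copyin
// prologue and barrier still execute in every thread beforehand.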