[TySan] Don't report globals with incomplete types. (#121922)
[llvm-project.git] / clang / test / OpenMP / target_teams_generic_loop_codegen_as_distribute.cpp
blobf3bbbc6229abdc6bff1cd56b67021b11ac2fece1
1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _ --version 2
2 // REQUIRES: amdgpu-registered-target
4 // RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple x86_64-unknown-unknown -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm-bc %s -o %t-ppc-host.bc
5 // RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple amdgcn-amd-amdhsa -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefix=IR-GPU
7 // RUN: %clang_cc1 -verify -triple x86_64-pc-linux-gnu -fopenmp -emit-llvm %s -o - | FileCheck %s --check-prefix=IR
9 // Check same results after serialization round-trip
10 // RUN: %clang_cc1 -verify -triple x86_64-pc-linux-gnu -fopenmp -emit-pch -o %t %s
11 // RUN: %clang_cc1 -verify -triple x86_64-pc-linux-gnu -fopenmp -include-pch %t -emit-llvm %s -o - | FileCheck %s --check-prefix=IR-PCH
12 extern int foo(int i);
14 // expected-no-diagnostics
16 #ifndef HEADER
17 #define HEADER
18 int N = 100000;
19 int main()
21 int i;
22 int a[N];
23 int b[N];
25 // Presence of call. Cannot use 'parallel for', must use 'distribute' when
26 // assume-no-nested-parallelism isn't specified.
27 #pragma omp target teams loop
28 for (i=0; i < N; i++) {
29 for (int j=0; j < N; j++) {
30 a[i] = b[i] * N + foo(j);
33 return 0;
35 #endif
36 // IR-GPU-LABEL: define weak_odr protected amdgpu_kernel void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l27
37 // IR-GPU-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[VLA1:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[B:%.*]]) #[[ATTR0:[0-9]+]] {
38 // IR-GPU-NEXT: entry:
39 // IR-GPU-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
40 // IR-GPU-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8, addrspace(5)
41 // IR-GPU-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8, addrspace(5)
42 // IR-GPU-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
43 // IR-GPU-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8, addrspace(5)
44 // IR-GPU-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
45 // IR-GPU-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8, addrspace(5)
46 // IR-GPU-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
47 // IR-GPU-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4, addrspace(5)
48 // IR-GPU-NEXT: [[DYN_PTR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DYN_PTR_ADDR]] to ptr
49 // IR-GPU-NEXT: [[N_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[N_ADDR]] to ptr
50 // IR-GPU-NEXT: [[VLA_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VLA_ADDR]] to ptr
51 // IR-GPU-NEXT: [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A_ADDR]] to ptr
52 // IR-GPU-NEXT: [[VLA_ADDR2_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VLA_ADDR2]] to ptr
53 // IR-GPU-NEXT: [[B_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[B_ADDR]] to ptr
54 // IR-GPU-NEXT: [[N_CASTED_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[N_CASTED]] to ptr
55 // IR-GPU-NEXT: [[DOTZERO_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTZERO_ADDR]] to ptr
56 // IR-GPU-NEXT: [[DOTTHREADID_TEMP__ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTTHREADID_TEMP_]] to ptr
57 // IR-GPU-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR_ASCAST]], align 8
58 // IR-GPU-NEXT: store i64 [[N]], ptr [[N_ADDR_ASCAST]], align 8
59 // IR-GPU-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR_ASCAST]], align 8
60 // IR-GPU-NEXT: store ptr [[A]], ptr [[A_ADDR_ASCAST]], align 8
61 // IR-GPU-NEXT: store i64 [[VLA1]], ptr [[VLA_ADDR2_ASCAST]], align 8
62 // IR-GPU-NEXT: store ptr [[B]], ptr [[B_ADDR_ASCAST]], align 8
63 // IR-GPU-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR_ASCAST]], align 8
64 // IR-GPU-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR_ASCAST]], align 8
65 // IR-GPU-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2_ASCAST]], align 8
66 // IR-GPU-NEXT: [[TMP3:%.*]] = load ptr, ptr [[B_ADDR_ASCAST]], align 8
67 // IR-GPU-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_target_init(ptr addrspacecast (ptr addrspace(1) @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l27_kernel_environment to ptr), ptr [[DYN_PTR]])
68 // IR-GPU-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP4]], -1
69 // IR-GPU-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
70 // IR-GPU: user_code.entry:
71 // IR-GPU-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_global_thread_num(ptr addrspacecast (ptr addrspace(1) @[[GLOB1:[0-9]+]] to ptr))
72 // IR-GPU-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR_ASCAST]], align 4
73 // IR-GPU-NEXT: store i32 [[TMP6]], ptr [[N_CASTED_ASCAST]], align 4
74 // IR-GPU-NEXT: [[TMP7:%.*]] = load i64, ptr [[N_CASTED_ASCAST]], align 8
75 // IR-GPU-NEXT: store i32 0, ptr [[DOTZERO_ADDR_ASCAST]], align 4
76 // IR-GPU-NEXT: store i32 [[TMP5]], ptr [[DOTTHREADID_TEMP__ASCAST]], align 4
77 // IR-GPU-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l27_omp_outlined(ptr [[DOTTHREADID_TEMP__ASCAST]], ptr [[DOTZERO_ADDR_ASCAST]], i64 [[TMP7]], i64 [[TMP0]], ptr [[TMP1]], i64 [[TMP2]], ptr [[TMP3]]) #[[ATTR2:[0-9]+]]
78 // IR-GPU-NEXT: call void @__kmpc_target_deinit()
79 // IR-GPU-NEXT: ret void
80 // IR-GPU: worker.exit:
81 // IR-GPU-NEXT: ret void
84 // IR-GPU-LABEL: define internal void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l27_omp_outlined
85 // IR-GPU-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[VLA1:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[B:%.*]]) #[[ATTR1:[0-9]+]] {
86 // IR-GPU-NEXT: entry:
87 // IR-GPU-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
88 // IR-GPU-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
89 // IR-GPU-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8, addrspace(5)
90 // IR-GPU-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8, addrspace(5)
91 // IR-GPU-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
92 // IR-GPU-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8, addrspace(5)
93 // IR-GPU-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
94 // IR-GPU-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4, addrspace(5)
95 // IR-GPU-NEXT: [[TMP:%.*]] = alloca i32, align 4, addrspace(5)
96 // IR-GPU-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4, addrspace(5)
97 // IR-GPU-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4, addrspace(5)
98 // IR-GPU-NEXT: [[I:%.*]] = alloca i32, align 4, addrspace(5)
99 // IR-GPU-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4, addrspace(5)
100 // IR-GPU-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4, addrspace(5)
101 // IR-GPU-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4, addrspace(5)
102 // IR-GPU-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4, addrspace(5)
103 // IR-GPU-NEXT: [[I5:%.*]] = alloca i32, align 4, addrspace(5)
104 // IR-GPU-NEXT: [[J:%.*]] = alloca i32, align 4, addrspace(5)
105 // IR-GPU-NEXT: [[DOTGLOBAL_TID__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTGLOBAL_TID__ADDR]] to ptr
106 // IR-GPU-NEXT: [[DOTBOUND_TID__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTBOUND_TID__ADDR]] to ptr
107 // IR-GPU-NEXT: [[N_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[N_ADDR]] to ptr
108 // IR-GPU-NEXT: [[VLA_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VLA_ADDR]] to ptr
109 // IR-GPU-NEXT: [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A_ADDR]] to ptr
110 // IR-GPU-NEXT: [[VLA_ADDR2_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VLA_ADDR2]] to ptr
111 // IR-GPU-NEXT: [[B_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[B_ADDR]] to ptr
112 // IR-GPU-NEXT: [[DOTOMP_IV_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTOMP_IV]] to ptr
113 // IR-GPU-NEXT: [[TMP_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TMP]] to ptr
114 // IR-GPU-NEXT: [[DOTCAPTURE_EXPR__ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTCAPTURE_EXPR_]] to ptr
115 // IR-GPU-NEXT: [[DOTCAPTURE_EXPR_3_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTCAPTURE_EXPR_3]] to ptr
116 // IR-GPU-NEXT: [[I_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I]] to ptr
117 // IR-GPU-NEXT: [[DOTOMP_COMB_LB_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTOMP_COMB_LB]] to ptr
118 // IR-GPU-NEXT: [[DOTOMP_COMB_UB_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTOMP_COMB_UB]] to ptr
119 // IR-GPU-NEXT: [[DOTOMP_STRIDE_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTOMP_STRIDE]] to ptr
120 // IR-GPU-NEXT: [[DOTOMP_IS_LAST_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTOMP_IS_LAST]] to ptr
121 // IR-GPU-NEXT: [[I5_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I5]] to ptr
122 // IR-GPU-NEXT: [[J_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[J]] to ptr
123 // IR-GPU-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR_ASCAST]], align 8
124 // IR-GPU-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR_ASCAST]], align 8
125 // IR-GPU-NEXT: store i64 [[N]], ptr [[N_ADDR_ASCAST]], align 8
126 // IR-GPU-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR_ASCAST]], align 8
127 // IR-GPU-NEXT: store ptr [[A]], ptr [[A_ADDR_ASCAST]], align 8
128 // IR-GPU-NEXT: store i64 [[VLA1]], ptr [[VLA_ADDR2_ASCAST]], align 8
129 // IR-GPU-NEXT: store ptr [[B]], ptr [[B_ADDR_ASCAST]], align 8
130 // IR-GPU-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR_ASCAST]], align 8
131 // IR-GPU-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR_ASCAST]], align 8
132 // IR-GPU-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2_ASCAST]], align 8
133 // IR-GPU-NEXT: [[TMP3:%.*]] = load ptr, ptr [[B_ADDR_ASCAST]], align 8
134 // IR-GPU-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR_ASCAST]], align 4
135 // IR-GPU-NEXT: store i32 [[TMP4]], ptr [[DOTCAPTURE_EXPR__ASCAST]], align 4
136 // IR-GPU-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ASCAST]], align 4
137 // IR-GPU-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
138 // IR-GPU-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
139 // IR-GPU-NEXT: [[SUB4:%.*]] = sub nsw i32 [[DIV]], 1
140 // IR-GPU-NEXT: store i32 [[SUB4]], ptr [[DOTCAPTURE_EXPR_3_ASCAST]], align 4
141 // IR-GPU-NEXT: store i32 0, ptr [[I_ASCAST]], align 4
142 // IR-GPU-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ASCAST]], align 4
143 // IR-GPU-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
144 // IR-GPU-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
145 // IR-GPU: omp.precond.then:
146 // IR-GPU-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB_ASCAST]], align 4
147 // IR-GPU-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3_ASCAST]], align 4
148 // IR-GPU-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_COMB_UB_ASCAST]], align 4
149 // IR-GPU-NEXT: store i32 1, ptr [[DOTOMP_STRIDE_ASCAST]], align 4
150 // IR-GPU-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST_ASCAST]], align 4
151 // IR-GPU-NEXT: [[TMP8:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR_ASCAST]], align 8
152 // IR-GPU-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4
153 // IR-GPU-NEXT: call void @__kmpc_distribute_static_init_4(ptr addrspacecast (ptr addrspace(1) @[[GLOB2:[0-9]+]] to ptr), i32 [[TMP9]], i32 92, ptr [[DOTOMP_IS_LAST_ASCAST]], ptr [[DOTOMP_COMB_LB_ASCAST]], ptr [[DOTOMP_COMB_UB_ASCAST]], ptr [[DOTOMP_STRIDE_ASCAST]], i32 1, i32 1)
154 // IR-GPU-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB_ASCAST]], align 4
155 // IR-GPU-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3_ASCAST]], align 4
156 // IR-GPU-NEXT: [[CMP6:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
157 // IR-GPU-NEXT: br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
158 // IR-GPU: cond.true:
159 // IR-GPU-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3_ASCAST]], align 4
160 // IR-GPU-NEXT: br label [[COND_END:%.*]]
161 // IR-GPU: cond.false:
162 // IR-GPU-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_UB_ASCAST]], align 4
163 // IR-GPU-NEXT: br label [[COND_END]]
164 // IR-GPU: cond.end:
165 // IR-GPU-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
166 // IR-GPU-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB_ASCAST]], align 4
167 // IR-GPU-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_COMB_LB_ASCAST]], align 4
168 // IR-GPU-NEXT: store i32 [[TMP14]], ptr [[DOTOMP_IV_ASCAST]], align 4
169 // IR-GPU-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
170 // IR-GPU: omp.inner.for.cond:
171 // IR-GPU-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV_ASCAST]], align 4
172 // IR-GPU-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_COMB_UB_ASCAST]], align 4
173 // IR-GPU-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
174 // IR-GPU-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
175 // IR-GPU: omp.inner.for.body:
176 // IR-GPU-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV_ASCAST]], align 4
177 // IR-GPU-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1
178 // IR-GPU-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
179 // IR-GPU-NEXT: store i32 [[ADD]], ptr [[I5_ASCAST]], align 4
180 // IR-GPU-NEXT: store i32 0, ptr [[J_ASCAST]], align 4
181 // IR-GPU-NEXT: br label [[FOR_COND:%.*]]
182 // IR-GPU: for.cond:
183 // IR-GPU-NEXT: [[TMP18:%.*]] = load i32, ptr [[J_ASCAST]], align 4
184 // IR-GPU-NEXT: [[TMP19:%.*]] = load i32, ptr [[N_ADDR_ASCAST]], align 4
185 // IR-GPU-NEXT: [[CMP8:%.*]] = icmp slt i32 [[TMP18]], [[TMP19]]
186 // IR-GPU-NEXT: br i1 [[CMP8]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
187 // IR-GPU: for.body:
188 // IR-GPU-NEXT: [[TMP20:%.*]] = load i32, ptr [[I5_ASCAST]], align 4
189 // IR-GPU-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP20]] to i64
190 // IR-GPU-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i64 [[IDXPROM]]
191 // IR-GPU-NEXT: [[TMP21:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
192 // IR-GPU-NEXT: [[TMP22:%.*]] = load i32, ptr [[N_ADDR_ASCAST]], align 4
193 // IR-GPU-NEXT: [[MUL9:%.*]] = mul nsw i32 [[TMP21]], [[TMP22]]
194 // IR-GPU-NEXT: [[TMP23:%.*]] = load i32, ptr [[J_ASCAST]], align 4
195 // IR-GPU-NEXT: [[CALL:%.*]] = call noundef i32 @_Z3fooi(i32 noundef [[TMP23]]) #[[ATTR4:[0-9]+]]
196 // IR-GPU-NEXT: [[ADD10:%.*]] = add nsw i32 [[MUL9]], [[CALL]]
197 // IR-GPU-NEXT: [[TMP24:%.*]] = load i32, ptr [[I5_ASCAST]], align 4
198 // IR-GPU-NEXT: [[IDXPROM11:%.*]] = sext i32 [[TMP24]] to i64
199 // IR-GPU-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 [[IDXPROM11]]
200 // IR-GPU-NEXT: store i32 [[ADD10]], ptr [[ARRAYIDX12]], align 4
201 // IR-GPU-NEXT: br label [[FOR_INC:%.*]]
202 // IR-GPU: for.inc:
203 // IR-GPU-NEXT: [[TMP25:%.*]] = load i32, ptr [[J_ASCAST]], align 4
204 // IR-GPU-NEXT: [[INC:%.*]] = add nsw i32 [[TMP25]], 1
205 // IR-GPU-NEXT: store i32 [[INC]], ptr [[J_ASCAST]], align 4
206 // IR-GPU-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]]
207 // IR-GPU: for.end:
208 // IR-GPU-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
209 // IR-GPU: omp.body.continue:
210 // IR-GPU-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
211 // IR-GPU: omp.inner.for.inc:
212 // IR-GPU-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_IV_ASCAST]], align 4
213 // IR-GPU-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP26]], 1
214 // IR-GPU-NEXT: store i32 [[ADD13]], ptr [[DOTOMP_IV_ASCAST]], align 4
215 // IR-GPU-NEXT: br label [[OMP_INNER_FOR_COND]]
216 // IR-GPU: omp.inner.for.end:
217 // IR-GPU-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
218 // IR-GPU: omp.loop.exit:
219 // IR-GPU-NEXT: [[TMP27:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR_ASCAST]], align 8
220 // IR-GPU-NEXT: [[TMP28:%.*]] = load i32, ptr [[TMP27]], align 4
221 // IR-GPU-NEXT: call void @__kmpc_distribute_static_fini(ptr addrspacecast (ptr addrspace(1) @[[GLOB2]] to ptr), i32 [[TMP28]])
222 // IR-GPU-NEXT: br label [[OMP_PRECOND_END]]
223 // IR-GPU: omp.precond.end:
224 // IR-GPU-NEXT: ret void
227 // IR-LABEL: define dso_local noundef i32 @main
228 // IR-SAME: () #[[ATTR0:[0-9]+]] {
229 // IR-NEXT: entry:
230 // IR-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
231 // IR-NEXT: [[I:%.*]] = alloca i32, align 4
232 // IR-NEXT: [[SAVED_STACK:%.*]] = alloca ptr, align 8
233 // IR-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
234 // IR-NEXT: [[__VLA_EXPR1:%.*]] = alloca i64, align 8
235 // IR-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
236 // IR-NEXT: store i32 0, ptr [[RETVAL]], align 4
237 // IR-NEXT: [[TMP0:%.*]] = load i32, ptr @N, align 4
238 // IR-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
239 // IR-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0()
240 // IR-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8
241 // IR-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 16
242 // IR-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8
243 // IR-NEXT: [[TMP3:%.*]] = load i32, ptr @N, align 4
244 // IR-NEXT: [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
245 // IR-NEXT: [[VLA1:%.*]] = alloca i32, i64 [[TMP4]], align 16
246 // IR-NEXT: store i64 [[TMP4]], ptr [[__VLA_EXPR1]], align 8
247 // IR-NEXT: [[TMP5:%.*]] = load i32, ptr @N, align 4
248 // IR-NEXT: store i32 [[TMP5]], ptr [[N_CASTED]], align 4
249 // IR-NEXT: [[TMP6:%.*]] = load i64, ptr [[N_CASTED]], align 8
250 // IR-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l27(i64 [[TMP6]], i64 [[TMP1]], ptr [[VLA]], i64 [[TMP4]], ptr [[VLA1]]) #[[ATTR3:[0-9]+]]
251 // IR-NEXT: store i32 0, ptr [[RETVAL]], align 4
252 // IR-NEXT: [[TMP7:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8
253 // IR-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP7]])
254 // IR-NEXT: [[TMP8:%.*]] = load i32, ptr [[RETVAL]], align 4
255 // IR-NEXT: ret i32 [[TMP8]]
258 // IR-LABEL: define internal void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l27
259 // IR-SAME: (i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[VLA1:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[B:%.*]]) #[[ATTR2:[0-9]+]] {
260 // IR-NEXT: entry:
261 // IR-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
262 // IR-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
263 // IR-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
264 // IR-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
265 // IR-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
266 // IR-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
267 // IR-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
268 // IR-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
269 // IR-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
270 // IR-NEXT: store i64 [[VLA1]], ptr [[VLA_ADDR2]], align 8
271 // IR-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
272 // IR-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
273 // IR-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8
274 // IR-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8
275 // IR-NEXT: [[TMP3:%.*]] = load ptr, ptr [[B_ADDR]], align 8
276 // IR-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4
277 // IR-NEXT: store i32 [[TMP4]], ptr [[N_CASTED]], align 4
278 // IR-NEXT: [[TMP5:%.*]] = load i64, ptr [[N_CASTED]], align 8
279 // IR-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2:[0-9]+]], i32 5, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l27.omp_outlined, i64 [[TMP5]], i64 [[TMP0]], ptr [[TMP1]], i64 [[TMP2]], ptr [[TMP3]])
280 // IR-NEXT: ret void
283 // IR-LABEL: define internal void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l27.omp_outlined
284 // IR-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[VLA1:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[B:%.*]]) #[[ATTR2]] {
285 // IR-NEXT: entry:
286 // IR-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
287 // IR-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
288 // IR-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
289 // IR-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
290 // IR-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
291 // IR-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
292 // IR-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
293 // IR-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
294 // IR-NEXT: [[TMP:%.*]] = alloca i32, align 4
295 // IR-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
296 // IR-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
297 // IR-NEXT: [[I:%.*]] = alloca i32, align 4
298 // IR-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
299 // IR-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
300 // IR-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
301 // IR-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
302 // IR-NEXT: [[I5:%.*]] = alloca i32, align 4
303 // IR-NEXT: [[J:%.*]] = alloca i32, align 4
304 // IR-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
305 // IR-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
306 // IR-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
307 // IR-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
308 // IR-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
309 // IR-NEXT: store i64 [[VLA1]], ptr [[VLA_ADDR2]], align 8
310 // IR-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
311 // IR-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
312 // IR-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8
313 // IR-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8
314 // IR-NEXT: [[TMP3:%.*]] = load ptr, ptr [[B_ADDR]], align 8
315 // IR-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4
316 // IR-NEXT: store i32 [[TMP4]], ptr [[DOTCAPTURE_EXPR_]], align 4
317 // IR-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
318 // IR-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
319 // IR-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
320 // IR-NEXT: [[SUB4:%.*]] = sub nsw i32 [[DIV]], 1
321 // IR-NEXT: store i32 [[SUB4]], ptr [[DOTCAPTURE_EXPR_3]], align 4
322 // IR-NEXT: store i32 0, ptr [[I]], align 4
323 // IR-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
324 // IR-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
325 // IR-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
326 // IR: omp.precond.then:
327 // IR-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
328 // IR-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4
329 // IR-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_COMB_UB]], align 4
330 // IR-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
331 // IR-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
332 // IR-NEXT: [[TMP8:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
333 // IR-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4
334 // IR-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP9]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
335 // IR-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
336 // IR-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4
337 // IR-NEXT: [[CMP6:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
338 // IR-NEXT: br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
339 // IR: cond.true:
340 // IR-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4
341 // IR-NEXT: br label [[COND_END:%.*]]
342 // IR: cond.false:
343 // IR-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
344 // IR-NEXT: br label [[COND_END]]
345 // IR: cond.end:
346 // IR-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
347 // IR-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
348 // IR-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
349 // IR-NEXT: store i32 [[TMP14]], ptr [[DOTOMP_IV]], align 4
350 // IR-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
351 // IR: omp.inner.for.cond:
352 // IR-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
353 // IR-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
354 // IR-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
355 // IR-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
356 // IR: omp.inner.for.body:
357 // IR-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
358 // IR-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1
359 // IR-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
360 // IR-NEXT: store i32 [[ADD]], ptr [[I5]], align 4
361 // IR-NEXT: store i32 0, ptr [[J]], align 4
362 // IR-NEXT: br label [[FOR_COND:%.*]]
363 // IR: for.cond:
364 // IR-NEXT: [[TMP18:%.*]] = load i32, ptr [[J]], align 4
365 // IR-NEXT: [[TMP19:%.*]] = load i32, ptr [[N_ADDR]], align 4
366 // IR-NEXT: [[CMP8:%.*]] = icmp slt i32 [[TMP18]], [[TMP19]]
367 // IR-NEXT: br i1 [[CMP8]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
368 // IR: for.body:
369 // IR-NEXT: [[TMP20:%.*]] = load i32, ptr [[I5]], align 4
370 // IR-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP20]] to i64
371 // IR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i64 [[IDXPROM]]
372 // IR-NEXT: [[TMP21:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
373 // IR-NEXT: [[TMP22:%.*]] = load i32, ptr [[N_ADDR]], align 4
374 // IR-NEXT: [[MUL9:%.*]] = mul nsw i32 [[TMP21]], [[TMP22]]
375 // IR-NEXT: [[TMP23:%.*]] = load i32, ptr [[J]], align 4
376 // IR-NEXT: [[CALL:%.*]] = call noundef i32 @_Z3fooi(i32 noundef [[TMP23]])
377 // IR-NEXT: [[ADD10:%.*]] = add nsw i32 [[MUL9]], [[CALL]]
378 // IR-NEXT: [[TMP24:%.*]] = load i32, ptr [[I5]], align 4
379 // IR-NEXT: [[IDXPROM11:%.*]] = sext i32 [[TMP24]] to i64
380 // IR-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 [[IDXPROM11]]
381 // IR-NEXT: store i32 [[ADD10]], ptr [[ARRAYIDX12]], align 4
382 // IR-NEXT: br label [[FOR_INC:%.*]]
383 // IR: for.inc:
384 // IR-NEXT: [[TMP25:%.*]] = load i32, ptr [[J]], align 4
385 // IR-NEXT: [[INC:%.*]] = add nsw i32 [[TMP25]], 1
386 // IR-NEXT: store i32 [[INC]], ptr [[J]], align 4
387 // IR-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
388 // IR: for.end:
389 // IR-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
390 // IR: omp.body.continue:
391 // IR-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
392 // IR: omp.inner.for.inc:
393 // IR-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
394 // IR-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP26]], 1
395 // IR-NEXT: store i32 [[ADD13]], ptr [[DOTOMP_IV]], align 4
396 // IR-NEXT: br label [[OMP_INNER_FOR_COND]]
397 // IR: omp.inner.for.end:
398 // IR-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
399 // IR: omp.loop.exit:
400 // IR-NEXT: [[TMP27:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
401 // IR-NEXT: [[TMP28:%.*]] = load i32, ptr [[TMP27]], align 4
402 // IR-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP28]])
403 // IR-NEXT: br label [[OMP_PRECOND_END]]
404 // IR: omp.precond.end:
405 // IR-NEXT: ret void
408 // IR-PCH-LABEL: define dso_local noundef i32 @main
409 // IR-PCH-SAME: () #[[ATTR0:[0-9]+]] {
410 // IR-PCH-NEXT: entry:
411 // IR-PCH-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
412 // IR-PCH-NEXT: [[I:%.*]] = alloca i32, align 4
413 // IR-PCH-NEXT: [[SAVED_STACK:%.*]] = alloca ptr, align 8
414 // IR-PCH-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
415 // IR-PCH-NEXT: [[__VLA_EXPR1:%.*]] = alloca i64, align 8
416 // IR-PCH-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
417 // IR-PCH-NEXT: store i32 0, ptr [[RETVAL]], align 4
418 // IR-PCH-NEXT: [[TMP0:%.*]] = load i32, ptr @N, align 4
419 // IR-PCH-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
420 // IR-PCH-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave.p0()
421 // IR-PCH-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8
422 // IR-PCH-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 16
423 // IR-PCH-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8
424 // IR-PCH-NEXT: [[TMP3:%.*]] = load i32, ptr @N, align 4
425 // IR-PCH-NEXT: [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
426 // IR-PCH-NEXT: [[VLA1:%.*]] = alloca i32, i64 [[TMP4]], align 16
427 // IR-PCH-NEXT: store i64 [[TMP4]], ptr [[__VLA_EXPR1]], align 8
428 // IR-PCH-NEXT: [[TMP5:%.*]] = load i32, ptr @N, align 4
429 // IR-PCH-NEXT: store i32 [[TMP5]], ptr [[N_CASTED]], align 4
430 // IR-PCH-NEXT: [[TMP6:%.*]] = load i64, ptr [[N_CASTED]], align 8
431 // IR-PCH-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l27(i64 [[TMP6]], i64 [[TMP1]], ptr [[VLA]], i64 [[TMP4]], ptr [[VLA1]]) #[[ATTR3:[0-9]+]]
432 // IR-PCH-NEXT: store i32 0, ptr [[RETVAL]], align 4
433 // IR-PCH-NEXT: [[TMP7:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8
434 // IR-PCH-NEXT: call void @llvm.stackrestore.p0(ptr [[TMP7]])
435 // IR-PCH-NEXT: [[TMP8:%.*]] = load i32, ptr [[RETVAL]], align 4
436 // IR-PCH-NEXT: ret i32 [[TMP8]]
439 // IR-PCH-LABEL: define internal void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l27
440 // IR-PCH-SAME: (i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[VLA1:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[B:%.*]]) #[[ATTR2:[0-9]+]] {
441 // IR-PCH-NEXT: entry:
442 // IR-PCH-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
443 // IR-PCH-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
444 // IR-PCH-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
445 // IR-PCH-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
446 // IR-PCH-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
447 // IR-PCH-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
448 // IR-PCH-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
449 // IR-PCH-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
450 // IR-PCH-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
451 // IR-PCH-NEXT: store i64 [[VLA1]], ptr [[VLA_ADDR2]], align 8
452 // IR-PCH-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
453 // IR-PCH-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
454 // IR-PCH-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8
455 // IR-PCH-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8
456 // IR-PCH-NEXT: [[TMP3:%.*]] = load ptr, ptr [[B_ADDR]], align 8
457 // IR-PCH-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4
458 // IR-PCH-NEXT: store i32 [[TMP4]], ptr [[N_CASTED]], align 4
459 // IR-PCH-NEXT: [[TMP5:%.*]] = load i64, ptr [[N_CASTED]], align 8
460 // IR-PCH-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2:[0-9]+]], i32 5, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l27.omp_outlined, i64 [[TMP5]], i64 [[TMP0]], ptr [[TMP1]], i64 [[TMP2]], ptr [[TMP3]])
461 // IR-PCH-NEXT: ret void
464 // IR-PCH-LABEL: define internal void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l27.omp_outlined
465 // IR-PCH-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[VLA1:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[B:%.*]]) #[[ATTR2]] {
466 // IR-PCH-NEXT: entry:
467 // IR-PCH-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
468 // IR-PCH-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
469 // IR-PCH-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
470 // IR-PCH-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
471 // IR-PCH-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
472 // IR-PCH-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
473 // IR-PCH-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
474 // IR-PCH-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
475 // IR-PCH-NEXT: [[TMP:%.*]] = alloca i32, align 4
476 // IR-PCH-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
477 // IR-PCH-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
478 // IR-PCH-NEXT: [[I:%.*]] = alloca i32, align 4
479 // IR-PCH-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
480 // IR-PCH-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
481 // IR-PCH-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
482 // IR-PCH-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
483 // IR-PCH-NEXT: [[I5:%.*]] = alloca i32, align 4
484 // IR-PCH-NEXT: [[J:%.*]] = alloca i32, align 4
485 // IR-PCH-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
486 // IR-PCH-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
487 // IR-PCH-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
488 // IR-PCH-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8
489 // IR-PCH-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
490 // IR-PCH-NEXT: store i64 [[VLA1]], ptr [[VLA_ADDR2]], align 8
491 // IR-PCH-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
492 // IR-PCH-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8
493 // IR-PCH-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8
494 // IR-PCH-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8
495 // IR-PCH-NEXT: [[TMP3:%.*]] = load ptr, ptr [[B_ADDR]], align 8
496 // IR-PCH-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4
497 // IR-PCH-NEXT: store i32 [[TMP4]], ptr [[DOTCAPTURE_EXPR_]], align 4
498 // IR-PCH-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
499 // IR-PCH-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0
500 // IR-PCH-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
501 // IR-PCH-NEXT: [[SUB4:%.*]] = sub nsw i32 [[DIV]], 1
502 // IR-PCH-NEXT: store i32 [[SUB4]], ptr [[DOTCAPTURE_EXPR_3]], align 4
503 // IR-PCH-NEXT: store i32 0, ptr [[I]], align 4
504 // IR-PCH-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
505 // IR-PCH-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]]
506 // IR-PCH-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
507 // IR-PCH: omp.precond.then:
508 // IR-PCH-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
509 // IR-PCH-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4
510 // IR-PCH-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_COMB_UB]], align 4
511 // IR-PCH-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
512 // IR-PCH-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
513 // IR-PCH-NEXT: [[TMP8:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
514 // IR-PCH-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4
515 // IR-PCH-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP9]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
516 // IR-PCH-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
517 // IR-PCH-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4
518 // IR-PCH-NEXT: [[CMP6:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
519 // IR-PCH-NEXT: br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
520 // IR-PCH: cond.true:
521 // IR-PCH-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4
522 // IR-PCH-NEXT: br label [[COND_END:%.*]]
523 // IR-PCH: cond.false:
524 // IR-PCH-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
525 // IR-PCH-NEXT: br label [[COND_END]]
526 // IR-PCH: cond.end:
527 // IR-PCH-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
528 // IR-PCH-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
529 // IR-PCH-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
530 // IR-PCH-NEXT: store i32 [[TMP14]], ptr [[DOTOMP_IV]], align 4
531 // IR-PCH-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
532 // IR-PCH: omp.inner.for.cond:
533 // IR-PCH-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
534 // IR-PCH-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
535 // IR-PCH-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]]
536 // IR-PCH-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
537 // IR-PCH: omp.inner.for.body:
538 // IR-PCH-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
539 // IR-PCH-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1
540 // IR-PCH-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
541 // IR-PCH-NEXT: store i32 [[ADD]], ptr [[I5]], align 4
542 // IR-PCH-NEXT: store i32 0, ptr [[J]], align 4
543 // IR-PCH-NEXT: br label [[FOR_COND:%.*]]
544 // IR-PCH: for.cond:
545 // IR-PCH-NEXT: [[TMP18:%.*]] = load i32, ptr [[J]], align 4
546 // IR-PCH-NEXT: [[TMP19:%.*]] = load i32, ptr [[N_ADDR]], align 4
547 // IR-PCH-NEXT: [[CMP8:%.*]] = icmp slt i32 [[TMP18]], [[TMP19]]
548 // IR-PCH-NEXT: br i1 [[CMP8]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
549 // IR-PCH: for.body:
550 // IR-PCH-NEXT: [[TMP20:%.*]] = load i32, ptr [[I5]], align 4
551 // IR-PCH-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP20]] to i64
552 // IR-PCH-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i64 [[IDXPROM]]
553 // IR-PCH-NEXT: [[TMP21:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
554 // IR-PCH-NEXT: [[TMP22:%.*]] = load i32, ptr [[N_ADDR]], align 4
555 // IR-PCH-NEXT: [[MUL9:%.*]] = mul nsw i32 [[TMP21]], [[TMP22]]
556 // IR-PCH-NEXT: [[TMP23:%.*]] = load i32, ptr [[J]], align 4
557 // IR-PCH-NEXT: [[CALL:%.*]] = call noundef i32 @_Z3fooi(i32 noundef [[TMP23]])
558 // IR-PCH-NEXT: [[ADD10:%.*]] = add nsw i32 [[MUL9]], [[CALL]]
559 // IR-PCH-NEXT: [[TMP24:%.*]] = load i32, ptr [[I5]], align 4
560 // IR-PCH-NEXT: [[IDXPROM11:%.*]] = sext i32 [[TMP24]] to i64
561 // IR-PCH-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 [[IDXPROM11]]
562 // IR-PCH-NEXT: store i32 [[ADD10]], ptr [[ARRAYIDX12]], align 4
563 // IR-PCH-NEXT: br label [[FOR_INC:%.*]]
564 // IR-PCH: for.inc:
565 // IR-PCH-NEXT: [[TMP25:%.*]] = load i32, ptr [[J]], align 4
566 // IR-PCH-NEXT: [[INC:%.*]] = add nsw i32 [[TMP25]], 1
567 // IR-PCH-NEXT: store i32 [[INC]], ptr [[J]], align 4
568 // IR-PCH-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
569 // IR-PCH: for.end:
570 // IR-PCH-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
571 // IR-PCH: omp.body.continue:
572 // IR-PCH-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
573 // IR-PCH: omp.inner.for.inc:
574 // IR-PCH-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
575 // IR-PCH-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP26]], 1
576 // IR-PCH-NEXT: store i32 [[ADD13]], ptr [[DOTOMP_IV]], align 4
577 // IR-PCH-NEXT: br label [[OMP_INNER_FOR_COND]]
578 // IR-PCH: omp.inner.for.end:
579 // IR-PCH-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
580 // IR-PCH: omp.loop.exit:
581 // IR-PCH-NEXT: [[TMP27:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
582 // IR-PCH-NEXT: [[TMP28:%.*]] = load i32, ptr [[TMP27]], align 4
583 // IR-PCH-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP28]])
584 // IR-PCH-NEXT: br label [[OMP_PRECOND_END]]
585 // IR-PCH: omp.precond.end:
586 // IR-PCH-NEXT: ret void