clang/test/OpenMP/distribute_parallel_for_simd_num_threads_codegen.cpp
1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
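// To refresh the generated CHECK blocks after a codegen change, rerun utils/update_cc_test_checks.py
// on this file; the script should pick the UTC_ARGS recorded in the NOTE line back up, so they normally
// do not need to be repeated on the command line.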
2 // RUN: %clang_cc1 -no-enable-noundef-analysis -verify -fopenmp -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK1
3 // RUN: %clang_cc1 -no-enable-noundef-analysis -fopenmp -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
4 // RUN: %clang_cc1 -no-enable-noundef-analysis -fopenmp -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK1
6 // RUN: %clang_cc1 -no-enable-noundef-analysis -verify -fopenmp-simd -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK3
7 // RUN: %clang_cc1 -no-enable-noundef-analysis -fopenmp-simd -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
8 // RUN: %clang_cc1 -no-enable-noundef-analysis -fopenmp-simd -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK3
10 // RUN: %clang_cc1 -no-enable-noundef-analysis -verify -fopenmp -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK5
11 // RUN: %clang_cc1 -no-enable-noundef-analysis -fopenmp -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
12 // RUN: %clang_cc1 -no-enable-noundef-analysis -fopenmp -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK5
14 // RUN: %clang_cc1 -no-enable-noundef-analysis -verify -fopenmp-simd -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK3
15 // RUN: %clang_cc1 -no-enable-noundef-analysis -fopenmp-simd -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
16 // RUN: %clang_cc1 -no-enable-noundef-analysis -fopenmp-simd -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK3
18 // RUN: %clang_cc1 -no-enable-noundef-analysis -verify -fopenmp -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK9
19 // RUN: %clang_cc1 -no-enable-noundef-analysis -fopenmp -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple aarch64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
20 // RUN: %clang_cc1 -no-enable-noundef-analysis -fopenmp -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK9
22 // RUN: %clang_cc1 -no-enable-noundef-analysis -verify -fopenmp-simd -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK11
23 // RUN: %clang_cc1 -no-enable-noundef-analysis -fopenmp-simd -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple aarch64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
24 // RUN: %clang_cc1 -no-enable-noundef-analysis -fopenmp-simd -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK11
26 // RUN: %clang_cc1 -no-enable-noundef-analysis -verify -fopenmp -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK13
27 // RUN: %clang_cc1 -no-enable-noundef-analysis -fopenmp -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple aarch64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
28 // RUN: %clang_cc1 -no-enable-noundef-analysis -fopenmp -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK13
30 // RUN: %clang_cc1 -no-enable-noundef-analysis -verify -fopenmp-simd -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK11
31 // RUN: %clang_cc1 -no-enable-noundef-analysis -fopenmp-simd -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple aarch64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
32 // RUN: %clang_cc1 -no-enable-noundef-analysis -fopenmp-simd -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK11
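// The RUN lines cover -fopenmp and -fopenmp-simd, OpenMP 4.5 and the default version, x86_64 and
// aarch64 host triples, and each configuration both compiled directly and through an
// -emit-pch/-include-pch round trip. The -fopenmp-simd runs share one prefix per triple
// (CHECK3, CHECK11), so the 4.5 and default-version simd-only outputs are expected to be identical.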
34 // expected-no-diagnostics
35 #ifndef HEADER
36 #define HEADER
38 typedef __INTPTR_TYPE__ intptr_t;
41 void foo();
43 struct S {
44   intptr_t a, b, c;
45   S(intptr_t a) : a(a) {}
46   operator char() { extern void mayThrow(); mayThrow(); return a; }
47   ~S() {}
48 };
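// S converts to char through a user-defined operator that calls mayThrow(), so main is expected to
// reach it via invoke with a cleanup landingpad that runs ~S before resuming; foo() is only declared,
// so the outlined loop bodies are expected to call it via invoke with a terminate landingpad
// (see __clang_call_terminate in the checks below).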
50 template <typename T, int C>
51 int tmain() {
52 #pragma omp target
53 #pragma omp teams
54 #pragma omp distribute parallel for simd num_threads(C)
55   for (int i = 0; i < 100; i++)
56     foo();
57 #pragma omp target
58 #pragma omp teams
59 #pragma omp distribute parallel for simd num_threads(T(23))
60   for (int i = 0; i < 100; i++)
61     foo();
62   return 0;
63 }
65 int main() {
66   S s(0);
67   char a = s;
68 #pragma omp target
69 #pragma omp teams
70 #pragma omp distribute parallel for simd num_threads(2)
71   for (int i = 0; i < 100; i++) {
72     foo();
73   }
74 #pragma omp target
75 #pragma omp teams
77 #pragma omp distribute parallel for simd num_threads(a)
78   for (int i = 0; i < 100; i++) {
79     foo();
80   }
81   return a + tmain<char, 5>() + tmain<S, 1>();
82 }
84 // tmain 5
86 // tmain 1
95 #endif
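// In short, the checks for the -fopenmp runs verify that each num_threads clause lowers to a
// __kmpc_push_num_threads call emitted inside the distribute loop body, ahead of the __kmpc_fork_call
// for the parallel region: the literal 2 and the template argument 5 are passed as i32 immediates,
// while the char operand 'a' is loaded and sign-extended to i32 first. Each target region also has a
// host fallback that calls the offloading entry directly when __tgt_target_kernel fails.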
96 // CHECK1-LABEL: define {{[^@]+}}@main
97 // CHECK1-SAME: () #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 {
98 // CHECK1-NEXT: entry:
99 // CHECK1-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
100 // CHECK1-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8
101 // CHECK1-NEXT: [[A:%.*]] = alloca i8, align 1
102 // CHECK1-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8
103 // CHECK1-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
104 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
105 // CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
106 // CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
107 // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8
108 // CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8
109 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8
110 // CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
111 // CHECK1-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
112 // CHECK1-NEXT: store i32 0, ptr [[RETVAL]], align 4
113 // CHECK1-NEXT: call void @_ZN1SC1El(ptr nonnull align 8 dereferenceable(24) [[S]], i64 0)
114 // CHECK1-NEXT: [[CALL:%.*]] = invoke signext i8 @_ZN1ScvcEv(ptr nonnull align 8 dereferenceable(24) [[S]])
115 // CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
116 // CHECK1: invoke.cont:
117 // CHECK1-NEXT: store i8 [[CALL]], ptr [[A]], align 1
118 // CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
119 // CHECK1-NEXT: store i32 3, ptr [[TMP0]], align 4
120 // CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
121 // CHECK1-NEXT: store i32 0, ptr [[TMP1]], align 4
122 // CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
123 // CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8
124 // CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
125 // CHECK1-NEXT: store ptr null, ptr [[TMP3]], align 8
126 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
127 // CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8
128 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
129 // CHECK1-NEXT: store ptr null, ptr [[TMP5]], align 8
130 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
131 // CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8
132 // CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
133 // CHECK1-NEXT: store ptr null, ptr [[TMP7]], align 8
134 // CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
135 // CHECK1-NEXT: store i64 100, ptr [[TMP8]], align 8
136 // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
137 // CHECK1-NEXT: store i64 0, ptr [[TMP9]], align 8
138 // CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
139 // CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4
140 // CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
141 // CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4
142 // CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
143 // CHECK1-NEXT: store i32 0, ptr [[TMP12]], align 4
144 // CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.region_id, ptr [[KERNEL_ARGS]])
145 // CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
146 // CHECK1-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
147 // CHECK1: omp_offload.failed:
148 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68() #[[ATTR3:[0-9]+]]
149 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
150 // CHECK1: lpad:
151 // CHECK1-NEXT: [[TMP15:%.*]] = landingpad { ptr, i32 }
152 // CHECK1-NEXT: cleanup
153 // CHECK1-NEXT: [[TMP16:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 0
154 // CHECK1-NEXT: store ptr [[TMP16]], ptr [[EXN_SLOT]], align 8
155 // CHECK1-NEXT: [[TMP17:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 1
156 // CHECK1-NEXT: store i32 [[TMP17]], ptr [[EHSELECTOR_SLOT]], align 4
157 // CHECK1-NEXT: call void @_ZN1SD1Ev(ptr nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]]
158 // CHECK1-NEXT: br label [[EH_RESUME:%.*]]
159 // CHECK1: omp_offload.cont:
160 // CHECK1-NEXT: [[TMP18:%.*]] = load i8, ptr [[A]], align 1
161 // CHECK1-NEXT: store i8 [[TMP18]], ptr [[A_CASTED]], align 1
162 // CHECK1-NEXT: [[TMP19:%.*]] = load i64, ptr [[A_CASTED]], align 8
163 // CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
164 // CHECK1-NEXT: store i64 [[TMP19]], ptr [[TMP20]], align 8
165 // CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
166 // CHECK1-NEXT: store i64 [[TMP19]], ptr [[TMP21]], align 8
167 // CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
168 // CHECK1-NEXT: store ptr null, ptr [[TMP22]], align 8
169 // CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
170 // CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
171 // CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0
172 // CHECK1-NEXT: store i32 3, ptr [[TMP25]], align 4
173 // CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1
174 // CHECK1-NEXT: store i32 1, ptr [[TMP26]], align 4
175 // CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2
176 // CHECK1-NEXT: store ptr [[TMP23]], ptr [[TMP27]], align 8
177 // CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3
178 // CHECK1-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 8
179 // CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4
180 // CHECK1-NEXT: store ptr @.offload_sizes, ptr [[TMP29]], align 8
181 // CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5
182 // CHECK1-NEXT: store ptr @.offload_maptypes, ptr [[TMP30]], align 8
183 // CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6
184 // CHECK1-NEXT: store ptr null, ptr [[TMP31]], align 8
185 // CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7
186 // CHECK1-NEXT: store ptr null, ptr [[TMP32]], align 8
187 // CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8
188 // CHECK1-NEXT: store i64 100, ptr [[TMP33]], align 8
189 // CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9
190 // CHECK1-NEXT: store i64 0, ptr [[TMP34]], align 8
191 // CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10
192 // CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP35]], align 4
193 // CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11
194 // CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP36]], align 4
195 // CHECK1-NEXT: [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12
196 // CHECK1-NEXT: store i32 0, ptr [[TMP37]], align 4
197 // CHECK1-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.region_id, ptr [[KERNEL_ARGS2]])
198 // CHECK1-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
199 // CHECK1-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]]
200 // CHECK1: omp_offload.failed3:
201 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74(i64 [[TMP19]]) #[[ATTR3]]
202 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT4]]
203 // CHECK1: omp_offload.cont4:
204 // CHECK1-NEXT: [[TMP40:%.*]] = load i8, ptr [[A]], align 1
205 // CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP40]] to i32
206 // CHECK1-NEXT: [[CALL6:%.*]] = invoke i32 @_Z5tmainIcLi5EEiv()
207 // CHECK1-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[LPAD]]
208 // CHECK1: invoke.cont5:
209 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CALL6]]
210 // CHECK1-NEXT: [[CALL8:%.*]] = invoke i32 @_Z5tmainI1SLi1EEiv()
211 // CHECK1-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD]]
212 // CHECK1: invoke.cont7:
213 // CHECK1-NEXT: [[ADD9:%.*]] = add nsw i32 [[ADD]], [[CALL8]]
214 // CHECK1-NEXT: store i32 [[ADD9]], ptr [[RETVAL]], align 4
215 // CHECK1-NEXT: call void @_ZN1SD1Ev(ptr nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]]
216 // CHECK1-NEXT: [[TMP41:%.*]] = load i32, ptr [[RETVAL]], align 4
217 // CHECK1-NEXT: ret i32 [[TMP41]]
218 // CHECK1: eh.resume:
219 // CHECK1-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8
220 // CHECK1-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4
221 // CHECK1-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0
222 // CHECK1-NEXT: [[LPAD_VAL10:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1
223 // CHECK1-NEXT: resume { ptr, i32 } [[LPAD_VAL10]]
226 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SC1El
227 // CHECK1-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 {
228 // CHECK1-NEXT: entry:
229 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
230 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
231 // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
232 // CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
233 // CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
234 // CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
235 // CHECK1-NEXT: call void @_ZN1SC2El(ptr nonnull align 8 dereferenceable(24) [[THIS1]], i64 [[TMP0]])
236 // CHECK1-NEXT: ret void
239 // CHECK1-LABEL: define {{[^@]+}}@_ZN1ScvcEv
240 // CHECK1-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]]) #[[ATTR1]] comdat align 2 {
241 // CHECK1-NEXT: entry:
242 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
243 // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
244 // CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
245 // CHECK1-NEXT: call void @_Z8mayThrowv()
246 // CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0
247 // CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[A]], align 8
248 // CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i8
249 // CHECK1-NEXT: ret i8 [[CONV]]
252 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68
253 // CHECK1-SAME: () #[[ATTR2:[0-9]+]] {
254 // CHECK1-NEXT: entry:
255 // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined)
256 // CHECK1-NEXT: ret void
259 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined
260 // CHECK1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
261 // CHECK1-NEXT: entry:
262 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
263 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
264 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
265 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
266 // CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
267 // CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
268 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
269 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
270 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
271 // CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
272 // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
273 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
274 // CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
275 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
276 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
277 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
278 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
279 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
280 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
281 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99
282 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
283 // CHECK1: cond.true:
284 // CHECK1-NEXT: br label [[COND_END:%.*]]
285 // CHECK1: cond.false:
286 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
287 // CHECK1-NEXT: br label [[COND_END]]
288 // CHECK1: cond.end:
289 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
290 // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
291 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
292 // CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
293 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
294 // CHECK1: omp.inner.for.cond:
295 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9:![0-9]+]]
296 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP9]]
297 // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
298 // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
299 // CHECK1: omp.inner.for.body:
300 // CHECK1-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 2), !llvm.access.group [[ACC_GRP9]]
301 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP9]]
302 // CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
303 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP9]]
304 // CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
305 // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP9]]
306 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
307 // CHECK1: omp.inner.for.inc:
308 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]]
309 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP9]]
310 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
311 // CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]]
312 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]]
313 // CHECK1: omp.inner.for.end:
314 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
315 // CHECK1: omp.loop.exit:
316 // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
317 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
318 // CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
319 // CHECK1-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
320 // CHECK1: .omp.final.then:
321 // CHECK1-NEXT: store i32 100, ptr [[I]], align 4
322 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
323 // CHECK1: .omp.final.done:
324 // CHECK1-NEXT: ret void
327 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined.omp_outlined
328 // CHECK1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
329 // CHECK1-NEXT: entry:
330 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
331 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
332 // CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
333 // CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
334 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
335 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
336 // CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
337 // CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
338 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
339 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
340 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
341 // CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
342 // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
343 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
344 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
345 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
346 // CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
347 // CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
348 // CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
349 // CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
350 // CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
351 // CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
352 // CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
353 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
354 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
355 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
356 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
357 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
358 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
359 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
360 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
361 // CHECK1: cond.true:
362 // CHECK1-NEXT: br label [[COND_END:%.*]]
363 // CHECK1: cond.false:
364 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
365 // CHECK1-NEXT: br label [[COND_END]]
366 // CHECK1: cond.end:
367 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
368 // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
369 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
370 // CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
371 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
372 // CHECK1: omp.inner.for.cond:
373 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]]
374 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP13]]
375 // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
376 // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
377 // CHECK1: omp.inner.for.body:
378 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]]
379 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
380 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
381 // CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP13]]
382 // CHECK1-NEXT: invoke void @_Z3foov()
383 // CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP13]]
384 // CHECK1: invoke.cont:
385 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
386 // CHECK1: omp.body.continue:
387 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
388 // CHECK1: omp.inner.for.inc:
389 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]]
390 // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
391 // CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]]
392 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]]
393 // CHECK1: omp.inner.for.end:
394 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
395 // CHECK1: omp.loop.exit:
396 // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
397 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
398 // CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
399 // CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
400 // CHECK1: .omp.final.then:
401 // CHECK1-NEXT: store i32 100, ptr [[I]], align 4
402 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
403 // CHECK1: .omp.final.done:
404 // CHECK1-NEXT: ret void
405 // CHECK1: terminate.lpad:
406 // CHECK1-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 }
407 // CHECK1-NEXT: catch ptr null
408 // CHECK1-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0
409 // CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR7:[0-9]+]], !llvm.access.group [[ACC_GRP13]]
410 // CHECK1-NEXT: unreachable
413 // CHECK1-LABEL: define {{[^@]+}}@__clang_call_terminate
414 // CHECK1-SAME: (ptr [[TMP0:%.*]]) #[[ATTR5:[0-9]+]] comdat {
415 // CHECK1-NEXT: [[TMP2:%.*]] = call ptr @__cxa_begin_catch(ptr [[TMP0]]) #[[ATTR3]]
416 // CHECK1-NEXT: call void @_ZSt9terminatev() #[[ATTR7]]
417 // CHECK1-NEXT: unreachable
420 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74
421 // CHECK1-SAME: (i64 [[A:%.*]]) #[[ATTR2]] {
422 // CHECK1-NEXT: entry:
423 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
424 // CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
425 // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined, ptr [[A_ADDR]])
426 // CHECK1-NEXT: ret void
429 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined
430 // CHECK1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], ptr nonnull align 1 dereferenceable(1) [[A:%.*]]) #[[ATTR2]] {
431 // CHECK1-NEXT: entry:
432 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
433 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
434 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
435 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
436 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
437 // CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
438 // CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
439 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
440 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
441 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
442 // CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
443 // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
444 // CHECK1-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
445 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
446 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
447 // CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
448 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
449 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
450 // CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
451 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
452 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
453 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
454 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99
455 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
456 // CHECK1: cond.true:
457 // CHECK1-NEXT: br label [[COND_END:%.*]]
458 // CHECK1: cond.false:
459 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
460 // CHECK1-NEXT: br label [[COND_END]]
461 // CHECK1: cond.end:
462 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
463 // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
464 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
465 // CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
466 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
467 // CHECK1: omp.inner.for.cond:
468 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18:![0-9]+]]
469 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP18]]
470 // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
471 // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
472 // CHECK1: omp.inner.for.body:
473 // CHECK1-NEXT: [[TMP8:%.*]] = load i8, ptr [[TMP0]], align 1, !llvm.access.group [[ACC_GRP18]]
474 // CHECK1-NEXT: [[TMP9:%.*]] = sext i8 [[TMP8]] to i32
475 // CHECK1-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP2]], i32 [[TMP9]]), !llvm.access.group [[ACC_GRP18]]
476 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP18]]
477 // CHECK1-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
478 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP18]]
479 // CHECK1-NEXT: [[TMP13:%.*]] = zext i32 [[TMP12]] to i64
480 // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined.omp_outlined, i64 [[TMP11]], i64 [[TMP13]]), !llvm.access.group [[ACC_GRP18]]
481 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
482 // CHECK1: omp.inner.for.inc:
483 // CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
484 // CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP18]]
485 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
486 // CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
487 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
488 // CHECK1: omp.inner.for.end:
489 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
490 // CHECK1: omp.loop.exit:
491 // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
492 // CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
493 // CHECK1-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
494 // CHECK1-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
495 // CHECK1: .omp.final.then:
496 // CHECK1-NEXT: store i32 100, ptr [[I]], align 4
497 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
498 // CHECK1: .omp.final.done:
499 // CHECK1-NEXT: ret void
502 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined.omp_outlined
503 // CHECK1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
504 // CHECK1-NEXT: entry:
505 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
506 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
507 // CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
508 // CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
509 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
510 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
511 // CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
512 // CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
513 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
514 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
515 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
516 // CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
517 // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
518 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
519 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
520 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
521 // CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
522 // CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
523 // CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
524 // CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
525 // CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
526 // CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
527 // CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
528 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
529 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
530 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
531 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
532 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
533 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
534 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
535 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
536 // CHECK1: cond.true:
537 // CHECK1-NEXT: br label [[COND_END:%.*]]
538 // CHECK1: cond.false:
539 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
540 // CHECK1-NEXT: br label [[COND_END]]
541 // CHECK1: cond.end:
542 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
543 // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
544 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
545 // CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
546 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
547 // CHECK1: omp.inner.for.cond:
548 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21:![0-9]+]]
549 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP21]]
550 // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
551 // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
552 // CHECK1: omp.inner.for.body:
553 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]]
554 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
555 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
556 // CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP21]]
557 // CHECK1-NEXT: invoke void @_Z3foov()
558 // CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP21]]
559 // CHECK1: invoke.cont:
560 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
561 // CHECK1: omp.body.continue:
562 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
563 // CHECK1: omp.inner.for.inc:
564 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]]
565 // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
566 // CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]]
567 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]]
568 // CHECK1: omp.inner.for.end:
569 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
570 // CHECK1: omp.loop.exit:
571 // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
572 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
573 // CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
574 // CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
575 // CHECK1: .omp.final.then:
576 // CHECK1-NEXT: store i32 100, ptr [[I]], align 4
577 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
578 // CHECK1: .omp.final.done:
579 // CHECK1-NEXT: ret void
580 // CHECK1: terminate.lpad:
581 // CHECK1-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 }
582 // CHECK1-NEXT: catch ptr null
583 // CHECK1-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0
584 // CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR7]], !llvm.access.group [[ACC_GRP21]]
585 // CHECK1-NEXT: unreachable
588 // CHECK1-LABEL: define {{[^@]+}}@_Z5tmainIcLi5EEiv
589 // CHECK1-SAME: () #[[ATTR6:[0-9]+]] comdat {
590 // CHECK1-NEXT: entry:
591 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
592 // CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
593 // CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
594 // CHECK1-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
595 // CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
596 // CHECK1-NEXT: store i32 3, ptr [[TMP0]], align 4
597 // CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
598 // CHECK1-NEXT: store i32 0, ptr [[TMP1]], align 4
599 // CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
600 // CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8
601 // CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
602 // CHECK1-NEXT: store ptr null, ptr [[TMP3]], align 8
603 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
604 // CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8
605 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
606 // CHECK1-NEXT: store ptr null, ptr [[TMP5]], align 8
607 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
608 // CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8
609 // CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
610 // CHECK1-NEXT: store ptr null, ptr [[TMP7]], align 8
611 // CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
612 // CHECK1-NEXT: store i64 100, ptr [[TMP8]], align 8
613 // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
614 // CHECK1-NEXT: store i64 0, ptr [[TMP9]], align 8
615 // CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
616 // CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4
617 // CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
618 // CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4
619 // CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
620 // CHECK1-NEXT: store i32 0, ptr [[TMP12]], align 4
621 // CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.region_id, ptr [[KERNEL_ARGS]])
622 // CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
623 // CHECK1-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
624 // CHECK1: omp_offload.failed:
625 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52() #[[ATTR3]]
626 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
627 // CHECK1: omp_offload.cont:
628 // CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0
629 // CHECK1-NEXT: store i32 3, ptr [[TMP15]], align 4
630 // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1
631 // CHECK1-NEXT: store i32 0, ptr [[TMP16]], align 4
632 // CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2
633 // CHECK1-NEXT: store ptr null, ptr [[TMP17]], align 8
634 // CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3
635 // CHECK1-NEXT: store ptr null, ptr [[TMP18]], align 8
636 // CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4
637 // CHECK1-NEXT: store ptr null, ptr [[TMP19]], align 8
638 // CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5
639 // CHECK1-NEXT: store ptr null, ptr [[TMP20]], align 8
640 // CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6
641 // CHECK1-NEXT: store ptr null, ptr [[TMP21]], align 8
642 // CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7
643 // CHECK1-NEXT: store ptr null, ptr [[TMP22]], align 8
644 // CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8
645 // CHECK1-NEXT: store i64 100, ptr [[TMP23]], align 8
646 // CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9
647 // CHECK1-NEXT: store i64 0, ptr [[TMP24]], align 8
648 // CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10
649 // CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP25]], align 4
650 // CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11
651 // CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4
652 // CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12
653 // CHECK1-NEXT: store i32 0, ptr [[TMP27]], align 4
654 // CHECK1-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.region_id, ptr [[KERNEL_ARGS2]])
655 // CHECK1-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0
656 // CHECK1-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]]
657 // CHECK1: omp_offload.failed3:
658 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57() #[[ATTR3]]
659 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT4]]
660 // CHECK1: omp_offload.cont4:
661 // CHECK1-NEXT: ret i32 0
664 // CHECK1-LABEL: define {{[^@]+}}@_Z5tmainI1SLi1EEiv
665 // CHECK1-SAME: () #[[ATTR6]] comdat {
666 // CHECK1-NEXT: entry:
667 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
668 // CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
669 // CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
670 // CHECK1-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
671 // CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
672 // CHECK1-NEXT: store i32 3, ptr [[TMP0]], align 4
673 // CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
674 // CHECK1-NEXT: store i32 0, ptr [[TMP1]], align 4
675 // CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
676 // CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8
677 // CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
678 // CHECK1-NEXT: store ptr null, ptr [[TMP3]], align 8
679 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
680 // CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8
681 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
682 // CHECK1-NEXT: store ptr null, ptr [[TMP5]], align 8
683 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
684 // CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8
685 // CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
686 // CHECK1-NEXT: store ptr null, ptr [[TMP7]], align 8
687 // CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
688 // CHECK1-NEXT: store i64 100, ptr [[TMP8]], align 8
689 // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
690 // CHECK1-NEXT: store i64 0, ptr [[TMP9]], align 8
691 // CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
692 // CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4
693 // CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
694 // CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4
695 // CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
696 // CHECK1-NEXT: store i32 0, ptr [[TMP12]], align 4
697 // CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.region_id, ptr [[KERNEL_ARGS]])
698 // CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
699 // CHECK1-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
700 // CHECK1: omp_offload.failed:
701 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52() #[[ATTR3]]
702 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
703 // CHECK1: omp_offload.cont:
704 // CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0
705 // CHECK1-NEXT: store i32 3, ptr [[TMP15]], align 4
706 // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1
707 // CHECK1-NEXT: store i32 0, ptr [[TMP16]], align 4
708 // CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2
709 // CHECK1-NEXT: store ptr null, ptr [[TMP17]], align 8
710 // CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3
711 // CHECK1-NEXT: store ptr null, ptr [[TMP18]], align 8
712 // CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4
713 // CHECK1-NEXT: store ptr null, ptr [[TMP19]], align 8
714 // CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5
715 // CHECK1-NEXT: store ptr null, ptr [[TMP20]], align 8
716 // CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6
717 // CHECK1-NEXT: store ptr null, ptr [[TMP21]], align 8
718 // CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7
719 // CHECK1-NEXT: store ptr null, ptr [[TMP22]], align 8
720 // CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8
721 // CHECK1-NEXT: store i64 100, ptr [[TMP23]], align 8
722 // CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9
723 // CHECK1-NEXT: store i64 0, ptr [[TMP24]], align 8
724 // CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10
725 // CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP25]], align 4
726 // CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11
727 // CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4
728 // CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12
729 // CHECK1-NEXT: store i32 0, ptr [[TMP27]], align 4
730 // CHECK1-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.region_id, ptr [[KERNEL_ARGS2]])
731 // CHECK1-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0
732 // CHECK1-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]]
733 // CHECK1: omp_offload.failed3:
734 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57() #[[ATTR3]]
735 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT4]]
736 // CHECK1: omp_offload.cont4:
737 // CHECK1-NEXT: ret i32 0
740 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SD1Ev
741 // CHECK1-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat align 2 {
742 // CHECK1-NEXT: entry:
743 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
744 // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
745 // CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
746 // CHECK1-NEXT: call void @_ZN1SD2Ev(ptr nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR3]]
747 // CHECK1-NEXT: ret void
750 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SC2El
751 // CHECK1-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 [[A:%.*]]) unnamed_addr #[[ATTR6]] comdat align 2 {
752 // CHECK1-NEXT: entry:
753 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
754 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
755 // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
756 // CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
757 // CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
758 // CHECK1-NEXT: [[A2:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0
759 // CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
760 // CHECK1-NEXT: store i64 [[TMP0]], ptr [[A2]], align 8
761 // CHECK1-NEXT: ret void
764 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SD2Ev
765 // CHECK1-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat align 2 {
766 // CHECK1-NEXT: entry:
767 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
768 // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
769 // CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
770 // CHECK1-NEXT: ret void
773 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52
774 // CHECK1-SAME: () #[[ATTR2]] {
775 // CHECK1-NEXT: entry:
776 // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined)
777 // CHECK1-NEXT: ret void
780 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined
781 // CHECK1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
782 // CHECK1-NEXT: entry:
783 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
784 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
785 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
786 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
787 // CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
788 // CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
789 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
790 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
791 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
792 // CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
793 // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
794 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
795 // CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
796 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
797 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
798 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
799 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
800 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
801 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
802 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99
803 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
804 // CHECK1: cond.true:
805 // CHECK1-NEXT: br label [[COND_END:%.*]]
806 // CHECK1: cond.false:
807 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
808 // CHECK1-NEXT: br label [[COND_END]]
809 // CHECK1: cond.end:
810 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
811 // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
812 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
813 // CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
814 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
815 // CHECK1: omp.inner.for.cond:
816 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24:![0-9]+]]
817 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP24]]
818 // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
819 // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
820 // CHECK1: omp.inner.for.body:
821 // CHECK1-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 5), !llvm.access.group [[ACC_GRP24]]
822 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP24]]
823 // CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
824 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP24]]
825 // CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
826 // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP24]]
827 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
828 // CHECK1: omp.inner.for.inc:
829 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
830 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP24]]
831 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
832 // CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
833 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
834 // CHECK1: omp.inner.for.end:
835 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
836 // CHECK1: omp.loop.exit:
837 // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
838 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
839 // CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
840 // CHECK1-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
841 // CHECK1: .omp.final.then:
842 // CHECK1-NEXT: store i32 100, ptr [[I]], align 4
843 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
844 // CHECK1: .omp.final.done:
845 // CHECK1-NEXT: ret void
848 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined.omp_outlined
849 // CHECK1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
850 // CHECK1-NEXT: entry:
851 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
852 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
853 // CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
854 // CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
855 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
856 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
857 // CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
858 // CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
859 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
860 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
861 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
862 // CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
863 // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
864 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
865 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
866 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
867 // CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
868 // CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
869 // CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
870 // CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
871 // CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
872 // CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
873 // CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
874 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
875 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
876 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
877 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
878 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
879 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
880 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
881 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
882 // CHECK1: cond.true:
883 // CHECK1-NEXT: br label [[COND_END:%.*]]
884 // CHECK1: cond.false:
885 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
886 // CHECK1-NEXT: br label [[COND_END]]
887 // CHECK1: cond.end:
888 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
889 // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
890 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
891 // CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
892 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
893 // CHECK1: omp.inner.for.cond:
894 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27:![0-9]+]]
895 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP27]]
896 // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
897 // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
898 // CHECK1: omp.inner.for.body:
899 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]]
900 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
901 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
902 // CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP27]]
903 // CHECK1-NEXT: invoke void @_Z3foov()
904 // CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP27]]
905 // CHECK1: invoke.cont:
906 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
907 // CHECK1: omp.body.continue:
908 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
909 // CHECK1: omp.inner.for.inc:
910 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]]
911 // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
912 // CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]]
913 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]]
914 // CHECK1: omp.inner.for.end:
915 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
916 // CHECK1: omp.loop.exit:
917 // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
918 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
919 // CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
920 // CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
921 // CHECK1: .omp.final.then:
922 // CHECK1-NEXT: store i32 100, ptr [[I]], align 4
923 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
924 // CHECK1: .omp.final.done:
925 // CHECK1-NEXT: ret void
926 // CHECK1: terminate.lpad:
927 // CHECK1-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 }
928 // CHECK1-NEXT: catch ptr null
929 // CHECK1-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0
930 // CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR7]], !llvm.access.group [[ACC_GRP27]]
931 // CHECK1-NEXT: unreachable
934 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57
935 // CHECK1-SAME: () #[[ATTR2]] {
936 // CHECK1-NEXT: entry:
937 // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined)
938 // CHECK1-NEXT: ret void
941 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined
942 // CHECK1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
943 // CHECK1-NEXT: entry:
944 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
945 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
946 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
947 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
948 // CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
949 // CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
950 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
951 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
952 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
953 // CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
954 // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
955 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
956 // CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
957 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
958 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
959 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
960 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
961 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
962 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
963 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99
964 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
965 // CHECK1: cond.true:
966 // CHECK1-NEXT: br label [[COND_END:%.*]]
967 // CHECK1: cond.false:
968 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
969 // CHECK1-NEXT: br label [[COND_END]]
970 // CHECK1: cond.end:
971 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
972 // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
973 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
974 // CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
975 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
976 // CHECK1: omp.inner.for.cond:
977 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP30:![0-9]+]]
978 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP30]]
979 // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
980 // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
981 // CHECK1: omp.inner.for.body:
982 // CHECK1-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 23), !llvm.access.group [[ACC_GRP30]]
983 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP30]]
984 // CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
985 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP30]]
986 // CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
987 // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP30]]
988 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
989 // CHECK1: omp.inner.for.inc:
990 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP30]]
991 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP30]]
992 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
993 // CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP30]]
994 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]]
995 // CHECK1: omp.inner.for.end:
996 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
997 // CHECK1: omp.loop.exit:
998 // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
999 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
1000 // CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
1001 // CHECK1-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1002 // CHECK1: .omp.final.then:
1003 // CHECK1-NEXT: store i32 100, ptr [[I]], align 4
1004 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
1005 // CHECK1: .omp.final.done:
1006 // CHECK1-NEXT: ret void
1009 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined.omp_outlined
1010 // CHECK1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
1011 // CHECK1-NEXT: entry:
1012 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1013 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1014 // CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
1015 // CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
1016 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
1017 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
1018 // CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
1019 // CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
1020 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1021 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1022 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
1023 // CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
1024 // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
1025 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
1026 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
1027 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
1028 // CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
1029 // CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
1030 // CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
1031 // CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
1032 // CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
1033 // CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
1034 // CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
1035 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
1036 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
1037 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
1038 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
1039 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
1040 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
1041 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
1042 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1043 // CHECK1: cond.true:
1044 // CHECK1-NEXT: br label [[COND_END:%.*]]
1045 // CHECK1: cond.false:
1046 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
1047 // CHECK1-NEXT: br label [[COND_END]]
1048 // CHECK1: cond.end:
1049 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
1050 // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
1051 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
1052 // CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
1053 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
1054 // CHECK1: omp.inner.for.cond:
1055 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33:![0-9]+]]
1056 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP33]]
1057 // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
1058 // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1059 // CHECK1: omp.inner.for.body:
1060 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]]
1061 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
1062 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1063 // CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP33]]
1064 // CHECK1-NEXT: invoke void @_Z3foov()
1065 // CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP33]]
1066 // CHECK1: invoke.cont:
1067 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
1068 // CHECK1: omp.body.continue:
1069 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
1070 // CHECK1: omp.inner.for.inc:
1071 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]]
1072 // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
1073 // CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]]
1074 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]]
1075 // CHECK1: omp.inner.for.end:
1076 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
1077 // CHECK1: omp.loop.exit:
1078 // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
1079 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
1080 // CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
1081 // CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1082 // CHECK1: .omp.final.then:
1083 // CHECK1-NEXT: store i32 100, ptr [[I]], align 4
1084 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
1085 // CHECK1: .omp.final.done:
1086 // CHECK1-NEXT: ret void
1087 // CHECK1: terminate.lpad:
1088 // CHECK1-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 }
1089 // CHECK1-NEXT: catch ptr null
1090 // CHECK1-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0
1091 // CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR7]], !llvm.access.group [[ACC_GRP33]]
1092 // CHECK1-NEXT: unreachable
1095 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52
1096 // CHECK1-SAME: () #[[ATTR2]] {
1097 // CHECK1-NEXT: entry:
1098 // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined)
1099 // CHECK1-NEXT: ret void
1102 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined
1103 // CHECK1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
1104 // CHECK1-NEXT: entry:
1105 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1106 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1107 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
1108 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
1109 // CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
1110 // CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
1111 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1112 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1113 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
1114 // CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
1115 // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
1116 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
1117 // CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
1118 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
1119 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
1120 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
1121 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
1122 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
1123 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
1124 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99
1125 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1126 // CHECK1: cond.true:
1127 // CHECK1-NEXT: br label [[COND_END:%.*]]
1128 // CHECK1: cond.false:
1129 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
1130 // CHECK1-NEXT: br label [[COND_END]]
1131 // CHECK1: cond.end:
1132 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
1133 // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
1134 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
1135 // CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
1136 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
1137 // CHECK1: omp.inner.for.cond:
1138 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36:![0-9]+]]
1139 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP36]]
1140 // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
1141 // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1142 // CHECK1: omp.inner.for.body:
1143 // CHECK1-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 1), !llvm.access.group [[ACC_GRP36]]
1144 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP36]]
1145 // CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
1146 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP36]]
1147 // CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
1148 // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP36]]
1149 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
1150 // CHECK1: omp.inner.for.inc:
1151 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36]]
1152 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP36]]
1153 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
1154 // CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36]]
1155 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP37:![0-9]+]]
1156 // CHECK1: omp.inner.for.end:
1157 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
1158 // CHECK1: omp.loop.exit:
1159 // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
1160 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
1161 // CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
1162 // CHECK1-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1163 // CHECK1: .omp.final.then:
1164 // CHECK1-NEXT: store i32 100, ptr [[I]], align 4
1165 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
1166 // CHECK1: .omp.final.done:
1167 // CHECK1-NEXT: ret void
1170 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined.omp_outlined
1171 // CHECK1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
1172 // CHECK1-NEXT: entry:
1173 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1174 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1175 // CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
1176 // CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
1177 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
1178 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
1179 // CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
1180 // CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
1181 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1182 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1183 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
1184 // CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
1185 // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
1186 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
1187 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
1188 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
1189 // CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
1190 // CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
1191 // CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
1192 // CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
1193 // CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
1194 // CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
1195 // CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
1196 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
1197 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
1198 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
1199 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
1200 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
1201 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
1202 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
1203 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1204 // CHECK1: cond.true:
1205 // CHECK1-NEXT: br label [[COND_END:%.*]]
1206 // CHECK1: cond.false:
1207 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
1208 // CHECK1-NEXT: br label [[COND_END]]
1209 // CHECK1: cond.end:
1210 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
1211 // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
1212 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
1213 // CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
1214 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
1215 // CHECK1: omp.inner.for.cond:
1216 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39:![0-9]+]]
1217 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP39]]
1218 // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
1219 // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1220 // CHECK1: omp.inner.for.body:
1221 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39]]
1222 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
1223 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1224 // CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP39]]
1225 // CHECK1-NEXT: invoke void @_Z3foov()
1226 // CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP39]]
1227 // CHECK1: invoke.cont:
1228 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
1229 // CHECK1: omp.body.continue:
1230 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
1231 // CHECK1: omp.inner.for.inc:
1232 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39]]
1233 // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
1234 // CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39]]
1235 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP40:![0-9]+]]
1236 // CHECK1: omp.inner.for.end:
1237 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
1238 // CHECK1: omp.loop.exit:
1239 // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
1240 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
1241 // CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
1242 // CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1243 // CHECK1: .omp.final.then:
1244 // CHECK1-NEXT: store i32 100, ptr [[I]], align 4
1245 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
1246 // CHECK1: .omp.final.done:
1247 // CHECK1-NEXT: ret void
1248 // CHECK1: terminate.lpad:
1249 // CHECK1-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 }
1250 // CHECK1-NEXT: catch ptr null
1251 // CHECK1-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0
1252 // CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR7]], !llvm.access.group [[ACC_GRP39]]
1253 // CHECK1-NEXT: unreachable
1256 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57
1257 // CHECK1-SAME: () #[[ATTR2]] {
1258 // CHECK1-NEXT: entry:
1259 // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined)
1260 // CHECK1-NEXT: ret void
1263 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined
1264 // CHECK1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
1265 // CHECK1-NEXT: entry:
1266 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1267 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1268 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
1269 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
1270 // CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
1271 // CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
1272 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1273 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1274 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
1275 // CHECK1-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 8
1276 // CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
1277 // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
1278 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
1279 // CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
1280 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
1281 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
1282 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
1283 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
1284 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
1285 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
1286 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99
1287 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1288 // CHECK1: cond.true:
1289 // CHECK1-NEXT: br label [[COND_END:%.*]]
1290 // CHECK1: cond.false:
1291 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
1292 // CHECK1-NEXT: br label [[COND_END]]
1293 // CHECK1: cond.end:
1294 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
1295 // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
1296 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
1297 // CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
1298 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
1299 // CHECK1: omp.inner.for.cond:
1300 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP42:![0-9]+]]
1301 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP42]]
1302 // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
1303 // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1304 // CHECK1: omp.inner.for.body:
1305 // CHECK1-NEXT: invoke void @_ZN1SC1El(ptr nonnull align 8 dereferenceable(24) [[REF_TMP]], i64 23)
1306 // CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP42]]
1307 // CHECK1: invoke.cont:
1308 // CHECK1-NEXT: [[CALL:%.*]] = invoke signext i8 @_ZN1ScvcEv(ptr nonnull align 8 dereferenceable(24) [[REF_TMP]])
1309 // CHECK1-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[TERMINATE_LPAD]], !llvm.access.group [[ACC_GRP42]]
1310 // CHECK1: invoke.cont2:
1311 // CHECK1-NEXT: [[TMP7:%.*]] = sext i8 [[CALL]] to i32
1312 // CHECK1-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 [[TMP7]]), !llvm.access.group [[ACC_GRP42]]
1313 // CHECK1-NEXT: call void @_ZN1SD1Ev(ptr nonnull align 8 dereferenceable(24) [[REF_TMP]]) #[[ATTR3]], !llvm.access.group [[ACC_GRP42]]
1314 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP42]]
1315 // CHECK1-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
1316 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP42]]
1317 // CHECK1-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
1318 // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]]), !llvm.access.group [[ACC_GRP42]]
1319 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
1320 // CHECK1: omp.inner.for.inc:
1321 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP42]]
1322 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP42]]
1323 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
1324 // CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP42]]
1325 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP43:![0-9]+]]
1326 // CHECK1: omp.inner.for.end:
1327 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
1328 // CHECK1: omp.loop.exit:
1329 // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
1330 // CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
1331 // CHECK1-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
1332 // CHECK1-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1333 // CHECK1: .omp.final.then:
1334 // CHECK1-NEXT: store i32 100, ptr [[I]], align 4
1335 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
1336 // CHECK1: .omp.final.done:
1337 // CHECK1-NEXT: ret void
1338 // CHECK1: terminate.lpad:
1339 // CHECK1-NEXT: [[TMP16:%.*]] = landingpad { ptr, i32 }
1340 // CHECK1-NEXT: catch ptr null
1341 // CHECK1-NEXT: [[TMP17:%.*]] = extractvalue { ptr, i32 } [[TMP16]], 0
1342 // CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP17]]) #[[ATTR7]], !llvm.access.group [[ACC_GRP42]]
1343 // CHECK1-NEXT: unreachable
1346 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined.omp_outlined
1347 // CHECK1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
1348 // CHECK1-NEXT: entry:
1349 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1350 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1351 // CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
1352 // CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
1353 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
1354 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
1355 // CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
1356 // CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
1357 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1358 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1359 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
1360 // CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
1361 // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
1362 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
1363 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
1364 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
1365 // CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
1366 // CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
1367 // CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
1368 // CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
1369 // CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
1370 // CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
1371 // CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
1372 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
1373 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
1374 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
1375 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
1376 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
1377 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
1378 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
1379 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1380 // CHECK1: cond.true:
1381 // CHECK1-NEXT: br label [[COND_END:%.*]]
1382 // CHECK1: cond.false:
1383 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
1384 // CHECK1-NEXT: br label [[COND_END]]
1385 // CHECK1: cond.end:
1386 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
1387 // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
1388 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
1389 // CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
1390 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
1391 // CHECK1: omp.inner.for.cond:
1392 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP45:![0-9]+]]
1393 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP45]]
1394 // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
1395 // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1396 // CHECK1: omp.inner.for.body:
1397 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP45]]
1398 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
1399 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1400 // CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP45]]
1401 // CHECK1-NEXT: invoke void @_Z3foov()
1402 // CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP45]]
1403 // CHECK1: invoke.cont:
1404 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
1405 // CHECK1: omp.body.continue:
1406 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
1407 // CHECK1: omp.inner.for.inc:
1408 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP45]]
1409 // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
1410 // CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP45]]
1411 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP46:![0-9]+]]
1412 // CHECK1: omp.inner.for.end:
1413 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
1414 // CHECK1: omp.loop.exit:
1415 // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
1416 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
1417 // CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
1418 // CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1419 // CHECK1: .omp.final.then:
1420 // CHECK1-NEXT: store i32 100, ptr [[I]], align 4
1421 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
1422 // CHECK1: .omp.final.done:
1423 // CHECK1-NEXT: ret void
1424 // CHECK1: terminate.lpad:
1425 // CHECK1-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 }
1426 // CHECK1-NEXT: catch ptr null
1427 // CHECK1-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0
1428 // CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR7]], !llvm.access.group [[ACC_GRP45]]
1429 // CHECK1-NEXT: unreachable
1432 // CHECK3-LABEL: define {{[^@]+}}@main
1433 // CHECK3-SAME: () #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 {
1434 // CHECK3-NEXT: entry:
1435 // CHECK3-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
1436 // CHECK3-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8
1437 // CHECK3-NEXT: [[A:%.*]] = alloca i8, align 1
1438 // CHECK3-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8
1439 // CHECK3-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
1440 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
1441 // CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
1442 // CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
1443 // CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
1444 // CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
1445 // CHECK3-NEXT: [[_TMP3:%.*]] = alloca i32, align 4
1446 // CHECK3-NEXT: [[DOTOMP_LB4:%.*]] = alloca i32, align 4
1447 // CHECK3-NEXT: [[DOTOMP_UB5:%.*]] = alloca i32, align 4
1448 // CHECK3-NEXT: [[DOTOMP_IV6:%.*]] = alloca i32, align 4
1449 // CHECK3-NEXT: [[I7:%.*]] = alloca i32, align 4
1450 // CHECK3-NEXT: store i32 0, ptr [[RETVAL]], align 4
1451 // CHECK3-NEXT: call void @_ZN1SC1El(ptr nonnull align 8 dereferenceable(24) [[S]], i64 0)
1452 // CHECK3-NEXT: [[CALL:%.*]] = invoke signext i8 @_ZN1ScvcEv(ptr nonnull align 8 dereferenceable(24) [[S]])
1453 // CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
1454 // CHECK3: invoke.cont:
1455 // CHECK3-NEXT: store i8 [[CALL]], ptr [[A]], align 1
1456 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
1457 // CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
1458 // CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
1459 // CHECK3-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4
1460 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
1461 // CHECK3: omp.inner.for.cond:
1462 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2:![0-9]+]]
1463 // CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP2]]
1464 // CHECK3-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
1465 // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1466 // CHECK3: omp.inner.for.body:
1467 // CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]]
1468 // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
1469 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1470 // CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP2]]
1471 // CHECK3-NEXT: invoke void @_Z3foov()
1472 // CHECK3-NEXT: to label [[INVOKE_CONT1:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP2]]
1473 // CHECK3: invoke.cont1:
1474 // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
1475 // CHECK3: omp.body.continue:
1476 // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
1477 // CHECK3: omp.inner.for.inc:
1478 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]]
1479 // CHECK3-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP4]], 1
1480 // CHECK3-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]]
1481 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
1482 // CHECK3: lpad:
1483 // CHECK3-NEXT: [[TMP5:%.*]] = landingpad { ptr, i32 }
1484 // CHECK3-NEXT: cleanup
1485 // CHECK3-NEXT: [[TMP6:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 0
1486 // CHECK3-NEXT: store ptr [[TMP6]], ptr [[EXN_SLOT]], align 8
1487 // CHECK3-NEXT: [[TMP7:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 1
1488 // CHECK3-NEXT: store i32 [[TMP7]], ptr [[EHSELECTOR_SLOT]], align 4
1489 // CHECK3-NEXT: call void @_ZN1SD1Ev(ptr nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR5:[0-9]+]]
1490 // CHECK3-NEXT: br label [[EH_RESUME:%.*]]
1491 // CHECK3: omp.inner.for.end:
1492 // CHECK3-NEXT: store i32 100, ptr [[I]], align 4
1493 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB4]], align 4
1494 // CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB5]], align 4
1495 // CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB4]], align 4
1496 // CHECK3-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV6]], align 4
1497 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND8:%.*]]
1498 // CHECK3: omp.inner.for.cond8:
1499 // CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6:![0-9]+]]
1500 // CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB5]], align 4, !llvm.access.group [[ACC_GRP6]]
1501 // CHECK3-NEXT: [[CMP9:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
1502 // CHECK3-NEXT: br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY10:%.*]], label [[OMP_INNER_FOR_END17:%.*]]
1503 // CHECK3: omp.inner.for.body10:
1504 // CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6]]
1505 // CHECK3-NEXT: [[MUL11:%.*]] = mul nsw i32 [[TMP11]], 1
1506 // CHECK3-NEXT: [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
1507 // CHECK3-NEXT: store i32 [[ADD12]], ptr [[I7]], align 4, !llvm.access.group [[ACC_GRP6]]
1508 // CHECK3-NEXT: invoke void @_Z3foov()
1509 // CHECK3-NEXT: to label [[INVOKE_CONT13:%.*]] unwind label [[TERMINATE_LPAD]], !llvm.access.group [[ACC_GRP6]]
1510 // CHECK3: invoke.cont13:
1511 // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE14:%.*]]
1512 // CHECK3: omp.body.continue14:
1513 // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC15:%.*]]
1514 // CHECK3: omp.inner.for.inc15:
1515 // CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6]]
1516 // CHECK3-NEXT: [[ADD16:%.*]] = add nsw i32 [[TMP12]], 1
1517 // CHECK3-NEXT: store i32 [[ADD16]], ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6]]
1518 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND8]], !llvm.loop [[LOOP7:![0-9]+]]
1519 // CHECK3: omp.inner.for.end17:
1520 // CHECK3-NEXT: store i32 100, ptr [[I7]], align 4
1521 // CHECK3-NEXT: [[TMP13:%.*]] = load i8, ptr [[A]], align 1
1522 // CHECK3-NEXT: [[CONV:%.*]] = sext i8 [[TMP13]] to i32
1523 // CHECK3-NEXT: [[CALL19:%.*]] = invoke i32 @_Z5tmainIcLi5EEiv()
1524 // CHECK3-NEXT: to label [[INVOKE_CONT18:%.*]] unwind label [[LPAD]]
1525 // CHECK3: invoke.cont18:
1526 // CHECK3-NEXT: [[ADD20:%.*]] = add nsw i32 [[CONV]], [[CALL19]]
1527 // CHECK3-NEXT: [[CALL22:%.*]] = invoke i32 @_Z5tmainI1SLi1EEiv()
1528 // CHECK3-NEXT: to label [[INVOKE_CONT21:%.*]] unwind label [[LPAD]]
1529 // CHECK3: invoke.cont21:
1530 // CHECK3-NEXT: [[ADD23:%.*]] = add nsw i32 [[ADD20]], [[CALL22]]
1531 // CHECK3-NEXT: store i32 [[ADD23]], ptr [[RETVAL]], align 4
1532 // CHECK3-NEXT: call void @_ZN1SD1Ev(ptr nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR5]]
1533 // CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[RETVAL]], align 4
1534 // CHECK3-NEXT: ret i32 [[TMP14]]
1535 // CHECK3: eh.resume:
1536 // CHECK3-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8
1537 // CHECK3-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4
1538 // CHECK3-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0
1539 // CHECK3-NEXT: [[LPAD_VAL24:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1
1540 // CHECK3-NEXT: resume { ptr, i32 } [[LPAD_VAL24]]
1541 // CHECK3: terminate.lpad:
1542 // CHECK3-NEXT: [[TMP15:%.*]] = landingpad { ptr, i32 }
1543 // CHECK3-NEXT: catch ptr null
1544 // CHECK3-NEXT: [[TMP16:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 0
1545 // CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP16]]) #[[ATTR6:[0-9]+]], !llvm.access.group [[ACC_GRP2]]
1546 // CHECK3-NEXT: unreachable
1549 // CHECK3-LABEL: define {{[^@]+}}@_ZN1SC1El
1550 // CHECK3-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 {
1551 // CHECK3-NEXT: entry:
1552 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
1553 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
1554 // CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
1555 // CHECK3-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
1556 // CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
1557 // CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
1558 // CHECK3-NEXT: call void @_ZN1SC2El(ptr nonnull align 8 dereferenceable(24) [[THIS1]], i64 [[TMP0]])
1559 // CHECK3-NEXT: ret void
1562 // CHECK3-LABEL: define {{[^@]+}}@_ZN1ScvcEv
1563 // CHECK3-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]]) #[[ATTR1]] comdat align 2 {
1564 // CHECK3-NEXT: entry:
1565 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
1566 // CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
1567 // CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
1568 // CHECK3-NEXT: call void @_Z8mayThrowv()
1569 // CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0
1570 // CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[A]], align 8
1571 // CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i8
1572 // CHECK3-NEXT: ret i8 [[CONV]]
1575 // CHECK3-LABEL: define {{[^@]+}}@__clang_call_terminate
1576 // CHECK3-SAME: (ptr [[TMP0:%.*]]) #[[ATTR3:[0-9]+]] comdat {
1577 // CHECK3-NEXT: [[TMP2:%.*]] = call ptr @__cxa_begin_catch(ptr [[TMP0]]) #[[ATTR5]]
1578 // CHECK3-NEXT: call void @_ZSt9terminatev() #[[ATTR6]]
1579 // CHECK3-NEXT: unreachable
1582 // CHECK3-LABEL: define {{[^@]+}}@_Z5tmainIcLi5EEiv
1583 // CHECK3-SAME: () #[[ATTR4:[0-9]+]] comdat personality ptr @__gxx_personality_v0 {
1584 // CHECK3-NEXT: entry:
1585 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
1586 // CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
1587 // CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
1588 // CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
1589 // CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
1590 // CHECK3-NEXT: [[_TMP2:%.*]] = alloca i32, align 4
1591 // CHECK3-NEXT: [[DOTOMP_LB3:%.*]] = alloca i32, align 4
1592 // CHECK3-NEXT: [[DOTOMP_UB4:%.*]] = alloca i32, align 4
1593 // CHECK3-NEXT: [[DOTOMP_IV5:%.*]] = alloca i32, align 4
1594 // CHECK3-NEXT: [[I6:%.*]] = alloca i32, align 4
1595 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
1596 // CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
1597 // CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
1598 // CHECK3-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4
1599 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
1600 // CHECK3: omp.inner.for.cond:
1601 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9:![0-9]+]]
1602 // CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP9]]
1603 // CHECK3-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
1604 // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1605 // CHECK3: omp.inner.for.body:
1606 // CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]]
1607 // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
1608 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1609 // CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]]
1610 // CHECK3-NEXT: invoke void @_Z3foov()
1611 // CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP9]]
1612 // CHECK3: invoke.cont:
1613 // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
1614 // CHECK3: omp.body.continue:
1615 // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
1616 // CHECK3: omp.inner.for.inc:
1617 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]]
1618 // CHECK3-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1
1619 // CHECK3-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]]
1620 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]]
1621 // CHECK3: omp.inner.for.end:
1622 // CHECK3-NEXT: store i32 100, ptr [[I]], align 4
1623 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB3]], align 4
1624 // CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB4]], align 4
1625 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB3]], align 4
1626 // CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV5]], align 4
1627 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]]
1628 // CHECK3: omp.inner.for.cond7:
1629 // CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP12:![0-9]+]]
1630 // CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB4]], align 4, !llvm.access.group [[ACC_GRP12]]
1631 // CHECK3-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
1632 // CHECK3-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END16:%.*]]
1633 // CHECK3: omp.inner.for.body9:
1634 // CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP12]]
1635 // CHECK3-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP8]], 1
1636 // CHECK3-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]]
1637 // CHECK3-NEXT: store i32 [[ADD11]], ptr [[I6]], align 4, !llvm.access.group [[ACC_GRP12]]
1638 // CHECK3-NEXT: invoke void @_Z3foov()
1639 // CHECK3-NEXT: to label [[INVOKE_CONT12:%.*]] unwind label [[TERMINATE_LPAD]], !llvm.access.group [[ACC_GRP12]]
1640 // CHECK3: invoke.cont12:
1641 // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE13:%.*]]
1642 // CHECK3: omp.body.continue13:
1643 // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC14:%.*]]
1644 // CHECK3: omp.inner.for.inc14:
1645 // CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP12]]
1646 // CHECK3-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP9]], 1
1647 // CHECK3-NEXT: store i32 [[ADD15]], ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP12]]
1648 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP13:![0-9]+]]
1649 // CHECK3: omp.inner.for.end16:
1650 // CHECK3-NEXT: store i32 100, ptr [[I6]], align 4
1651 // CHECK3-NEXT: ret i32 0
1652 // CHECK3: terminate.lpad:
1653 // CHECK3-NEXT: [[TMP10:%.*]] = landingpad { ptr, i32 }
1654 // CHECK3-NEXT: catch ptr null
1655 // CHECK3-NEXT: [[TMP11:%.*]] = extractvalue { ptr, i32 } [[TMP10]], 0
1656 // CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP11]]) #[[ATTR6]], !llvm.access.group [[ACC_GRP9]]
1657 // CHECK3-NEXT: unreachable
1660 // CHECK3-LABEL: define {{[^@]+}}@_Z5tmainI1SLi1EEiv
1661 // CHECK3-SAME: () #[[ATTR4]] comdat personality ptr @__gxx_personality_v0 {
1662 // CHECK3-NEXT: entry:
1663 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
1664 // CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
1665 // CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
1666 // CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
1667 // CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
1668 // CHECK3-NEXT: [[_TMP2:%.*]] = alloca i32, align 4
1669 // CHECK3-NEXT: [[DOTOMP_LB3:%.*]] = alloca i32, align 4
1670 // CHECK3-NEXT: [[DOTOMP_UB4:%.*]] = alloca i32, align 4
1671 // CHECK3-NEXT: [[DOTOMP_IV5:%.*]] = alloca i32, align 4
1672 // CHECK3-NEXT: [[I6:%.*]] = alloca i32, align 4
1673 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
1674 // CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
1675 // CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
1676 // CHECK3-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4
1677 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
1678 // CHECK3: omp.inner.for.cond:
1679 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15:![0-9]+]]
1680 // CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP15]]
1681 // CHECK3-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
1682 // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1683 // CHECK3: omp.inner.for.body:
1684 // CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]]
1685 // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
1686 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1687 // CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP15]]
1688 // CHECK3-NEXT: invoke void @_Z3foov()
1689 // CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP15]]
1690 // CHECK3: invoke.cont:
1691 // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
1692 // CHECK3: omp.body.continue:
1693 // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
1694 // CHECK3: omp.inner.for.inc:
1695 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]]
1696 // CHECK3-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1
1697 // CHECK3-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]]
1698 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]]
1699 // CHECK3: omp.inner.for.end:
1700 // CHECK3-NEXT: store i32 100, ptr [[I]], align 4
1701 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB3]], align 4
1702 // CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB4]], align 4
1703 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB3]], align 4
1704 // CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV5]], align 4
1705 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]]
1706 // CHECK3: omp.inner.for.cond7:
1707 // CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP18:![0-9]+]]
1708 // CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB4]], align 4, !llvm.access.group [[ACC_GRP18]]
1709 // CHECK3-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
1710 // CHECK3-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END16:%.*]]
1711 // CHECK3: omp.inner.for.body9:
1712 // CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP18]]
1713 // CHECK3-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP8]], 1
1714 // CHECK3-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]]
1715 // CHECK3-NEXT: store i32 [[ADD11]], ptr [[I6]], align 4, !llvm.access.group [[ACC_GRP18]]
1716 // CHECK3-NEXT: invoke void @_Z3foov()
1717 // CHECK3-NEXT: to label [[INVOKE_CONT12:%.*]] unwind label [[TERMINATE_LPAD]], !llvm.access.group [[ACC_GRP18]]
1718 // CHECK3: invoke.cont12:
1719 // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE13:%.*]]
1720 // CHECK3: omp.body.continue13:
1721 // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC14:%.*]]
1722 // CHECK3: omp.inner.for.inc14:
1723 // CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP18]]
1724 // CHECK3-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP9]], 1
1725 // CHECK3-NEXT: store i32 [[ADD15]], ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP18]]
1726 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP19:![0-9]+]]
1727 // CHECK3: omp.inner.for.end16:
1728 // CHECK3-NEXT: store i32 100, ptr [[I6]], align 4
1729 // CHECK3-NEXT: ret i32 0
1730 // CHECK3: terminate.lpad:
1731 // CHECK3-NEXT: [[TMP10:%.*]] = landingpad { ptr, i32 }
1732 // CHECK3-NEXT: catch ptr null
1733 // CHECK3-NEXT: [[TMP11:%.*]] = extractvalue { ptr, i32 } [[TMP10]], 0
1734 // CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP11]]) #[[ATTR6]], !llvm.access.group [[ACC_GRP15]]
1735 // CHECK3-NEXT: unreachable
1738 // CHECK3-LABEL: define {{[^@]+}}@_ZN1SD1Ev
1739 // CHECK3-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 {
1740 // CHECK3-NEXT: entry:
1741 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
1742 // CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
1743 // CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
1744 // CHECK3-NEXT: call void @_ZN1SD2Ev(ptr nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR5]]
1745 // CHECK3-NEXT: ret void
1748 // CHECK3-LABEL: define {{[^@]+}}@_ZN1SC2El
1749 // CHECK3-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 [[A:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 {
1750 // CHECK3-NEXT: entry:
1751 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
1752 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
1753 // CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
1754 // CHECK3-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
1755 // CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
1756 // CHECK3-NEXT: [[A2:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0
1757 // CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
1758 // CHECK3-NEXT: store i64 [[TMP0]], ptr [[A2]], align 8
1759 // CHECK3-NEXT: ret void
1762 // CHECK3-LABEL: define {{[^@]+}}@_ZN1SD2Ev
1763 // CHECK3-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 {
1764 // CHECK3-NEXT: entry:
1765 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
1766 // CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
1767 // CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
1768 // CHECK3-NEXT: ret void
1771 // CHECK5-LABEL: define {{[^@]+}}@main
1772 // CHECK5-SAME: () #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 {
1773 // CHECK5-NEXT: entry:
1774 // CHECK5-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
1775 // CHECK5-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8
1776 // CHECK5-NEXT: [[A:%.*]] = alloca i8, align 1
1777 // CHECK5-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8
1778 // CHECK5-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
1779 // CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
1780 // CHECK5-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
1781 // CHECK5-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
1782 // CHECK5-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8
1783 // CHECK5-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8
1784 // CHECK5-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8
1785 // CHECK5-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
1786 // CHECK5-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
1787 // CHECK5-NEXT: store i32 0, ptr [[RETVAL]], align 4
1788 // CHECK5-NEXT: call void @_ZN1SC1El(ptr nonnull align 8 dereferenceable(24) [[S]], i64 0)
1789 // CHECK5-NEXT: [[CALL:%.*]] = invoke signext i8 @_ZN1ScvcEv(ptr nonnull align 8 dereferenceable(24) [[S]])
1790 // CHECK5-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
1791 // CHECK5: invoke.cont:
1792 // CHECK5-NEXT: store i8 [[CALL]], ptr [[A]], align 1
1793 // CHECK5-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
1794 // CHECK5-NEXT: store i32 3, ptr [[TMP0]], align 4
1795 // CHECK5-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
1796 // CHECK5-NEXT: store i32 0, ptr [[TMP1]], align 4
1797 // CHECK5-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
1798 // CHECK5-NEXT: store ptr null, ptr [[TMP2]], align 8
1799 // CHECK5-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
1800 // CHECK5-NEXT: store ptr null, ptr [[TMP3]], align 8
1801 // CHECK5-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
1802 // CHECK5-NEXT: store ptr null, ptr [[TMP4]], align 8
1803 // CHECK5-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
1804 // CHECK5-NEXT: store ptr null, ptr [[TMP5]], align 8
1805 // CHECK5-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
1806 // CHECK5-NEXT: store ptr null, ptr [[TMP6]], align 8
1807 // CHECK5-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
1808 // CHECK5-NEXT: store ptr null, ptr [[TMP7]], align 8
1809 // CHECK5-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
1810 // CHECK5-NEXT: store i64 100, ptr [[TMP8]], align 8
1811 // CHECK5-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
1812 // CHECK5-NEXT: store i64 0, ptr [[TMP9]], align 8
1813 // CHECK5-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
1814 // CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4
1815 // CHECK5-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
1816 // CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4
1817 // CHECK5-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
1818 // CHECK5-NEXT: store i32 0, ptr [[TMP12]], align 4
1819 // CHECK5-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.region_id, ptr [[KERNEL_ARGS]])
1820 // CHECK5-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
1821 // CHECK5-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
1822 // CHECK5: omp_offload.failed:
1823 // CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68() #[[ATTR3:[0-9]+]]
1824 // CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT]]
1825 // CHECK5: lpad:
1826 // CHECK5-NEXT: [[TMP15:%.*]] = landingpad { ptr, i32 }
1827 // CHECK5-NEXT: cleanup
1828 // CHECK5-NEXT: [[TMP16:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 0
1829 // CHECK5-NEXT: store ptr [[TMP16]], ptr [[EXN_SLOT]], align 8
1830 // CHECK5-NEXT: [[TMP17:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 1
1831 // CHECK5-NEXT: store i32 [[TMP17]], ptr [[EHSELECTOR_SLOT]], align 4
1832 // CHECK5-NEXT: call void @_ZN1SD1Ev(ptr nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]]
1833 // CHECK5-NEXT: br label [[EH_RESUME:%.*]]
1834 // CHECK5: omp_offload.cont:
1835 // CHECK5-NEXT: [[TMP18:%.*]] = load i8, ptr [[A]], align 1
1836 // CHECK5-NEXT: store i8 [[TMP18]], ptr [[A_CASTED]], align 1
1837 // CHECK5-NEXT: [[TMP19:%.*]] = load i64, ptr [[A_CASTED]], align 8
1838 // CHECK5-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1839 // CHECK5-NEXT: store i64 [[TMP19]], ptr [[TMP20]], align 8
1840 // CHECK5-NEXT: [[TMP21:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1841 // CHECK5-NEXT: store i64 [[TMP19]], ptr [[TMP21]], align 8
1842 // CHECK5-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
1843 // CHECK5-NEXT: store ptr null, ptr [[TMP22]], align 8
1844 // CHECK5-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
1845 // CHECK5-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
1846 // CHECK5-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0
1847 // CHECK5-NEXT: store i32 3, ptr [[TMP25]], align 4
1848 // CHECK5-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1
1849 // CHECK5-NEXT: store i32 1, ptr [[TMP26]], align 4
1850 // CHECK5-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2
1851 // CHECK5-NEXT: store ptr [[TMP23]], ptr [[TMP27]], align 8
1852 // CHECK5-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3
1853 // CHECK5-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 8
1854 // CHECK5-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4
1855 // CHECK5-NEXT: store ptr @.offload_sizes, ptr [[TMP29]], align 8
1856 // CHECK5-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5
1857 // CHECK5-NEXT: store ptr @.offload_maptypes, ptr [[TMP30]], align 8
1858 // CHECK5-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6
1859 // CHECK5-NEXT: store ptr null, ptr [[TMP31]], align 8
1860 // CHECK5-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7
1861 // CHECK5-NEXT: store ptr null, ptr [[TMP32]], align 8
1862 // CHECK5-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8
1863 // CHECK5-NEXT: store i64 100, ptr [[TMP33]], align 8
1864 // CHECK5-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9
1865 // CHECK5-NEXT: store i64 0, ptr [[TMP34]], align 8
1866 // CHECK5-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10
1867 // CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP35]], align 4
1868 // CHECK5-NEXT: [[TMP36:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11
1869 // CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP36]], align 4
1870 // CHECK5-NEXT: [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12
1871 // CHECK5-NEXT: store i32 0, ptr [[TMP37]], align 4
1872 // CHECK5-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.region_id, ptr [[KERNEL_ARGS2]])
1873 // CHECK5-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
1874 // CHECK5-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]]
1875 // CHECK5: omp_offload.failed3:
1876 // CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74(i64 [[TMP19]]) #[[ATTR3]]
1877 // CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT4]]
1878 // CHECK5: omp_offload.cont4:
1879 // CHECK5-NEXT: [[TMP40:%.*]] = load i8, ptr [[A]], align 1
1880 // CHECK5-NEXT: [[CONV:%.*]] = sext i8 [[TMP40]] to i32
1881 // CHECK5-NEXT: [[CALL6:%.*]] = invoke i32 @_Z5tmainIcLi5EEiv()
1882 // CHECK5-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[LPAD]]
1883 // CHECK5: invoke.cont5:
1884 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CALL6]]
1885 // CHECK5-NEXT: [[CALL8:%.*]] = invoke i32 @_Z5tmainI1SLi1EEiv()
1886 // CHECK5-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD]]
1887 // CHECK5: invoke.cont7:
1888 // CHECK5-NEXT: [[ADD9:%.*]] = add nsw i32 [[ADD]], [[CALL8]]
1889 // CHECK5-NEXT: store i32 [[ADD9]], ptr [[RETVAL]], align 4
1890 // CHECK5-NEXT: call void @_ZN1SD1Ev(ptr nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]]
1891 // CHECK5-NEXT: [[TMP41:%.*]] = load i32, ptr [[RETVAL]], align 4
1892 // CHECK5-NEXT: ret i32 [[TMP41]]
1893 // CHECK5: eh.resume:
1894 // CHECK5-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8
1895 // CHECK5-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4
1896 // CHECK5-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0
1897 // CHECK5-NEXT: [[LPAD_VAL10:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1
1898 // CHECK5-NEXT: resume { ptr, i32 } [[LPAD_VAL10]]
1901 // CHECK5-LABEL: define {{[^@]+}}@_ZN1SC1El
1902 // CHECK5-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 {
1903 // CHECK5-NEXT: entry:
1904 // CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
1905 // CHECK5-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
1906 // CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
1907 // CHECK5-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
1908 // CHECK5-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
1909 // CHECK5-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
1910 // CHECK5-NEXT: call void @_ZN1SC2El(ptr nonnull align 8 dereferenceable(24) [[THIS1]], i64 [[TMP0]])
1911 // CHECK5-NEXT: ret void
1914 // CHECK5-LABEL: define {{[^@]+}}@_ZN1ScvcEv
1915 // CHECK5-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]]) #[[ATTR1]] comdat align 2 {
1916 // CHECK5-NEXT: entry:
1917 // CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
1918 // CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
1919 // CHECK5-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
1920 // CHECK5-NEXT: call void @_Z8mayThrowv()
1921 // CHECK5-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0
1922 // CHECK5-NEXT: [[TMP0:%.*]] = load i64, ptr [[A]], align 8
1923 // CHECK5-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i8
1924 // CHECK5-NEXT: ret i8 [[CONV]]
1927 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68
1928 // CHECK5-SAME: () #[[ATTR2:[0-9]+]] {
1929 // CHECK5-NEXT: entry:
1930 // CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined)
1931 // CHECK5-NEXT: ret void
1934 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined
1935 // CHECK5-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
1936 // CHECK5-NEXT: entry:
1937 // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
1938 // CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
1939 // CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
1940 // CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
1941 // CHECK5-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
1942 // CHECK5-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
1943 // CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
1944 // CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
1945 // CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
1946 // CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
1947 // CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
1948 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
1949 // CHECK5-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
1950 // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
1951 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
1952 // CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
1953 // CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
1954 // CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
1955 // CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
1956 // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99
1957 // CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1958 // CHECK5: cond.true:
1959 // CHECK5-NEXT: br label [[COND_END:%.*]]
1960 // CHECK5: cond.false:
1961 // CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
1962 // CHECK5-NEXT: br label [[COND_END]]
1963 // CHECK5: cond.end:
1964 // CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
1965 // CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
1966 // CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
1967 // CHECK5-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
1968 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
1969 // CHECK5: omp.inner.for.cond:
1970 // CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9:![0-9]+]]
1971 // CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP9]]
1972 // CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
1973 // CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1974 // CHECK5: omp.inner.for.body:
1975 // CHECK5-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 2), !llvm.access.group [[ACC_GRP9]]
1976 // CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP9]]
1977 // CHECK5-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
1978 // CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP9]]
1979 // CHECK5-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
1980 // CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP9]]
1981 // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
1982 // CHECK5: omp.inner.for.inc:
1983 // CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]]
1984 // CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP9]]
1985 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
1986 // CHECK5-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]]
1987 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]]
1988 // CHECK5: omp.inner.for.end:
1989 // CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
1990 // CHECK5: omp.loop.exit:
1991 // CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
1992 // CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
1993 // CHECK5-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
1994 // CHECK5-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
1995 // CHECK5: .omp.final.then:
1996 // CHECK5-NEXT: store i32 100, ptr [[I]], align 4
1997 // CHECK5-NEXT: br label [[DOTOMP_FINAL_DONE]]
1998 // CHECK5: .omp.final.done:
1999 // CHECK5-NEXT: ret void
2002 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined.omp_outlined
2003 // CHECK5-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
2004 // CHECK5-NEXT: entry:
2005 // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2006 // CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2007 // CHECK5-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
2008 // CHECK5-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
2009 // CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
2010 // CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
2011 // CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
2012 // CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
2013 // CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2014 // CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2015 // CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
2016 // CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
2017 // CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
2018 // CHECK5-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
2019 // CHECK5-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
2020 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
2021 // CHECK5-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
2022 // CHECK5-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
2023 // CHECK5-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
2024 // CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
2025 // CHECK5-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
2026 // CHECK5-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
2027 // CHECK5-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
2028 // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
2029 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
2030 // CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
2031 // CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
2032 // CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
2033 // CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
2034 // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
2035 // CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2036 // CHECK5: cond.true:
2037 // CHECK5-NEXT: br label [[COND_END:%.*]]
2038 // CHECK5: cond.false:
2039 // CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
2040 // CHECK5-NEXT: br label [[COND_END]]
2041 // CHECK5: cond.end:
2042 // CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
2043 // CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
2044 // CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
2045 // CHECK5-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
2046 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
2047 // CHECK5: omp.inner.for.cond:
2048 // CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]]
2049 // CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP13]]
2050 // CHECK5-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
2051 // CHECK5-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2052 // CHECK5: omp.inner.for.body:
2053 // CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]]
2054 // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
2055 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2056 // CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP13]]
2057 // CHECK5-NEXT: invoke void @_Z3foov()
2058 // CHECK5-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP13]]
2059 // CHECK5: invoke.cont:
2060 // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
2061 // CHECK5: omp.body.continue:
2062 // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
2063 // CHECK5: omp.inner.for.inc:
2064 // CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]]
2065 // CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
2066 // CHECK5-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]]
2067 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]]
2068 // CHECK5: omp.inner.for.end:
2069 // CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
2070 // CHECK5: omp.loop.exit:
2071 // CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
2072 // CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
2073 // CHECK5-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
2074 // CHECK5-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2075 // CHECK5: .omp.final.then:
2076 // CHECK5-NEXT: store i32 100, ptr [[I]], align 4
2077 // CHECK5-NEXT: br label [[DOTOMP_FINAL_DONE]]
2078 // CHECK5: .omp.final.done:
2079 // CHECK5-NEXT: ret void
2080 // CHECK5: terminate.lpad:
2081 // CHECK5-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 }
2082 // CHECK5-NEXT: catch ptr null
2083 // CHECK5-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0
2084 // CHECK5-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR7:[0-9]+]], !llvm.access.group [[ACC_GRP13]]
2085 // CHECK5-NEXT: unreachable
2088 // CHECK5-LABEL: define {{[^@]+}}@__clang_call_terminate
2089 // CHECK5-SAME: (ptr [[TMP0:%.*]]) #[[ATTR5:[0-9]+]] comdat {
2090 // CHECK5-NEXT: [[TMP2:%.*]] = call ptr @__cxa_begin_catch(ptr [[TMP0]]) #[[ATTR3]]
2091 // CHECK5-NEXT: call void @_ZSt9terminatev() #[[ATTR7]]
2092 // CHECK5-NEXT: unreachable
2095 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74
2096 // CHECK5-SAME: (i64 [[A:%.*]]) #[[ATTR2]] {
2097 // CHECK5-NEXT: entry:
2098 // CHECK5-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
2099 // CHECK5-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
2100 // CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined, ptr [[A_ADDR]])
2101 // CHECK5-NEXT: ret void
2104 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined
2105 // CHECK5-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], ptr nonnull align 1 dereferenceable(1) [[A:%.*]]) #[[ATTR2]] {
2106 // CHECK5-NEXT: entry:
2107 // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2108 // CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2109 // CHECK5-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
2110 // CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
2111 // CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
2112 // CHECK5-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
2113 // CHECK5-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
2114 // CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2115 // CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2116 // CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
2117 // CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
2118 // CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
2119 // CHECK5-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
2120 // CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
2121 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
2122 // CHECK5-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
2123 // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
2124 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
2125 // CHECK5-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
2126 // CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
2127 // CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
2128 // CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
2129 // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99
2130 // CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2131 // CHECK5: cond.true:
2132 // CHECK5-NEXT: br label [[COND_END:%.*]]
2133 // CHECK5: cond.false:
2134 // CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
2135 // CHECK5-NEXT: br label [[COND_END]]
2136 // CHECK5: cond.end:
2137 // CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
2138 // CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
2139 // CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
2140 // CHECK5-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
2141 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
2142 // CHECK5: omp.inner.for.cond:
2143 // CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18:![0-9]+]]
2144 // CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP18]]
2145 // CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
2146 // CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2147 // CHECK5: omp.inner.for.body:
2148 // CHECK5-NEXT: [[TMP8:%.*]] = load i8, ptr [[TMP0]], align 1, !llvm.access.group [[ACC_GRP18]]
2149 // CHECK5-NEXT: [[TMP9:%.*]] = sext i8 [[TMP8]] to i32
2150 // CHECK5-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP2]], i32 [[TMP9]]), !llvm.access.group [[ACC_GRP18]]
2151 // CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP18]]
2152 // CHECK5-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
2153 // CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP18]]
2154 // CHECK5-NEXT: [[TMP13:%.*]] = zext i32 [[TMP12]] to i64
2155 // CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined.omp_outlined, i64 [[TMP11]], i64 [[TMP13]]), !llvm.access.group [[ACC_GRP18]]
2156 // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
2157 // CHECK5: omp.inner.for.inc:
2158 // CHECK5-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
2159 // CHECK5-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP18]]
2160 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
2161 // CHECK5-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
2162 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
2163 // CHECK5: omp.inner.for.end:
2164 // CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
2165 // CHECK5: omp.loop.exit:
2166 // CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
2167 // CHECK5-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
2168 // CHECK5-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
2169 // CHECK5-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2170 // CHECK5: .omp.final.then:
2171 // CHECK5-NEXT: store i32 100, ptr [[I]], align 4
2172 // CHECK5-NEXT: br label [[DOTOMP_FINAL_DONE]]
2173 // CHECK5: .omp.final.done:
2174 // CHECK5-NEXT: ret void
2177 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined.omp_outlined
2178 // CHECK5-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
2179 // CHECK5-NEXT: entry:
2180 // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2181 // CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2182 // CHECK5-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
2183 // CHECK5-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
2184 // CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
2185 // CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
2186 // CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
2187 // CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
2188 // CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2189 // CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2190 // CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
2191 // CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
2192 // CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
2193 // CHECK5-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
2194 // CHECK5-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
2195 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
2196 // CHECK5-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
2197 // CHECK5-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
2198 // CHECK5-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
2199 // CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
2200 // CHECK5-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
2201 // CHECK5-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
2202 // CHECK5-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
2203 // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
2204 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
2205 // CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
2206 // CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
2207 // CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
2208 // CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
2209 // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
2210 // CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2211 // CHECK5: cond.true:
2212 // CHECK5-NEXT: br label [[COND_END:%.*]]
2213 // CHECK5: cond.false:
2214 // CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
2215 // CHECK5-NEXT: br label [[COND_END]]
2216 // CHECK5: cond.end:
2217 // CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
2218 // CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
2219 // CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
2220 // CHECK5-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
2221 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
2222 // CHECK5: omp.inner.for.cond:
2223 // CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21:![0-9]+]]
2224 // CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP21]]
2225 // CHECK5-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
2226 // CHECK5-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2227 // CHECK5: omp.inner.for.body:
2228 // CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]]
2229 // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
2230 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2231 // CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP21]]
2232 // CHECK5-NEXT: invoke void @_Z3foov()
2233 // CHECK5-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP21]]
2234 // CHECK5: invoke.cont:
2235 // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
2236 // CHECK5: omp.body.continue:
2237 // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
2238 // CHECK5: omp.inner.for.inc:
2239 // CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]]
2240 // CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
2241 // CHECK5-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]]
2242 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]]
2243 // CHECK5: omp.inner.for.end:
2244 // CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
2245 // CHECK5: omp.loop.exit:
2246 // CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
2247 // CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
2248 // CHECK5-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
2249 // CHECK5-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2250 // CHECK5: .omp.final.then:
2251 // CHECK5-NEXT: store i32 100, ptr [[I]], align 4
2252 // CHECK5-NEXT: br label [[DOTOMP_FINAL_DONE]]
2253 // CHECK5: .omp.final.done:
2254 // CHECK5-NEXT: ret void
2255 // CHECK5: terminate.lpad:
2256 // CHECK5-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 }
2257 // CHECK5-NEXT: catch ptr null
2258 // CHECK5-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0
2259 // CHECK5-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR7]], !llvm.access.group [[ACC_GRP21]]
2260 // CHECK5-NEXT: unreachable
2263 // CHECK5-LABEL: define {{[^@]+}}@_Z5tmainIcLi5EEiv
2264 // CHECK5-SAME: () #[[ATTR6:[0-9]+]] comdat {
2265 // CHECK5-NEXT: entry:
2266 // CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
2267 // CHECK5-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
2268 // CHECK5-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
2269 // CHECK5-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
2270 // CHECK5-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
2271 // CHECK5-NEXT: store i32 3, ptr [[TMP0]], align 4
2272 // CHECK5-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
2273 // CHECK5-NEXT: store i32 0, ptr [[TMP1]], align 4
2274 // CHECK5-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
2275 // CHECK5-NEXT: store ptr null, ptr [[TMP2]], align 8
2276 // CHECK5-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
2277 // CHECK5-NEXT: store ptr null, ptr [[TMP3]], align 8
2278 // CHECK5-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
2279 // CHECK5-NEXT: store ptr null, ptr [[TMP4]], align 8
2280 // CHECK5-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
2281 // CHECK5-NEXT: store ptr null, ptr [[TMP5]], align 8
2282 // CHECK5-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
2283 // CHECK5-NEXT: store ptr null, ptr [[TMP6]], align 8
2284 // CHECK5-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
2285 // CHECK5-NEXT: store ptr null, ptr [[TMP7]], align 8
2286 // CHECK5-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
2287 // CHECK5-NEXT: store i64 100, ptr [[TMP8]], align 8
2288 // CHECK5-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
2289 // CHECK5-NEXT: store i64 0, ptr [[TMP9]], align 8
2290 // CHECK5-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
2291 // CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4
2292 // CHECK5-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
2293 // CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4
2294 // CHECK5-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
2295 // CHECK5-NEXT: store i32 0, ptr [[TMP12]], align 4
2296 // CHECK5-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.region_id, ptr [[KERNEL_ARGS]])
2297 // CHECK5-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
2298 // CHECK5-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
2299 // CHECK5: omp_offload.failed:
2300 // CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52() #[[ATTR3]]
2301 // CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT]]
2302 // CHECK5: omp_offload.cont:
2303 // CHECK5-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0
2304 // CHECK5-NEXT: store i32 3, ptr [[TMP15]], align 4
2305 // CHECK5-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1
2306 // CHECK5-NEXT: store i32 0, ptr [[TMP16]], align 4
2307 // CHECK5-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2
2308 // CHECK5-NEXT: store ptr null, ptr [[TMP17]], align 8
2309 // CHECK5-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3
2310 // CHECK5-NEXT: store ptr null, ptr [[TMP18]], align 8
2311 // CHECK5-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4
2312 // CHECK5-NEXT: store ptr null, ptr [[TMP19]], align 8
2313 // CHECK5-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5
2314 // CHECK5-NEXT: store ptr null, ptr [[TMP20]], align 8
2315 // CHECK5-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6
2316 // CHECK5-NEXT: store ptr null, ptr [[TMP21]], align 8
2317 // CHECK5-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7
2318 // CHECK5-NEXT: store ptr null, ptr [[TMP22]], align 8
2319 // CHECK5-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8
2320 // CHECK5-NEXT: store i64 100, ptr [[TMP23]], align 8
2321 // CHECK5-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9
2322 // CHECK5-NEXT: store i64 0, ptr [[TMP24]], align 8
2323 // CHECK5-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10
2324 // CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP25]], align 4
2325 // CHECK5-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11
2326 // CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4
2327 // CHECK5-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12
2328 // CHECK5-NEXT: store i32 0, ptr [[TMP27]], align 4
2329 // CHECK5-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.region_id, ptr [[KERNEL_ARGS2]])
2330 // CHECK5-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0
2331 // CHECK5-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]]
2332 // CHECK5: omp_offload.failed3:
2333 // CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57() #[[ATTR3]]
2334 // CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT4]]
2335 // CHECK5: omp_offload.cont4:
2336 // CHECK5-NEXT: ret i32 0
2339 // CHECK5-LABEL: define {{[^@]+}}@_Z5tmainI1SLi1EEiv
2340 // CHECK5-SAME: () #[[ATTR6]] comdat {
2341 // CHECK5-NEXT: entry:
2342 // CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
2343 // CHECK5-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
2344 // CHECK5-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
2345 // CHECK5-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
2346 // CHECK5-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
2347 // CHECK5-NEXT: store i32 3, ptr [[TMP0]], align 4
2348 // CHECK5-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
2349 // CHECK5-NEXT: store i32 0, ptr [[TMP1]], align 4
2350 // CHECK5-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
2351 // CHECK5-NEXT: store ptr null, ptr [[TMP2]], align 8
2352 // CHECK5-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
2353 // CHECK5-NEXT: store ptr null, ptr [[TMP3]], align 8
2354 // CHECK5-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
2355 // CHECK5-NEXT: store ptr null, ptr [[TMP4]], align 8
2356 // CHECK5-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
2357 // CHECK5-NEXT: store ptr null, ptr [[TMP5]], align 8
2358 // CHECK5-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
2359 // CHECK5-NEXT: store ptr null, ptr [[TMP6]], align 8
2360 // CHECK5-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
2361 // CHECK5-NEXT: store ptr null, ptr [[TMP7]], align 8
2362 // CHECK5-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
2363 // CHECK5-NEXT: store i64 100, ptr [[TMP8]], align 8
2364 // CHECK5-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
2365 // CHECK5-NEXT: store i64 0, ptr [[TMP9]], align 8
2366 // CHECK5-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
2367 // CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4
2368 // CHECK5-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
2369 // CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4
2370 // CHECK5-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
2371 // CHECK5-NEXT: store i32 0, ptr [[TMP12]], align 4
2372 // CHECK5-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.region_id, ptr [[KERNEL_ARGS]])
2373 // CHECK5-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
2374 // CHECK5-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
2375 // CHECK5: omp_offload.failed:
2376 // CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52() #[[ATTR3]]
2377 // CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT]]
2378 // CHECK5: omp_offload.cont:
2379 // CHECK5-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0
2380 // CHECK5-NEXT: store i32 3, ptr [[TMP15]], align 4
2381 // CHECK5-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1
2382 // CHECK5-NEXT: store i32 0, ptr [[TMP16]], align 4
2383 // CHECK5-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2
2384 // CHECK5-NEXT: store ptr null, ptr [[TMP17]], align 8
2385 // CHECK5-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3
2386 // CHECK5-NEXT: store ptr null, ptr [[TMP18]], align 8
2387 // CHECK5-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4
2388 // CHECK5-NEXT: store ptr null, ptr [[TMP19]], align 8
2389 // CHECK5-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5
2390 // CHECK5-NEXT: store ptr null, ptr [[TMP20]], align 8
2391 // CHECK5-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6
2392 // CHECK5-NEXT: store ptr null, ptr [[TMP21]], align 8
2393 // CHECK5-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7
2394 // CHECK5-NEXT: store ptr null, ptr [[TMP22]], align 8
2395 // CHECK5-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8
2396 // CHECK5-NEXT: store i64 100, ptr [[TMP23]], align 8
2397 // CHECK5-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9
2398 // CHECK5-NEXT: store i64 0, ptr [[TMP24]], align 8
2399 // CHECK5-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10
2400 // CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP25]], align 4
2401 // CHECK5-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11
2402 // CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4
2403 // CHECK5-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12
2404 // CHECK5-NEXT: store i32 0, ptr [[TMP27]], align 4
2405 // CHECK5-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.region_id, ptr [[KERNEL_ARGS2]])
2406 // CHECK5-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0
2407 // CHECK5-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]]
2408 // CHECK5: omp_offload.failed3:
2409 // CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57() #[[ATTR3]]
2410 // CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT4]]
2411 // CHECK5: omp_offload.cont4:
2412 // CHECK5-NEXT: ret i32 0
2415 // CHECK5-LABEL: define {{[^@]+}}@_ZN1SD1Ev
2416 // CHECK5-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat align 2 {
2417 // CHECK5-NEXT: entry:
2418 // CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
2419 // CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
2420 // CHECK5-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
2421 // CHECK5-NEXT: call void @_ZN1SD2Ev(ptr nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR3]]
2422 // CHECK5-NEXT: ret void
2425 // CHECK5-LABEL: define {{[^@]+}}@_ZN1SC2El
2426 // CHECK5-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 [[A:%.*]]) unnamed_addr #[[ATTR6]] comdat align 2 {
2427 // CHECK5-NEXT: entry:
2428 // CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
2429 // CHECK5-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
2430 // CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
2431 // CHECK5-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
2432 // CHECK5-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
2433 // CHECK5-NEXT: [[A2:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0
2434 // CHECK5-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
2435 // CHECK5-NEXT: store i64 [[TMP0]], ptr [[A2]], align 8
2436 // CHECK5-NEXT: ret void
2439 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52
2440 // CHECK5-SAME: () #[[ATTR2]] {
2441 // CHECK5-NEXT: entry:
2442 // CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined)
2443 // CHECK5-NEXT: ret void
2446 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined
2447 // CHECK5-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
2448 // CHECK5-NEXT: entry:
2449 // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2450 // CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2451 // CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
2452 // CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
2453 // CHECK5-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
2454 // CHECK5-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
2455 // CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2456 // CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2457 // CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
2458 // CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
2459 // CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
2460 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
2461 // CHECK5-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
2462 // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
2463 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
2464 // CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
2465 // CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
2466 // CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
2467 // CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
2468 // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99
2469 // CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2470 // CHECK5: cond.true:
2471 // CHECK5-NEXT: br label [[COND_END:%.*]]
2472 // CHECK5: cond.false:
2473 // CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
2474 // CHECK5-NEXT: br label [[COND_END]]
2475 // CHECK5: cond.end:
2476 // CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
2477 // CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
2478 // CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
2479 // CHECK5-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
2480 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
2481 // CHECK5: omp.inner.for.cond:
2482 // CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24:![0-9]+]]
2483 // CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP24]]
2484 // CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
2485 // CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2486 // CHECK5: omp.inner.for.body:
2487 // CHECK5-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 5), !llvm.access.group [[ACC_GRP24]]
2488 // CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP24]]
2489 // CHECK5-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
2490 // CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP24]]
2491 // CHECK5-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
2492 // CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP24]]
2493 // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
2494 // CHECK5: omp.inner.for.inc:
2495 // CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
2496 // CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP24]]
2497 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
2498 // CHECK5-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
2499 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
2500 // CHECK5: omp.inner.for.end:
2501 // CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
2502 // CHECK5: omp.loop.exit:
2503 // CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
2504 // CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
2505 // CHECK5-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
2506 // CHECK5-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2507 // CHECK5: .omp.final.then:
2508 // CHECK5-NEXT: store i32 100, ptr [[I]], align 4
2509 // CHECK5-NEXT: br label [[DOTOMP_FINAL_DONE]]
2510 // CHECK5: .omp.final.done:
2511 // CHECK5-NEXT: ret void
2514 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined.omp_outlined
2515 // CHECK5-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
2516 // CHECK5-NEXT: entry:
2517 // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2518 // CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2519 // CHECK5-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
2520 // CHECK5-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
2521 // CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
2522 // CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
2523 // CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
2524 // CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
2525 // CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2526 // CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2527 // CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
2528 // CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
2529 // CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
2530 // CHECK5-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
2531 // CHECK5-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
2532 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
2533 // CHECK5-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
2534 // CHECK5-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
2535 // CHECK5-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
2536 // CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
2537 // CHECK5-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
2538 // CHECK5-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
2539 // CHECK5-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
2540 // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
2541 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
2542 // CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
2543 // CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
2544 // CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
2545 // CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
2546 // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
2547 // CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2548 // CHECK5: cond.true:
2549 // CHECK5-NEXT: br label [[COND_END:%.*]]
2550 // CHECK5: cond.false:
2551 // CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
2552 // CHECK5-NEXT: br label [[COND_END]]
2553 // CHECK5: cond.end:
2554 // CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
2555 // CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
2556 // CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
2557 // CHECK5-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
2558 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
2559 // CHECK5: omp.inner.for.cond:
2560 // CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27:![0-9]+]]
2561 // CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP27]]
2562 // CHECK5-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
2563 // CHECK5-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2564 // CHECK5: omp.inner.for.body:
2565 // CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]]
2566 // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
2567 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2568 // CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP27]]
2569 // CHECK5-NEXT: invoke void @_Z3foov()
2570 // CHECK5-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP27]]
2571 // CHECK5: invoke.cont:
2572 // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
2573 // CHECK5: omp.body.continue:
2574 // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
2575 // CHECK5: omp.inner.for.inc:
2576 // CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]]
2577 // CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
2578 // CHECK5-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]]
2579 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]]
2580 // CHECK5: omp.inner.for.end:
2581 // CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
2582 // CHECK5: omp.loop.exit:
2583 // CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
2584 // CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
2585 // CHECK5-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
2586 // CHECK5-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2587 // CHECK5: .omp.final.then:
2588 // CHECK5-NEXT: store i32 100, ptr [[I]], align 4
2589 // CHECK5-NEXT: br label [[DOTOMP_FINAL_DONE]]
2590 // CHECK5: .omp.final.done:
2591 // CHECK5-NEXT: ret void
2592 // CHECK5: terminate.lpad:
2593 // CHECK5-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 }
2594 // CHECK5-NEXT: catch ptr null
2595 // CHECK5-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0
2596 // CHECK5-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR7]], !llvm.access.group [[ACC_GRP27]]
2597 // CHECK5-NEXT: unreachable
2600 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57
2601 // CHECK5-SAME: () #[[ATTR2]] {
2602 // CHECK5-NEXT: entry:
2603 // CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined)
2604 // CHECK5-NEXT: ret void
2607 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined
2608 // CHECK5-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
2609 // CHECK5-NEXT: entry:
2610 // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2611 // CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2612 // CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
2613 // CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
2614 // CHECK5-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
2615 // CHECK5-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
2616 // CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2617 // CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2618 // CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
2619 // CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
2620 // CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
2621 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
2622 // CHECK5-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
2623 // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
2624 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
2625 // CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
2626 // CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
2627 // CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
2628 // CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
2629 // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99
2630 // CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2631 // CHECK5: cond.true:
2632 // CHECK5-NEXT: br label [[COND_END:%.*]]
2633 // CHECK5: cond.false:
2634 // CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
2635 // CHECK5-NEXT: br label [[COND_END]]
2636 // CHECK5: cond.end:
2637 // CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
2638 // CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
2639 // CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
2640 // CHECK5-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
2641 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
2642 // CHECK5: omp.inner.for.cond:
2643 // CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP30:![0-9]+]]
2644 // CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP30]]
2645 // CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
2646 // CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2647 // CHECK5: omp.inner.for.body:
2648 // CHECK5-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 23), !llvm.access.group [[ACC_GRP30]]
2649 // CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP30]]
2650 // CHECK5-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
2651 // CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP30]]
2652 // CHECK5-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
2653 // CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP30]]
2654 // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
2655 // CHECK5: omp.inner.for.inc:
2656 // CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP30]]
2657 // CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP30]]
2658 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
2659 // CHECK5-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP30]]
2660 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]]
2661 // CHECK5: omp.inner.for.end:
2662 // CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
2663 // CHECK5: omp.loop.exit:
2664 // CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
2665 // CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
2666 // CHECK5-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
2667 // CHECK5-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2668 // CHECK5: .omp.final.then:
2669 // CHECK5-NEXT: store i32 100, ptr [[I]], align 4
2670 // CHECK5-NEXT: br label [[DOTOMP_FINAL_DONE]]
2671 // CHECK5: .omp.final.done:
2672 // CHECK5-NEXT: ret void
2675 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined.omp_outlined
2676 // CHECK5-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
2677 // CHECK5-NEXT: entry:
2678 // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2679 // CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2680 // CHECK5-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
2681 // CHECK5-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
2682 // CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
2683 // CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
2684 // CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
2685 // CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
2686 // CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2687 // CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2688 // CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
2689 // CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
2690 // CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
2691 // CHECK5-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
2692 // CHECK5-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
2693 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
2694 // CHECK5-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
2695 // CHECK5-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
2696 // CHECK5-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
2697 // CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
2698 // CHECK5-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
2699 // CHECK5-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
2700 // CHECK5-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
2701 // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
2702 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
2703 // CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
2704 // CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
2705 // CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
2706 // CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
2707 // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
2708 // CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2709 // CHECK5: cond.true:
2710 // CHECK5-NEXT: br label [[COND_END:%.*]]
2711 // CHECK5: cond.false:
2712 // CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
2713 // CHECK5-NEXT: br label [[COND_END]]
2714 // CHECK5: cond.end:
2715 // CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
2716 // CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
2717 // CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
2718 // CHECK5-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
2719 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
2720 // CHECK5: omp.inner.for.cond:
2721 // CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33:![0-9]+]]
2722 // CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP33]]
2723 // CHECK5-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
2724 // CHECK5-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2725 // CHECK5: omp.inner.for.body:
2726 // CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]]
2727 // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
2728 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2729 // CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP33]]
2730 // CHECK5-NEXT: invoke void @_Z3foov()
2731 // CHECK5-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP33]]
2732 // CHECK5: invoke.cont:
2733 // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
2734 // CHECK5: omp.body.continue:
2735 // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
2736 // CHECK5: omp.inner.for.inc:
2737 // CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]]
2738 // CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
2739 // CHECK5-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]]
2740 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]]
2741 // CHECK5: omp.inner.for.end:
2742 // CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
2743 // CHECK5: omp.loop.exit:
2744 // CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
2745 // CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
2746 // CHECK5-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
2747 // CHECK5-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2748 // CHECK5: .omp.final.then:
2749 // CHECK5-NEXT: store i32 100, ptr [[I]], align 4
2750 // CHECK5-NEXT: br label [[DOTOMP_FINAL_DONE]]
2751 // CHECK5: .omp.final.done:
2752 // CHECK5-NEXT: ret void
2753 // CHECK5: terminate.lpad:
2754 // CHECK5-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 }
2755 // CHECK5-NEXT: catch ptr null
2756 // CHECK5-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0
2757 // CHECK5-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR7]], !llvm.access.group [[ACC_GRP33]]
2758 // CHECK5-NEXT: unreachable
2761 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52
2762 // CHECK5-SAME: () #[[ATTR2]] {
2763 // CHECK5-NEXT: entry:
2764 // CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined)
2765 // CHECK5-NEXT: ret void
2768 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined
2769 // CHECK5-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
2770 // CHECK5-NEXT: entry:
2771 // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2772 // CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2773 // CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
2774 // CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
2775 // CHECK5-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
2776 // CHECK5-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
2777 // CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2778 // CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2779 // CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
2780 // CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
2781 // CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
2782 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
2783 // CHECK5-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
2784 // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
2785 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
2786 // CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
2787 // CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
2788 // CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
2789 // CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
2790 // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99
2791 // CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2792 // CHECK5: cond.true:
2793 // CHECK5-NEXT: br label [[COND_END:%.*]]
2794 // CHECK5: cond.false:
2795 // CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
2796 // CHECK5-NEXT: br label [[COND_END]]
2797 // CHECK5: cond.end:
2798 // CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
2799 // CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
2800 // CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
2801 // CHECK5-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
2802 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
2803 // CHECK5: omp.inner.for.cond:
2804 // CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36:![0-9]+]]
2805 // CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP36]]
2806 // CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
2807 // CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2808 // CHECK5: omp.inner.for.body:
2809 // CHECK5-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 1), !llvm.access.group [[ACC_GRP36]]
2810 // CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP36]]
2811 // CHECK5-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
2812 // CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP36]]
2813 // CHECK5-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
2814 // CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP36]]
2815 // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
2816 // CHECK5: omp.inner.for.inc:
2817 // CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36]]
2818 // CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP36]]
2819 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
2820 // CHECK5-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36]]
2821 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP37:![0-9]+]]
2822 // CHECK5: omp.inner.for.end:
2823 // CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
2824 // CHECK5: omp.loop.exit:
2825 // CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
2826 // CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
2827 // CHECK5-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
2828 // CHECK5-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2829 // CHECK5: .omp.final.then:
2830 // CHECK5-NEXT: store i32 100, ptr [[I]], align 4
2831 // CHECK5-NEXT: br label [[DOTOMP_FINAL_DONE]]
2832 // CHECK5: .omp.final.done:
2833 // CHECK5-NEXT: ret void
2836 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined.omp_outlined
2837 // CHECK5-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
2838 // CHECK5-NEXT: entry:
2839 // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2840 // CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2841 // CHECK5-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
2842 // CHECK5-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
2843 // CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
2844 // CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
2845 // CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
2846 // CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
2847 // CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2848 // CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2849 // CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
2850 // CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
2851 // CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
2852 // CHECK5-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
2853 // CHECK5-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
2854 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
2855 // CHECK5-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
2856 // CHECK5-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
2857 // CHECK5-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
2858 // CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
2859 // CHECK5-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
2860 // CHECK5-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
2861 // CHECK5-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
2862 // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
2863 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
2864 // CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
2865 // CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
2866 // CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
2867 // CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
2868 // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
2869 // CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2870 // CHECK5: cond.true:
2871 // CHECK5-NEXT: br label [[COND_END:%.*]]
2872 // CHECK5: cond.false:
2873 // CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
2874 // CHECK5-NEXT: br label [[COND_END]]
2875 // CHECK5: cond.end:
2876 // CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
2877 // CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
2878 // CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
2879 // CHECK5-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
2880 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
2881 // CHECK5: omp.inner.for.cond:
2882 // CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39:![0-9]+]]
2883 // CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP39]]
2884 // CHECK5-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
2885 // CHECK5-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2886 // CHECK5: omp.inner.for.body:
2887 // CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39]]
2888 // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
2889 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2890 // CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP39]]
2891 // CHECK5-NEXT: invoke void @_Z3foov()
2892 // CHECK5-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP39]]
2893 // CHECK5: invoke.cont:
2894 // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
2895 // CHECK5: omp.body.continue:
2896 // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
2897 // CHECK5: omp.inner.for.inc:
2898 // CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39]]
2899 // CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
2900 // CHECK5-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39]]
2901 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP40:![0-9]+]]
2902 // CHECK5: omp.inner.for.end:
2903 // CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
2904 // CHECK5: omp.loop.exit:
2905 // CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
2906 // CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
2907 // CHECK5-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
2908 // CHECK5-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2909 // CHECK5: .omp.final.then:
2910 // CHECK5-NEXT: store i32 100, ptr [[I]], align 4
2911 // CHECK5-NEXT: br label [[DOTOMP_FINAL_DONE]]
2912 // CHECK5: .omp.final.done:
2913 // CHECK5-NEXT: ret void
2914 // CHECK5: terminate.lpad:
2915 // CHECK5-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 }
2916 // CHECK5-NEXT: catch ptr null
2917 // CHECK5-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0
2918 // CHECK5-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR7]], !llvm.access.group [[ACC_GRP39]]
2919 // CHECK5-NEXT: unreachable
2922 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57
2923 // CHECK5-SAME: () #[[ATTR2]] {
2924 // CHECK5-NEXT: entry:
2925 // CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined)
2926 // CHECK5-NEXT: ret void
2929 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined
2930 // CHECK5-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
2931 // CHECK5-NEXT: entry:
2932 // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
2933 // CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
2934 // CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
2935 // CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
2936 // CHECK5-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
2937 // CHECK5-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
2938 // CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
2939 // CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
2940 // CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
2941 // CHECK5-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 8
2942 // CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
2943 // CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
2944 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
2945 // CHECK5-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
2946 // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
2947 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
2948 // CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
2949 // CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
2950 // CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
2951 // CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
2952 // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99
2953 // CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
2954 // CHECK5: cond.true:
2955 // CHECK5-NEXT: br label [[COND_END:%.*]]
2956 // CHECK5: cond.false:
2957 // CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
2958 // CHECK5-NEXT: br label [[COND_END]]
2959 // CHECK5: cond.end:
2960 // CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
2961 // CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
2962 // CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
2963 // CHECK5-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
2964 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
2965 // CHECK5: omp.inner.for.cond:
2966 // CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP42:![0-9]+]]
2967 // CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP42]]
2968 // CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
2969 // CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2970 // CHECK5: omp.inner.for.body:
2971 // CHECK5-NEXT: invoke void @_ZN1SC1El(ptr nonnull align 8 dereferenceable(24) [[REF_TMP]], i64 23)
2972 // CHECK5-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP42]]
2973 // CHECK5: invoke.cont:
2974 // CHECK5-NEXT: [[CALL:%.*]] = invoke signext i8 @_ZN1ScvcEv(ptr nonnull align 8 dereferenceable(24) [[REF_TMP]])
2975 // CHECK5-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[TERMINATE_LPAD]], !llvm.access.group [[ACC_GRP42]]
2976 // CHECK5: invoke.cont2:
2977 // CHECK5-NEXT: [[TMP7:%.*]] = sext i8 [[CALL]] to i32
2978 // CHECK5-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 [[TMP7]]), !llvm.access.group [[ACC_GRP42]]
2979 // CHECK5-NEXT: call void @_ZN1SD1Ev(ptr nonnull align 8 dereferenceable(24) [[REF_TMP]]) #[[ATTR3]], !llvm.access.group [[ACC_GRP42]]
2980 // CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP42]]
2981 // CHECK5-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
2982 // CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP42]]
2983 // CHECK5-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
2984 // CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]]), !llvm.access.group [[ACC_GRP42]]
2985 // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
2986 // CHECK5: omp.inner.for.inc:
2987 // CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP42]]
2988 // CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP42]]
2989 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
2990 // CHECK5-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP42]]
2991 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP43:![0-9]+]]
2992 // CHECK5: omp.inner.for.end:
2993 // CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
2994 // CHECK5: omp.loop.exit:
2995 // CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
2996 // CHECK5-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
2997 // CHECK5-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
2998 // CHECK5-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
2999 // CHECK5: .omp.final.then:
3000 // CHECK5-NEXT: store i32 100, ptr [[I]], align 4
3001 // CHECK5-NEXT: br label [[DOTOMP_FINAL_DONE]]
3002 // CHECK5: .omp.final.done:
3003 // CHECK5-NEXT: ret void
3004 // CHECK5: terminate.lpad:
3005 // CHECK5-NEXT: [[TMP16:%.*]] = landingpad { ptr, i32 }
3006 // CHECK5-NEXT: catch ptr null
3007 // CHECK5-NEXT: [[TMP17:%.*]] = extractvalue { ptr, i32 } [[TMP16]], 0
3008 // CHECK5-NEXT: call void @__clang_call_terminate(ptr [[TMP17]]) #[[ATTR7]], !llvm.access.group [[ACC_GRP42]]
3009 // CHECK5-NEXT: unreachable
3012 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined.omp_outlined
3013 // CHECK5-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
3014 // CHECK5-NEXT: entry:
3015 // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
3016 // CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3017 // CHECK5-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
3018 // CHECK5-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
3019 // CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
3020 // CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
3021 // CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
3022 // CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
3023 // CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3024 // CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3025 // CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
3026 // CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
3027 // CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
3028 // CHECK5-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
3029 // CHECK5-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
3030 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
3031 // CHECK5-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
3032 // CHECK5-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
3033 // CHECK5-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
3034 // CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
3035 // CHECK5-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
3036 // CHECK5-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
3037 // CHECK5-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
3038 // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
3039 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
3040 // CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
3041 // CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
3042 // CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
3043 // CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
3044 // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
3045 // CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3046 // CHECK5: cond.true:
3047 // CHECK5-NEXT: br label [[COND_END:%.*]]
3048 // CHECK5: cond.false:
3049 // CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
3050 // CHECK5-NEXT: br label [[COND_END]]
3051 // CHECK5: cond.end:
3052 // CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
3053 // CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
3054 // CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
3055 // CHECK5-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
3056 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
3057 // CHECK5: omp.inner.for.cond:
3058 // CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP45:![0-9]+]]
3059 // CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP45]]
3060 // CHECK5-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
3061 // CHECK5-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3062 // CHECK5: omp.inner.for.body:
3063 // CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP45]]
3064 // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
3065 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
3066 // CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP45]]
3067 // CHECK5-NEXT: invoke void @_Z3foov()
3068 // CHECK5-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP45]]
3069 // CHECK5: invoke.cont:
3070 // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
3071 // CHECK5: omp.body.continue:
3072 // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
3073 // CHECK5: omp.inner.for.inc:
3074 // CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP45]]
3075 // CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
3076 // CHECK5-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP45]]
3077 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP46:![0-9]+]]
3078 // CHECK5: omp.inner.for.end:
3079 // CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
3080 // CHECK5: omp.loop.exit:
3081 // CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
3082 // CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
3083 // CHECK5-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
3084 // CHECK5-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3085 // CHECK5: .omp.final.then:
3086 // CHECK5-NEXT: store i32 100, ptr [[I]], align 4
3087 // CHECK5-NEXT: br label [[DOTOMP_FINAL_DONE]]
3088 // CHECK5: .omp.final.done:
3089 // CHECK5-NEXT: ret void
3090 // CHECK5: terminate.lpad:
3091 // CHECK5-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 }
3092 // CHECK5-NEXT: catch ptr null
3093 // CHECK5-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0
3094 // CHECK5-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR7]], !llvm.access.group [[ACC_GRP45]]
3095 // CHECK5-NEXT: unreachable
3098 // CHECK5-LABEL: define {{[^@]+}}@_ZN1SD2Ev
3099 // CHECK5-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat align 2 {
3100 // CHECK5-NEXT: entry:
3101 // CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
3102 // CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
3103 // CHECK5-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
3104 // CHECK5-NEXT: ret void
3107 // CHECK9-LABEL: define {{[^@]+}}@main
3108 // CHECK9-SAME: () #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 {
3109 // CHECK9-NEXT: entry:
3110 // CHECK9-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
3111 // CHECK9-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8
3112 // CHECK9-NEXT: [[A:%.*]] = alloca i8, align 1
3113 // CHECK9-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8
3114 // CHECK9-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
3115 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
3116 // CHECK9-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
3117 // CHECK9-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
3118 // CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8
3119 // CHECK9-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8
3120 // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8
3121 // CHECK9-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
3122 // CHECK9-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
3123 // CHECK9-NEXT: store i32 0, ptr [[RETVAL]], align 4
3124 // CHECK9-NEXT: call void @_ZN1SC1El(ptr nonnull align 8 dereferenceable(24) [[S]], i64 0)
3125 // CHECK9-NEXT: [[CALL:%.*]] = invoke i8 @_ZN1ScvcEv(ptr nonnull align 8 dereferenceable(24) [[S]])
3126 // CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
3127 // CHECK9: invoke.cont:
3128 // CHECK9-NEXT: store i8 [[CALL]], ptr [[A]], align 1
3129 // CHECK9-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
3130 // CHECK9-NEXT: store i32 3, ptr [[TMP0]], align 4
3131 // CHECK9-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
3132 // CHECK9-NEXT: store i32 0, ptr [[TMP1]], align 4
3133 // CHECK9-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
3134 // CHECK9-NEXT: store ptr null, ptr [[TMP2]], align 8
3135 // CHECK9-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
3136 // CHECK9-NEXT: store ptr null, ptr [[TMP3]], align 8
3137 // CHECK9-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
3138 // CHECK9-NEXT: store ptr null, ptr [[TMP4]], align 8
3139 // CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
3140 // CHECK9-NEXT: store ptr null, ptr [[TMP5]], align 8
3141 // CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
3142 // CHECK9-NEXT: store ptr null, ptr [[TMP6]], align 8
3143 // CHECK9-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
3144 // CHECK9-NEXT: store ptr null, ptr [[TMP7]], align 8
3145 // CHECK9-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
3146 // CHECK9-NEXT: store i64 100, ptr [[TMP8]], align 8
3147 // CHECK9-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
3148 // CHECK9-NEXT: store i64 0, ptr [[TMP9]], align 8
3149 // CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
3150 // CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4
3151 // CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
3152 // CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4
3153 // CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
3154 // CHECK9-NEXT: store i32 0, ptr [[TMP12]], align 4
3155 // CHECK9-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.region_id, ptr [[KERNEL_ARGS]])
3156 // CHECK9-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
3157 // CHECK9-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
3158 // CHECK9: omp_offload.failed:
3159 // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68() #[[ATTR3:[0-9]+]]
3160 // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]]
3161 // CHECK9: lpad:
3162 // CHECK9-NEXT: [[TMP15:%.*]] = landingpad { ptr, i32 }
3163 // CHECK9-NEXT: cleanup
3164 // CHECK9-NEXT: [[TMP16:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 0
3165 // CHECK9-NEXT: store ptr [[TMP16]], ptr [[EXN_SLOT]], align 8
3166 // CHECK9-NEXT: [[TMP17:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 1
3167 // CHECK9-NEXT: store i32 [[TMP17]], ptr [[EHSELECTOR_SLOT]], align 4
3168 // CHECK9-NEXT: call void @_ZN1SD1Ev(ptr nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]]
3169 // CHECK9-NEXT: br label [[EH_RESUME:%.*]]
3170 // CHECK9: omp_offload.cont:
3171 // CHECK9-NEXT: [[TMP18:%.*]] = load i8, ptr [[A]], align 1
3172 // CHECK9-NEXT: store i8 [[TMP18]], ptr [[A_CASTED]], align 1
3173 // CHECK9-NEXT: [[TMP19:%.*]] = load i64, ptr [[A_CASTED]], align 8
3174 // CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
3175 // CHECK9-NEXT: store i64 [[TMP19]], ptr [[TMP20]], align 8
3176 // CHECK9-NEXT: [[TMP21:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
3177 // CHECK9-NEXT: store i64 [[TMP19]], ptr [[TMP21]], align 8
3178 // CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
3179 // CHECK9-NEXT: store ptr null, ptr [[TMP22]], align 8
3180 // CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
3181 // CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
3182 // CHECK9-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0
3183 // CHECK9-NEXT: store i32 3, ptr [[TMP25]], align 4
3184 // CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1
3185 // CHECK9-NEXT: store i32 1, ptr [[TMP26]], align 4
3186 // CHECK9-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2
3187 // CHECK9-NEXT: store ptr [[TMP23]], ptr [[TMP27]], align 8
3188 // CHECK9-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3
3189 // CHECK9-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 8
3190 // CHECK9-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4
3191 // CHECK9-NEXT: store ptr @.offload_sizes, ptr [[TMP29]], align 8
3192 // CHECK9-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5
3193 // CHECK9-NEXT: store ptr @.offload_maptypes, ptr [[TMP30]], align 8
3194 // CHECK9-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6
3195 // CHECK9-NEXT: store ptr null, ptr [[TMP31]], align 8
3196 // CHECK9-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7
3197 // CHECK9-NEXT: store ptr null, ptr [[TMP32]], align 8
3198 // CHECK9-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8
3199 // CHECK9-NEXT: store i64 100, ptr [[TMP33]], align 8
3200 // CHECK9-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9
3201 // CHECK9-NEXT: store i64 0, ptr [[TMP34]], align 8
3202 // CHECK9-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10
3203 // CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP35]], align 4
3204 // CHECK9-NEXT: [[TMP36:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11
3205 // CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP36]], align 4
3206 // CHECK9-NEXT: [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12
3207 // CHECK9-NEXT: store i32 0, ptr [[TMP37]], align 4
3208 // CHECK9-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.region_id, ptr [[KERNEL_ARGS2]])
3209 // CHECK9-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
3210 // CHECK9-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]]
3211 // CHECK9: omp_offload.failed3:
3212 // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74(i64 [[TMP19]]) #[[ATTR3]]
3213 // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT4]]
3214 // CHECK9: omp_offload.cont4:
3215 // CHECK9-NEXT: [[TMP40:%.*]] = load i8, ptr [[A]], align 1
3216 // CHECK9-NEXT: [[CONV:%.*]] = sext i8 [[TMP40]] to i32
3217 // CHECK9-NEXT: [[CALL6:%.*]] = invoke i32 @_Z5tmainIcLi5EEiv()
3218 // CHECK9-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[LPAD]]
3219 // CHECK9: invoke.cont5:
3220 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CALL6]]
3221 // CHECK9-NEXT: [[CALL8:%.*]] = invoke i32 @_Z5tmainI1SLi1EEiv()
3222 // CHECK9-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD]]
3223 // CHECK9: invoke.cont7:
3224 // CHECK9-NEXT: [[ADD9:%.*]] = add nsw i32 [[ADD]], [[CALL8]]
3225 // CHECK9-NEXT: store i32 [[ADD9]], ptr [[RETVAL]], align 4
3226 // CHECK9-NEXT: call void @_ZN1SD1Ev(ptr nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]]
3227 // CHECK9-NEXT: [[TMP41:%.*]] = load i32, ptr [[RETVAL]], align 4
3228 // CHECK9-NEXT: ret i32 [[TMP41]]
3229 // CHECK9: eh.resume:
3230 // CHECK9-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8
3231 // CHECK9-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4
3232 // CHECK9-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0
3233 // CHECK9-NEXT: [[LPAD_VAL10:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1
3234 // CHECK9-NEXT: resume { ptr, i32 } [[LPAD_VAL10]]
3237 // CHECK9-LABEL: define {{[^@]+}}@_ZN1SC1El
3238 // CHECK9-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat {
3239 // CHECK9-NEXT: entry:
3240 // CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
3241 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
3242 // CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
3243 // CHECK9-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
3244 // CHECK9-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
3245 // CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
3246 // CHECK9-NEXT: call void @_ZN1SC2El(ptr nonnull align 8 dereferenceable(24) [[THIS1]], i64 [[TMP0]])
3247 // CHECK9-NEXT: ret void
3250 // CHECK9-LABEL: define {{[^@]+}}@_ZN1ScvcEv
3251 // CHECK9-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]]) #[[ATTR1]] comdat {
3252 // CHECK9-NEXT: entry:
3253 // CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
3254 // CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
3255 // CHECK9-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
3256 // CHECK9-NEXT: call void @_Z8mayThrowv()
3257 // CHECK9-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0
3258 // CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[A]], align 8
3259 // CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i8
3260 // CHECK9-NEXT: ret i8 [[CONV]]
3263 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68
3264 // CHECK9-SAME: () #[[ATTR2:[0-9]+]] {
3265 // CHECK9-NEXT: entry:
3266 // CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined)
3267 // CHECK9-NEXT: ret void
3270 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined
3271 // CHECK9-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
3272 // CHECK9-NEXT: entry:
3273 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
3274 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3275 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
3276 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
3277 // CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
3278 // CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
3279 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3280 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3281 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
3282 // CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
3283 // CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
3284 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
3285 // CHECK9-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
3286 // CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
3287 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
3288 // CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
3289 // CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
3290 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
3291 // CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
3292 // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99
3293 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3294 // CHECK9: cond.true:
3295 // CHECK9-NEXT: br label [[COND_END:%.*]]
3296 // CHECK9: cond.false:
3297 // CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
3298 // CHECK9-NEXT: br label [[COND_END]]
3299 // CHECK9: cond.end:
3300 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
3301 // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
3302 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
3303 // CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
3304 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
3305 // CHECK9: omp.inner.for.cond:
3306 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9:![0-9]+]]
3307 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP9]]
3308 // CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
3309 // CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3310 // CHECK9: omp.inner.for.body:
3311 // CHECK9-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 2), !llvm.access.group [[ACC_GRP9]]
3312 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP9]]
3313 // CHECK9-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
3314 // CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP9]]
3315 // CHECK9-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
3316 // CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP9]]
3317 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
3318 // CHECK9: omp.inner.for.inc:
3319 // CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]]
3320 // CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP9]]
3321 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
3322 // CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]]
3323 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]]
3324 // CHECK9: omp.inner.for.end:
3325 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
3326 // CHECK9: omp.loop.exit:
3327 // CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
3328 // CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
3329 // CHECK9-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
3330 // CHECK9-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3331 // CHECK9: .omp.final.then:
3332 // CHECK9-NEXT: store i32 100, ptr [[I]], align 4
3333 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]]
3334 // CHECK9: .omp.final.done:
3335 // CHECK9-NEXT: ret void
3338 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined.omp_outlined
3339 // CHECK9-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
3340 // CHECK9-NEXT: entry:
3341 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
3342 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3343 // CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
3344 // CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
3345 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
3346 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
3347 // CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
3348 // CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
3349 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3350 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3351 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
3352 // CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
3353 // CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
3354 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
3355 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
3356 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
3357 // CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
3358 // CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
3359 // CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
3360 // CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
3361 // CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
3362 // CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
3363 // CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
3364 // CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
3365 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
3366 // CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
3367 // CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
3368 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
3369 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
3370 // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
3371 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3372 // CHECK9: cond.true:
3373 // CHECK9-NEXT: br label [[COND_END:%.*]]
3374 // CHECK9: cond.false:
3375 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
3376 // CHECK9-NEXT: br label [[COND_END]]
3377 // CHECK9: cond.end:
3378 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
3379 // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
3380 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
3381 // CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
3382 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
3383 // CHECK9: omp.inner.for.cond:
3384 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]]
3385 // CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP13]]
3386 // CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
3387 // CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3388 // CHECK9: omp.inner.for.body:
3389 // CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]]
3390 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
3391 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
3392 // CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP13]]
3393 // CHECK9-NEXT: invoke void @_Z3foov()
3394 // CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP13]]
3395 // CHECK9: invoke.cont:
3396 // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
3397 // CHECK9: omp.body.continue:
3398 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
3399 // CHECK9: omp.inner.for.inc:
3400 // CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]]
3401 // CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
3402 // CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]]
3403 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]]
3404 // CHECK9: omp.inner.for.end:
3405 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
3406 // CHECK9: omp.loop.exit:
3407 // CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
3408 // CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
3409 // CHECK9-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
3410 // CHECK9-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3411 // CHECK9: .omp.final.then:
3412 // CHECK9-NEXT: store i32 100, ptr [[I]], align 4
3413 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]]
3414 // CHECK9: .omp.final.done:
3415 // CHECK9-NEXT: ret void
3416 // CHECK9: terminate.lpad:
3417 // CHECK9-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 }
3418 // CHECK9-NEXT: catch ptr null
3419 // CHECK9-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0
3420 // CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR7:[0-9]+]], !llvm.access.group [[ACC_GRP13]]
3421 // CHECK9-NEXT: unreachable
3424 // CHECK9-LABEL: define {{[^@]+}}@__clang_call_terminate
3425 // CHECK9-SAME: (ptr [[TMP0:%.*]]) #[[ATTR5:[0-9]+]] comdat {
3426 // CHECK9-NEXT: [[TMP2:%.*]] = call ptr @__cxa_begin_catch(ptr [[TMP0]]) #[[ATTR3]]
3427 // CHECK9-NEXT: call void @_ZSt9terminatev() #[[ATTR7]]
3428 // CHECK9-NEXT: unreachable
3431 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74
3432 // CHECK9-SAME: (i64 [[A:%.*]]) #[[ATTR2]] {
3433 // CHECK9-NEXT: entry:
3434 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
3435 // CHECK9-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
3436 // CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined, ptr [[A_ADDR]])
3437 // CHECK9-NEXT: ret void
3440 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined
3441 // CHECK9-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], ptr nonnull align 1 dereferenceable(1) [[A:%.*]]) #[[ATTR2]] {
3442 // CHECK9-NEXT: entry:
3443 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
3444 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3445 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
3446 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
3447 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
3448 // CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
3449 // CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
3450 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3451 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3452 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
3453 // CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
3454 // CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
3455 // CHECK9-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
3456 // CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
3457 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
3458 // CHECK9-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
3459 // CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
3460 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
3461 // CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
3462 // CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
3463 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
3464 // CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
3465 // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99
3466 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3467 // CHECK9: cond.true:
3468 // CHECK9-NEXT: br label [[COND_END:%.*]]
3469 // CHECK9: cond.false:
3470 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
3471 // CHECK9-NEXT: br label [[COND_END]]
3472 // CHECK9: cond.end:
3473 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
3474 // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
3475 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
3476 // CHECK9-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
3477 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
3478 // CHECK9: omp.inner.for.cond:
3479 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18:![0-9]+]]
3480 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP18]]
3481 // CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
3482 // CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3483 // CHECK9: omp.inner.for.body:
3484 // CHECK9-NEXT: [[TMP8:%.*]] = load i8, ptr [[TMP0]], align 1, !llvm.access.group [[ACC_GRP18]]
3485 // CHECK9-NEXT: [[TMP9:%.*]] = sext i8 [[TMP8]] to i32
3486 // CHECK9-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP2]], i32 [[TMP9]]), !llvm.access.group [[ACC_GRP18]]
3487 // CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP18]]
3488 // CHECK9-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
3489 // CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP18]]
3490 // CHECK9-NEXT: [[TMP13:%.*]] = zext i32 [[TMP12]] to i64
3491 // CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined.omp_outlined, i64 [[TMP11]], i64 [[TMP13]]), !llvm.access.group [[ACC_GRP18]]
3492 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
3493 // CHECK9: omp.inner.for.inc:
3494 // CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
3495 // CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP18]]
3496 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
3497 // CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
3498 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
3499 // CHECK9: omp.inner.for.end:
3500 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
3501 // CHECK9: omp.loop.exit:
3502 // CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
3503 // CHECK9-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
3504 // CHECK9-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
3505 // CHECK9-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3506 // CHECK9: .omp.final.then:
3507 // CHECK9-NEXT: store i32 100, ptr [[I]], align 4
3508 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]]
3509 // CHECK9: .omp.final.done:
3510 // CHECK9-NEXT: ret void
3513 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined.omp_outlined
3514 // CHECK9-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
3515 // CHECK9-NEXT: entry:
3516 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
3517 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3518 // CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
3519 // CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
3520 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
3521 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
3522 // CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
3523 // CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
3524 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3525 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3526 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
3527 // CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
3528 // CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
3529 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
3530 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
3531 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
3532 // CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
3533 // CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
3534 // CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
3535 // CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
3536 // CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
3537 // CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
3538 // CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
3539 // CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
3540 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
3541 // CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
3542 // CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
3543 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
3544 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
3545 // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
3546 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3547 // CHECK9: cond.true:
3548 // CHECK9-NEXT: br label [[COND_END:%.*]]
3549 // CHECK9: cond.false:
3550 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
3551 // CHECK9-NEXT: br label [[COND_END]]
3552 // CHECK9: cond.end:
3553 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
3554 // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
3555 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
3556 // CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
3557 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
3558 // CHECK9: omp.inner.for.cond:
3559 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21:![0-9]+]]
3560 // CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP21]]
3561 // CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
3562 // CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3563 // CHECK9: omp.inner.for.body:
3564 // CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]]
3565 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
3566 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
3567 // CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP21]]
3568 // CHECK9-NEXT: invoke void @_Z3foov()
3569 // CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP21]]
3570 // CHECK9: invoke.cont:
3571 // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
3572 // CHECK9: omp.body.continue:
3573 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
3574 // CHECK9: omp.inner.for.inc:
3575 // CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]]
3576 // CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
3577 // CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]]
3578 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]]
3579 // CHECK9: omp.inner.for.end:
3580 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
3581 // CHECK9: omp.loop.exit:
3582 // CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
3583 // CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
3584 // CHECK9-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
3585 // CHECK9-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3586 // CHECK9: .omp.final.then:
3587 // CHECK9-NEXT: store i32 100, ptr [[I]], align 4
3588 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]]
3589 // CHECK9: .omp.final.done:
3590 // CHECK9-NEXT: ret void
3591 // CHECK9: terminate.lpad:
3592 // CHECK9-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 }
3593 // CHECK9-NEXT: catch ptr null
3594 // CHECK9-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0
3595 // CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR7]], !llvm.access.group [[ACC_GRP21]]
3596 // CHECK9-NEXT: unreachable
3599 // CHECK9-LABEL: define {{[^@]+}}@_Z5tmainIcLi5EEiv
3600 // CHECK9-SAME: () #[[ATTR6:[0-9]+]] comdat {
3601 // CHECK9-NEXT: entry:
3602 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
3603 // CHECK9-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
3604 // CHECK9-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
3605 // CHECK9-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
3606 // CHECK9-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
3607 // CHECK9-NEXT: store i32 3, ptr [[TMP0]], align 4
3608 // CHECK9-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
3609 // CHECK9-NEXT: store i32 0, ptr [[TMP1]], align 4
3610 // CHECK9-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
3611 // CHECK9-NEXT: store ptr null, ptr [[TMP2]], align 8
3612 // CHECK9-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
3613 // CHECK9-NEXT: store ptr null, ptr [[TMP3]], align 8
3614 // CHECK9-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
3615 // CHECK9-NEXT: store ptr null, ptr [[TMP4]], align 8
3616 // CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
3617 // CHECK9-NEXT: store ptr null, ptr [[TMP5]], align 8
3618 // CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
3619 // CHECK9-NEXT: store ptr null, ptr [[TMP6]], align 8
3620 // CHECK9-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
3621 // CHECK9-NEXT: store ptr null, ptr [[TMP7]], align 8
3622 // CHECK9-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
3623 // CHECK9-NEXT: store i64 100, ptr [[TMP8]], align 8
3624 // CHECK9-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
3625 // CHECK9-NEXT: store i64 0, ptr [[TMP9]], align 8
3626 // CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
3627 // CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4
3628 // CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
3629 // CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4
3630 // CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
3631 // CHECK9-NEXT: store i32 0, ptr [[TMP12]], align 4
3632 // CHECK9-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.region_id, ptr [[KERNEL_ARGS]])
3633 // CHECK9-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
3634 // CHECK9-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
3635 // CHECK9: omp_offload.failed:
3636 // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52() #[[ATTR3]]
3637 // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]]
3638 // CHECK9: omp_offload.cont:
3639 // CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0
3640 // CHECK9-NEXT: store i32 3, ptr [[TMP15]], align 4
3641 // CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1
3642 // CHECK9-NEXT: store i32 0, ptr [[TMP16]], align 4
3643 // CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2
3644 // CHECK9-NEXT: store ptr null, ptr [[TMP17]], align 8
3645 // CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3
3646 // CHECK9-NEXT: store ptr null, ptr [[TMP18]], align 8
3647 // CHECK9-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4
3648 // CHECK9-NEXT: store ptr null, ptr [[TMP19]], align 8
3649 // CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5
3650 // CHECK9-NEXT: store ptr null, ptr [[TMP20]], align 8
3651 // CHECK9-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6
3652 // CHECK9-NEXT: store ptr null, ptr [[TMP21]], align 8
3653 // CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7
3654 // CHECK9-NEXT: store ptr null, ptr [[TMP22]], align 8
3655 // CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8
3656 // CHECK9-NEXT: store i64 100, ptr [[TMP23]], align 8
3657 // CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9
3658 // CHECK9-NEXT: store i64 0, ptr [[TMP24]], align 8
3659 // CHECK9-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10
3660 // CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP25]], align 4
3661 // CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11
3662 // CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4
3663 // CHECK9-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12
3664 // CHECK9-NEXT: store i32 0, ptr [[TMP27]], align 4
3665 // CHECK9-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.region_id, ptr [[KERNEL_ARGS2]])
3666 // CHECK9-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0
3667 // CHECK9-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]]
3668 // CHECK9: omp_offload.failed3:
3669 // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57() #[[ATTR3]]
3670 // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT4]]
3671 // CHECK9: omp_offload.cont4:
3672 // CHECK9-NEXT: ret i32 0
3675 // CHECK9-LABEL: define {{[^@]+}}@_Z5tmainI1SLi1EEiv
3676 // CHECK9-SAME: () #[[ATTR6]] comdat {
3677 // CHECK9-NEXT: entry:
3678 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
3679 // CHECK9-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
3680 // CHECK9-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
3681 // CHECK9-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
3682 // CHECK9-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
3683 // CHECK9-NEXT: store i32 3, ptr [[TMP0]], align 4
3684 // CHECK9-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
3685 // CHECK9-NEXT: store i32 0, ptr [[TMP1]], align 4
3686 // CHECK9-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
3687 // CHECK9-NEXT: store ptr null, ptr [[TMP2]], align 8
3688 // CHECK9-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
3689 // CHECK9-NEXT: store ptr null, ptr [[TMP3]], align 8
3690 // CHECK9-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
3691 // CHECK9-NEXT: store ptr null, ptr [[TMP4]], align 8
3692 // CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
3693 // CHECK9-NEXT: store ptr null, ptr [[TMP5]], align 8
3694 // CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
3695 // CHECK9-NEXT: store ptr null, ptr [[TMP6]], align 8
3696 // CHECK9-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
3697 // CHECK9-NEXT: store ptr null, ptr [[TMP7]], align 8
3698 // CHECK9-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
3699 // CHECK9-NEXT: store i64 100, ptr [[TMP8]], align 8
3700 // CHECK9-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
3701 // CHECK9-NEXT: store i64 0, ptr [[TMP9]], align 8
3702 // CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
3703 // CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4
3704 // CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
3705 // CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4
3706 // CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
3707 // CHECK9-NEXT: store i32 0, ptr [[TMP12]], align 4
3708 // CHECK9-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.region_id, ptr [[KERNEL_ARGS]])
3709 // CHECK9-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
3710 // CHECK9-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
3711 // CHECK9: omp_offload.failed:
3712 // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52() #[[ATTR3]]
3713 // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]]
3714 // CHECK9: omp_offload.cont:
3715 // CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0
3716 // CHECK9-NEXT: store i32 3, ptr [[TMP15]], align 4
3717 // CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1
3718 // CHECK9-NEXT: store i32 0, ptr [[TMP16]], align 4
3719 // CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2
3720 // CHECK9-NEXT: store ptr null, ptr [[TMP17]], align 8
3721 // CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3
3722 // CHECK9-NEXT: store ptr null, ptr [[TMP18]], align 8
3723 // CHECK9-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4
3724 // CHECK9-NEXT: store ptr null, ptr [[TMP19]], align 8
3725 // CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5
3726 // CHECK9-NEXT: store ptr null, ptr [[TMP20]], align 8
3727 // CHECK9-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6
3728 // CHECK9-NEXT: store ptr null, ptr [[TMP21]], align 8
3729 // CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7
3730 // CHECK9-NEXT: store ptr null, ptr [[TMP22]], align 8
3731 // CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8
3732 // CHECK9-NEXT: store i64 100, ptr [[TMP23]], align 8
3733 // CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9
3734 // CHECK9-NEXT: store i64 0, ptr [[TMP24]], align 8
3735 // CHECK9-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10
3736 // CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP25]], align 4
3737 // CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11
3738 // CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4
3739 // CHECK9-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12
3740 // CHECK9-NEXT: store i32 0, ptr [[TMP27]], align 4
3741 // CHECK9-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.region_id, ptr [[KERNEL_ARGS2]])
3742 // CHECK9-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0
3743 // CHECK9-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]]
3744 // CHECK9: omp_offload.failed3:
3745 // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57() #[[ATTR3]]
3746 // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT4]]
3747 // CHECK9: omp_offload.cont4:
3748 // CHECK9-NEXT: ret i32 0
3751 // CHECK9-LABEL: define {{[^@]+}}@_ZN1SD1Ev
3752 // CHECK9-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat {
3753 // CHECK9-NEXT: entry:
3754 // CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
3755 // CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
3756 // CHECK9-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
3757 // CHECK9-NEXT: call void @_ZN1SD2Ev(ptr nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR3]]
3758 // CHECK9-NEXT: ret void
3761 // CHECK9-LABEL: define {{[^@]+}}@_ZN1SC2El
3762 // CHECK9-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 [[A:%.*]]) unnamed_addr #[[ATTR6]] comdat {
3763 // CHECK9-NEXT: entry:
3764 // CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
3765 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
3766 // CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
3767 // CHECK9-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
3768 // CHECK9-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
3769 // CHECK9-NEXT: [[A2:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0
3770 // CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
3771 // CHECK9-NEXT: store i64 [[TMP0]], ptr [[A2]], align 8
3772 // CHECK9-NEXT: ret void
3775 // CHECK9-LABEL: define {{[^@]+}}@_ZN1SD2Ev
3776 // CHECK9-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat {
3777 // CHECK9-NEXT: entry:
3778 // CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
3779 // CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
3780 // CHECK9-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
3781 // CHECK9-NEXT: ret void
3784 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52
3785 // CHECK9-SAME: () #[[ATTR2]] {
3786 // CHECK9-NEXT: entry:
3787 // CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined)
3788 // CHECK9-NEXT: ret void
3791 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined
3792 // CHECK9-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
3793 // CHECK9-NEXT: entry:
3794 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
3795 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3796 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
3797 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
3798 // CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
3799 // CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
3800 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3801 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3802 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
3803 // CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
3804 // CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
3805 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
3806 // CHECK9-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
3807 // CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
3808 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
3809 // CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
3810 // CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
3811 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
3812 // CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
3813 // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99
3814 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3815 // CHECK9: cond.true:
3816 // CHECK9-NEXT: br label [[COND_END:%.*]]
3817 // CHECK9: cond.false:
3818 // CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
3819 // CHECK9-NEXT: br label [[COND_END]]
3820 // CHECK9: cond.end:
3821 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
3822 // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
3823 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
3824 // CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
3825 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
3826 // CHECK9: omp.inner.for.cond:
3827 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24:![0-9]+]]
3828 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP24]]
3829 // CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
3830 // CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3831 // CHECK9: omp.inner.for.body:
3832 // CHECK9-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 5), !llvm.access.group [[ACC_GRP24]]
3833 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP24]]
3834 // CHECK9-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
3835 // CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP24]]
3836 // CHECK9-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
3837 // CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP24]]
3838 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
3839 // CHECK9: omp.inner.for.inc:
3840 // CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
3841 // CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP24]]
3842 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
3843 // CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
3844 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
3845 // CHECK9: omp.inner.for.end:
3846 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
3847 // CHECK9: omp.loop.exit:
3848 // CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
3849 // CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
3850 // CHECK9-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
3851 // CHECK9-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3852 // CHECK9: .omp.final.then:
3853 // CHECK9-NEXT: store i32 100, ptr [[I]], align 4
3854 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]]
3855 // CHECK9: .omp.final.done:
3856 // CHECK9-NEXT: ret void
3859 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined.omp_outlined
3860 // CHECK9-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
3861 // CHECK9-NEXT: entry:
3862 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
3863 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3864 // CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
3865 // CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
3866 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
3867 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
3868 // CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
3869 // CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
3870 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3871 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3872 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
3873 // CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
3874 // CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
3875 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
3876 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
3877 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
3878 // CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
3879 // CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
3880 // CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
3881 // CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
3882 // CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
3883 // CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
3884 // CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
3885 // CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
3886 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
3887 // CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
3888 // CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
3889 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
3890 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
3891 // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
3892 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3893 // CHECK9: cond.true:
3894 // CHECK9-NEXT: br label [[COND_END:%.*]]
3895 // CHECK9: cond.false:
3896 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
3897 // CHECK9-NEXT: br label [[COND_END]]
3898 // CHECK9: cond.end:
3899 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
3900 // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
3901 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
3902 // CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
3903 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
3904 // CHECK9: omp.inner.for.cond:
3905 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27:![0-9]+]]
3906 // CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP27]]
3907 // CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
3908 // CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3909 // CHECK9: omp.inner.for.body:
3910 // CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]]
3911 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
3912 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
3913 // CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP27]]
3914 // CHECK9-NEXT: invoke void @_Z3foov()
3915 // CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP27]]
3916 // CHECK9: invoke.cont:
3917 // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
3918 // CHECK9: omp.body.continue:
3919 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
3920 // CHECK9: omp.inner.for.inc:
3921 // CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]]
3922 // CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
3923 // CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]]
3924 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]]
3925 // CHECK9: omp.inner.for.end:
3926 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
3927 // CHECK9: omp.loop.exit:
3928 // CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
3929 // CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
3930 // CHECK9-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
3931 // CHECK9-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
3932 // CHECK9: .omp.final.then:
3933 // CHECK9-NEXT: store i32 100, ptr [[I]], align 4
3934 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]]
3935 // CHECK9: .omp.final.done:
3936 // CHECK9-NEXT: ret void
3937 // CHECK9: terminate.lpad:
3938 // CHECK9-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 }
3939 // CHECK9-NEXT: catch ptr null
3940 // CHECK9-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0
3941 // CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR7]], !llvm.access.group [[ACC_GRP27]]
3942 // CHECK9-NEXT: unreachable
3945 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57
3946 // CHECK9-SAME: () #[[ATTR2]] {
3947 // CHECK9-NEXT: entry:
3948 // CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined)
3949 // CHECK9-NEXT: ret void
3952 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined
3953 // CHECK9-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
3954 // CHECK9-NEXT: entry:
3955 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
3956 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
3957 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
3958 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
3959 // CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
3960 // CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
3961 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
3962 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
3963 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
3964 // CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
3965 // CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
3966 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
3967 // CHECK9-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
3968 // CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
3969 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
3970 // CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
3971 // CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
3972 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
3973 // CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
3974 // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99
3975 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
3976 // CHECK9: cond.true:
3977 // CHECK9-NEXT: br label [[COND_END:%.*]]
3978 // CHECK9: cond.false:
3979 // CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
3980 // CHECK9-NEXT: br label [[COND_END]]
3981 // CHECK9: cond.end:
3982 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
3983 // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
3984 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
3985 // CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
3986 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
3987 // CHECK9: omp.inner.for.cond:
3988 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP30:![0-9]+]]
3989 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP30]]
3990 // CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
3991 // CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
3992 // CHECK9: omp.inner.for.body:
3993 // CHECK9-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 23), !llvm.access.group [[ACC_GRP30]]
3994 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP30]]
3995 // CHECK9-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
3996 // CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP30]]
3997 // CHECK9-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
3998 // CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP30]]
3999 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
4000 // CHECK9: omp.inner.for.inc:
4001 // CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP30]]
4002 // CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP30]]
4003 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
4004 // CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP30]]
4005 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]]
4006 // CHECK9: omp.inner.for.end:
4007 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
4008 // CHECK9: omp.loop.exit:
4009 // CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
4010 // CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
4011 // CHECK9-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
4012 // CHECK9-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4013 // CHECK9: .omp.final.then:
4014 // CHECK9-NEXT: store i32 100, ptr [[I]], align 4
4015 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]]
4016 // CHECK9: .omp.final.done:
4017 // CHECK9-NEXT: ret void
4020 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined.omp_outlined
4021 // CHECK9-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
4022 // CHECK9-NEXT: entry:
4023 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
4024 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
4025 // CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
4026 // CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
4027 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
4028 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
4029 // CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
4030 // CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
4031 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4032 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4033 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
4034 // CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
4035 // CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
4036 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
4037 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
4038 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
4039 // CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
4040 // CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
4041 // CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
4042 // CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
4043 // CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
4044 // CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
4045 // CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
4046 // CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
4047 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
4048 // CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
4049 // CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
4050 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
4051 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
4052 // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
4053 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
4054 // CHECK9: cond.true:
4055 // CHECK9-NEXT: br label [[COND_END:%.*]]
4056 // CHECK9: cond.false:
4057 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
4058 // CHECK9-NEXT: br label [[COND_END]]
4059 // CHECK9: cond.end:
4060 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
4061 // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
4062 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
4063 // CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
4064 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
4065 // CHECK9: omp.inner.for.cond:
4066 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33:![0-9]+]]
4067 // CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP33]]
4068 // CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
4069 // CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4070 // CHECK9: omp.inner.for.body:
4071 // CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]]
4072 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
4073 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
4074 // CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP33]]
4075 // CHECK9-NEXT: invoke void @_Z3foov()
4076 // CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP33]]
4077 // CHECK9: invoke.cont:
4078 // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
4079 // CHECK9: omp.body.continue:
4080 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
4081 // CHECK9: omp.inner.for.inc:
4082 // CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]]
4083 // CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
4084 // CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]]
4085 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]]
4086 // CHECK9: omp.inner.for.end:
4087 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
4088 // CHECK9: omp.loop.exit:
4089 // CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
4090 // CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
4091 // CHECK9-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
4092 // CHECK9-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4093 // CHECK9: .omp.final.then:
4094 // CHECK9-NEXT: store i32 100, ptr [[I]], align 4
4095 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]]
4096 // CHECK9: .omp.final.done:
4097 // CHECK9-NEXT: ret void
4098 // CHECK9: terminate.lpad:
4099 // CHECK9-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 }
4100 // CHECK9-NEXT: catch ptr null
4101 // CHECK9-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0
4102 // CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR7]], !llvm.access.group [[ACC_GRP33]]
4103 // CHECK9-NEXT: unreachable
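// The tmain<S, 1> target regions are checked next. For the _l52 region the distribute loop pushes a
// constant num_threads of 1 via __kmpc_push_num_threads ahead of every __kmpc_fork_call.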
4106 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52
4107 // CHECK9-SAME: () #[[ATTR2]] {
4108 // CHECK9-NEXT: entry:
4109 // CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined)
4110 // CHECK9-NEXT: ret void
4113 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined
4114 // CHECK9-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
4115 // CHECK9-NEXT: entry:
4116 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
4117 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
4118 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
4119 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
4120 // CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
4121 // CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
4122 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4123 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4124 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
4125 // CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
4126 // CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
4127 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
4128 // CHECK9-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
4129 // CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
4130 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
4131 // CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
4132 // CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
4133 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
4134 // CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
4135 // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99
4136 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
4137 // CHECK9: cond.true:
4138 // CHECK9-NEXT: br label [[COND_END:%.*]]
4139 // CHECK9: cond.false:
4140 // CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
4141 // CHECK9-NEXT: br label [[COND_END]]
4142 // CHECK9: cond.end:
4143 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
4144 // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
4145 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
4146 // CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
4147 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
4148 // CHECK9: omp.inner.for.cond:
4149 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36:![0-9]+]]
4150 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP36]]
4151 // CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
4152 // CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4153 // CHECK9: omp.inner.for.body:
4154 // CHECK9-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 1), !llvm.access.group [[ACC_GRP36]]
4155 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP36]]
4156 // CHECK9-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
4157 // CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP36]]
4158 // CHECK9-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
4159 // CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP36]]
4160 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
4161 // CHECK9: omp.inner.for.inc:
4162 // CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36]]
4163 // CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP36]]
4164 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
4165 // CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36]]
4166 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP37:![0-9]+]]
4167 // CHECK9: omp.inner.for.end:
4168 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
4169 // CHECK9: omp.loop.exit:
4170 // CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
4171 // CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
4172 // CHECK9-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
4173 // CHECK9-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4174 // CHECK9: .omp.final.then:
4175 // CHECK9-NEXT: store i32 100, ptr [[I]], align 4
4176 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]]
4177 // CHECK9: .omp.final.done:
4178 // CHECK9-NEXT: ret void
4181 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined.omp_outlined
4182 // CHECK9-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
4183 // CHECK9-NEXT: entry:
4184 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
4185 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
4186 // CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
4187 // CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
4188 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
4189 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
4190 // CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
4191 // CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
4192 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4193 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4194 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
4195 // CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
4196 // CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
4197 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
4198 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
4199 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
4200 // CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
4201 // CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
4202 // CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
4203 // CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
4204 // CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
4205 // CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
4206 // CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
4207 // CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
4208 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
4209 // CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
4210 // CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
4211 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
4212 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
4213 // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
4214 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
4215 // CHECK9: cond.true:
4216 // CHECK9-NEXT: br label [[COND_END:%.*]]
4217 // CHECK9: cond.false:
4218 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
4219 // CHECK9-NEXT: br label [[COND_END]]
4220 // CHECK9: cond.end:
4221 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
4222 // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
4223 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
4224 // CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
4225 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
4226 // CHECK9: omp.inner.for.cond:
4227 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39:![0-9]+]]
4228 // CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP39]]
4229 // CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
4230 // CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4231 // CHECK9: omp.inner.for.body:
4232 // CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39]]
4233 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
4234 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
4235 // CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP39]]
4236 // CHECK9-NEXT: invoke void @_Z3foov()
4237 // CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP39]]
4238 // CHECK9: invoke.cont:
4239 // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
4240 // CHECK9: omp.body.continue:
4241 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
4242 // CHECK9: omp.inner.for.inc:
4243 // CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39]]
4244 // CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
4245 // CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39]]
4246 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP40:![0-9]+]]
4247 // CHECK9: omp.inner.for.end:
4248 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
4249 // CHECK9: omp.loop.exit:
4250 // CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
4251 // CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
4252 // CHECK9-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
4253 // CHECK9-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4254 // CHECK9: .omp.final.then:
4255 // CHECK9-NEXT: store i32 100, ptr [[I]], align 4
4256 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]]
4257 // CHECK9: .omp.final.done:
4258 // CHECK9-NEXT: ret void
4259 // CHECK9: terminate.lpad:
4260 // CHECK9-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 }
4261 // CHECK9-NEXT: catch ptr null
4262 // CHECK9-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0
4263 // CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR7]], !llvm.access.group [[ACC_GRP39]]
4264 // CHECK9-NEXT: unreachable
4267 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57
4268 // CHECK9-SAME: () #[[ATTR2]] {
4269 // CHECK9-NEXT: entry:
4270 // CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined)
4271 // CHECK9-NEXT: ret void
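// In the _l57 region of tmain<S, 1> the thread count is not a compile-time constant: each distribute
// iteration constructs a temporary S from the value 23, converts it to char through _ZN1ScvcEv,
// sign-extends the result to i32 for __kmpc_push_num_threads, and destroys the temporary before the
// __kmpc_fork_call. Both the constructor and the conversion are invoked, so their unwind edges share
// the terminate landing pad.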
4274 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined
4275 // CHECK9-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
4276 // CHECK9-NEXT: entry:
4277 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
4278 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
4279 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
4280 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
4281 // CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
4282 // CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
4283 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4284 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4285 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
4286 // CHECK9-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 8
4287 // CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
4288 // CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
4289 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
4290 // CHECK9-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
4291 // CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
4292 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
4293 // CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
4294 // CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
4295 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
4296 // CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
4297 // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99
4298 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
4299 // CHECK9: cond.true:
4300 // CHECK9-NEXT: br label [[COND_END:%.*]]
4301 // CHECK9: cond.false:
4302 // CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
4303 // CHECK9-NEXT: br label [[COND_END]]
4304 // CHECK9: cond.end:
4305 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
4306 // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
4307 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
4308 // CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
4309 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
4310 // CHECK9: omp.inner.for.cond:
4311 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP42:![0-9]+]]
4312 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP42]]
4313 // CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
4314 // CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4315 // CHECK9: omp.inner.for.body:
4316 // CHECK9-NEXT: invoke void @_ZN1SC1El(ptr nonnull align 8 dereferenceable(24) [[REF_TMP]], i64 23)
4317 // CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP42]]
4318 // CHECK9: invoke.cont:
4319 // CHECK9-NEXT: [[CALL:%.*]] = invoke i8 @_ZN1ScvcEv(ptr nonnull align 8 dereferenceable(24) [[REF_TMP]])
4320 // CHECK9-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[TERMINATE_LPAD]], !llvm.access.group [[ACC_GRP42]]
4321 // CHECK9: invoke.cont2:
4322 // CHECK9-NEXT: [[TMP7:%.*]] = sext i8 [[CALL]] to i32
4323 // CHECK9-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 [[TMP7]]), !llvm.access.group [[ACC_GRP42]]
4324 // CHECK9-NEXT: call void @_ZN1SD1Ev(ptr nonnull align 8 dereferenceable(24) [[REF_TMP]]) #[[ATTR3]], !llvm.access.group [[ACC_GRP42]]
4325 // CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP42]]
4326 // CHECK9-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
4327 // CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP42]]
4328 // CHECK9-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
4329 // CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]]), !llvm.access.group [[ACC_GRP42]]
4330 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
4331 // CHECK9: omp.inner.for.inc:
4332 // CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP42]]
4333 // CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP42]]
4334 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
4335 // CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP42]]
4336 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP43:![0-9]+]]
4337 // CHECK9: omp.inner.for.end:
4338 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
4339 // CHECK9: omp.loop.exit:
4340 // CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
4341 // CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
4342 // CHECK9-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
4343 // CHECK9-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4344 // CHECK9: .omp.final.then:
4345 // CHECK9-NEXT: store i32 100, ptr [[I]], align 4
4346 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]]
4347 // CHECK9: .omp.final.done:
4348 // CHECK9-NEXT: ret void
4349 // CHECK9: terminate.lpad:
4350 // CHECK9-NEXT: [[TMP16:%.*]] = landingpad { ptr, i32 }
4351 // CHECK9-NEXT: catch ptr null
4352 // CHECK9-NEXT: [[TMP17:%.*]] = extractvalue { ptr, i32 } [[TMP16]], 0
4353 // CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP17]]) #[[ATTR7]], !llvm.access.group [[ACC_GRP42]]
4354 // CHECK9-NEXT: unreachable
4357 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined.omp_outlined
4358 // CHECK9-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
4359 // CHECK9-NEXT: entry:
4360 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
4361 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
4362 // CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
4363 // CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
4364 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
4365 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
4366 // CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
4367 // CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
4368 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4369 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4370 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
4371 // CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
4372 // CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
4373 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
4374 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
4375 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
4376 // CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
4377 // CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
4378 // CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
4379 // CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
4380 // CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
4381 // CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
4382 // CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
4383 // CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
4384 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
4385 // CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
4386 // CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
4387 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
4388 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
4389 // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
4390 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
4391 // CHECK9: cond.true:
4392 // CHECK9-NEXT: br label [[COND_END:%.*]]
4393 // CHECK9: cond.false:
4394 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
4395 // CHECK9-NEXT: br label [[COND_END]]
4396 // CHECK9: cond.end:
4397 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
4398 // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
4399 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
4400 // CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
4401 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
4402 // CHECK9: omp.inner.for.cond:
4403 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP45:![0-9]+]]
4404 // CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP45]]
4405 // CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
4406 // CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4407 // CHECK9: omp.inner.for.body:
4408 // CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP45]]
4409 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
4410 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
4411 // CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP45]]
4412 // CHECK9-NEXT: invoke void @_Z3foov()
4413 // CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP45]]
4414 // CHECK9: invoke.cont:
4415 // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
4416 // CHECK9: omp.body.continue:
4417 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
4418 // CHECK9: omp.inner.for.inc:
4419 // CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP45]]
4420 // CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
4421 // CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP45]]
4422 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP46:![0-9]+]]
4423 // CHECK9: omp.inner.for.end:
4424 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
4425 // CHECK9: omp.loop.exit:
4426 // CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
4427 // CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
4428 // CHECK9-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
4429 // CHECK9-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
4430 // CHECK9: .omp.final.then:
4431 // CHECK9-NEXT: store i32 100, ptr [[I]], align 4
4432 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]]
4433 // CHECK9: .omp.final.done:
4434 // CHECK9-NEXT: ret void
4435 // CHECK9: terminate.lpad:
4436 // CHECK9-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 }
4437 // CHECK9-NEXT: catch ptr null
4438 // CHECK9-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0
4439 // CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR7]], !llvm.access.group [[ACC_GRP45]]
4440 // CHECK9-NEXT: unreachable
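// The CHECK11 prefix (presumably the simd-only configuration) expects main to run both collapsed
// 100-iteration loops inline with no __kmpc_* runtime calls; every _Z3foov call is an invoke that
// unwinds to a shared terminate.lpad, and _ZN1SD1Ev runs on the normal exit path as well as in the
// lpad cleanup before eh.resume.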
4443 // CHECK11-LABEL: define {{[^@]+}}@main
4444 // CHECK11-SAME: () #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 {
4445 // CHECK11-NEXT: entry:
4446 // CHECK11-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
4447 // CHECK11-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8
4448 // CHECK11-NEXT: [[A:%.*]] = alloca i8, align 1
4449 // CHECK11-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8
4450 // CHECK11-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
4451 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
4452 // CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
4453 // CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
4454 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
4455 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4
4456 // CHECK11-NEXT: [[_TMP3:%.*]] = alloca i32, align 4
4457 // CHECK11-NEXT: [[DOTOMP_LB4:%.*]] = alloca i32, align 4
4458 // CHECK11-NEXT: [[DOTOMP_UB5:%.*]] = alloca i32, align 4
4459 // CHECK11-NEXT: [[DOTOMP_IV6:%.*]] = alloca i32, align 4
4460 // CHECK11-NEXT: [[I7:%.*]] = alloca i32, align 4
4461 // CHECK11-NEXT: store i32 0, ptr [[RETVAL]], align 4
4462 // CHECK11-NEXT: call void @_ZN1SC1El(ptr nonnull align 8 dereferenceable(24) [[S]], i64 0)
4463 // CHECK11-NEXT: [[CALL:%.*]] = invoke i8 @_ZN1ScvcEv(ptr nonnull align 8 dereferenceable(24) [[S]])
4464 // CHECK11-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
4465 // CHECK11: invoke.cont:
4466 // CHECK11-NEXT: store i8 [[CALL]], ptr [[A]], align 1
4467 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
4468 // CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
4469 // CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
4470 // CHECK11-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4
4471 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
4472 // CHECK11: omp.inner.for.cond:
4473 // CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2:![0-9]+]]
4474 // CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP2]]
4475 // CHECK11-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
4476 // CHECK11-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4477 // CHECK11: omp.inner.for.body:
4478 // CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]]
4479 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
4480 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
4481 // CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP2]]
4482 // CHECK11-NEXT: invoke void @_Z3foov()
4483 // CHECK11-NEXT: to label [[INVOKE_CONT1:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP2]]
4484 // CHECK11: invoke.cont1:
4485 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
4486 // CHECK11: omp.body.continue:
4487 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
4488 // CHECK11: omp.inner.for.inc:
4489 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]]
4490 // CHECK11-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP4]], 1
4491 // CHECK11-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]]
4492 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
4493 // CHECK11: lpad:
4494 // CHECK11-NEXT: [[TMP5:%.*]] = landingpad { ptr, i32 }
4495 // CHECK11-NEXT: cleanup
4496 // CHECK11-NEXT: [[TMP6:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 0
4497 // CHECK11-NEXT: store ptr [[TMP6]], ptr [[EXN_SLOT]], align 8
4498 // CHECK11-NEXT: [[TMP7:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 1
4499 // CHECK11-NEXT: store i32 [[TMP7]], ptr [[EHSELECTOR_SLOT]], align 4
4500 // CHECK11-NEXT: call void @_ZN1SD1Ev(ptr nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR5:[0-9]+]]
4501 // CHECK11-NEXT: br label [[EH_RESUME:%.*]]
4502 // CHECK11: omp.inner.for.end:
4503 // CHECK11-NEXT: store i32 100, ptr [[I]], align 4
4504 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB4]], align 4
4505 // CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB5]], align 4
4506 // CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB4]], align 4
4507 // CHECK11-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV6]], align 4
4508 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND8:%.*]]
4509 // CHECK11: omp.inner.for.cond8:
4510 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6:![0-9]+]]
4511 // CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB5]], align 4, !llvm.access.group [[ACC_GRP6]]
4512 // CHECK11-NEXT: [[CMP9:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
4513 // CHECK11-NEXT: br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY10:%.*]], label [[OMP_INNER_FOR_END17:%.*]]
4514 // CHECK11: omp.inner.for.body10:
4515 // CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6]]
4516 // CHECK11-NEXT: [[MUL11:%.*]] = mul nsw i32 [[TMP11]], 1
4517 // CHECK11-NEXT: [[ADD12:%.*]] = add nsw i32 0, [[MUL11]]
4518 // CHECK11-NEXT: store i32 [[ADD12]], ptr [[I7]], align 4, !llvm.access.group [[ACC_GRP6]]
4519 // CHECK11-NEXT: invoke void @_Z3foov()
4520 // CHECK11-NEXT: to label [[INVOKE_CONT13:%.*]] unwind label [[TERMINATE_LPAD]], !llvm.access.group [[ACC_GRP6]]
4521 // CHECK11: invoke.cont13:
4522 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE14:%.*]]
4523 // CHECK11: omp.body.continue14:
4524 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC15:%.*]]
4525 // CHECK11: omp.inner.for.inc15:
4526 // CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6]]
4527 // CHECK11-NEXT: [[ADD16:%.*]] = add nsw i32 [[TMP12]], 1
4528 // CHECK11-NEXT: store i32 [[ADD16]], ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6]]
4529 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND8]], !llvm.loop [[LOOP7:![0-9]+]]
4530 // CHECK11: omp.inner.for.end17:
4531 // CHECK11-NEXT: store i32 100, ptr [[I7]], align 4
4532 // CHECK11-NEXT: [[TMP13:%.*]] = load i8, ptr [[A]], align 1
4533 // CHECK11-NEXT: [[CONV:%.*]] = sext i8 [[TMP13]] to i32
4534 // CHECK11-NEXT: [[CALL19:%.*]] = invoke i32 @_Z5tmainIcLi5EEiv()
4535 // CHECK11-NEXT: to label [[INVOKE_CONT18:%.*]] unwind label [[LPAD]]
4536 // CHECK11: invoke.cont18:
4537 // CHECK11-NEXT: [[ADD20:%.*]] = add nsw i32 [[CONV]], [[CALL19]]
4538 // CHECK11-NEXT: [[CALL22:%.*]] = invoke i32 @_Z5tmainI1SLi1EEiv()
4539 // CHECK11-NEXT: to label [[INVOKE_CONT21:%.*]] unwind label [[LPAD]]
4540 // CHECK11: invoke.cont21:
4541 // CHECK11-NEXT: [[ADD23:%.*]] = add nsw i32 [[ADD20]], [[CALL22]]
4542 // CHECK11-NEXT: store i32 [[ADD23]], ptr [[RETVAL]], align 4
4543 // CHECK11-NEXT: call void @_ZN1SD1Ev(ptr nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR5]]
4544 // CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[RETVAL]], align 4
4545 // CHECK11-NEXT: ret i32 [[TMP14]]
4546 // CHECK11: eh.resume:
4547 // CHECK11-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8
4548 // CHECK11-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4
4549 // CHECK11-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0
4550 // CHECK11-NEXT: [[LPAD_VAL24:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1
4551 // CHECK11-NEXT: resume { ptr, i32 } [[LPAD_VAL24]]
4552 // CHECK11: terminate.lpad:
4553 // CHECK11-NEXT: [[TMP15:%.*]] = landingpad { ptr, i32 }
4554 // CHECK11-NEXT: catch ptr null
4555 // CHECK11-NEXT: [[TMP16:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 0
4556 // CHECK11-NEXT: call void @__clang_call_terminate(ptr [[TMP16]]) #[[ATTR6:[0-9]+]], !llvm.access.group [[ACC_GRP2]]
4557 // CHECK11-NEXT: unreachable
4560 // CHECK11-LABEL: define {{[^@]+}}@_ZN1SC1El
4561 // CHECK11-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat {
4562 // CHECK11-NEXT: entry:
4563 // CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
4564 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
4565 // CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
4566 // CHECK11-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
4567 // CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
4568 // CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
4569 // CHECK11-NEXT: call void @_ZN1SC2El(ptr nonnull align 8 dereferenceable(24) [[THIS1]], i64 [[TMP0]])
4570 // CHECK11-NEXT: ret void
4573 // CHECK11-LABEL: define {{[^@]+}}@_ZN1ScvcEv
4574 // CHECK11-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]]) #[[ATTR1]] comdat {
4575 // CHECK11-NEXT: entry:
4576 // CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
4577 // CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
4578 // CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
4579 // CHECK11-NEXT: call void @_Z8mayThrowv()
4580 // CHECK11-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0
4581 // CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[A]], align 8
4582 // CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i8
4583 // CHECK11-NEXT: ret i8 [[CONV]]
4586 // CHECK11-LABEL: define {{[^@]+}}@__clang_call_terminate
4587 // CHECK11-SAME: (ptr [[TMP0:%.*]]) #[[ATTR3:[0-9]+]] comdat {
4588 // CHECK11-NEXT: [[TMP2:%.*]] = call ptr @__cxa_begin_catch(ptr [[TMP0]]) #[[ATTR5]]
4589 // CHECK11-NEXT: call void @_ZSt9terminatev() #[[ATTR6]]
4590 // CHECK11-NEXT: unreachable
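// Both tmain specializations are checked the same way under the CHECK11 prefix: two inline
// 100-iteration loops, _Z3foov invoked under terminate.lpad, and a constant return of 0.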
4593 // CHECK11-LABEL: define {{[^@]+}}@_Z5tmainIcLi5EEiv
4594 // CHECK11-SAME: () #[[ATTR4:[0-9]+]] comdat personality ptr @__gxx_personality_v0 {
4595 // CHECK11-NEXT: entry:
4596 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
4597 // CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
4598 // CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
4599 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
4600 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4
4601 // CHECK11-NEXT: [[_TMP2:%.*]] = alloca i32, align 4
4602 // CHECK11-NEXT: [[DOTOMP_LB3:%.*]] = alloca i32, align 4
4603 // CHECK11-NEXT: [[DOTOMP_UB4:%.*]] = alloca i32, align 4
4604 // CHECK11-NEXT: [[DOTOMP_IV5:%.*]] = alloca i32, align 4
4605 // CHECK11-NEXT: [[I6:%.*]] = alloca i32, align 4
4606 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
4607 // CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
4608 // CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
4609 // CHECK11-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4
4610 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
4611 // CHECK11: omp.inner.for.cond:
4612 // CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9:![0-9]+]]
4613 // CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP9]]
4614 // CHECK11-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
4615 // CHECK11-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4616 // CHECK11: omp.inner.for.body:
4617 // CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]]
4618 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
4619 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
4620 // CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]]
4621 // CHECK11-NEXT: invoke void @_Z3foov()
4622 // CHECK11-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP9]]
4623 // CHECK11: invoke.cont:
4624 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
4625 // CHECK11: omp.body.continue:
4626 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
4627 // CHECK11: omp.inner.for.inc:
4628 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]]
4629 // CHECK11-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1
4630 // CHECK11-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]]
4631 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]]
4632 // CHECK11: omp.inner.for.end:
4633 // CHECK11-NEXT: store i32 100, ptr [[I]], align 4
4634 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB3]], align 4
4635 // CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB4]], align 4
4636 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB3]], align 4
4637 // CHECK11-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV5]], align 4
4638 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]]
4639 // CHECK11: omp.inner.for.cond7:
4640 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP12:![0-9]+]]
4641 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB4]], align 4, !llvm.access.group [[ACC_GRP12]]
4642 // CHECK11-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
4643 // CHECK11-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END16:%.*]]
4644 // CHECK11: omp.inner.for.body9:
4645 // CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP12]]
4646 // CHECK11-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP8]], 1
4647 // CHECK11-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]]
4648 // CHECK11-NEXT: store i32 [[ADD11]], ptr [[I6]], align 4, !llvm.access.group [[ACC_GRP12]]
4649 // CHECK11-NEXT: invoke void @_Z3foov()
4650 // CHECK11-NEXT: to label [[INVOKE_CONT12:%.*]] unwind label [[TERMINATE_LPAD]], !llvm.access.group [[ACC_GRP12]]
4651 // CHECK11: invoke.cont12:
4652 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE13:%.*]]
4653 // CHECK11: omp.body.continue13:
4654 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC14:%.*]]
4655 // CHECK11: omp.inner.for.inc14:
4656 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP12]]
4657 // CHECK11-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP9]], 1
4658 // CHECK11-NEXT: store i32 [[ADD15]], ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP12]]
4659 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP13:![0-9]+]]
4660 // CHECK11: omp.inner.for.end16:
4661 // CHECK11-NEXT: store i32 100, ptr [[I6]], align 4
4662 // CHECK11-NEXT: ret i32 0
4663 // CHECK11: terminate.lpad:
4664 // CHECK11-NEXT: [[TMP10:%.*]] = landingpad { ptr, i32 }
4665 // CHECK11-NEXT: catch ptr null
4666 // CHECK11-NEXT: [[TMP11:%.*]] = extractvalue { ptr, i32 } [[TMP10]], 0
4667 // CHECK11-NEXT: call void @__clang_call_terminate(ptr [[TMP11]]) #[[ATTR6]], !llvm.access.group [[ACC_GRP9]]
4668 // CHECK11-NEXT: unreachable
4671 // CHECK11-LABEL: define {{[^@]+}}@_Z5tmainI1SLi1EEiv
4672 // CHECK11-SAME: () #[[ATTR4]] comdat personality ptr @__gxx_personality_v0 {
4673 // CHECK11-NEXT: entry:
4674 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
4675 // CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
4676 // CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
4677 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
4678 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4
4679 // CHECK11-NEXT: [[_TMP2:%.*]] = alloca i32, align 4
4680 // CHECK11-NEXT: [[DOTOMP_LB3:%.*]] = alloca i32, align 4
4681 // CHECK11-NEXT: [[DOTOMP_UB4:%.*]] = alloca i32, align 4
4682 // CHECK11-NEXT: [[DOTOMP_IV5:%.*]] = alloca i32, align 4
4683 // CHECK11-NEXT: [[I6:%.*]] = alloca i32, align 4
4684 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
4685 // CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
4686 // CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
4687 // CHECK11-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4
4688 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
4689 // CHECK11: omp.inner.for.cond:
4690 // CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15:![0-9]+]]
4691 // CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP15]]
4692 // CHECK11-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
4693 // CHECK11-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4694 // CHECK11: omp.inner.for.body:
4695 // CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]]
4696 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
4697 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
4698 // CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP15]]
4699 // CHECK11-NEXT: invoke void @_Z3foov()
4700 // CHECK11-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP15]]
4701 // CHECK11: invoke.cont:
4702 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
4703 // CHECK11: omp.body.continue:
4704 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
4705 // CHECK11: omp.inner.for.inc:
4706 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]]
4707 // CHECK11-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1
4708 // CHECK11-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]]
4709 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]]
4710 // CHECK11: omp.inner.for.end:
4711 // CHECK11-NEXT: store i32 100, ptr [[I]], align 4
4712 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB3]], align 4
4713 // CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB4]], align 4
4714 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB3]], align 4
4715 // CHECK11-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV5]], align 4
4716 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]]
4717 // CHECK11: omp.inner.for.cond7:
4718 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP18:![0-9]+]]
4719 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB4]], align 4, !llvm.access.group [[ACC_GRP18]]
4720 // CHECK11-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
4721 // CHECK11-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END16:%.*]]
4722 // CHECK11: omp.inner.for.body9:
4723 // CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP18]]
4724 // CHECK11-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP8]], 1
4725 // CHECK11-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]]
4726 // CHECK11-NEXT: store i32 [[ADD11]], ptr [[I6]], align 4, !llvm.access.group [[ACC_GRP18]]
4727 // CHECK11-NEXT: invoke void @_Z3foov()
4728 // CHECK11-NEXT: to label [[INVOKE_CONT12:%.*]] unwind label [[TERMINATE_LPAD]], !llvm.access.group [[ACC_GRP18]]
4729 // CHECK11: invoke.cont12:
4730 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE13:%.*]]
4731 // CHECK11: omp.body.continue13:
4732 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC14:%.*]]
4733 // CHECK11: omp.inner.for.inc14:
4734 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP18]]
4735 // CHECK11-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP9]], 1
4736 // CHECK11-NEXT: store i32 [[ADD15]], ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP18]]
4737 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP19:![0-9]+]]
4738 // CHECK11: omp.inner.for.end16:
4739 // CHECK11-NEXT: store i32 100, ptr [[I6]], align 4
4740 // CHECK11-NEXT: ret i32 0
4741 // CHECK11: terminate.lpad:
4742 // CHECK11-NEXT: [[TMP10:%.*]] = landingpad { ptr, i32 }
4743 // CHECK11-NEXT: catch ptr null
4744 // CHECK11-NEXT: [[TMP11:%.*]] = extractvalue { ptr, i32 } [[TMP10]], 0
4745 // CHECK11-NEXT: call void @__clang_call_terminate(ptr [[TMP11]]) #[[ATTR6]], !llvm.access.group [[ACC_GRP15]]
4746 // CHECK11-NEXT: unreachable
4749 // CHECK11-LABEL: define {{[^@]+}}@_ZN1SD1Ev
4750 // CHECK11-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR4]] comdat {
4751 // CHECK11-NEXT: entry:
4752 // CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
4753 // CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
4754 // CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
4755 // CHECK11-NEXT: call void @_ZN1SD2Ev(ptr nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR5]]
4756 // CHECK11-NEXT: ret void
4759 // CHECK11-LABEL: define {{[^@]+}}@_ZN1SC2El
4760 // CHECK11-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 [[A:%.*]]) unnamed_addr #[[ATTR4]] comdat {
4761 // CHECK11-NEXT: entry:
4762 // CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
4763 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
4764 // CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
4765 // CHECK11-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
4766 // CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
4767 // CHECK11-NEXT: [[A2:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0
4768 // CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
4769 // CHECK11-NEXT: store i64 [[TMP0]], ptr [[A2]], align 8
4770 // CHECK11-NEXT: ret void
4773 // CHECK11-LABEL: define {{[^@]+}}@_ZN1SD2Ev
4774 // CHECK11-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR4]] comdat {
4775 // CHECK11-NEXT: entry:
4776 // CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
4777 // CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
4778 // CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
4779 // CHECK11-NEXT: ret void
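// The CHECK13 checks for main expect full offloading: a __tgt_kernel_arguments struct is populated
// (version 3, a trip count of 100, null pointer arrays for the first region), the kernel is launched
// through __tgt_target_kernel, and a nonzero return falls back to the host copies of _main_l68 and
// _main_l74. The second region forwards 'a' by value through A_CASTED and single-element offload
// base-pointer and pointer arrays with @.offload_sizes and @.offload_maptypes.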
4782 // CHECK13-LABEL: define {{[^@]+}}@main
4783 // CHECK13-SAME: () #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 {
4784 // CHECK13-NEXT: entry:
4785 // CHECK13-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
4786 // CHECK13-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8
4787 // CHECK13-NEXT: [[A:%.*]] = alloca i8, align 1
4788 // CHECK13-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8
4789 // CHECK13-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
4790 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
4791 // CHECK13-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
4792 // CHECK13-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
4793 // CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8
4794 // CHECK13-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8
4795 // CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8
4796 // CHECK13-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
4797 // CHECK13-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
4798 // CHECK13-NEXT: store i32 0, ptr [[RETVAL]], align 4
4799 // CHECK13-NEXT: call void @_ZN1SC1El(ptr nonnull align 8 dereferenceable(24) [[S]], i64 0)
4800 // CHECK13-NEXT: [[CALL:%.*]] = invoke i8 @_ZN1ScvcEv(ptr nonnull align 8 dereferenceable(24) [[S]])
4801 // CHECK13-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
4802 // CHECK13: invoke.cont:
4803 // CHECK13-NEXT: store i8 [[CALL]], ptr [[A]], align 1
4804 // CHECK13-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
4805 // CHECK13-NEXT: store i32 3, ptr [[TMP0]], align 4
4806 // CHECK13-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
4807 // CHECK13-NEXT: store i32 0, ptr [[TMP1]], align 4
4808 // CHECK13-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
4809 // CHECK13-NEXT: store ptr null, ptr [[TMP2]], align 8
4810 // CHECK13-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
4811 // CHECK13-NEXT: store ptr null, ptr [[TMP3]], align 8
4812 // CHECK13-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
4813 // CHECK13-NEXT: store ptr null, ptr [[TMP4]], align 8
4814 // CHECK13-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
4815 // CHECK13-NEXT: store ptr null, ptr [[TMP5]], align 8
4816 // CHECK13-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
4817 // CHECK13-NEXT: store ptr null, ptr [[TMP6]], align 8
4818 // CHECK13-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
4819 // CHECK13-NEXT: store ptr null, ptr [[TMP7]], align 8
4820 // CHECK13-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
4821 // CHECK13-NEXT: store i64 100, ptr [[TMP8]], align 8
4822 // CHECK13-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
4823 // CHECK13-NEXT: store i64 0, ptr [[TMP9]], align 8
4824 // CHECK13-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
4825 // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4
4826 // CHECK13-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
4827 // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4
4828 // CHECK13-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
4829 // CHECK13-NEXT: store i32 0, ptr [[TMP12]], align 4
4830 // CHECK13-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.region_id, ptr [[KERNEL_ARGS]])
4831 // CHECK13-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
4832 // CHECK13-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
4833 // CHECK13: omp_offload.failed:
4834 // CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68() #[[ATTR3:[0-9]+]]
4835 // CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT]]
4836 // CHECK13: lpad:
4837 // CHECK13-NEXT: [[TMP15:%.*]] = landingpad { ptr, i32 }
4838 // CHECK13-NEXT: cleanup
4839 // CHECK13-NEXT: [[TMP16:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 0
4840 // CHECK13-NEXT: store ptr [[TMP16]], ptr [[EXN_SLOT]], align 8
4841 // CHECK13-NEXT: [[TMP17:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 1
4842 // CHECK13-NEXT: store i32 [[TMP17]], ptr [[EHSELECTOR_SLOT]], align 4
4843 // CHECK13-NEXT: call void @_ZN1SD1Ev(ptr nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]]
4844 // CHECK13-NEXT: br label [[EH_RESUME:%.*]]
4845 // CHECK13: omp_offload.cont:
4846 // CHECK13-NEXT: [[TMP18:%.*]] = load i8, ptr [[A]], align 1
4847 // CHECK13-NEXT: store i8 [[TMP18]], ptr [[A_CASTED]], align 1
4848 // CHECK13-NEXT: [[TMP19:%.*]] = load i64, ptr [[A_CASTED]], align 8
4849 // CHECK13-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
4850 // CHECK13-NEXT: store i64 [[TMP19]], ptr [[TMP20]], align 8
4851 // CHECK13-NEXT: [[TMP21:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
4852 // CHECK13-NEXT: store i64 [[TMP19]], ptr [[TMP21]], align 8
4853 // CHECK13-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
4854 // CHECK13-NEXT: store ptr null, ptr [[TMP22]], align 8
4855 // CHECK13-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
4856 // CHECK13-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
4857 // CHECK13-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0
4858 // CHECK13-NEXT: store i32 3, ptr [[TMP25]], align 4
4859 // CHECK13-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1
4860 // CHECK13-NEXT: store i32 1, ptr [[TMP26]], align 4
4861 // CHECK13-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2
4862 // CHECK13-NEXT: store ptr [[TMP23]], ptr [[TMP27]], align 8
4863 // CHECK13-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3
4864 // CHECK13-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 8
4865 // CHECK13-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4
4866 // CHECK13-NEXT: store ptr @.offload_sizes, ptr [[TMP29]], align 8
4867 // CHECK13-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5
4868 // CHECK13-NEXT: store ptr @.offload_maptypes, ptr [[TMP30]], align 8
4869 // CHECK13-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6
4870 // CHECK13-NEXT: store ptr null, ptr [[TMP31]], align 8
4871 // CHECK13-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7
4872 // CHECK13-NEXT: store ptr null, ptr [[TMP32]], align 8
4873 // CHECK13-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8
4874 // CHECK13-NEXT: store i64 100, ptr [[TMP33]], align 8
4875 // CHECK13-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9
4876 // CHECK13-NEXT: store i64 0, ptr [[TMP34]], align 8
4877 // CHECK13-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10
4878 // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP35]], align 4
4879 // CHECK13-NEXT: [[TMP36:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11
4880 // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP36]], align 4
4881 // CHECK13-NEXT: [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12
4882 // CHECK13-NEXT: store i32 0, ptr [[TMP37]], align 4
4883 // CHECK13-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.region_id, ptr [[KERNEL_ARGS2]])
4884 // CHECK13-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
4885 // CHECK13-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]]
4886 // CHECK13: omp_offload.failed3:
4887 // CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74(i64 [[TMP19]]) #[[ATTR3]]
4888 // CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT4]]
4889 // CHECK13: omp_offload.cont4:
4890 // CHECK13-NEXT: [[TMP40:%.*]] = load i8, ptr [[A]], align 1
4891 // CHECK13-NEXT: [[CONV:%.*]] = sext i8 [[TMP40]] to i32
4892 // CHECK13-NEXT: [[CALL6:%.*]] = invoke i32 @_Z5tmainIcLi5EEiv()
4893 // CHECK13-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[LPAD]]
4894 // CHECK13: invoke.cont5:
4895 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CALL6]]
4896 // CHECK13-NEXT: [[CALL8:%.*]] = invoke i32 @_Z5tmainI1SLi1EEiv()
4897 // CHECK13-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD]]
4898 // CHECK13: invoke.cont7:
4899 // CHECK13-NEXT: [[ADD9:%.*]] = add nsw i32 [[ADD]], [[CALL8]]
4900 // CHECK13-NEXT: store i32 [[ADD9]], ptr [[RETVAL]], align 4
4901 // CHECK13-NEXT: call void @_ZN1SD1Ev(ptr nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]]
4902 // CHECK13-NEXT: [[TMP41:%.*]] = load i32, ptr [[RETVAL]], align 4
4903 // CHECK13-NEXT: ret i32 [[TMP41]]
4904 // CHECK13: eh.resume:
4905 // CHECK13-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8
4906 // CHECK13-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4
4907 // CHECK13-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0
4908 // CHECK13-NEXT: [[LPAD_VAL10:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1
4909 // CHECK13-NEXT: resume { ptr, i32 } [[LPAD_VAL10]]
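// Summary of the @main checks above: both target regions are launched through
// __tgt_target_kernel with a fully initialized __tgt_kernel_arguments struct,
// each launch falls back to a direct host call when the return value is non-zero,
// and the landing pad destroys [[S]] via _ZN1SD1Ev before resuming in eh.resume.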
4912 // CHECK13-LABEL: define {{[^@]+}}@_ZN1SC1El
4913 // CHECK13-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat {
4914 // CHECK13-NEXT: entry:
4915 // CHECK13-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
4916 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
4917 // CHECK13-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
4918 // CHECK13-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
4919 // CHECK13-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
4920 // CHECK13-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
4921 // CHECK13-NEXT: call void @_ZN1SC2El(ptr nonnull align 8 dereferenceable(24) [[THIS1]], i64 [[TMP0]])
4922 // CHECK13-NEXT: ret void
4925 // CHECK13-LABEL: define {{[^@]+}}@_ZN1ScvcEv
4926 // CHECK13-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]]) #[[ATTR1]] comdat {
4927 // CHECK13-NEXT: entry:
4928 // CHECK13-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
4929 // CHECK13-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
4930 // CHECK13-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
4931 // CHECK13-NEXT: call void @_Z8mayThrowv()
4932 // CHECK13-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0
4933 // CHECK13-NEXT: [[TMP0:%.*]] = load i64, ptr [[A]], align 8
4934 // CHECK13-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i8
4935 // CHECK13-NEXT: ret i8 [[CONV]]
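// Target region from main (source line 68, per the _l68 suffix): the host entry
// forks teams, the distribute loop is statically scheduled over iterations 0..99,
// and __kmpc_push_num_threads is called with the constant 2 before every
// __kmpc_fork_call, consistent with a num_threads(2) clause on the construct.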
4938 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68
4939 // CHECK13-SAME: () #[[ATTR2:[0-9]+]] {
4940 // CHECK13-NEXT: entry:
4941 // CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined)
4942 // CHECK13-NEXT: ret void
4945 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined
4946 // CHECK13-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
4947 // CHECK13-NEXT: entry:
4948 // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
4949 // CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
4950 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
4951 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
4952 // CHECK13-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
4953 // CHECK13-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
4954 // CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
4955 // CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
4956 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
4957 // CHECK13-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
4958 // CHECK13-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
4959 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
4960 // CHECK13-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
4961 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
4962 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
4963 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
4964 // CHECK13-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
4965 // CHECK13-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
4966 // CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
4967 // CHECK13-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99
4968 // CHECK13-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
4969 // CHECK13: cond.true:
4970 // CHECK13-NEXT: br label [[COND_END:%.*]]
4971 // CHECK13: cond.false:
4972 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
4973 // CHECK13-NEXT: br label [[COND_END]]
4974 // CHECK13: cond.end:
4975 // CHECK13-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
4976 // CHECK13-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
4977 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
4978 // CHECK13-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
4979 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
4980 // CHECK13: omp.inner.for.cond:
4981 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9:![0-9]+]]
4982 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP9]]
4983 // CHECK13-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
4984 // CHECK13-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
4985 // CHECK13: omp.inner.for.body:
4986 // CHECK13-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 2), !llvm.access.group [[ACC_GRP9]]
4987 // CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP9]]
4988 // CHECK13-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
4989 // CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP9]]
4990 // CHECK13-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
4991 // CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP9]]
4992 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
4993 // CHECK13: omp.inner.for.inc:
4994 // CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]]
4995 // CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP9]]
4996 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
4997 // CHECK13-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]]
4998 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]]
4999 // CHECK13: omp.inner.for.end:
5000 // CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
5001 // CHECK13: omp.loop.exit:
5002 // CHECK13-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
5003 // CHECK13-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
5004 // CHECK13-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
5005 // CHECK13-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5006 // CHECK13: .omp.final.then:
5007 // CHECK13-NEXT: store i32 100, ptr [[I]], align 4
5008 // CHECK13-NEXT: br label [[DOTOMP_FINAL_DONE]]
5009 // CHECK13: .omp.final.done:
5010 // CHECK13-NEXT: ret void
5013 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined.omp_outlined
5014 // CHECK13-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
5015 // CHECK13-NEXT: entry:
5016 // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
5017 // CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
5018 // CHECK13-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
5019 // CHECK13-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
5020 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
5021 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
5022 // CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
5023 // CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
5024 // CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5025 // CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5026 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
5027 // CHECK13-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
5028 // CHECK13-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
5029 // CHECK13-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
5030 // CHECK13-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
5031 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
5032 // CHECK13-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
5033 // CHECK13-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
5034 // CHECK13-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
5035 // CHECK13-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
5036 // CHECK13-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
5037 // CHECK13-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
5038 // CHECK13-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
5039 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
5040 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
5041 // CHECK13-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
5042 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
5043 // CHECK13-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
5044 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
5045 // CHECK13-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
5046 // CHECK13-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5047 // CHECK13: cond.true:
5048 // CHECK13-NEXT: br label [[COND_END:%.*]]
5049 // CHECK13: cond.false:
5050 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
5051 // CHECK13-NEXT: br label [[COND_END]]
5052 // CHECK13: cond.end:
5053 // CHECK13-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
5054 // CHECK13-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
5055 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
5056 // CHECK13-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
5057 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
5058 // CHECK13: omp.inner.for.cond:
5059 // CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]]
5060 // CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP13]]
5061 // CHECK13-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
5062 // CHECK13-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5063 // CHECK13: omp.inner.for.body:
5064 // CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]]
5065 // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
5066 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
5067 // CHECK13-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP13]]
5068 // CHECK13-NEXT: invoke void @_Z3foov()
5069 // CHECK13-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP13]]
5070 // CHECK13: invoke.cont:
5071 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
5072 // CHECK13: omp.body.continue:
5073 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
5074 // CHECK13: omp.inner.for.inc:
5075 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]]
5076 // CHECK13-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
5077 // CHECK13-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]]
5078 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]]
5079 // CHECK13: omp.inner.for.end:
5080 // CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
5081 // CHECK13: omp.loop.exit:
5082 // CHECK13-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
5083 // CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
5084 // CHECK13-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
5085 // CHECK13-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5086 // CHECK13: .omp.final.then:
5087 // CHECK13-NEXT: store i32 100, ptr [[I]], align 4
5088 // CHECK13-NEXT: br label [[DOTOMP_FINAL_DONE]]
5089 // CHECK13: .omp.final.done:
5090 // CHECK13-NEXT: ret void
5091 // CHECK13: terminate.lpad:
5092 // CHECK13-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 }
5093 // CHECK13-NEXT: catch ptr null
5094 // CHECK13-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0
5095 // CHECK13-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR7:[0-9]+]], !llvm.access.group [[ACC_GRP13]]
5096 // CHECK13-NEXT: unreachable
5099 // CHECK13-LABEL: define {{[^@]+}}@__clang_call_terminate
5100 // CHECK13-SAME: (ptr [[TMP0:%.*]]) #[[ATTR5:[0-9]+]] comdat {
5101 // CHECK13-NEXT: [[TMP2:%.*]] = call ptr @__cxa_begin_catch(ptr [[TMP0]]) #[[ATTR3]]
5102 // CHECK13-NEXT: call void @_ZSt9terminatev() #[[ATTR7]]
5103 // CHECK13-NEXT: unreachable
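// Target region from main (source line 74): the captured char 'a' is passed by
// reference into the teams-outlined function, loaded and sign-extended to i32,
// and that value is what gets pushed via __kmpc_push_num_threads, i.e. a
// num_threads clause whose argument is the char variable rather than a constant.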
5106 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74
5107 // CHECK13-SAME: (i64 [[A:%.*]]) #[[ATTR2]] {
5108 // CHECK13-NEXT: entry:
5109 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
5110 // CHECK13-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
5111 // CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined, ptr [[A_ADDR]])
5112 // CHECK13-NEXT: ret void
5115 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined
5116 // CHECK13-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], ptr nonnull align 1 dereferenceable(1) [[A:%.*]]) #[[ATTR2]] {
5117 // CHECK13-NEXT: entry:
5118 // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
5119 // CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
5120 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
5121 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
5122 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
5123 // CHECK13-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
5124 // CHECK13-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
5125 // CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5126 // CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5127 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
5128 // CHECK13-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
5129 // CHECK13-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
5130 // CHECK13-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
5131 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
5132 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
5133 // CHECK13-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
5134 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
5135 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
5136 // CHECK13-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
5137 // CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
5138 // CHECK13-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
5139 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
5140 // CHECK13-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99
5141 // CHECK13-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5142 // CHECK13: cond.true:
5143 // CHECK13-NEXT: br label [[COND_END:%.*]]
5144 // CHECK13: cond.false:
5145 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
5146 // CHECK13-NEXT: br label [[COND_END]]
5147 // CHECK13: cond.end:
5148 // CHECK13-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
5149 // CHECK13-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
5150 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
5151 // CHECK13-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
5152 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
5153 // CHECK13: omp.inner.for.cond:
5154 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18:![0-9]+]]
5155 // CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP18]]
5156 // CHECK13-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
5157 // CHECK13-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5158 // CHECK13: omp.inner.for.body:
5159 // CHECK13-NEXT: [[TMP8:%.*]] = load i8, ptr [[TMP0]], align 1, !llvm.access.group [[ACC_GRP18]]
5160 // CHECK13-NEXT: [[TMP9:%.*]] = sext i8 [[TMP8]] to i32
5161 // CHECK13-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP2]], i32 [[TMP9]]), !llvm.access.group [[ACC_GRP18]]
5162 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP18]]
5163 // CHECK13-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
5164 // CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP18]]
5165 // CHECK13-NEXT: [[TMP13:%.*]] = zext i32 [[TMP12]] to i64
5166 // CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined.omp_outlined, i64 [[TMP11]], i64 [[TMP13]]), !llvm.access.group [[ACC_GRP18]]
5167 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
5168 // CHECK13: omp.inner.for.inc:
5169 // CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
5170 // CHECK13-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP18]]
5171 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
5172 // CHECK13-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
5173 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
5174 // CHECK13: omp.inner.for.end:
5175 // CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
5176 // CHECK13: omp.loop.exit:
5177 // CHECK13-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
5178 // CHECK13-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
5179 // CHECK13-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
5180 // CHECK13-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5181 // CHECK13: .omp.final.then:
5182 // CHECK13-NEXT: store i32 100, ptr [[I]], align 4
5183 // CHECK13-NEXT: br label [[DOTOMP_FINAL_DONE]]
5184 // CHECK13: .omp.final.done:
5185 // CHECK13-NEXT: ret void
5188 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined.omp_outlined
5189 // CHECK13-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
5190 // CHECK13-NEXT: entry:
5191 // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
5192 // CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
5193 // CHECK13-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
5194 // CHECK13-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
5195 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
5196 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
5197 // CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
5198 // CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
5199 // CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5200 // CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5201 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
5202 // CHECK13-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
5203 // CHECK13-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
5204 // CHECK13-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
5205 // CHECK13-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
5206 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
5207 // CHECK13-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
5208 // CHECK13-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
5209 // CHECK13-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
5210 // CHECK13-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
5211 // CHECK13-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
5212 // CHECK13-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
5213 // CHECK13-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
5214 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
5215 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
5216 // CHECK13-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
5217 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
5218 // CHECK13-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
5219 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
5220 // CHECK13-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
5221 // CHECK13-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5222 // CHECK13: cond.true:
5223 // CHECK13-NEXT: br label [[COND_END:%.*]]
5224 // CHECK13: cond.false:
5225 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
5226 // CHECK13-NEXT: br label [[COND_END]]
5227 // CHECK13: cond.end:
5228 // CHECK13-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
5229 // CHECK13-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
5230 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
5231 // CHECK13-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
5232 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
5233 // CHECK13: omp.inner.for.cond:
5234 // CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21:![0-9]+]]
5235 // CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP21]]
5236 // CHECK13-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
5237 // CHECK13-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5238 // CHECK13: omp.inner.for.body:
5239 // CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]]
5240 // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
5241 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
5242 // CHECK13-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP21]]
5243 // CHECK13-NEXT: invoke void @_Z3foov()
5244 // CHECK13-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP21]]
5245 // CHECK13: invoke.cont:
5246 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
5247 // CHECK13: omp.body.continue:
5248 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
5249 // CHECK13: omp.inner.for.inc:
5250 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]]
5251 // CHECK13-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
5252 // CHECK13-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]]
5253 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]]
5254 // CHECK13: omp.inner.for.end:
5255 // CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
5256 // CHECK13: omp.loop.exit:
5257 // CHECK13-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
5258 // CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
5259 // CHECK13-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
5260 // CHECK13-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5261 // CHECK13: .omp.final.then:
5262 // CHECK13-NEXT: store i32 100, ptr [[I]], align 4
5263 // CHECK13-NEXT: br label [[DOTOMP_FINAL_DONE]]
5264 // CHECK13: .omp.final.done:
5265 // CHECK13-NEXT: ret void
5266 // CHECK13: terminate.lpad:
5267 // CHECK13-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 }
5268 // CHECK13-NEXT: catch ptr null
5269 // CHECK13-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0
5270 // CHECK13-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR7]], !llvm.access.group [[ACC_GRP21]]
5271 // CHECK13-NEXT: unreachable
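// tmain<char, 5> (mangled _Z5tmainIcLi5EEiv): two more target launches, for the
// l52 and l57 regions. Neither region captures anything, so the kernel-argument
// structs carry null base-pointer/pointer/size/map-type arrays, and both launches
// fall back to direct host calls when __tgt_target_kernel reports failure.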
5274 // CHECK13-LABEL: define {{[^@]+}}@_Z5tmainIcLi5EEiv
5275 // CHECK13-SAME: () #[[ATTR6:[0-9]+]] comdat {
5276 // CHECK13-NEXT: entry:
5277 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
5278 // CHECK13-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
5279 // CHECK13-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
5280 // CHECK13-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
5281 // CHECK13-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
5282 // CHECK13-NEXT: store i32 3, ptr [[TMP0]], align 4
5283 // CHECK13-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
5284 // CHECK13-NEXT: store i32 0, ptr [[TMP1]], align 4
5285 // CHECK13-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
5286 // CHECK13-NEXT: store ptr null, ptr [[TMP2]], align 8
5287 // CHECK13-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
5288 // CHECK13-NEXT: store ptr null, ptr [[TMP3]], align 8
5289 // CHECK13-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
5290 // CHECK13-NEXT: store ptr null, ptr [[TMP4]], align 8
5291 // CHECK13-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
5292 // CHECK13-NEXT: store ptr null, ptr [[TMP5]], align 8
5293 // CHECK13-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
5294 // CHECK13-NEXT: store ptr null, ptr [[TMP6]], align 8
5295 // CHECK13-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
5296 // CHECK13-NEXT: store ptr null, ptr [[TMP7]], align 8
5297 // CHECK13-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
5298 // CHECK13-NEXT: store i64 100, ptr [[TMP8]], align 8
5299 // CHECK13-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
5300 // CHECK13-NEXT: store i64 0, ptr [[TMP9]], align 8
5301 // CHECK13-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
5302 // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4
5303 // CHECK13-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
5304 // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4
5305 // CHECK13-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
5306 // CHECK13-NEXT: store i32 0, ptr [[TMP12]], align 4
5307 // CHECK13-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.region_id, ptr [[KERNEL_ARGS]])
5308 // CHECK13-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
5309 // CHECK13-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
5310 // CHECK13: omp_offload.failed:
5311 // CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52() #[[ATTR3]]
5312 // CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT]]
5313 // CHECK13: omp_offload.cont:
5314 // CHECK13-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0
5315 // CHECK13-NEXT: store i32 3, ptr [[TMP15]], align 4
5316 // CHECK13-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1
5317 // CHECK13-NEXT: store i32 0, ptr [[TMP16]], align 4
5318 // CHECK13-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2
5319 // CHECK13-NEXT: store ptr null, ptr [[TMP17]], align 8
5320 // CHECK13-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3
5321 // CHECK13-NEXT: store ptr null, ptr [[TMP18]], align 8
5322 // CHECK13-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4
5323 // CHECK13-NEXT: store ptr null, ptr [[TMP19]], align 8
5324 // CHECK13-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5
5325 // CHECK13-NEXT: store ptr null, ptr [[TMP20]], align 8
5326 // CHECK13-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6
5327 // CHECK13-NEXT: store ptr null, ptr [[TMP21]], align 8
5328 // CHECK13-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7
5329 // CHECK13-NEXT: store ptr null, ptr [[TMP22]], align 8
5330 // CHECK13-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8
5331 // CHECK13-NEXT: store i64 100, ptr [[TMP23]], align 8
5332 // CHECK13-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9
5333 // CHECK13-NEXT: store i64 0, ptr [[TMP24]], align 8
5334 // CHECK13-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10
5335 // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP25]], align 4
5336 // CHECK13-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11
5337 // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4
5338 // CHECK13-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12
5339 // CHECK13-NEXT: store i32 0, ptr [[TMP27]], align 4
5340 // CHECK13-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.region_id, ptr [[KERNEL_ARGS2]])
5341 // CHECK13-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0
5342 // CHECK13-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]]
5343 // CHECK13: omp_offload.failed3:
5344 // CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57() #[[ATTR3]]
5345 // CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT4]]
5346 // CHECK13: omp_offload.cont4:
5347 // CHECK13-NEXT: ret i32 0
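// tmain<S, 1> (mangled _Z5tmainI1SLi1EEiv) mirrors the char instantiation: the
// same argument-free kernel launch sequence for its own l52 and l57 regions,
// again guarded by the __tgt_target_kernel return value with host fallbacks.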
5350 // CHECK13-LABEL: define {{[^@]+}}@_Z5tmainI1SLi1EEiv
5351 // CHECK13-SAME: () #[[ATTR6]] comdat {
5352 // CHECK13-NEXT: entry:
5353 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
5354 // CHECK13-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
5355 // CHECK13-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
5356 // CHECK13-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
5357 // CHECK13-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
5358 // CHECK13-NEXT: store i32 3, ptr [[TMP0]], align 4
5359 // CHECK13-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
5360 // CHECK13-NEXT: store i32 0, ptr [[TMP1]], align 4
5361 // CHECK13-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
5362 // CHECK13-NEXT: store ptr null, ptr [[TMP2]], align 8
5363 // CHECK13-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
5364 // CHECK13-NEXT: store ptr null, ptr [[TMP3]], align 8
5365 // CHECK13-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
5366 // CHECK13-NEXT: store ptr null, ptr [[TMP4]], align 8
5367 // CHECK13-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
5368 // CHECK13-NEXT: store ptr null, ptr [[TMP5]], align 8
5369 // CHECK13-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
5370 // CHECK13-NEXT: store ptr null, ptr [[TMP6]], align 8
5371 // CHECK13-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
5372 // CHECK13-NEXT: store ptr null, ptr [[TMP7]], align 8
5373 // CHECK13-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
5374 // CHECK13-NEXT: store i64 100, ptr [[TMP8]], align 8
5375 // CHECK13-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
5376 // CHECK13-NEXT: store i64 0, ptr [[TMP9]], align 8
5377 // CHECK13-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
5378 // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4
5379 // CHECK13-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
5380 // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4
5381 // CHECK13-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
5382 // CHECK13-NEXT: store i32 0, ptr [[TMP12]], align 4
5383 // CHECK13-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.region_id, ptr [[KERNEL_ARGS]])
5384 // CHECK13-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
5385 // CHECK13-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
5386 // CHECK13: omp_offload.failed:
5387 // CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52() #[[ATTR3]]
5388 // CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT]]
5389 // CHECK13: omp_offload.cont:
5390 // CHECK13-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0
5391 // CHECK13-NEXT: store i32 3, ptr [[TMP15]], align 4
5392 // CHECK13-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1
5393 // CHECK13-NEXT: store i32 0, ptr [[TMP16]], align 4
5394 // CHECK13-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2
5395 // CHECK13-NEXT: store ptr null, ptr [[TMP17]], align 8
5396 // CHECK13-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3
5397 // CHECK13-NEXT: store ptr null, ptr [[TMP18]], align 8
5398 // CHECK13-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4
5399 // CHECK13-NEXT: store ptr null, ptr [[TMP19]], align 8
5400 // CHECK13-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5
5401 // CHECK13-NEXT: store ptr null, ptr [[TMP20]], align 8
5402 // CHECK13-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6
5403 // CHECK13-NEXT: store ptr null, ptr [[TMP21]], align 8
5404 // CHECK13-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7
5405 // CHECK13-NEXT: store ptr null, ptr [[TMP22]], align 8
5406 // CHECK13-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8
5407 // CHECK13-NEXT: store i64 100, ptr [[TMP23]], align 8
5408 // CHECK13-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9
5409 // CHECK13-NEXT: store i64 0, ptr [[TMP24]], align 8
5410 // CHECK13-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10
5411 // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP25]], align 4
5412 // CHECK13-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11
5413 // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4
5414 // CHECK13-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12
5415 // CHECK13-NEXT: store i32 0, ptr [[TMP27]], align 4
5416 // CHECK13-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.region_id, ptr [[KERNEL_ARGS2]])
5417 // CHECK13-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0
5418 // CHECK13-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]]
5419 // CHECK13: omp_offload.failed3:
5420 // CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57() #[[ATTR3]]
5421 // CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT4]]
5422 // CHECK13: omp_offload.cont4:
5423 // CHECK13-NEXT: ret i32 0
5426 // CHECK13-LABEL: define {{[^@]+}}@_ZN1SD1Ev
5427 // CHECK13-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat {
5428 // CHECK13-NEXT: entry:
5429 // CHECK13-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
5430 // CHECK13-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
5431 // CHECK13-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
5432 // CHECK13-NEXT: call void @_ZN1SD2Ev(ptr nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR3]]
5433 // CHECK13-NEXT: ret void
5436 // CHECK13-LABEL: define {{[^@]+}}@_ZN1SC2El
5437 // CHECK13-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 [[A:%.*]]) unnamed_addr #[[ATTR6]] comdat {
5438 // CHECK13-NEXT: entry:
5439 // CHECK13-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
5440 // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
5441 // CHECK13-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
5442 // CHECK13-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
5443 // CHECK13-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
5444 // CHECK13-NEXT: [[A2:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0
5445 // CHECK13-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
5446 // CHECK13-NEXT: store i64 [[TMP0]], ptr [[A2]], align 8
5447 // CHECK13-NEXT: ret void
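// l52 region of tmain<char, 5>: the distribute loop pushes the constant 5 before
// forking the parallel region, presumably the non-type template argument of
// _Z5tmainIcLi5EEiv being used as the num_threads value.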
5450 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52
5451 // CHECK13-SAME: () #[[ATTR2]] {
5452 // CHECK13-NEXT: entry:
5453 // CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined)
5454 // CHECK13-NEXT: ret void
5457 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined
5458 // CHECK13-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
5459 // CHECK13-NEXT: entry:
5460 // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
5461 // CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
5462 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
5463 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
5464 // CHECK13-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
5465 // CHECK13-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
5466 // CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5467 // CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5468 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
5469 // CHECK13-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
5470 // CHECK13-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
5471 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
5472 // CHECK13-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
5473 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
5474 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
5475 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
5476 // CHECK13-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
5477 // CHECK13-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
5478 // CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
5479 // CHECK13-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99
5480 // CHECK13-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5481 // CHECK13: cond.true:
5482 // CHECK13-NEXT: br label [[COND_END:%.*]]
5483 // CHECK13: cond.false:
5484 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
5485 // CHECK13-NEXT: br label [[COND_END]]
5486 // CHECK13: cond.end:
5487 // CHECK13-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
5488 // CHECK13-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
5489 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
5490 // CHECK13-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
5491 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
5492 // CHECK13: omp.inner.for.cond:
5493 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24:![0-9]+]]
5494 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP24]]
5495 // CHECK13-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
5496 // CHECK13-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5497 // CHECK13: omp.inner.for.body:
5498 // CHECK13-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 5), !llvm.access.group [[ACC_GRP24]]
5499 // CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP24]]
5500 // CHECK13-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
5501 // CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP24]]
5502 // CHECK13-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
5503 // CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP24]]
5504 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
5505 // CHECK13: omp.inner.for.inc:
5506 // CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
5507 // CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP24]]
5508 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
5509 // CHECK13-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]]
5510 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]]
5511 // CHECK13: omp.inner.for.end:
5512 // CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
5513 // CHECK13: omp.loop.exit:
5514 // CHECK13-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
5515 // CHECK13-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
5516 // CHECK13-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
5517 // CHECK13-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5518 // CHECK13: .omp.final.then:
5519 // CHECK13-NEXT: store i32 100, ptr [[I]], align 4
5520 // CHECK13-NEXT: br label [[DOTOMP_FINAL_DONE]]
5521 // CHECK13: .omp.final.done:
5522 // CHECK13-NEXT: ret void
5525 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined.omp_outlined
5526 // CHECK13-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
5527 // CHECK13-NEXT: entry:
5528 // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
5529 // CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
5530 // CHECK13-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
5531 // CHECK13-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
5532 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
5533 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
5534 // CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
5535 // CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
5536 // CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5537 // CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5538 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
5539 // CHECK13-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
5540 // CHECK13-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
5541 // CHECK13-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
5542 // CHECK13-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
5543 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
5544 // CHECK13-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
5545 // CHECK13-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
5546 // CHECK13-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
5547 // CHECK13-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
5548 // CHECK13-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
5549 // CHECK13-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
5550 // CHECK13-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
5551 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
5552 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
5553 // CHECK13-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
5554 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
5555 // CHECK13-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
5556 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
5557 // CHECK13-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
5558 // CHECK13-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5559 // CHECK13: cond.true:
5560 // CHECK13-NEXT: br label [[COND_END:%.*]]
5561 // CHECK13: cond.false:
5562 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
5563 // CHECK13-NEXT: br label [[COND_END]]
5564 // CHECK13: cond.end:
5565 // CHECK13-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
5566 // CHECK13-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
5567 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
5568 // CHECK13-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
5569 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
5570 // CHECK13: omp.inner.for.cond:
5571 // CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27:![0-9]+]]
5572 // CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP27]]
5573 // CHECK13-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
5574 // CHECK13-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5575 // CHECK13: omp.inner.for.body:
5576 // CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]]
5577 // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
5578 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
5579 // CHECK13-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP27]]
5580 // CHECK13-NEXT: invoke void @_Z3foov()
5581 // CHECK13-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP27]]
5582 // CHECK13: invoke.cont:
5583 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
5584 // CHECK13: omp.body.continue:
5585 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
5586 // CHECK13: omp.inner.for.inc:
5587 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]]
5588 // CHECK13-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
5589 // CHECK13-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]]
5590 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]]
5591 // CHECK13: omp.inner.for.end:
5592 // CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
5593 // CHECK13: omp.loop.exit:
5594 // CHECK13-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
5595 // CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
5596 // CHECK13-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
5597 // CHECK13-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5598 // CHECK13: .omp.final.then:
5599 // CHECK13-NEXT: store i32 100, ptr [[I]], align 4
5600 // CHECK13-NEXT: br label [[DOTOMP_FINAL_DONE]]
5601 // CHECK13: .omp.final.done:
5602 // CHECK13-NEXT: ret void
5603 // CHECK13: terminate.lpad:
5604 // CHECK13-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 }
5605 // CHECK13-NEXT: catch ptr null
5606 // CHECK13-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0
5607 // CHECK13-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR7]], !llvm.access.group [[ACC_GRP27]]
5608 // CHECK13-NEXT: unreachable
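// l57 region of tmain<char, 5>: identical structure, but the value pushed via
// __kmpc_push_num_threads is the constant 23, presumably a num_threads argument
// constant-folded after conversion through char.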
5611 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57
5612 // CHECK13-SAME: () #[[ATTR2]] {
5613 // CHECK13-NEXT: entry:
5614 // CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined)
5615 // CHECK13-NEXT: ret void
5618 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined
5619 // CHECK13-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
5620 // CHECK13-NEXT: entry:
5621 // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
5622 // CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
5623 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
5624 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
5625 // CHECK13-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
5626 // CHECK13-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
5627 // CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5628 // CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5629 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
5630 // CHECK13-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
5631 // CHECK13-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
5632 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
5633 // CHECK13-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
5634 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
5635 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
5636 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
5637 // CHECK13-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
5638 // CHECK13-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
5639 // CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
5640 // CHECK13-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99
5641 // CHECK13-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5642 // CHECK13: cond.true:
5643 // CHECK13-NEXT: br label [[COND_END:%.*]]
5644 // CHECK13: cond.false:
5645 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
5646 // CHECK13-NEXT: br label [[COND_END]]
5647 // CHECK13: cond.end:
5648 // CHECK13-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
5649 // CHECK13-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
5650 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
5651 // CHECK13-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
5652 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
5653 // CHECK13: omp.inner.for.cond:
5654 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP30:![0-9]+]]
5655 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP30]]
5656 // CHECK13-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
5657 // CHECK13-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5658 // CHECK13: omp.inner.for.body:
5659 // CHECK13-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 23), !llvm.access.group [[ACC_GRP30]]
5660 // CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP30]]
5661 // CHECK13-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
5662 // CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP30]]
5663 // CHECK13-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
5664 // CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP30]]
5665 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
5666 // CHECK13: omp.inner.for.inc:
5667 // CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP30]]
5668 // CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP30]]
5669 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
5670 // CHECK13-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP30]]
5671 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]]
5672 // CHECK13: omp.inner.for.end:
5673 // CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
5674 // CHECK13: omp.loop.exit:
5675 // CHECK13-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
5676 // CHECK13-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
5677 // CHECK13-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
5678 // CHECK13-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5679 // CHECK13: .omp.final.then:
5680 // CHECK13-NEXT: store i32 100, ptr [[I]], align 4
5681 // CHECK13-NEXT: br label [[DOTOMP_FINAL_DONE]]
5682 // CHECK13: .omp.final.done:
5683 // CHECK13-NEXT: ret void
5686 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined.omp_outlined
5687 // CHECK13-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
5688 // CHECK13-NEXT: entry:
5689 // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
5690 // CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
5691 // CHECK13-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
5692 // CHECK13-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
5693 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
5694 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
5695 // CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
5696 // CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
5697 // CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5698 // CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5699 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
5700 // CHECK13-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
5701 // CHECK13-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
5702 // CHECK13-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
5703 // CHECK13-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
5704 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
5705 // CHECK13-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
5706 // CHECK13-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
5707 // CHECK13-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
5708 // CHECK13-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
5709 // CHECK13-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
5710 // CHECK13-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
5711 // CHECK13-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
5712 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
5713 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
5714 // CHECK13-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
5715 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
5716 // CHECK13-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
5717 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
5718 // CHECK13-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
5719 // CHECK13-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5720 // CHECK13: cond.true:
5721 // CHECK13-NEXT: br label [[COND_END:%.*]]
5722 // CHECK13: cond.false:
5723 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
5724 // CHECK13-NEXT: br label [[COND_END]]
5725 // CHECK13: cond.end:
5726 // CHECK13-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
5727 // CHECK13-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
5728 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
5729 // CHECK13-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
5730 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
5731 // CHECK13: omp.inner.for.cond:
5732 // CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33:![0-9]+]]
5733 // CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP33]]
5734 // CHECK13-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
5735 // CHECK13-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5736 // CHECK13: omp.inner.for.body:
5737 // CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]]
5738 // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
5739 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
5740 // CHECK13-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP33]]
5741 // CHECK13-NEXT: invoke void @_Z3foov()
5742 // CHECK13-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP33]]
5743 // CHECK13: invoke.cont:
5744 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
5745 // CHECK13: omp.body.continue:
5746 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
5747 // CHECK13: omp.inner.for.inc:
5748 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]]
5749 // CHECK13-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
5750 // CHECK13-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]]
5751 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]]
5752 // CHECK13: omp.inner.for.end:
5753 // CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
5754 // CHECK13: omp.loop.exit:
5755 // CHECK13-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
5756 // CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
5757 // CHECK13-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
5758 // CHECK13-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5759 // CHECK13: .omp.final.then:
5760 // CHECK13-NEXT: store i32 100, ptr [[I]], align 4
5761 // CHECK13-NEXT: br label [[DOTOMP_FINAL_DONE]]
5762 // CHECK13: .omp.final.done:
5763 // CHECK13-NEXT: ret void
5764 // CHECK13: terminate.lpad:
5765 // CHECK13-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 }
5766 // CHECK13-NEXT: catch ptr null
5767 // CHECK13-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0
5768 // CHECK13-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR7]], !llvm.access.group [[ACC_GRP33]]
5769 // CHECK13-NEXT: unreachable
5772 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52
5773 // CHECK13-SAME: () #[[ATTR2]] {
5774 // CHECK13-NEXT: entry:
5775 // CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined)
5776 // CHECK13-NEXT: ret void
5779 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined
5780 // CHECK13-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] {
5781 // CHECK13-NEXT: entry:
5782 // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
5783 // CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
5784 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
5785 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
5786 // CHECK13-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
5787 // CHECK13-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
5788 // CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5789 // CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5790 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
5791 // CHECK13-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
5792 // CHECK13-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
5793 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
5794 // CHECK13-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
5795 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
5796 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
5797 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
5798 // CHECK13-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
5799 // CHECK13-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
5800 // CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
5801 // CHECK13-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99
5802 // CHECK13-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5803 // CHECK13: cond.true:
5804 // CHECK13-NEXT: br label [[COND_END:%.*]]
5805 // CHECK13: cond.false:
5806 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
5807 // CHECK13-NEXT: br label [[COND_END]]
5808 // CHECK13: cond.end:
5809 // CHECK13-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
5810 // CHECK13-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
5811 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
5812 // CHECK13-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
5813 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
5814 // CHECK13: omp.inner.for.cond:
5815 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36:![0-9]+]]
5816 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP36]]
5817 // CHECK13-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
5818 // CHECK13-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5819 // CHECK13: omp.inner.for.body:
5820 // CHECK13-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 1), !llvm.access.group [[ACC_GRP36]]
5821 // CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP36]]
5822 // CHECK13-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
5823 // CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP36]]
5824 // CHECK13-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
5825 // CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP36]]
5826 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
5827 // CHECK13: omp.inner.for.inc:
5828 // CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36]]
5829 // CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP36]]
5830 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
5831 // CHECK13-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36]]
5832 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP37:![0-9]+]]
5833 // CHECK13: omp.inner.for.end:
5834 // CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
5835 // CHECK13: omp.loop.exit:
5836 // CHECK13-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
5837 // CHECK13-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
5838 // CHECK13-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
5839 // CHECK13-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5840 // CHECK13: .omp.final.then:
5841 // CHECK13-NEXT: store i32 100, ptr [[I]], align 4
5842 // CHECK13-NEXT: br label [[DOTOMP_FINAL_DONE]]
5843 // CHECK13: .omp.final.done:
5844 // CHECK13-NEXT: ret void
5847 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined.omp_outlined
5848 // CHECK13-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
5849 // CHECK13-NEXT: entry:
5850 // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
5851 // CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
5852 // CHECK13-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
5853 // CHECK13-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
5854 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
5855 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
5856 // CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
5857 // CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
5858 // CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5859 // CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5860 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
5861 // CHECK13-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
5862 // CHECK13-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
5863 // CHECK13-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
5864 // CHECK13-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
5865 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
5866 // CHECK13-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
5867 // CHECK13-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
5868 // CHECK13-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
5869 // CHECK13-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
5870 // CHECK13-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
5871 // CHECK13-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
5872 // CHECK13-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
5873 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
5874 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
5875 // CHECK13-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
5876 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
5877 // CHECK13-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
5878 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
5879 // CHECK13-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
5880 // CHECK13-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5881 // CHECK13: cond.true:
5882 // CHECK13-NEXT: br label [[COND_END:%.*]]
5883 // CHECK13: cond.false:
5884 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
5885 // CHECK13-NEXT: br label [[COND_END]]
5886 // CHECK13: cond.end:
5887 // CHECK13-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
5888 // CHECK13-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
5889 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
5890 // CHECK13-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
5891 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
5892 // CHECK13: omp.inner.for.cond:
5893 // CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39:![0-9]+]]
5894 // CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP39]]
5895 // CHECK13-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
5896 // CHECK13-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5897 // CHECK13: omp.inner.for.body:
5898 // CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39]]
5899 // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
5900 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
5901 // CHECK13-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP39]]
5902 // CHECK13-NEXT: invoke void @_Z3foov()
5903 // CHECK13-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP39]]
5904 // CHECK13: invoke.cont:
5905 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
5906 // CHECK13: omp.body.continue:
5907 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
5908 // CHECK13: omp.inner.for.inc:
5909 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39]]
5910 // CHECK13-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
5911 // CHECK13-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39]]
5912 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP40:![0-9]+]]
5913 // CHECK13: omp.inner.for.end:
5914 // CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
5915 // CHECK13: omp.loop.exit:
5916 // CHECK13-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
5917 // CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
5918 // CHECK13-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
5919 // CHECK13-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
5920 // CHECK13: .omp.final.then:
5921 // CHECK13-NEXT: store i32 100, ptr [[I]], align 4
5922 // CHECK13-NEXT: br label [[DOTOMP_FINAL_DONE]]
5923 // CHECK13: .omp.final.done:
5924 // CHECK13-NEXT: ret void
5925 // CHECK13: terminate.lpad:
5926 // CHECK13-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 }
5927 // CHECK13-NEXT: catch ptr null
5928 // CHECK13-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0
5929 // CHECK13-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR7]], !llvm.access.group [[ACC_GRP39]]
5930 // CHECK13-NEXT: unreachable
5933 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57
5934 // CHECK13-SAME: () #[[ATTR2]] {
5935 // CHECK13-NEXT: entry:
5936 // CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined)
5937 // CHECK13-NEXT: ret void
5940 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined
5941 // CHECK13-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
5942 // CHECK13-NEXT: entry:
5943 // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
5944 // CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
5945 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
5946 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
5947 // CHECK13-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
5948 // CHECK13-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
5949 // CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
5950 // CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
5951 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
5952 // CHECK13-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 8
5953 // CHECK13-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
5954 // CHECK13-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
5955 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
5956 // CHECK13-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
5957 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
5958 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
5959 // CHECK13-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
5960 // CHECK13-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
5961 // CHECK13-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
5962 // CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
5963 // CHECK13-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99
5964 // CHECK13-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
5965 // CHECK13: cond.true:
5966 // CHECK13-NEXT: br label [[COND_END:%.*]]
5967 // CHECK13: cond.false:
5968 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
5969 // CHECK13-NEXT: br label [[COND_END]]
5970 // CHECK13: cond.end:
5971 // CHECK13-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
5972 // CHECK13-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
5973 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
5974 // CHECK13-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
5975 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
5976 // CHECK13: omp.inner.for.cond:
5977 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP42:![0-9]+]]
5978 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP42]]
5979 // CHECK13-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
5980 // CHECK13-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
5981 // CHECK13: omp.inner.for.body:
5982 // CHECK13-NEXT: invoke void @_ZN1SC1El(ptr nonnull align 8 dereferenceable(24) [[REF_TMP]], i64 23)
5983 // CHECK13-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP42]]
5984 // CHECK13: invoke.cont:
5985 // CHECK13-NEXT: [[CALL:%.*]] = invoke i8 @_ZN1ScvcEv(ptr nonnull align 8 dereferenceable(24) [[REF_TMP]])
5986 // CHECK13-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[TERMINATE_LPAD]], !llvm.access.group [[ACC_GRP42]]
5987 // CHECK13: invoke.cont2:
5988 // CHECK13-NEXT: [[TMP7:%.*]] = sext i8 [[CALL]] to i32
5989 // CHECK13-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 [[TMP7]]), !llvm.access.group [[ACC_GRP42]]
5990 // CHECK13-NEXT: call void @_ZN1SD1Ev(ptr nonnull align 8 dereferenceable(24) [[REF_TMP]]) #[[ATTR3]], !llvm.access.group [[ACC_GRP42]]
5991 // CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP42]]
5992 // CHECK13-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
5993 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP42]]
5994 // CHECK13-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
5995 // CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]]), !llvm.access.group [[ACC_GRP42]]
5996 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
5997 // CHECK13: omp.inner.for.inc:
5998 // CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP42]]
5999 // CHECK13-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP42]]
6000 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
6001 // CHECK13-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP42]]
6002 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP43:![0-9]+]]
6003 // CHECK13: omp.inner.for.end:
6004 // CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
6005 // CHECK13: omp.loop.exit:
6006 // CHECK13-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
6007 // CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
6008 // CHECK13-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
6009 // CHECK13-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6010 // CHECK13: .omp.final.then:
6011 // CHECK13-NEXT: store i32 100, ptr [[I]], align 4
6012 // CHECK13-NEXT: br label [[DOTOMP_FINAL_DONE]]
6013 // CHECK13: .omp.final.done:
6014 // CHECK13-NEXT: ret void
6015 // CHECK13: terminate.lpad:
6016 // CHECK13-NEXT: [[TMP16:%.*]] = landingpad { ptr, i32 }
6017 // CHECK13-NEXT: catch ptr null
6018 // CHECK13-NEXT: [[TMP17:%.*]] = extractvalue { ptr, i32 } [[TMP16]], 0
6019 // CHECK13-NEXT: call void @__clang_call_terminate(ptr [[TMP17]]) #[[ATTR7]], !llvm.access.group [[ACC_GRP42]]
6020 // CHECK13-NEXT: unreachable
6023 // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined.omp_outlined
6024 // CHECK13-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 {
6025 // CHECK13-NEXT: entry:
6026 // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
6027 // CHECK13-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
6028 // CHECK13-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
6029 // CHECK13-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
6030 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
6031 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
6032 // CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
6033 // CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
6034 // CHECK13-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
6035 // CHECK13-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
6036 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
6037 // CHECK13-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
6038 // CHECK13-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
6039 // CHECK13-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
6040 // CHECK13-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
6041 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
6042 // CHECK13-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
6043 // CHECK13-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
6044 // CHECK13-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
6045 // CHECK13-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
6046 // CHECK13-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
6047 // CHECK13-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
6048 // CHECK13-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
6049 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
6050 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
6051 // CHECK13-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
6052 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
6053 // CHECK13-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
6054 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
6055 // CHECK13-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99
6056 // CHECK13-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
6057 // CHECK13: cond.true:
6058 // CHECK13-NEXT: br label [[COND_END:%.*]]
6059 // CHECK13: cond.false:
6060 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
6061 // CHECK13-NEXT: br label [[COND_END]]
6062 // CHECK13: cond.end:
6063 // CHECK13-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
6064 // CHECK13-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
6065 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
6066 // CHECK13-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
6067 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
6068 // CHECK13: omp.inner.for.cond:
6069 // CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP45:![0-9]+]]
6070 // CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP45]]
6071 // CHECK13-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
6072 // CHECK13-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
6073 // CHECK13: omp.inner.for.body:
6074 // CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP45]]
6075 // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
6076 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
6077 // CHECK13-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP45]]
6078 // CHECK13-NEXT: invoke void @_Z3foov()
6079 // CHECK13-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP45]]
6080 // CHECK13: invoke.cont:
6081 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
6082 // CHECK13: omp.body.continue:
6083 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
6084 // CHECK13: omp.inner.for.inc:
6085 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP45]]
6086 // CHECK13-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
6087 // CHECK13-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP45]]
6088 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP46:![0-9]+]]
6089 // CHECK13: omp.inner.for.end:
6090 // CHECK13-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
6091 // CHECK13: omp.loop.exit:
6092 // CHECK13-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]])
6093 // CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
6094 // CHECK13-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0
6095 // CHECK13-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
6096 // CHECK13: .omp.final.then:
6097 // CHECK13-NEXT: store i32 100, ptr [[I]], align 4
6098 // CHECK13-NEXT: br label [[DOTOMP_FINAL_DONE]]
6099 // CHECK13: .omp.final.done:
6100 // CHECK13-NEXT: ret void
6101 // CHECK13: terminate.lpad:
6102 // CHECK13-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 }
6103 // CHECK13-NEXT: catch ptr null
6104 // CHECK13-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0
6105 // CHECK13-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR7]], !llvm.access.group [[ACC_GRP45]]
6106 // CHECK13-NEXT: unreachable
6109 // CHECK13-LABEL: define {{[^@]+}}@_ZN1SD2Ev
6110 // CHECK13-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat {
6111 // CHECK13-NEXT: entry:
6112 // CHECK13-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
6113 // CHECK13-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
6114 // CHECK13-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
6115 // CHECK13-NEXT: ret void