[TySan] Don't report globals with incomplete types. (#121922)
[llvm-project.git] / clang / test / OpenMP / nvptx_target_parallel_reduction_codegen.cpp
blob0753573c73bce89fd11ba4c623f3e769b5bf2cc5
1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
2 // Test target codegen - host bc file has to be created first.
3 // RUN: %clang_cc1 -verify -fopenmp -fopenmp-cuda-mode -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc
4 // RUN: %clang_cc1 -verify -fopenmp -fopenmp-cuda-mode -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefix=CHECK-64
5 // RUN: %clang_cc1 -verify -fopenmp -fopenmp-cuda-mode -x c++ -triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm-bc %s -o %t-x86-host.bc
6 // RUN: %clang_cc1 -verify -fopenmp -fopenmp-cuda-mode -x c++ -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix=CHECK-32
7 // RUN: %clang_cc1 -verify -fopenmp -fopenmp-cuda-mode -fexceptions -fcxx-exceptions -x c++ -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix=CHECK-32-EX
8 // expected-no-diagnostics
9 #ifndef HEADER
10 #define HEADER
12 // Check for the data transfer medium in shared memory to transfer the reduction list to the first warp.
14 // Check that the execution mode of all 3 target regions is set to SPMD Mode.
16 template<typename tx>
// Test kernel: three "target parallel" regions, each with a different
// reduction-clause combination, so that NVPTX parallel-reduction codegen is
// exercised for double (+), tx/char (^) with float (*), and int (|) with
// short (max). Instantiated below (in bar) with tx = char.
// NOTE(review): the embedded original line numbers jump (24 -> 26 -> 29,
// 41 -> 44), so the blank lines and the braces that closed each region body
// were dropped by the page extraction; this listing is incomplete, not the
// verbatim upstream source.
17 tx ftemplate(int n) {
18 int a;
19 short b;
20 tx c;
21 float d;
22 double e;
// Region 1: single scalar double reduced with '+'; matches the _l24 kernel
// and _omp_reduction_* helper functions in the CHECK lines below.
24 #pragma omp target parallel reduction(+: e)
26 e += 5;
// Region 2: two reduction clauses on one directive (bitwise-xor on tx,
// multiply on float); matches the _l29 kernel with a 2-element RED_LIST.
29 #pragma omp target parallel reduction(^: c) reduction(*: d)
31 c ^= 2;
32 d *= 33;
// Region 3: bitwise-or on int plus the 'max' operator on short; matches the
// _l35 kernel (its CHECK block is truncated at the end of this chunk).
35 #pragma omp target parallel reduction(|: a) reduction(max: b)
37 a |= 1;
38 b = 99 > b ? 99 : b;
// Combine all reduced variables so none of the regions can be dropped as dead.
41 return a+b+c+d+e;
// Driver: forces the tx = char instantiation of ftemplate, which is the only
// instantiation the CHECK lines below refer to (mangled as _Z9ftemplateIcET_i).
// NOTE(review): the closing brace (original line 50) was dropped by the page
// extraction, as the jump in embedded line numbers (49 -> 52) shows.
44 int bar(int n){
45 int a = 0;
47 a += ftemplate<char>(n);
49 return a;
52 // define internal void [[PFN]](
55 // Reduction function
57 // Shuffle and reduce function
58 // Condition to reduce
59 // Now check if we should just copy over the remote reduction list
61 // Inter warp copy function
62 // [[DO_COPY]]
63 // Barrier after copy to shared memory storage medium.
64 // Read into warp 0.
66 // define internal void [[PFN1]](
68 // Reduction function
70 // Shuffle and reduce function
71 // Condition to reduce
72 // Now check if we should just copy over the remote reduction list
74 // Inter warp copy function
75 // [[DO_COPY]]
76 // Barrier after copy to shared memory storage medium.
77 // Read into warp 0.
78 // [[DO_COPY]]
79 // Barrier after copy to shared memory storage medium.
80 // Read into warp 0.
82 // define internal void [[PFN2]](
85 // Reduction function
87 // Shuffle and reduce function
88 // Condition to reduce
89 // Now check if we should just copy over the remote reduction list
91 // Inter warp copy function
92 // [[DO_COPY]]
93 // Barrier after copy to shared memory storage medium.
94 // Read into warp 0.
95 // [[DO_COPY]]
96 // Barrier after copy to shared memory storage medium.
97 // Read into warp 0.
99 #endif
100 // CHECK-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l24
101 // CHECK-64-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[E:%.*]]) #[[ATTR0:[0-9]+]] {
102 // CHECK-64-NEXT: entry:
103 // CHECK-64-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
104 // CHECK-64-NEXT: [[E_ADDR:%.*]] = alloca ptr, align 8
105 // CHECK-64-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x ptr], align 8
106 // CHECK-64-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
107 // CHECK-64-NEXT: store ptr [[E]], ptr [[E_ADDR]], align 8
108 // CHECK-64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[E_ADDR]], align 8
109 // CHECK-64-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l24_kernel_environment, ptr [[DYN_PTR]])
110 // CHECK-64-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
111 // CHECK-64-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
112 // CHECK-64: user_code.entry:
113 // CHECK-64-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1:[0-9]+]])
114 // CHECK-64-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
115 // CHECK-64-NEXT: store ptr [[TMP0]], ptr [[TMP3]], align 8
116 // CHECK-64-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l24_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i64 1)
117 // CHECK-64-NEXT: call void @__kmpc_target_deinit()
118 // CHECK-64-NEXT: ret void
119 // CHECK-64: worker.exit:
120 // CHECK-64-NEXT: ret void
123 // CHECK-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l24_omp_outlined
124 // CHECK-64-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[E:%.*]]) #[[ATTR1:[0-9]+]] {
125 // CHECK-64-NEXT: entry:
126 // CHECK-64-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
127 // CHECK-64-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
128 // CHECK-64-NEXT: [[E_ADDR:%.*]] = alloca ptr, align 8
129 // CHECK-64-NEXT: [[E1:%.*]] = alloca double, align 8
130 // CHECK-64-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 8
131 // CHECK-64-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
132 // CHECK-64-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
133 // CHECK-64-NEXT: store ptr [[E]], ptr [[E_ADDR]], align 8
134 // CHECK-64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[E_ADDR]], align 8
135 // CHECK-64-NEXT: store double 0.000000e+00, ptr [[E1]], align 8
136 // CHECK-64-NEXT: [[TMP1:%.*]] = load double, ptr [[E1]], align 8
137 // CHECK-64-NEXT: [[ADD:%.*]] = fadd double [[TMP1]], 5.000000e+00
138 // CHECK-64-NEXT: store double [[ADD]], ptr [[E1]], align 8
139 // CHECK-64-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
140 // CHECK-64-NEXT: store ptr [[E1]], ptr [[TMP2]], align 8
141 // CHECK-64-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_nvptx_parallel_reduce_nowait_v2(ptr @[[GLOB1]], i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @_omp_reduction_shuffle_and_reduce_func, ptr @_omp_reduction_inter_warp_copy_func)
142 // CHECK-64-NEXT: [[TMP4:%.*]] = icmp eq i32 [[TMP3]], 1
143 // CHECK-64-NEXT: br i1 [[TMP4]], label [[DOTOMP_REDUCTION_THEN:%.*]], label [[DOTOMP_REDUCTION_DONE:%.*]]
144 // CHECK-64: .omp.reduction.then:
145 // CHECK-64-NEXT: [[TMP5:%.*]] = load double, ptr [[TMP0]], align 8
146 // CHECK-64-NEXT: [[TMP6:%.*]] = load double, ptr [[E1]], align 8
147 // CHECK-64-NEXT: [[ADD2:%.*]] = fadd double [[TMP5]], [[TMP6]]
148 // CHECK-64-NEXT: store double [[ADD2]], ptr [[TMP0]], align 8
149 // CHECK-64-NEXT: br label [[DOTOMP_REDUCTION_DONE]]
150 // CHECK-64: .omp.reduction.done:
151 // CHECK-64-NEXT: ret void
154 // CHECK-64-LABEL: define {{[^@]+}}@_omp_reduction_shuffle_and_reduce_func
155 // CHECK-64-SAME: (ptr noundef [[TMP0:%.*]], i16 noundef signext [[TMP1:%.*]], i16 noundef signext [[TMP2:%.*]], i16 noundef signext [[TMP3:%.*]]) #[[ATTR2:[0-9]+]] {
156 // CHECK-64-NEXT: entry:
157 // CHECK-64-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
158 // CHECK-64-NEXT: [[DOTADDR1:%.*]] = alloca i16, align 2
159 // CHECK-64-NEXT: [[DOTADDR2:%.*]] = alloca i16, align 2
160 // CHECK-64-NEXT: [[DOTADDR3:%.*]] = alloca i16, align 2
161 // CHECK-64-NEXT: [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST:%.*]] = alloca [1 x ptr], align 8
162 // CHECK-64-NEXT: [[DOTOMP_REDUCTION_ELEMENT:%.*]] = alloca double, align 8
163 // CHECK-64-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
164 // CHECK-64-NEXT: store i16 [[TMP1]], ptr [[DOTADDR1]], align 2
165 // CHECK-64-NEXT: store i16 [[TMP2]], ptr [[DOTADDR2]], align 2
166 // CHECK-64-NEXT: store i16 [[TMP3]], ptr [[DOTADDR3]], align 2
167 // CHECK-64-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR]], align 8
168 // CHECK-64-NEXT: [[TMP5:%.*]] = load i16, ptr [[DOTADDR1]], align 2
169 // CHECK-64-NEXT: [[TMP6:%.*]] = load i16, ptr [[DOTADDR2]], align 2
170 // CHECK-64-NEXT: [[TMP7:%.*]] = load i16, ptr [[DOTADDR3]], align 2
171 // CHECK-64-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i64 0, i64 0
172 // CHECK-64-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8
173 // CHECK-64-NEXT: [[TMP10:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 0
174 // CHECK-64-NEXT: [[TMP11:%.*]] = getelementptr double, ptr [[TMP9]], i64 1
175 // CHECK-64-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP9]], align 8
176 // CHECK-64-NEXT: [[TMP13:%.*]] = call i32 @__kmpc_get_warp_size()
177 // CHECK-64-NEXT: [[TMP14:%.*]] = trunc i32 [[TMP13]] to i16
178 // CHECK-64-NEXT: [[TMP15:%.*]] = call i64 @__kmpc_shuffle_int64(i64 [[TMP12]], i16 [[TMP6]], i16 [[TMP14]])
179 // CHECK-64-NEXT: store i64 [[TMP15]], ptr [[DOTOMP_REDUCTION_ELEMENT]], align 8
180 // CHECK-64-NEXT: [[TMP16:%.*]] = getelementptr i64, ptr [[TMP9]], i64 1
181 // CHECK-64-NEXT: [[TMP17:%.*]] = getelementptr i64, ptr [[DOTOMP_REDUCTION_ELEMENT]], i64 1
182 // CHECK-64-NEXT: store ptr [[DOTOMP_REDUCTION_ELEMENT]], ptr [[TMP10]], align 8
183 // CHECK-64-NEXT: [[TMP18:%.*]] = icmp eq i16 [[TMP7]], 0
184 // CHECK-64-NEXT: [[TMP19:%.*]] = icmp eq i16 [[TMP7]], 1
185 // CHECK-64-NEXT: [[TMP20:%.*]] = icmp ult i16 [[TMP5]], [[TMP6]]
186 // CHECK-64-NEXT: [[TMP21:%.*]] = and i1 [[TMP19]], [[TMP20]]
187 // CHECK-64-NEXT: [[TMP22:%.*]] = icmp eq i16 [[TMP7]], 2
188 // CHECK-64-NEXT: [[TMP23:%.*]] = and i16 [[TMP5]], 1
189 // CHECK-64-NEXT: [[TMP24:%.*]] = icmp eq i16 [[TMP23]], 0
190 // CHECK-64-NEXT: [[TMP25:%.*]] = and i1 [[TMP22]], [[TMP24]]
191 // CHECK-64-NEXT: [[TMP26:%.*]] = icmp sgt i16 [[TMP6]], 0
192 // CHECK-64-NEXT: [[TMP27:%.*]] = and i1 [[TMP25]], [[TMP26]]
193 // CHECK-64-NEXT: [[TMP28:%.*]] = or i1 [[TMP18]], [[TMP21]]
194 // CHECK-64-NEXT: [[TMP29:%.*]] = or i1 [[TMP28]], [[TMP27]]
195 // CHECK-64-NEXT: br i1 [[TMP29]], label [[THEN:%.*]], label [[ELSE:%.*]]
196 // CHECK-64: then:
197 // CHECK-64-NEXT: call void @"{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l24_omp_outlined_omp$reduction$reduction_func"(ptr [[TMP4]], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]]) #[[ATTR3:[0-9]+]]
198 // CHECK-64-NEXT: br label [[IFCONT:%.*]]
199 // CHECK-64: else:
200 // CHECK-64-NEXT: br label [[IFCONT]]
201 // CHECK-64: ifcont:
202 // CHECK-64-NEXT: [[TMP30:%.*]] = icmp eq i16 [[TMP7]], 1
203 // CHECK-64-NEXT: [[TMP31:%.*]] = icmp uge i16 [[TMP5]], [[TMP6]]
204 // CHECK-64-NEXT: [[TMP32:%.*]] = and i1 [[TMP30]], [[TMP31]]
205 // CHECK-64-NEXT: br i1 [[TMP32]], label [[THEN4:%.*]], label [[ELSE5:%.*]]
206 // CHECK-64: then4:
207 // CHECK-64-NEXT: [[TMP33:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 0
208 // CHECK-64-NEXT: [[TMP34:%.*]] = load ptr, ptr [[TMP33]], align 8
209 // CHECK-64-NEXT: [[TMP35:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i64 0, i64 0
210 // CHECK-64-NEXT: [[TMP36:%.*]] = load ptr, ptr [[TMP35]], align 8
211 // CHECK-64-NEXT: [[TMP37:%.*]] = load double, ptr [[TMP34]], align 8
212 // CHECK-64-NEXT: store double [[TMP37]], ptr [[TMP36]], align 8
213 // CHECK-64-NEXT: br label [[IFCONT6:%.*]]
214 // CHECK-64: else5:
215 // CHECK-64-NEXT: br label [[IFCONT6]]
216 // CHECK-64: ifcont6:
217 // CHECK-64-NEXT: ret void
220 // CHECK-64-LABEL: define {{[^@]+}}@_omp_reduction_inter_warp_copy_func
221 // CHECK-64-SAME: (ptr noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]]) #[[ATTR2]] {
222 // CHECK-64-NEXT: entry:
223 // CHECK-64-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
224 // CHECK-64-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
225 // CHECK-64-NEXT: [[DOTCNT_ADDR:%.*]] = alloca i32, align 4
226 // CHECK-64-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
227 // CHECK-64-NEXT: store i32 [[TMP1]], ptr [[DOTADDR1]], align 4
228 // CHECK-64-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
229 // CHECK-64-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
230 // CHECK-64-NEXT: [[NVPTX_LANE_ID:%.*]] = and i32 [[TMP4]], 31
231 // CHECK-64-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
232 // CHECK-64-NEXT: [[NVPTX_WARP_ID:%.*]] = ashr i32 [[TMP5]], 5
233 // CHECK-64-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTADDR]], align 8
234 // CHECK-64-NEXT: store i32 0, ptr [[DOTCNT_ADDR]], align 4
235 // CHECK-64-NEXT: br label [[PRECOND:%.*]]
236 // CHECK-64: precond:
237 // CHECK-64-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCNT_ADDR]], align 4
238 // CHECK-64-NEXT: [[TMP8:%.*]] = icmp ult i32 [[TMP7]], 2
239 // CHECK-64-NEXT: br i1 [[TMP8]], label [[BODY:%.*]], label [[EXIT:%.*]]
240 // CHECK-64: body:
241 // CHECK-64-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
242 // CHECK-64-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2:[0-9]+]], i32 [[TMP2]])
243 // CHECK-64-NEXT: [[WARP_MASTER:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
244 // CHECK-64-NEXT: br i1 [[WARP_MASTER]], label [[THEN:%.*]], label [[ELSE:%.*]]
245 // CHECK-64: then:
246 // CHECK-64-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP6]], i64 0, i64 0
247 // CHECK-64-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 8
248 // CHECK-64-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[TMP10]], i32 [[TMP7]]
249 // CHECK-64-NEXT: [[TMP12:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
250 // CHECK-64-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP11]], align 4
251 // CHECK-64-NEXT: store volatile i32 [[TMP13]], ptr addrspace(3) [[TMP12]], align 4
252 // CHECK-64-NEXT: br label [[IFCONT:%.*]]
253 // CHECK-64: else:
254 // CHECK-64-NEXT: br label [[IFCONT]]
255 // CHECK-64: ifcont:
256 // CHECK-64-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
257 // CHECK-64-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
258 // CHECK-64-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTADDR1]], align 4
259 // CHECK-64-NEXT: [[IS_ACTIVE_THREAD:%.*]] = icmp ult i32 [[TMP3]], [[TMP14]]
260 // CHECK-64-NEXT: br i1 [[IS_ACTIVE_THREAD]], label [[THEN2:%.*]], label [[ELSE3:%.*]]
261 // CHECK-64: then3:
262 // CHECK-64-NEXT: [[TMP15:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
263 // CHECK-64-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP6]], i64 0, i64 0
264 // CHECK-64-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8
265 // CHECK-64-NEXT: [[TMP18:%.*]] = getelementptr i32, ptr [[TMP17]], i32 [[TMP7]]
266 // CHECK-64-NEXT: [[TMP19:%.*]] = load volatile i32, ptr addrspace(3) [[TMP15]], align 4
267 // CHECK-64-NEXT: store i32 [[TMP19]], ptr [[TMP18]], align 4
268 // CHECK-64-NEXT: br label [[IFCONT4:%.*]]
269 // CHECK-64: else4:
270 // CHECK-64-NEXT: br label [[IFCONT4]]
271 // CHECK-64: ifcont5:
272 // CHECK-64-NEXT: [[TMP20:%.*]] = add nsw i32 [[TMP7]], 1
273 // CHECK-64-NEXT: store i32 [[TMP20]], ptr [[DOTCNT_ADDR]], align 4
274 // CHECK-64-NEXT: br label [[PRECOND]]
275 // CHECK-64: exit:
276 // CHECK-64-NEXT: ret void
279 // CHECK-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l29
280 // CHECK-64-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[C:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[D:%.*]]) #[[ATTR0]] {
281 // CHECK-64-NEXT: entry:
282 // CHECK-64-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
283 // CHECK-64-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
284 // CHECK-64-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
285 // CHECK-64-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [2 x ptr], align 8
286 // CHECK-64-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
287 // CHECK-64-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
288 // CHECK-64-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
289 // CHECK-64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[C_ADDR]], align 8
290 // CHECK-64-NEXT: [[TMP1:%.*]] = load ptr, ptr [[D_ADDR]], align 8
291 // CHECK-64-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l29_kernel_environment, ptr [[DYN_PTR]])
292 // CHECK-64-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP2]], -1
293 // CHECK-64-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
294 // CHECK-64: user_code.entry:
295 // CHECK-64-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
296 // CHECK-64-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
297 // CHECK-64-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 8
298 // CHECK-64-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
299 // CHECK-64-NEXT: store ptr [[TMP1]], ptr [[TMP5]], align 8
300 // CHECK-64-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP3]], i32 1, i32 -1, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l29_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i64 2)
301 // CHECK-64-NEXT: call void @__kmpc_target_deinit()
302 // CHECK-64-NEXT: ret void
303 // CHECK-64: worker.exit:
304 // CHECK-64-NEXT: ret void
307 // CHECK-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l29_omp_outlined
308 // CHECK-64-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[C:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[D:%.*]]) #[[ATTR1]] {
309 // CHECK-64-NEXT: entry:
310 // CHECK-64-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
311 // CHECK-64-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
312 // CHECK-64-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
313 // CHECK-64-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
314 // CHECK-64-NEXT: [[C1:%.*]] = alloca i8, align 1
315 // CHECK-64-NEXT: [[D2:%.*]] = alloca float, align 4
316 // CHECK-64-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x ptr], align 8
317 // CHECK-64-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
318 // CHECK-64-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
319 // CHECK-64-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
320 // CHECK-64-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
321 // CHECK-64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[C_ADDR]], align 8
322 // CHECK-64-NEXT: [[TMP1:%.*]] = load ptr, ptr [[D_ADDR]], align 8
323 // CHECK-64-NEXT: store i8 0, ptr [[C1]], align 1
324 // CHECK-64-NEXT: store float 1.000000e+00, ptr [[D2]], align 4
325 // CHECK-64-NEXT: [[TMP2:%.*]] = load i8, ptr [[C1]], align 1
326 // CHECK-64-NEXT: [[CONV:%.*]] = sext i8 [[TMP2]] to i32
327 // CHECK-64-NEXT: [[XOR:%.*]] = xor i32 [[CONV]], 2
328 // CHECK-64-NEXT: [[CONV3:%.*]] = trunc i32 [[XOR]] to i8
329 // CHECK-64-NEXT: store i8 [[CONV3]], ptr [[C1]], align 1
330 // CHECK-64-NEXT: [[TMP3:%.*]] = load float, ptr [[D2]], align 4
331 // CHECK-64-NEXT: [[MUL:%.*]] = fmul float [[TMP3]], 3.300000e+01
332 // CHECK-64-NEXT: store float [[MUL]], ptr [[D2]], align 4
333 // CHECK-64-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
334 // CHECK-64-NEXT: store ptr [[C1]], ptr [[TMP4]], align 8
335 // CHECK-64-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
336 // CHECK-64-NEXT: store ptr [[D2]], ptr [[TMP5]], align 8
337 // CHECK-64-NEXT: [[TMP6:%.*]] = call i32 @__kmpc_nvptx_parallel_reduce_nowait_v2(ptr @[[GLOB1]], i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @_omp_reduction_shuffle_and_reduce_func1, ptr @_omp_reduction_inter_warp_copy_func2)
338 // CHECK-64-NEXT: [[TMP7:%.*]] = icmp eq i32 [[TMP6]], 1
339 // CHECK-64-NEXT: br i1 [[TMP7]], label [[DOTOMP_REDUCTION_THEN:%.*]], label [[DOTOMP_REDUCTION_DONE:%.*]]
340 // CHECK-64: .omp.reduction.then:
341 // CHECK-64-NEXT: [[TMP8:%.*]] = load i8, ptr [[TMP0]], align 1
342 // CHECK-64-NEXT: [[CONV4:%.*]] = sext i8 [[TMP8]] to i32
343 // CHECK-64-NEXT: [[TMP9:%.*]] = load i8, ptr [[C1]], align 1
344 // CHECK-64-NEXT: [[CONV5:%.*]] = sext i8 [[TMP9]] to i32
345 // CHECK-64-NEXT: [[XOR6:%.*]] = xor i32 [[CONV4]], [[CONV5]]
346 // CHECK-64-NEXT: [[CONV7:%.*]] = trunc i32 [[XOR6]] to i8
347 // CHECK-64-NEXT: store i8 [[CONV7]], ptr [[TMP0]], align 1
348 // CHECK-64-NEXT: [[TMP10:%.*]] = load float, ptr [[TMP1]], align 4
349 // CHECK-64-NEXT: [[TMP11:%.*]] = load float, ptr [[D2]], align 4
350 // CHECK-64-NEXT: [[MUL8:%.*]] = fmul float [[TMP10]], [[TMP11]]
351 // CHECK-64-NEXT: store float [[MUL8]], ptr [[TMP1]], align 4
352 // CHECK-64-NEXT: br label [[DOTOMP_REDUCTION_DONE]]
353 // CHECK-64: .omp.reduction.done:
354 // CHECK-64-NEXT: ret void
357 // CHECK-64-LABEL: define {{[^@]+}}@_omp_reduction_shuffle_and_reduce_func1
358 // CHECK-64-SAME: (ptr noundef [[TMP0:%.*]], i16 noundef signext [[TMP1:%.*]], i16 noundef signext [[TMP2:%.*]], i16 noundef signext [[TMP3:%.*]]) #[[ATTR2]] {
359 // CHECK-64-NEXT: entry:
360 // CHECK-64-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
361 // CHECK-64-NEXT: [[DOTADDR1:%.*]] = alloca i16, align 2
362 // CHECK-64-NEXT: [[DOTADDR2:%.*]] = alloca i16, align 2
363 // CHECK-64-NEXT: [[DOTADDR3:%.*]] = alloca i16, align 2
364 // CHECK-64-NEXT: [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST:%.*]] = alloca [2 x ptr], align 8
365 // CHECK-64-NEXT: [[DOTOMP_REDUCTION_ELEMENT:%.*]] = alloca i8, align 1
366 // CHECK-64-NEXT: [[DOTOMP_REDUCTION_ELEMENT4:%.*]] = alloca float, align 4
367 // CHECK-64-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
368 // CHECK-64-NEXT: store i16 [[TMP1]], ptr [[DOTADDR1]], align 2
369 // CHECK-64-NEXT: store i16 [[TMP2]], ptr [[DOTADDR2]], align 2
370 // CHECK-64-NEXT: store i16 [[TMP3]], ptr [[DOTADDR3]], align 2
371 // CHECK-64-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR]], align 8
372 // CHECK-64-NEXT: [[TMP5:%.*]] = load i16, ptr [[DOTADDR1]], align 2
373 // CHECK-64-NEXT: [[TMP6:%.*]] = load i16, ptr [[DOTADDR2]], align 2
374 // CHECK-64-NEXT: [[TMP7:%.*]] = load i16, ptr [[DOTADDR3]], align 2
375 // CHECK-64-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i64 0, i64 0
376 // CHECK-64-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8
377 // CHECK-64-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 0
378 // CHECK-64-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[TMP9]], i64 1
379 // CHECK-64-NEXT: [[TMP12:%.*]] = load i8, ptr [[TMP9]], align 1
380 // CHECK-64-NEXT: [[TMP13:%.*]] = sext i8 [[TMP12]] to i32
381 // CHECK-64-NEXT: [[TMP14:%.*]] = call i32 @__kmpc_get_warp_size()
382 // CHECK-64-NEXT: [[TMP15:%.*]] = trunc i32 [[TMP14]] to i16
383 // CHECK-64-NEXT: [[TMP16:%.*]] = call i32 @__kmpc_shuffle_int32(i32 [[TMP13]], i16 [[TMP6]], i16 [[TMP15]])
384 // CHECK-64-NEXT: [[TMP17:%.*]] = trunc i32 [[TMP16]] to i8
385 // CHECK-64-NEXT: store i8 [[TMP17]], ptr [[DOTOMP_REDUCTION_ELEMENT]], align 1
386 // CHECK-64-NEXT: [[TMP18:%.*]] = getelementptr i8, ptr [[TMP9]], i64 1
387 // CHECK-64-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[DOTOMP_REDUCTION_ELEMENT]], i64 1
388 // CHECK-64-NEXT: store ptr [[DOTOMP_REDUCTION_ELEMENT]], ptr [[TMP10]], align 8
389 // CHECK-64-NEXT: [[TMP20:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i64 0, i64 1
390 // CHECK-64-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP20]], align 8
391 // CHECK-64-NEXT: [[TMP22:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 1
392 // CHECK-64-NEXT: [[TMP23:%.*]] = getelementptr float, ptr [[TMP21]], i64 1
393 // CHECK-64-NEXT: [[TMP24:%.*]] = load i32, ptr [[TMP21]], align 4
394 // CHECK-64-NEXT: [[TMP25:%.*]] = call i32 @__kmpc_get_warp_size()
395 // CHECK-64-NEXT: [[TMP26:%.*]] = trunc i32 [[TMP25]] to i16
396 // CHECK-64-NEXT: [[TMP27:%.*]] = call i32 @__kmpc_shuffle_int32(i32 [[TMP24]], i16 [[TMP6]], i16 [[TMP26]])
397 // CHECK-64-NEXT: store i32 [[TMP27]], ptr [[DOTOMP_REDUCTION_ELEMENT4]], align 4
398 // CHECK-64-NEXT: [[TMP28:%.*]] = getelementptr i32, ptr [[TMP21]], i64 1
399 // CHECK-64-NEXT: [[TMP29:%.*]] = getelementptr i32, ptr [[DOTOMP_REDUCTION_ELEMENT4]], i64 1
400 // CHECK-64-NEXT: store ptr [[DOTOMP_REDUCTION_ELEMENT4]], ptr [[TMP22]], align 8
401 // CHECK-64-NEXT: [[TMP30:%.*]] = icmp eq i16 [[TMP7]], 0
402 // CHECK-64-NEXT: [[TMP31:%.*]] = icmp eq i16 [[TMP7]], 1
403 // CHECK-64-NEXT: [[TMP32:%.*]] = icmp ult i16 [[TMP5]], [[TMP6]]
404 // CHECK-64-NEXT: [[TMP33:%.*]] = and i1 [[TMP31]], [[TMP32]]
405 // CHECK-64-NEXT: [[TMP34:%.*]] = icmp eq i16 [[TMP7]], 2
406 // CHECK-64-NEXT: [[TMP35:%.*]] = and i16 [[TMP5]], 1
407 // CHECK-64-NEXT: [[TMP36:%.*]] = icmp eq i16 [[TMP35]], 0
408 // CHECK-64-NEXT: [[TMP37:%.*]] = and i1 [[TMP34]], [[TMP36]]
409 // CHECK-64-NEXT: [[TMP38:%.*]] = icmp sgt i16 [[TMP6]], 0
410 // CHECK-64-NEXT: [[TMP39:%.*]] = and i1 [[TMP37]], [[TMP38]]
411 // CHECK-64-NEXT: [[TMP40:%.*]] = or i1 [[TMP30]], [[TMP33]]
412 // CHECK-64-NEXT: [[TMP41:%.*]] = or i1 [[TMP40]], [[TMP39]]
413 // CHECK-64-NEXT: br i1 [[TMP41]], label [[THEN:%.*]], label [[ELSE:%.*]]
414 // CHECK-64: then:
415 // CHECK-64-NEXT: call void @"{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l29_omp_outlined_omp$reduction$reduction_func"(ptr [[TMP4]], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]]) #[[ATTR3]]
416 // CHECK-64-NEXT: br label [[IFCONT:%.*]]
417 // CHECK-64: else:
418 // CHECK-64-NEXT: br label [[IFCONT]]
419 // CHECK-64: ifcont:
420 // CHECK-64-NEXT: [[TMP42:%.*]] = icmp eq i16 [[TMP7]], 1
421 // CHECK-64-NEXT: [[TMP43:%.*]] = icmp uge i16 [[TMP5]], [[TMP6]]
422 // CHECK-64-NEXT: [[TMP44:%.*]] = and i1 [[TMP42]], [[TMP43]]
423 // CHECK-64-NEXT: br i1 [[TMP44]], label [[THEN5:%.*]], label [[ELSE6:%.*]]
424 // CHECK-64: then5:
425 // CHECK-64-NEXT: [[TMP45:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 0
426 // CHECK-64-NEXT: [[TMP46:%.*]] = load ptr, ptr [[TMP45]], align 8
427 // CHECK-64-NEXT: [[TMP47:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i64 0, i64 0
428 // CHECK-64-NEXT: [[TMP48:%.*]] = load ptr, ptr [[TMP47]], align 8
429 // CHECK-64-NEXT: [[TMP49:%.*]] = load i8, ptr [[TMP46]], align 1
430 // CHECK-64-NEXT: store i8 [[TMP49]], ptr [[TMP48]], align 1
431 // CHECK-64-NEXT: [[TMP50:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 1
432 // CHECK-64-NEXT: [[TMP51:%.*]] = load ptr, ptr [[TMP50]], align 8
433 // CHECK-64-NEXT: [[TMP52:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i64 0, i64 1
434 // CHECK-64-NEXT: [[TMP53:%.*]] = load ptr, ptr [[TMP52]], align 8
435 // CHECK-64-NEXT: [[TMP54:%.*]] = load float, ptr [[TMP51]], align 4
436 // CHECK-64-NEXT: store float [[TMP54]], ptr [[TMP53]], align 4
437 // CHECK-64-NEXT: br label [[IFCONT7:%.*]]
438 // CHECK-64: else6:
439 // CHECK-64-NEXT: br label [[IFCONT7]]
440 // CHECK-64: ifcont7:
441 // CHECK-64-NEXT: ret void
444 // CHECK-64-LABEL: define {{[^@]+}}@_omp_reduction_inter_warp_copy_func2
445 // CHECK-64-SAME: (ptr noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]]) #[[ATTR2]] {
446 // CHECK-64-NEXT: entry:
447 // CHECK-64-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
448 // CHECK-64-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
449 // CHECK-64-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
450 // CHECK-64-NEXT: store i32 [[TMP1]], ptr [[DOTADDR1]], align 4
451 // CHECK-64-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
452 // CHECK-64-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
453 // CHECK-64-NEXT: [[NVPTX_LANE_ID:%.*]] = and i32 [[TMP4]], 31
454 // CHECK-64-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
455 // CHECK-64-NEXT: [[NVPTX_WARP_ID:%.*]] = ashr i32 [[TMP5]], 5
456 // CHECK-64-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTADDR]], align 8
457 // CHECK-64-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
458 // CHECK-64-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
459 // CHECK-64-NEXT: [[WARP_MASTER:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
460 // CHECK-64-NEXT: br i1 [[WARP_MASTER]], label [[THEN:%.*]], label [[ELSE:%.*]]
461 // CHECK-64: then:
462 // CHECK-64-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i64 0, i64 0
463 // CHECK-64-NEXT: [[TMP8:%.*]] = load ptr, ptr [[TMP7]], align 8
464 // CHECK-64-NEXT: [[TMP9:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
465 // CHECK-64-NEXT: [[TMP10:%.*]] = load i8, ptr [[TMP8]], align 1
466 // CHECK-64-NEXT: store volatile i8 [[TMP10]], ptr addrspace(3) [[TMP9]], align 1
467 // CHECK-64-NEXT: br label [[IFCONT:%.*]]
468 // CHECK-64: else:
469 // CHECK-64-NEXT: br label [[IFCONT]]
470 // CHECK-64: ifcont:
471 // CHECK-64-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
472 // CHECK-64-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
473 // CHECK-64-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTADDR1]], align 4
474 // CHECK-64-NEXT: [[IS_ACTIVE_THREAD:%.*]] = icmp ult i32 [[TMP3]], [[TMP11]]
475 // CHECK-64-NEXT: br i1 [[IS_ACTIVE_THREAD]], label [[THEN2:%.*]], label [[ELSE3:%.*]]
476 // CHECK-64: then3:
477 // CHECK-64-NEXT: [[TMP12:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
478 // CHECK-64-NEXT: [[TMP13:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i64 0, i64 0
479 // CHECK-64-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP13]], align 8
480 // CHECK-64-NEXT: [[TMP15:%.*]] = load volatile i8, ptr addrspace(3) [[TMP12]], align 1
481 // CHECK-64-NEXT: store i8 [[TMP15]], ptr [[TMP14]], align 1
482 // CHECK-64-NEXT: br label [[IFCONT4:%.*]]
483 // CHECK-64: else4:
484 // CHECK-64-NEXT: br label [[IFCONT4]]
485 // CHECK-64: ifcont5:
486 // CHECK-64-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
487 // CHECK-64-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
488 // CHECK-64-NEXT: [[WARP_MASTER5:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
489 // CHECK-64-NEXT: br i1 [[WARP_MASTER5]], label [[THEN6:%.*]], label [[ELSE7:%.*]]
490 // CHECK-64: then8:
491 // CHECK-64-NEXT: [[TMP16:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i64 0, i64 1
492 // CHECK-64-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8
493 // CHECK-64-NEXT: [[TMP18:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
494 // CHECK-64-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP17]], align 4
495 // CHECK-64-NEXT: store volatile i32 [[TMP19]], ptr addrspace(3) [[TMP18]], align 4
496 // CHECK-64-NEXT: br label [[IFCONT8:%.*]]
497 // CHECK-64: else9:
498 // CHECK-64-NEXT: br label [[IFCONT8]]
499 // CHECK-64: ifcont10:
500 // CHECK-64-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
501 // CHECK-64-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
502 // CHECK-64-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTADDR1]], align 4
503 // CHECK-64-NEXT: [[IS_ACTIVE_THREAD9:%.*]] = icmp ult i32 [[TMP3]], [[TMP20]]
504 // CHECK-64-NEXT: br i1 [[IS_ACTIVE_THREAD9]], label [[THEN10:%.*]], label [[ELSE11:%.*]]
505 // CHECK-64: then13:
506 // CHECK-64-NEXT: [[TMP21:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
507 // CHECK-64-NEXT: [[TMP22:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i64 0, i64 1
508 // CHECK-64-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP22]], align 8
509 // CHECK-64-NEXT: [[TMP24:%.*]] = load volatile i32, ptr addrspace(3) [[TMP21]], align 4
510 // CHECK-64-NEXT: store i32 [[TMP24]], ptr [[TMP23]], align 4
511 // CHECK-64-NEXT: br label [[IFCONT12:%.*]]
512 // CHECK-64: else14:
513 // CHECK-64-NEXT: br label [[IFCONT12]]
514 // CHECK-64: ifcont15:
515 // CHECK-64-NEXT: ret void
518 // CHECK-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l35
519 // CHECK-64-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[B:%.*]]) #[[ATTR0]] {
520 // CHECK-64-NEXT: entry:
521 // CHECK-64-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
522 // CHECK-64-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
523 // CHECK-64-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
524 // CHECK-64-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [2 x ptr], align 8
525 // CHECK-64-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
526 // CHECK-64-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
527 // CHECK-64-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
528 // CHECK-64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
529 // CHECK-64-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8
530 // CHECK-64-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l35_kernel_environment, ptr [[DYN_PTR]])
531 // CHECK-64-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP2]], -1
532 // CHECK-64-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
533 // CHECK-64: user_code.entry:
534 // CHECK-64-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
535 // CHECK-64-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
536 // CHECK-64-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 8
537 // CHECK-64-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
538 // CHECK-64-NEXT: store ptr [[TMP1]], ptr [[TMP5]], align 8
539 // CHECK-64-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP3]], i32 1, i32 -1, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l35_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i64 2)
540 // CHECK-64-NEXT: call void @__kmpc_target_deinit()
541 // CHECK-64-NEXT: ret void
542 // CHECK-64: worker.exit:
543 // CHECK-64-NEXT: ret void
546 // CHECK-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l35_omp_outlined
547 // CHECK-64-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[B:%.*]]) #[[ATTR1]] {
548 // CHECK-64-NEXT: entry:
549 // CHECK-64-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
550 // CHECK-64-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
551 // CHECK-64-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
552 // CHECK-64-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
553 // CHECK-64-NEXT: [[A1:%.*]] = alloca i32, align 4
554 // CHECK-64-NEXT: [[B2:%.*]] = alloca i16, align 2
555 // CHECK-64-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x ptr], align 8
556 // CHECK-64-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
557 // CHECK-64-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
558 // CHECK-64-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
559 // CHECK-64-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
560 // CHECK-64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
561 // CHECK-64-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8
562 // CHECK-64-NEXT: store i32 0, ptr [[A1]], align 4
563 // CHECK-64-NEXT: store i16 -32768, ptr [[B2]], align 2
564 // CHECK-64-NEXT: [[TMP2:%.*]] = load i32, ptr [[A1]], align 4
565 // CHECK-64-NEXT: [[OR:%.*]] = or i32 [[TMP2]], 1
566 // CHECK-64-NEXT: store i32 [[OR]], ptr [[A1]], align 4
567 // CHECK-64-NEXT: [[TMP3:%.*]] = load i16, ptr [[B2]], align 2
568 // CHECK-64-NEXT: [[CONV:%.*]] = sext i16 [[TMP3]] to i32
569 // CHECK-64-NEXT: [[CMP:%.*]] = icmp sgt i32 99, [[CONV]]
570 // CHECK-64-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
571 // CHECK-64: cond.true:
572 // CHECK-64-NEXT: br label [[COND_END:%.*]]
573 // CHECK-64: cond.false:
574 // CHECK-64-NEXT: [[TMP4:%.*]] = load i16, ptr [[B2]], align 2
575 // CHECK-64-NEXT: [[CONV3:%.*]] = sext i16 [[TMP4]] to i32
576 // CHECK-64-NEXT: br label [[COND_END]]
577 // CHECK-64: cond.end:
578 // CHECK-64-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[CONV3]], [[COND_FALSE]] ]
579 // CHECK-64-NEXT: [[CONV4:%.*]] = trunc i32 [[COND]] to i16
580 // CHECK-64-NEXT: store i16 [[CONV4]], ptr [[B2]], align 2
581 // CHECK-64-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
582 // CHECK-64-NEXT: store ptr [[A1]], ptr [[TMP5]], align 8
583 // CHECK-64-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
584 // CHECK-64-NEXT: store ptr [[B2]], ptr [[TMP6]], align 8
585 // CHECK-64-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_nvptx_parallel_reduce_nowait_v2(ptr @[[GLOB1]], i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @_omp_reduction_shuffle_and_reduce_func3, ptr @_omp_reduction_inter_warp_copy_func4)
586 // CHECK-64-NEXT: [[TMP8:%.*]] = icmp eq i32 [[TMP7]], 1
587 // CHECK-64-NEXT: br i1 [[TMP8]], label [[DOTOMP_REDUCTION_THEN:%.*]], label [[DOTOMP_REDUCTION_DONE:%.*]]
588 // CHECK-64: .omp.reduction.then:
589 // CHECK-64-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP0]], align 4
590 // CHECK-64-NEXT: [[TMP10:%.*]] = load i32, ptr [[A1]], align 4
591 // CHECK-64-NEXT: [[OR5:%.*]] = or i32 [[TMP9]], [[TMP10]]
592 // CHECK-64-NEXT: store i32 [[OR5]], ptr [[TMP0]], align 4
593 // CHECK-64-NEXT: [[TMP11:%.*]] = load i16, ptr [[TMP1]], align 2
594 // CHECK-64-NEXT: [[CONV6:%.*]] = sext i16 [[TMP11]] to i32
595 // CHECK-64-NEXT: [[TMP12:%.*]] = load i16, ptr [[B2]], align 2
596 // CHECK-64-NEXT: [[CONV7:%.*]] = sext i16 [[TMP12]] to i32
597 // CHECK-64-NEXT: [[CMP8:%.*]] = icmp sgt i32 [[CONV6]], [[CONV7]]
598 // CHECK-64-NEXT: br i1 [[CMP8]], label [[COND_TRUE9:%.*]], label [[COND_FALSE10:%.*]]
599 // CHECK-64: cond.true9:
600 // CHECK-64-NEXT: [[TMP13:%.*]] = load i16, ptr [[TMP1]], align 2
601 // CHECK-64-NEXT: br label [[COND_END11:%.*]]
602 // CHECK-64: cond.false10:
603 // CHECK-64-NEXT: [[TMP14:%.*]] = load i16, ptr [[B2]], align 2
604 // CHECK-64-NEXT: br label [[COND_END11]]
605 // CHECK-64: cond.end11:
606 // CHECK-64-NEXT: [[COND12:%.*]] = phi i16 [ [[TMP13]], [[COND_TRUE9]] ], [ [[TMP14]], [[COND_FALSE10]] ]
607 // CHECK-64-NEXT: store i16 [[COND12]], ptr [[TMP1]], align 2
608 // CHECK-64-NEXT: br label [[DOTOMP_REDUCTION_DONE]]
609 // CHECK-64: .omp.reduction.done:
610 // CHECK-64-NEXT: ret void
613 // CHECK-64-LABEL: define {{[^@]+}}@_omp_reduction_shuffle_and_reduce_func3
614 // CHECK-64-SAME: (ptr noundef [[TMP0:%.*]], i16 noundef signext [[TMP1:%.*]], i16 noundef signext [[TMP2:%.*]], i16 noundef signext [[TMP3:%.*]]) #[[ATTR2]] {
615 // CHECK-64-NEXT: entry:
616 // CHECK-64-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
617 // CHECK-64-NEXT: [[DOTADDR1:%.*]] = alloca i16, align 2
618 // CHECK-64-NEXT: [[DOTADDR2:%.*]] = alloca i16, align 2
619 // CHECK-64-NEXT: [[DOTADDR3:%.*]] = alloca i16, align 2
620 // CHECK-64-NEXT: [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST:%.*]] = alloca [2 x ptr], align 8
621 // CHECK-64-NEXT: [[DOTOMP_REDUCTION_ELEMENT:%.*]] = alloca i32, align 4
622 // CHECK-64-NEXT: [[DOTOMP_REDUCTION_ELEMENT4:%.*]] = alloca i16, align 2
623 // CHECK-64-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
624 // CHECK-64-NEXT: store i16 [[TMP1]], ptr [[DOTADDR1]], align 2
625 // CHECK-64-NEXT: store i16 [[TMP2]], ptr [[DOTADDR2]], align 2
626 // CHECK-64-NEXT: store i16 [[TMP3]], ptr [[DOTADDR3]], align 2
627 // CHECK-64-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR]], align 8
628 // CHECK-64-NEXT: [[TMP5:%.*]] = load i16, ptr [[DOTADDR1]], align 2
629 // CHECK-64-NEXT: [[TMP6:%.*]] = load i16, ptr [[DOTADDR2]], align 2
630 // CHECK-64-NEXT: [[TMP7:%.*]] = load i16, ptr [[DOTADDR3]], align 2
631 // CHECK-64-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i64 0, i64 0
632 // CHECK-64-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8
633 // CHECK-64-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 0
634 // CHECK-64-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[TMP9]], i64 1
635 // CHECK-64-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP9]], align 4
636 // CHECK-64-NEXT: [[TMP13:%.*]] = call i32 @__kmpc_get_warp_size()
637 // CHECK-64-NEXT: [[TMP14:%.*]] = trunc i32 [[TMP13]] to i16
638 // CHECK-64-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_shuffle_int32(i32 [[TMP12]], i16 [[TMP6]], i16 [[TMP14]])
639 // CHECK-64-NEXT: store i32 [[TMP15]], ptr [[DOTOMP_REDUCTION_ELEMENT]], align 4
640 // CHECK-64-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[TMP9]], i64 1
641 // CHECK-64-NEXT: [[TMP17:%.*]] = getelementptr i32, ptr [[DOTOMP_REDUCTION_ELEMENT]], i64 1
642 // CHECK-64-NEXT: store ptr [[DOTOMP_REDUCTION_ELEMENT]], ptr [[TMP10]], align 8
643 // CHECK-64-NEXT: [[TMP18:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i64 0, i64 1
644 // CHECK-64-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP18]], align 8
645 // CHECK-64-NEXT: [[TMP20:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 1
646 // CHECK-64-NEXT: [[TMP21:%.*]] = getelementptr i16, ptr [[TMP19]], i64 1
647 // CHECK-64-NEXT: [[TMP22:%.*]] = load i16, ptr [[TMP19]], align 2
648 // CHECK-64-NEXT: [[TMP23:%.*]] = sext i16 [[TMP22]] to i32
649 // CHECK-64-NEXT: [[TMP24:%.*]] = call i32 @__kmpc_get_warp_size()
650 // CHECK-64-NEXT: [[TMP25:%.*]] = trunc i32 [[TMP24]] to i16
651 // CHECK-64-NEXT: [[TMP26:%.*]] = call i32 @__kmpc_shuffle_int32(i32 [[TMP23]], i16 [[TMP6]], i16 [[TMP25]])
652 // CHECK-64-NEXT: [[TMP27:%.*]] = trunc i32 [[TMP26]] to i16
653 // CHECK-64-NEXT: store i16 [[TMP27]], ptr [[DOTOMP_REDUCTION_ELEMENT4]], align 2
654 // CHECK-64-NEXT: [[TMP28:%.*]] = getelementptr i16, ptr [[TMP19]], i64 1
655 // CHECK-64-NEXT: [[TMP29:%.*]] = getelementptr i16, ptr [[DOTOMP_REDUCTION_ELEMENT4]], i64 1
656 // CHECK-64-NEXT: store ptr [[DOTOMP_REDUCTION_ELEMENT4]], ptr [[TMP20]], align 8
657 // CHECK-64-NEXT: [[TMP30:%.*]] = icmp eq i16 [[TMP7]], 0
658 // CHECK-64-NEXT: [[TMP31:%.*]] = icmp eq i16 [[TMP7]], 1
659 // CHECK-64-NEXT: [[TMP32:%.*]] = icmp ult i16 [[TMP5]], [[TMP6]]
660 // CHECK-64-NEXT: [[TMP33:%.*]] = and i1 [[TMP31]], [[TMP32]]
661 // CHECK-64-NEXT: [[TMP34:%.*]] = icmp eq i16 [[TMP7]], 2
662 // CHECK-64-NEXT: [[TMP35:%.*]] = and i16 [[TMP5]], 1
663 // CHECK-64-NEXT: [[TMP36:%.*]] = icmp eq i16 [[TMP35]], 0
664 // CHECK-64-NEXT: [[TMP37:%.*]] = and i1 [[TMP34]], [[TMP36]]
665 // CHECK-64-NEXT: [[TMP38:%.*]] = icmp sgt i16 [[TMP6]], 0
666 // CHECK-64-NEXT: [[TMP39:%.*]] = and i1 [[TMP37]], [[TMP38]]
667 // CHECK-64-NEXT: [[TMP40:%.*]] = or i1 [[TMP30]], [[TMP33]]
668 // CHECK-64-NEXT: [[TMP41:%.*]] = or i1 [[TMP40]], [[TMP39]]
669 // CHECK-64-NEXT: br i1 [[TMP41]], label [[THEN:%.*]], label [[ELSE:%.*]]
670 // CHECK-64: then:
671 // CHECK-64-NEXT: call void @"{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l35_omp_outlined_omp$reduction$reduction_func"(ptr [[TMP4]], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]]) #[[ATTR3]]
672 // CHECK-64-NEXT: br label [[IFCONT:%.*]]
673 // CHECK-64: else:
674 // CHECK-64-NEXT: br label [[IFCONT]]
675 // CHECK-64: ifcont:
676 // CHECK-64-NEXT: [[TMP42:%.*]] = icmp eq i16 [[TMP7]], 1
677 // CHECK-64-NEXT: [[TMP43:%.*]] = icmp uge i16 [[TMP5]], [[TMP6]]
678 // CHECK-64-NEXT: [[TMP44:%.*]] = and i1 [[TMP42]], [[TMP43]]
679 // CHECK-64-NEXT: br i1 [[TMP44]], label [[THEN5:%.*]], label [[ELSE6:%.*]]
680 // CHECK-64: then5:
681 // CHECK-64-NEXT: [[TMP45:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 0
682 // CHECK-64-NEXT: [[TMP46:%.*]] = load ptr, ptr [[TMP45]], align 8
683 // CHECK-64-NEXT: [[TMP47:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i64 0, i64 0
684 // CHECK-64-NEXT: [[TMP48:%.*]] = load ptr, ptr [[TMP47]], align 8
685 // CHECK-64-NEXT: [[TMP49:%.*]] = load i32, ptr [[TMP46]], align 4
686 // CHECK-64-NEXT: store i32 [[TMP49]], ptr [[TMP48]], align 4
687 // CHECK-64-NEXT: [[TMP50:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 1
688 // CHECK-64-NEXT: [[TMP51:%.*]] = load ptr, ptr [[TMP50]], align 8
689 // CHECK-64-NEXT: [[TMP52:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i64 0, i64 1
690 // CHECK-64-NEXT: [[TMP53:%.*]] = load ptr, ptr [[TMP52]], align 8
691 // CHECK-64-NEXT: [[TMP54:%.*]] = load i16, ptr [[TMP51]], align 2
692 // CHECK-64-NEXT: store i16 [[TMP54]], ptr [[TMP53]], align 2
693 // CHECK-64-NEXT: br label [[IFCONT7:%.*]]
694 // CHECK-64: else6:
695 // CHECK-64-NEXT: br label [[IFCONT7]]
696 // CHECK-64: ifcont7:
697 // CHECK-64-NEXT: ret void
700 // CHECK-64-LABEL: define {{[^@]+}}@_omp_reduction_inter_warp_copy_func4
701 // CHECK-64-SAME: (ptr noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]]) #[[ATTR2]] {
702 // CHECK-64-NEXT: entry:
703 // CHECK-64-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
704 // CHECK-64-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
705 // CHECK-64-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
706 // CHECK-64-NEXT: store i32 [[TMP1]], ptr [[DOTADDR1]], align 4
707 // CHECK-64-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
708 // CHECK-64-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
709 // CHECK-64-NEXT: [[NVPTX_LANE_ID:%.*]] = and i32 [[TMP4]], 31
710 // CHECK-64-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
711 // CHECK-64-NEXT: [[NVPTX_WARP_ID:%.*]] = ashr i32 [[TMP5]], 5
712 // CHECK-64-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTADDR]], align 8
713 // CHECK-64-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
714 // CHECK-64-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
715 // CHECK-64-NEXT: [[WARP_MASTER:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
716 // CHECK-64-NEXT: br i1 [[WARP_MASTER]], label [[THEN:%.*]], label [[ELSE:%.*]]
717 // CHECK-64: then:
718 // CHECK-64-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i64 0, i64 0
719 // CHECK-64-NEXT: [[TMP8:%.*]] = load ptr, ptr [[TMP7]], align 8
720 // CHECK-64-NEXT: [[TMP9:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
721 // CHECK-64-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP8]], align 4
722 // CHECK-64-NEXT: store volatile i32 [[TMP10]], ptr addrspace(3) [[TMP9]], align 4
723 // CHECK-64-NEXT: br label [[IFCONT:%.*]]
724 // CHECK-64: else:
725 // CHECK-64-NEXT: br label [[IFCONT]]
726 // CHECK-64: ifcont:
727 // CHECK-64-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
728 // CHECK-64-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
729 // CHECK-64-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTADDR1]], align 4
730 // CHECK-64-NEXT: [[IS_ACTIVE_THREAD:%.*]] = icmp ult i32 [[TMP3]], [[TMP11]]
731 // CHECK-64-NEXT: br i1 [[IS_ACTIVE_THREAD]], label [[THEN2:%.*]], label [[ELSE3:%.*]]
732 // CHECK-64: then3:
733 // CHECK-64-NEXT: [[TMP12:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
734 // CHECK-64-NEXT: [[TMP13:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i64 0, i64 0
735 // CHECK-64-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP13]], align 8
736 // CHECK-64-NEXT: [[TMP15:%.*]] = load volatile i32, ptr addrspace(3) [[TMP12]], align 4
737 // CHECK-64-NEXT: store i32 [[TMP15]], ptr [[TMP14]], align 4
738 // CHECK-64-NEXT: br label [[IFCONT4:%.*]]
739 // CHECK-64: else4:
740 // CHECK-64-NEXT: br label [[IFCONT4]]
741 // CHECK-64: ifcont5:
742 // CHECK-64-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
743 // CHECK-64-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
744 // CHECK-64-NEXT: [[WARP_MASTER5:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
745 // CHECK-64-NEXT: br i1 [[WARP_MASTER5]], label [[THEN6:%.*]], label [[ELSE7:%.*]]
746 // CHECK-64: then8:
747 // CHECK-64-NEXT: [[TMP16:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i64 0, i64 1
748 // CHECK-64-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8
749 // CHECK-64-NEXT: [[TMP18:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
750 // CHECK-64-NEXT: [[TMP19:%.*]] = load i16, ptr [[TMP17]], align 2
751 // CHECK-64-NEXT: store volatile i16 [[TMP19]], ptr addrspace(3) [[TMP18]], align 2
752 // CHECK-64-NEXT: br label [[IFCONT8:%.*]]
753 // CHECK-64: else9:
754 // CHECK-64-NEXT: br label [[IFCONT8]]
755 // CHECK-64: ifcont10:
756 // CHECK-64-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
757 // CHECK-64-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
758 // CHECK-64-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTADDR1]], align 4
759 // CHECK-64-NEXT: [[IS_ACTIVE_THREAD9:%.*]] = icmp ult i32 [[TMP3]], [[TMP20]]
760 // CHECK-64-NEXT: br i1 [[IS_ACTIVE_THREAD9]], label [[THEN10:%.*]], label [[ELSE11:%.*]]
761 // CHECK-64: then13:
762 // CHECK-64-NEXT: [[TMP21:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
763 // CHECK-64-NEXT: [[TMP22:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i64 0, i64 1
764 // CHECK-64-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP22]], align 8
765 // CHECK-64-NEXT: [[TMP24:%.*]] = load volatile i16, ptr addrspace(3) [[TMP21]], align 2
766 // CHECK-64-NEXT: store i16 [[TMP24]], ptr [[TMP23]], align 2
767 // CHECK-64-NEXT: br label [[IFCONT12:%.*]]
768 // CHECK-64: else14:
769 // CHECK-64-NEXT: br label [[IFCONT12]]
770 // CHECK-64: ifcont15:
771 // CHECK-64-NEXT: ret void
774 // CHECK-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l24
775 // CHECK-32-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[E:%.*]]) #[[ATTR0:[0-9]+]] {
776 // CHECK-32-NEXT: entry:
777 // CHECK-32-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
778 // CHECK-32-NEXT: [[E_ADDR:%.*]] = alloca ptr, align 4
779 // CHECK-32-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x ptr], align 4
780 // CHECK-32-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
781 // CHECK-32-NEXT: store ptr [[E]], ptr [[E_ADDR]], align 4
782 // CHECK-32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[E_ADDR]], align 4
783 // CHECK-32-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l24_kernel_environment, ptr [[DYN_PTR]])
784 // CHECK-32-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
785 // CHECK-32-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
786 // CHECK-32: user_code.entry:
787 // CHECK-32-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1:[0-9]+]])
788 // CHECK-32-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
789 // CHECK-32-NEXT: store ptr [[TMP0]], ptr [[TMP3]], align 4
790 // CHECK-32-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l24_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i32 1)
791 // CHECK-32-NEXT: call void @__kmpc_target_deinit()
792 // CHECK-32-NEXT: ret void
793 // CHECK-32: worker.exit:
794 // CHECK-32-NEXT: ret void
797 // CHECK-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l24_omp_outlined
798 // CHECK-32-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[E:%.*]]) #[[ATTR1:[0-9]+]] {
799 // CHECK-32-NEXT: entry:
800 // CHECK-32-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
801 // CHECK-32-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
802 // CHECK-32-NEXT: [[E_ADDR:%.*]] = alloca ptr, align 4
803 // CHECK-32-NEXT: [[E1:%.*]] = alloca double, align 8
804 // CHECK-32-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 4
805 // CHECK-32-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
806 // CHECK-32-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
807 // CHECK-32-NEXT: store ptr [[E]], ptr [[E_ADDR]], align 4
808 // CHECK-32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[E_ADDR]], align 4
809 // CHECK-32-NEXT: store double 0.000000e+00, ptr [[E1]], align 8
810 // CHECK-32-NEXT: [[TMP1:%.*]] = load double, ptr [[E1]], align 8
811 // CHECK-32-NEXT: [[ADD:%.*]] = fadd double [[TMP1]], 5.000000e+00
812 // CHECK-32-NEXT: store double [[ADD]], ptr [[E1]], align 8
813 // CHECK-32-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
814 // CHECK-32-NEXT: store ptr [[E1]], ptr [[TMP2]], align 4
815 // CHECK-32-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_nvptx_parallel_reduce_nowait_v2(ptr @[[GLOB1]], i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @_omp_reduction_shuffle_and_reduce_func, ptr @_omp_reduction_inter_warp_copy_func)
816 // CHECK-32-NEXT: [[TMP4:%.*]] = icmp eq i32 [[TMP3]], 1
817 // CHECK-32-NEXT: br i1 [[TMP4]], label [[DOTOMP_REDUCTION_THEN:%.*]], label [[DOTOMP_REDUCTION_DONE:%.*]]
818 // CHECK-32: .omp.reduction.then:
819 // CHECK-32-NEXT: [[TMP5:%.*]] = load double, ptr [[TMP0]], align 8
820 // CHECK-32-NEXT: [[TMP6:%.*]] = load double, ptr [[E1]], align 8
821 // CHECK-32-NEXT: [[ADD2:%.*]] = fadd double [[TMP5]], [[TMP6]]
822 // CHECK-32-NEXT: store double [[ADD2]], ptr [[TMP0]], align 8
823 // CHECK-32-NEXT: br label [[DOTOMP_REDUCTION_DONE]]
824 // CHECK-32: .omp.reduction.done:
825 // CHECK-32-NEXT: ret void
828 // CHECK-32-LABEL: define {{[^@]+}}@_omp_reduction_shuffle_and_reduce_func
829 // CHECK-32-SAME: (ptr noundef [[TMP0:%.*]], i16 noundef signext [[TMP1:%.*]], i16 noundef signext [[TMP2:%.*]], i16 noundef signext [[TMP3:%.*]]) #[[ATTR2:[0-9]+]] {
830 // CHECK-32-NEXT: entry:
831 // CHECK-32-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
832 // CHECK-32-NEXT: [[DOTADDR1:%.*]] = alloca i16, align 2
833 // CHECK-32-NEXT: [[DOTADDR2:%.*]] = alloca i16, align 2
834 // CHECK-32-NEXT: [[DOTADDR3:%.*]] = alloca i16, align 2
835 // CHECK-32-NEXT: [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST:%.*]] = alloca [1 x ptr], align 4
836 // CHECK-32-NEXT: [[DOTOMP_REDUCTION_ELEMENT:%.*]] = alloca double, align 8
837 // CHECK-32-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
838 // CHECK-32-NEXT: store i16 [[TMP1]], ptr [[DOTADDR1]], align 2
839 // CHECK-32-NEXT: store i16 [[TMP2]], ptr [[DOTADDR2]], align 2
840 // CHECK-32-NEXT: store i16 [[TMP3]], ptr [[DOTADDR3]], align 2
841 // CHECK-32-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR]], align 4
842 // CHECK-32-NEXT: [[TMP5:%.*]] = load i16, ptr [[DOTADDR1]], align 2
843 // CHECK-32-NEXT: [[TMP6:%.*]] = load i16, ptr [[DOTADDR2]], align 2
844 // CHECK-32-NEXT: [[TMP7:%.*]] = load i16, ptr [[DOTADDR3]], align 2
845 // CHECK-32-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i32 0, i32 0
846 // CHECK-32-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 4
847 // CHECK-32-NEXT: [[TMP10:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 0
848 // CHECK-32-NEXT: [[TMP11:%.*]] = getelementptr double, ptr [[TMP9]], i32 1
849 // CHECK-32-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP9]], align 8
850 // CHECK-32-NEXT: [[TMP13:%.*]] = call i32 @__kmpc_get_warp_size()
851 // CHECK-32-NEXT: [[TMP14:%.*]] = trunc i32 [[TMP13]] to i16
852 // CHECK-32-NEXT: [[TMP15:%.*]] = call i64 @__kmpc_shuffle_int64(i64 [[TMP12]], i16 [[TMP6]], i16 [[TMP14]])
853 // CHECK-32-NEXT: store i64 [[TMP15]], ptr [[DOTOMP_REDUCTION_ELEMENT]], align 8
854 // CHECK-32-NEXT: [[TMP16:%.*]] = getelementptr i64, ptr [[TMP9]], i32 1
855 // CHECK-32-NEXT: [[TMP17:%.*]] = getelementptr i64, ptr [[DOTOMP_REDUCTION_ELEMENT]], i32 1
856 // CHECK-32-NEXT: store ptr [[DOTOMP_REDUCTION_ELEMENT]], ptr [[TMP10]], align 4
857 // CHECK-32-NEXT: [[TMP18:%.*]] = icmp eq i16 [[TMP7]], 0
858 // CHECK-32-NEXT: [[TMP19:%.*]] = icmp eq i16 [[TMP7]], 1
859 // CHECK-32-NEXT: [[TMP20:%.*]] = icmp ult i16 [[TMP5]], [[TMP6]]
860 // CHECK-32-NEXT: [[TMP21:%.*]] = and i1 [[TMP19]], [[TMP20]]
861 // CHECK-32-NEXT: [[TMP22:%.*]] = icmp eq i16 [[TMP7]], 2
862 // CHECK-32-NEXT: [[TMP23:%.*]] = and i16 [[TMP5]], 1
863 // CHECK-32-NEXT: [[TMP24:%.*]] = icmp eq i16 [[TMP23]], 0
864 // CHECK-32-NEXT: [[TMP25:%.*]] = and i1 [[TMP22]], [[TMP24]]
865 // CHECK-32-NEXT: [[TMP26:%.*]] = icmp sgt i16 [[TMP6]], 0
866 // CHECK-32-NEXT: [[TMP27:%.*]] = and i1 [[TMP25]], [[TMP26]]
867 // CHECK-32-NEXT: [[TMP28:%.*]] = or i1 [[TMP18]], [[TMP21]]
868 // CHECK-32-NEXT: [[TMP29:%.*]] = or i1 [[TMP28]], [[TMP27]]
869 // CHECK-32-NEXT: br i1 [[TMP29]], label [[THEN:%.*]], label [[ELSE:%.*]]
870 // CHECK-32: then:
871 // CHECK-32-NEXT: call void @"{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l24_omp_outlined_omp$reduction$reduction_func"(ptr [[TMP4]], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]]) #[[ATTR3:[0-9]+]]
872 // CHECK-32-NEXT: br label [[IFCONT:%.*]]
873 // CHECK-32: else:
874 // CHECK-32-NEXT: br label [[IFCONT]]
875 // CHECK-32: ifcont:
876 // CHECK-32-NEXT: [[TMP30:%.*]] = icmp eq i16 [[TMP7]], 1
877 // CHECK-32-NEXT: [[TMP31:%.*]] = icmp uge i16 [[TMP5]], [[TMP6]]
878 // CHECK-32-NEXT: [[TMP32:%.*]] = and i1 [[TMP30]], [[TMP31]]
879 // CHECK-32-NEXT: br i1 [[TMP32]], label [[THEN4:%.*]], label [[ELSE5:%.*]]
880 // CHECK-32: then4:
881 // CHECK-32-NEXT: [[TMP33:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 0
882 // CHECK-32-NEXT: [[TMP34:%.*]] = load ptr, ptr [[TMP33]], align 4
883 // CHECK-32-NEXT: [[TMP35:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i32 0, i32 0
884 // CHECK-32-NEXT: [[TMP36:%.*]] = load ptr, ptr [[TMP35]], align 4
885 // CHECK-32-NEXT: [[TMP37:%.*]] = load double, ptr [[TMP34]], align 8
886 // CHECK-32-NEXT: store double [[TMP37]], ptr [[TMP36]], align 8
887 // CHECK-32-NEXT: br label [[IFCONT6:%.*]]
888 // CHECK-32: else5:
889 // CHECK-32-NEXT: br label [[IFCONT6]]
890 // CHECK-32: ifcont6:
891 // CHECK-32-NEXT: ret void
894 // CHECK-32-LABEL: define {{[^@]+}}@_omp_reduction_inter_warp_copy_func
895 // CHECK-32-SAME: (ptr noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]]) #[[ATTR2]] {
896 // CHECK-32-NEXT: entry:
897 // CHECK-32-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
898 // CHECK-32-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
899 // CHECK-32-NEXT: [[DOTCNT_ADDR:%.*]] = alloca i32, align 4
900 // CHECK-32-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
901 // CHECK-32-NEXT: store i32 [[TMP1]], ptr [[DOTADDR1]], align 4
902 // CHECK-32-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
903 // CHECK-32-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
904 // CHECK-32-NEXT: [[NVPTX_LANE_ID:%.*]] = and i32 [[TMP4]], 31
905 // CHECK-32-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
906 // CHECK-32-NEXT: [[NVPTX_WARP_ID:%.*]] = ashr i32 [[TMP5]], 5
907 // CHECK-32-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTADDR]], align 4
908 // CHECK-32-NEXT: store i32 0, ptr [[DOTCNT_ADDR]], align 4
909 // CHECK-32-NEXT: br label [[PRECOND:%.*]]
910 // CHECK-32: precond:
911 // CHECK-32-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCNT_ADDR]], align 4
912 // CHECK-32-NEXT: [[TMP8:%.*]] = icmp ult i32 [[TMP7]], 2
913 // CHECK-32-NEXT: br i1 [[TMP8]], label [[BODY:%.*]], label [[EXIT:%.*]]
914 // CHECK-32: body:
915 // CHECK-32-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
916 // CHECK-32-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2:[0-9]+]], i32 [[TMP2]])
917 // CHECK-32-NEXT: [[WARP_MASTER:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
918 // CHECK-32-NEXT: br i1 [[WARP_MASTER]], label [[THEN:%.*]], label [[ELSE:%.*]]
919 // CHECK-32: then:
920 // CHECK-32-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP6]], i32 0, i32 0
921 // CHECK-32-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 4
922 // CHECK-32-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[TMP10]], i32 [[TMP7]]
923 // CHECK-32-NEXT: [[TMP12:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
924 // CHECK-32-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP11]], align 4
925 // CHECK-32-NEXT: store volatile i32 [[TMP13]], ptr addrspace(3) [[TMP12]], align 4
926 // CHECK-32-NEXT: br label [[IFCONT:%.*]]
927 // CHECK-32: else:
928 // CHECK-32-NEXT: br label [[IFCONT]]
929 // CHECK-32: ifcont:
930 // CHECK-32-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
931 // CHECK-32-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
932 // CHECK-32-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTADDR1]], align 4
933 // CHECK-32-NEXT: [[IS_ACTIVE_THREAD:%.*]] = icmp ult i32 [[TMP3]], [[TMP14]]
934 // CHECK-32-NEXT: br i1 [[IS_ACTIVE_THREAD]], label [[THEN2:%.*]], label [[ELSE3:%.*]]
935 // CHECK-32: then3:
936 // CHECK-32-NEXT: [[TMP15:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
937 // CHECK-32-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP6]], i32 0, i32 0
938 // CHECK-32-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 4
939 // CHECK-32-NEXT: [[TMP18:%.*]] = getelementptr i32, ptr [[TMP17]], i32 [[TMP7]]
940 // CHECK-32-NEXT: [[TMP19:%.*]] = load volatile i32, ptr addrspace(3) [[TMP15]], align 4
941 // CHECK-32-NEXT: store i32 [[TMP19]], ptr [[TMP18]], align 4
942 // CHECK-32-NEXT: br label [[IFCONT4:%.*]]
943 // CHECK-32: else4:
944 // CHECK-32-NEXT: br label [[IFCONT4]]
945 // CHECK-32: ifcont5:
946 // CHECK-32-NEXT: [[TMP20:%.*]] = add nsw i32 [[TMP7]], 1
947 // CHECK-32-NEXT: store i32 [[TMP20]], ptr [[DOTCNT_ADDR]], align 4
948 // CHECK-32-NEXT: br label [[PRECOND]]
949 // CHECK-32: exit:
950 // CHECK-32-NEXT: ret void
953 // CHECK-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l29
954 // CHECK-32-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[C:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[D:%.*]]) #[[ATTR0]] {
955 // CHECK-32-NEXT: entry:
956 // CHECK-32-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
957 // CHECK-32-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
958 // CHECK-32-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 4
959 // CHECK-32-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [2 x ptr], align 4
960 // CHECK-32-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
961 // CHECK-32-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
962 // CHECK-32-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4
963 // CHECK-32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[C_ADDR]], align 4
964 // CHECK-32-NEXT: [[TMP1:%.*]] = load ptr, ptr [[D_ADDR]], align 4
965 // CHECK-32-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l29_kernel_environment, ptr [[DYN_PTR]])
966 // CHECK-32-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP2]], -1
967 // CHECK-32-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
968 // CHECK-32: user_code.entry:
969 // CHECK-32-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
970 // CHECK-32-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
971 // CHECK-32-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 4
972 // CHECK-32-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
973 // CHECK-32-NEXT: store ptr [[TMP1]], ptr [[TMP5]], align 4
974 // CHECK-32-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP3]], i32 1, i32 -1, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l29_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i32 2)
975 // CHECK-32-NEXT: call void @__kmpc_target_deinit()
976 // CHECK-32-NEXT: ret void
977 // CHECK-32: worker.exit:
978 // CHECK-32-NEXT: ret void
981 // CHECK-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l29_omp_outlined
982 // CHECK-32-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[C:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[D:%.*]]) #[[ATTR1]] {
983 // CHECK-32-NEXT: entry:
984 // CHECK-32-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
985 // CHECK-32-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
986 // CHECK-32-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
987 // CHECK-32-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 4
988 // CHECK-32-NEXT: [[C1:%.*]] = alloca i8, align 1
989 // CHECK-32-NEXT: [[D2:%.*]] = alloca float, align 4
990 // CHECK-32-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x ptr], align 4
991 // CHECK-32-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
992 // CHECK-32-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
993 // CHECK-32-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
994 // CHECK-32-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4
995 // CHECK-32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[C_ADDR]], align 4
996 // CHECK-32-NEXT: [[TMP1:%.*]] = load ptr, ptr [[D_ADDR]], align 4
997 // CHECK-32-NEXT: store i8 0, ptr [[C1]], align 1
998 // CHECK-32-NEXT: store float 1.000000e+00, ptr [[D2]], align 4
999 // CHECK-32-NEXT: [[TMP2:%.*]] = load i8, ptr [[C1]], align 1
1000 // CHECK-32-NEXT: [[CONV:%.*]] = sext i8 [[TMP2]] to i32
1001 // CHECK-32-NEXT: [[XOR:%.*]] = xor i32 [[CONV]], 2
1002 // CHECK-32-NEXT: [[CONV3:%.*]] = trunc i32 [[XOR]] to i8
1003 // CHECK-32-NEXT: store i8 [[CONV3]], ptr [[C1]], align 1
1004 // CHECK-32-NEXT: [[TMP3:%.*]] = load float, ptr [[D2]], align 4
1005 // CHECK-32-NEXT: [[MUL:%.*]] = fmul float [[TMP3]], 3.300000e+01
1006 // CHECK-32-NEXT: store float [[MUL]], ptr [[D2]], align 4
1007 // CHECK-32-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
1008 // CHECK-32-NEXT: store ptr [[C1]], ptr [[TMP4]], align 4
1009 // CHECK-32-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 1
1010 // CHECK-32-NEXT: store ptr [[D2]], ptr [[TMP5]], align 4
1011 // CHECK-32-NEXT: [[TMP6:%.*]] = call i32 @__kmpc_nvptx_parallel_reduce_nowait_v2(ptr @[[GLOB1]], i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @_omp_reduction_shuffle_and_reduce_func1, ptr @_omp_reduction_inter_warp_copy_func2)
1012 // CHECK-32-NEXT: [[TMP7:%.*]] = icmp eq i32 [[TMP6]], 1
1013 // CHECK-32-NEXT: br i1 [[TMP7]], label [[DOTOMP_REDUCTION_THEN:%.*]], label [[DOTOMP_REDUCTION_DONE:%.*]]
1014 // CHECK-32: .omp.reduction.then:
1015 // CHECK-32-NEXT: [[TMP8:%.*]] = load i8, ptr [[TMP0]], align 1
1016 // CHECK-32-NEXT: [[CONV4:%.*]] = sext i8 [[TMP8]] to i32
1017 // CHECK-32-NEXT: [[TMP9:%.*]] = load i8, ptr [[C1]], align 1
1018 // CHECK-32-NEXT: [[CONV5:%.*]] = sext i8 [[TMP9]] to i32
1019 // CHECK-32-NEXT: [[XOR6:%.*]] = xor i32 [[CONV4]], [[CONV5]]
1020 // CHECK-32-NEXT: [[CONV7:%.*]] = trunc i32 [[XOR6]] to i8
1021 // CHECK-32-NEXT: store i8 [[CONV7]], ptr [[TMP0]], align 1
1022 // CHECK-32-NEXT: [[TMP10:%.*]] = load float, ptr [[TMP1]], align 4
1023 // CHECK-32-NEXT: [[TMP11:%.*]] = load float, ptr [[D2]], align 4
1024 // CHECK-32-NEXT: [[MUL8:%.*]] = fmul float [[TMP10]], [[TMP11]]
1025 // CHECK-32-NEXT: store float [[MUL8]], ptr [[TMP1]], align 4
1026 // CHECK-32-NEXT: br label [[DOTOMP_REDUCTION_DONE]]
1027 // CHECK-32: .omp.reduction.done:
1028 // CHECK-32-NEXT: ret void
1031 // CHECK-32-LABEL: define {{[^@]+}}@_omp_reduction_shuffle_and_reduce_func1
1032 // CHECK-32-SAME: (ptr noundef [[TMP0:%.*]], i16 noundef signext [[TMP1:%.*]], i16 noundef signext [[TMP2:%.*]], i16 noundef signext [[TMP3:%.*]]) #[[ATTR2]] {
1033 // CHECK-32-NEXT: entry:
1034 // CHECK-32-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
1035 // CHECK-32-NEXT: [[DOTADDR1:%.*]] = alloca i16, align 2
1036 // CHECK-32-NEXT: [[DOTADDR2:%.*]] = alloca i16, align 2
1037 // CHECK-32-NEXT: [[DOTADDR3:%.*]] = alloca i16, align 2
1038 // CHECK-32-NEXT: [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST:%.*]] = alloca [2 x ptr], align 4
1039 // CHECK-32-NEXT: [[DOTOMP_REDUCTION_ELEMENT:%.*]] = alloca i8, align 1
1040 // CHECK-32-NEXT: [[DOTOMP_REDUCTION_ELEMENT4:%.*]] = alloca float, align 4
1041 // CHECK-32-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
1042 // CHECK-32-NEXT: store i16 [[TMP1]], ptr [[DOTADDR1]], align 2
1043 // CHECK-32-NEXT: store i16 [[TMP2]], ptr [[DOTADDR2]], align 2
1044 // CHECK-32-NEXT: store i16 [[TMP3]], ptr [[DOTADDR3]], align 2
1045 // CHECK-32-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR]], align 4
1046 // CHECK-32-NEXT: [[TMP5:%.*]] = load i16, ptr [[DOTADDR1]], align 2
1047 // CHECK-32-NEXT: [[TMP6:%.*]] = load i16, ptr [[DOTADDR2]], align 2
1048 // CHECK-32-NEXT: [[TMP7:%.*]] = load i16, ptr [[DOTADDR3]], align 2
1049 // CHECK-32-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i32 0, i32 0
1050 // CHECK-32-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 4
1051 // CHECK-32-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 0
1052 // CHECK-32-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[TMP9]], i32 1
1053 // CHECK-32-NEXT: [[TMP12:%.*]] = load i8, ptr [[TMP9]], align 1
1054 // CHECK-32-NEXT: [[TMP13:%.*]] = sext i8 [[TMP12]] to i32
1055 // CHECK-32-NEXT: [[TMP14:%.*]] = call i32 @__kmpc_get_warp_size()
1056 // CHECK-32-NEXT: [[TMP15:%.*]] = trunc i32 [[TMP14]] to i16
1057 // CHECK-32-NEXT: [[TMP16:%.*]] = call i32 @__kmpc_shuffle_int32(i32 [[TMP13]], i16 [[TMP6]], i16 [[TMP15]])
1058 // CHECK-32-NEXT: [[TMP17:%.*]] = trunc i32 [[TMP16]] to i8
1059 // CHECK-32-NEXT: store i8 [[TMP17]], ptr [[DOTOMP_REDUCTION_ELEMENT]], align 1
1060 // CHECK-32-NEXT: [[TMP18:%.*]] = getelementptr i8, ptr [[TMP9]], i32 1
1061 // CHECK-32-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[DOTOMP_REDUCTION_ELEMENT]], i32 1
1062 // CHECK-32-NEXT: store ptr [[DOTOMP_REDUCTION_ELEMENT]], ptr [[TMP10]], align 4
1063 // CHECK-32-NEXT: [[TMP20:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i32 0, i32 1
1064 // CHECK-32-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP20]], align 4
1065 // CHECK-32-NEXT: [[TMP22:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 1
1066 // CHECK-32-NEXT: [[TMP23:%.*]] = getelementptr float, ptr [[TMP21]], i32 1
1067 // CHECK-32-NEXT: [[TMP24:%.*]] = load i32, ptr [[TMP21]], align 4
1068 // CHECK-32-NEXT: [[TMP25:%.*]] = call i32 @__kmpc_get_warp_size()
1069 // CHECK-32-NEXT: [[TMP26:%.*]] = trunc i32 [[TMP25]] to i16
1070 // CHECK-32-NEXT: [[TMP27:%.*]] = call i32 @__kmpc_shuffle_int32(i32 [[TMP24]], i16 [[TMP6]], i16 [[TMP26]])
1071 // CHECK-32-NEXT: store i32 [[TMP27]], ptr [[DOTOMP_REDUCTION_ELEMENT4]], align 4
1072 // CHECK-32-NEXT: [[TMP28:%.*]] = getelementptr i32, ptr [[TMP21]], i32 1
1073 // CHECK-32-NEXT: [[TMP29:%.*]] = getelementptr i32, ptr [[DOTOMP_REDUCTION_ELEMENT4]], i32 1
1074 // CHECK-32-NEXT: store ptr [[DOTOMP_REDUCTION_ELEMENT4]], ptr [[TMP22]], align 4
1075 // CHECK-32-NEXT: [[TMP30:%.*]] = icmp eq i16 [[TMP7]], 0
1076 // CHECK-32-NEXT: [[TMP31:%.*]] = icmp eq i16 [[TMP7]], 1
1077 // CHECK-32-NEXT: [[TMP32:%.*]] = icmp ult i16 [[TMP5]], [[TMP6]]
1078 // CHECK-32-NEXT: [[TMP33:%.*]] = and i1 [[TMP31]], [[TMP32]]
1079 // CHECK-32-NEXT: [[TMP34:%.*]] = icmp eq i16 [[TMP7]], 2
1080 // CHECK-32-NEXT: [[TMP35:%.*]] = and i16 [[TMP5]], 1
1081 // CHECK-32-NEXT: [[TMP36:%.*]] = icmp eq i16 [[TMP35]], 0
1082 // CHECK-32-NEXT: [[TMP37:%.*]] = and i1 [[TMP34]], [[TMP36]]
1083 // CHECK-32-NEXT: [[TMP38:%.*]] = icmp sgt i16 [[TMP6]], 0
1084 // CHECK-32-NEXT: [[TMP39:%.*]] = and i1 [[TMP37]], [[TMP38]]
1085 // CHECK-32-NEXT: [[TMP40:%.*]] = or i1 [[TMP30]], [[TMP33]]
1086 // CHECK-32-NEXT: [[TMP41:%.*]] = or i1 [[TMP40]], [[TMP39]]
1087 // CHECK-32-NEXT: br i1 [[TMP41]], label [[THEN:%.*]], label [[ELSE:%.*]]
1088 // CHECK-32: then:
1089 // CHECK-32-NEXT: call void @"{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l29_omp_outlined_omp$reduction$reduction_func"(ptr [[TMP4]], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]]) #[[ATTR3]]
1090 // CHECK-32-NEXT: br label [[IFCONT:%.*]]
1091 // CHECK-32: else:
1092 // CHECK-32-NEXT: br label [[IFCONT]]
1093 // CHECK-32: ifcont:
1094 // CHECK-32-NEXT: [[TMP42:%.*]] = icmp eq i16 [[TMP7]], 1
1095 // CHECK-32-NEXT: [[TMP43:%.*]] = icmp uge i16 [[TMP5]], [[TMP6]]
1096 // CHECK-32-NEXT: [[TMP44:%.*]] = and i1 [[TMP42]], [[TMP43]]
1097 // CHECK-32-NEXT: br i1 [[TMP44]], label [[THEN5:%.*]], label [[ELSE6:%.*]]
1098 // CHECK-32: then5:
1099 // CHECK-32-NEXT: [[TMP45:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 0
1100 // CHECK-32-NEXT: [[TMP46:%.*]] = load ptr, ptr [[TMP45]], align 4
1101 // CHECK-32-NEXT: [[TMP47:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i32 0, i32 0
1102 // CHECK-32-NEXT: [[TMP48:%.*]] = load ptr, ptr [[TMP47]], align 4
1103 // CHECK-32-NEXT: [[TMP49:%.*]] = load i8, ptr [[TMP46]], align 1
1104 // CHECK-32-NEXT: store i8 [[TMP49]], ptr [[TMP48]], align 1
1105 // CHECK-32-NEXT: [[TMP50:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 1
1106 // CHECK-32-NEXT: [[TMP51:%.*]] = load ptr, ptr [[TMP50]], align 4
1107 // CHECK-32-NEXT: [[TMP52:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i32 0, i32 1
1108 // CHECK-32-NEXT: [[TMP53:%.*]] = load ptr, ptr [[TMP52]], align 4
1109 // CHECK-32-NEXT: [[TMP54:%.*]] = load float, ptr [[TMP51]], align 4
1110 // CHECK-32-NEXT: store float [[TMP54]], ptr [[TMP53]], align 4
1111 // CHECK-32-NEXT: br label [[IFCONT7:%.*]]
1112 // CHECK-32: else6:
1113 // CHECK-32-NEXT: br label [[IFCONT7]]
1114 // CHECK-32: ifcont7:
1115 // CHECK-32-NEXT: ret void
1118 // CHECK-32-LABEL: define {{[^@]+}}@_omp_reduction_inter_warp_copy_func2
1119 // CHECK-32-SAME: (ptr noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]]) #[[ATTR2]] {
1120 // CHECK-32-NEXT: entry:
1121 // CHECK-32-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
1122 // CHECK-32-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
1123 // CHECK-32-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
1124 // CHECK-32-NEXT: store i32 [[TMP1]], ptr [[DOTADDR1]], align 4
1125 // CHECK-32-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
1126 // CHECK-32-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
1127 // CHECK-32-NEXT: [[NVPTX_LANE_ID:%.*]] = and i32 [[TMP4]], 31
1128 // CHECK-32-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
1129 // CHECK-32-NEXT: [[NVPTX_WARP_ID:%.*]] = ashr i32 [[TMP5]], 5
1130 // CHECK-32-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTADDR]], align 4
1131 // CHECK-32-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
1132 // CHECK-32-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
1133 // CHECK-32-NEXT: [[WARP_MASTER:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
1134 // CHECK-32-NEXT: br i1 [[WARP_MASTER]], label [[THEN:%.*]], label [[ELSE:%.*]]
1135 // CHECK-32: then:
1136 // CHECK-32-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i32 0, i32 0
1137 // CHECK-32-NEXT: [[TMP8:%.*]] = load ptr, ptr [[TMP7]], align 4
1138 // CHECK-32-NEXT: [[TMP9:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
1139 // CHECK-32-NEXT: [[TMP10:%.*]] = load i8, ptr [[TMP8]], align 1
1140 // CHECK-32-NEXT: store volatile i8 [[TMP10]], ptr addrspace(3) [[TMP9]], align 1
1141 // CHECK-32-NEXT: br label [[IFCONT:%.*]]
1142 // CHECK-32: else:
1143 // CHECK-32-NEXT: br label [[IFCONT]]
1144 // CHECK-32: ifcont:
1145 // CHECK-32-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
1146 // CHECK-32-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
1147 // CHECK-32-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTADDR1]], align 4
1148 // CHECK-32-NEXT: [[IS_ACTIVE_THREAD:%.*]] = icmp ult i32 [[TMP3]], [[TMP11]]
1149 // CHECK-32-NEXT: br i1 [[IS_ACTIVE_THREAD]], label [[THEN2:%.*]], label [[ELSE3:%.*]]
1150 // CHECK-32: then3:
1151 // CHECK-32-NEXT: [[TMP12:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
1152 // CHECK-32-NEXT: [[TMP13:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i32 0, i32 0
1153 // CHECK-32-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP13]], align 4
1154 // CHECK-32-NEXT: [[TMP15:%.*]] = load volatile i8, ptr addrspace(3) [[TMP12]], align 1
1155 // CHECK-32-NEXT: store i8 [[TMP15]], ptr [[TMP14]], align 1
1156 // CHECK-32-NEXT: br label [[IFCONT4:%.*]]
1157 // CHECK-32: else4:
1158 // CHECK-32-NEXT: br label [[IFCONT4]]
1159 // CHECK-32: ifcont5:
1160 // CHECK-32-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
1161 // CHECK-32-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
1162 // CHECK-32-NEXT: [[WARP_MASTER5:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
1163 // CHECK-32-NEXT: br i1 [[WARP_MASTER5]], label [[THEN6:%.*]], label [[ELSE7:%.*]]
1164 // CHECK-32: then8:
1165 // CHECK-32-NEXT: [[TMP16:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i32 0, i32 1
1166 // CHECK-32-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 4
1167 // CHECK-32-NEXT: [[TMP18:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
1168 // CHECK-32-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP17]], align 4
1169 // CHECK-32-NEXT: store volatile i32 [[TMP19]], ptr addrspace(3) [[TMP18]], align 4
1170 // CHECK-32-NEXT: br label [[IFCONT8:%.*]]
1171 // CHECK-32: else9:
1172 // CHECK-32-NEXT: br label [[IFCONT8]]
1173 // CHECK-32: ifcont10:
1174 // CHECK-32-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
1175 // CHECK-32-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
1176 // CHECK-32-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTADDR1]], align 4
1177 // CHECK-32-NEXT: [[IS_ACTIVE_THREAD9:%.*]] = icmp ult i32 [[TMP3]], [[TMP20]]
1178 // CHECK-32-NEXT: br i1 [[IS_ACTIVE_THREAD9]], label [[THEN10:%.*]], label [[ELSE11:%.*]]
1179 // CHECK-32: then13:
1180 // CHECK-32-NEXT: [[TMP21:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
1181 // CHECK-32-NEXT: [[TMP22:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i32 0, i32 1
1182 // CHECK-32-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP22]], align 4
1183 // CHECK-32-NEXT: [[TMP24:%.*]] = load volatile i32, ptr addrspace(3) [[TMP21]], align 4
1184 // CHECK-32-NEXT: store i32 [[TMP24]], ptr [[TMP23]], align 4
1185 // CHECK-32-NEXT: br label [[IFCONT12:%.*]]
1186 // CHECK-32: else14:
1187 // CHECK-32-NEXT: br label [[IFCONT12]]
1188 // CHECK-32: ifcont15:
1189 // CHECK-32-NEXT: ret void
1192 // CHECK-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l35
1193 // CHECK-32-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[B:%.*]]) #[[ATTR0]] {
1194 // CHECK-32-NEXT: entry:
1195 // CHECK-32-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
1196 // CHECK-32-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
1197 // CHECK-32-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
1198 // CHECK-32-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [2 x ptr], align 4
1199 // CHECK-32-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
1200 // CHECK-32-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
1201 // CHECK-32-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
1202 // CHECK-32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
1203 // CHECK-32-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4
1204 // CHECK-32-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l35_kernel_environment, ptr [[DYN_PTR]])
1205 // CHECK-32-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP2]], -1
1206 // CHECK-32-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
1207 // CHECK-32: user_code.entry:
1208 // CHECK-32-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
1209 // CHECK-32-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
1210 // CHECK-32-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 4
1211 // CHECK-32-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
1212 // CHECK-32-NEXT: store ptr [[TMP1]], ptr [[TMP5]], align 4
1213 // CHECK-32-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP3]], i32 1, i32 -1, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l35_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i32 2)
1214 // CHECK-32-NEXT: call void @__kmpc_target_deinit()
1215 // CHECK-32-NEXT: ret void
1216 // CHECK-32: worker.exit:
1217 // CHECK-32-NEXT: ret void
1220 // CHECK-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l35_omp_outlined
1221 // CHECK-32-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[B:%.*]]) #[[ATTR1]] {
1222 // CHECK-32-NEXT: entry:
1223 // CHECK-32-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
1224 // CHECK-32-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
1225 // CHECK-32-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
1226 // CHECK-32-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
1227 // CHECK-32-NEXT: [[A1:%.*]] = alloca i32, align 4
1228 // CHECK-32-NEXT: [[B2:%.*]] = alloca i16, align 2
1229 // CHECK-32-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x ptr], align 4
1230 // CHECK-32-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
1231 // CHECK-32-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
1232 // CHECK-32-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
1233 // CHECK-32-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
1234 // CHECK-32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
1235 // CHECK-32-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4
1236 // CHECK-32-NEXT: store i32 0, ptr [[A1]], align 4
1237 // CHECK-32-NEXT: store i16 -32768, ptr [[B2]], align 2
1238 // CHECK-32-NEXT: [[TMP2:%.*]] = load i32, ptr [[A1]], align 4
1239 // CHECK-32-NEXT: [[OR:%.*]] = or i32 [[TMP2]], 1
1240 // CHECK-32-NEXT: store i32 [[OR]], ptr [[A1]], align 4
1241 // CHECK-32-NEXT: [[TMP3:%.*]] = load i16, ptr [[B2]], align 2
1242 // CHECK-32-NEXT: [[CONV:%.*]] = sext i16 [[TMP3]] to i32
1243 // CHECK-32-NEXT: [[CMP:%.*]] = icmp sgt i32 99, [[CONV]]
1244 // CHECK-32-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1245 // CHECK-32: cond.true:
1246 // CHECK-32-NEXT: br label [[COND_END:%.*]]
1247 // CHECK-32: cond.false:
1248 // CHECK-32-NEXT: [[TMP4:%.*]] = load i16, ptr [[B2]], align 2
1249 // CHECK-32-NEXT: [[CONV3:%.*]] = sext i16 [[TMP4]] to i32
1250 // CHECK-32-NEXT: br label [[COND_END]]
1251 // CHECK-32: cond.end:
1252 // CHECK-32-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[CONV3]], [[COND_FALSE]] ]
1253 // CHECK-32-NEXT: [[CONV4:%.*]] = trunc i32 [[COND]] to i16
1254 // CHECK-32-NEXT: store i16 [[CONV4]], ptr [[B2]], align 2
1255 // CHECK-32-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
1256 // CHECK-32-NEXT: store ptr [[A1]], ptr [[TMP5]], align 4
1257 // CHECK-32-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 1
1258 // CHECK-32-NEXT: store ptr [[B2]], ptr [[TMP6]], align 4
1259 // CHECK-32-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_nvptx_parallel_reduce_nowait_v2(ptr @[[GLOB1]], i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @_omp_reduction_shuffle_and_reduce_func3, ptr @_omp_reduction_inter_warp_copy_func4)
1260 // CHECK-32-NEXT: [[TMP8:%.*]] = icmp eq i32 [[TMP7]], 1
1261 // CHECK-32-NEXT: br i1 [[TMP8]], label [[DOTOMP_REDUCTION_THEN:%.*]], label [[DOTOMP_REDUCTION_DONE:%.*]]
1262 // CHECK-32: .omp.reduction.then:
1263 // CHECK-32-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP0]], align 4
1264 // CHECK-32-NEXT: [[TMP10:%.*]] = load i32, ptr [[A1]], align 4
1265 // CHECK-32-NEXT: [[OR5:%.*]] = or i32 [[TMP9]], [[TMP10]]
1266 // CHECK-32-NEXT: store i32 [[OR5]], ptr [[TMP0]], align 4
1267 // CHECK-32-NEXT: [[TMP11:%.*]] = load i16, ptr [[TMP1]], align 2
1268 // CHECK-32-NEXT: [[CONV6:%.*]] = sext i16 [[TMP11]] to i32
1269 // CHECK-32-NEXT: [[TMP12:%.*]] = load i16, ptr [[B2]], align 2
1270 // CHECK-32-NEXT: [[CONV7:%.*]] = sext i16 [[TMP12]] to i32
1271 // CHECK-32-NEXT: [[CMP8:%.*]] = icmp sgt i32 [[CONV6]], [[CONV7]]
1272 // CHECK-32-NEXT: br i1 [[CMP8]], label [[COND_TRUE9:%.*]], label [[COND_FALSE10:%.*]]
1273 // CHECK-32: cond.true9:
1274 // CHECK-32-NEXT: [[TMP13:%.*]] = load i16, ptr [[TMP1]], align 2
1275 // CHECK-32-NEXT: br label [[COND_END11:%.*]]
1276 // CHECK-32: cond.false10:
1277 // CHECK-32-NEXT: [[TMP14:%.*]] = load i16, ptr [[B2]], align 2
1278 // CHECK-32-NEXT: br label [[COND_END11]]
1279 // CHECK-32: cond.end11:
1280 // CHECK-32-NEXT: [[COND12:%.*]] = phi i16 [ [[TMP13]], [[COND_TRUE9]] ], [ [[TMP14]], [[COND_FALSE10]] ]
1281 // CHECK-32-NEXT: store i16 [[COND12]], ptr [[TMP1]], align 2
1282 // CHECK-32-NEXT: br label [[DOTOMP_REDUCTION_DONE]]
1283 // CHECK-32: .omp.reduction.done:
1284 // CHECK-32-NEXT: ret void
1287 // CHECK-32-LABEL: define {{[^@]+}}@_omp_reduction_shuffle_and_reduce_func3
1288 // CHECK-32-SAME: (ptr noundef [[TMP0:%.*]], i16 noundef signext [[TMP1:%.*]], i16 noundef signext [[TMP2:%.*]], i16 noundef signext [[TMP3:%.*]]) #[[ATTR2]] {
1289 // CHECK-32-NEXT: entry:
1290 // CHECK-32-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
1291 // CHECK-32-NEXT: [[DOTADDR1:%.*]] = alloca i16, align 2
1292 // CHECK-32-NEXT: [[DOTADDR2:%.*]] = alloca i16, align 2
1293 // CHECK-32-NEXT: [[DOTADDR3:%.*]] = alloca i16, align 2
1294 // CHECK-32-NEXT: [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST:%.*]] = alloca [2 x ptr], align 4
1295 // CHECK-32-NEXT: [[DOTOMP_REDUCTION_ELEMENT:%.*]] = alloca i32, align 4
1296 // CHECK-32-NEXT: [[DOTOMP_REDUCTION_ELEMENT4:%.*]] = alloca i16, align 2
1297 // CHECK-32-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
1298 // CHECK-32-NEXT: store i16 [[TMP1]], ptr [[DOTADDR1]], align 2
1299 // CHECK-32-NEXT: store i16 [[TMP2]], ptr [[DOTADDR2]], align 2
1300 // CHECK-32-NEXT: store i16 [[TMP3]], ptr [[DOTADDR3]], align 2
1301 // CHECK-32-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR]], align 4
1302 // CHECK-32-NEXT: [[TMP5:%.*]] = load i16, ptr [[DOTADDR1]], align 2
1303 // CHECK-32-NEXT: [[TMP6:%.*]] = load i16, ptr [[DOTADDR2]], align 2
1304 // CHECK-32-NEXT: [[TMP7:%.*]] = load i16, ptr [[DOTADDR3]], align 2
1305 // CHECK-32-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i32 0, i32 0
1306 // CHECK-32-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 4
1307 // CHECK-32-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 0
1308 // CHECK-32-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[TMP9]], i32 1
1309 // CHECK-32-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP9]], align 4
1310 // CHECK-32-NEXT: [[TMP13:%.*]] = call i32 @__kmpc_get_warp_size()
1311 // CHECK-32-NEXT: [[TMP14:%.*]] = trunc i32 [[TMP13]] to i16
1312 // CHECK-32-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_shuffle_int32(i32 [[TMP12]], i16 [[TMP6]], i16 [[TMP14]])
1313 // CHECK-32-NEXT: store i32 [[TMP15]], ptr [[DOTOMP_REDUCTION_ELEMENT]], align 4
1314 // CHECK-32-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[TMP9]], i32 1
1315 // CHECK-32-NEXT: [[TMP17:%.*]] = getelementptr i32, ptr [[DOTOMP_REDUCTION_ELEMENT]], i32 1
1316 // CHECK-32-NEXT: store ptr [[DOTOMP_REDUCTION_ELEMENT]], ptr [[TMP10]], align 4
1317 // CHECK-32-NEXT: [[TMP18:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i32 0, i32 1
1318 // CHECK-32-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP18]], align 4
1319 // CHECK-32-NEXT: [[TMP20:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 1
1320 // CHECK-32-NEXT: [[TMP21:%.*]] = getelementptr i16, ptr [[TMP19]], i32 1
1321 // CHECK-32-NEXT: [[TMP22:%.*]] = load i16, ptr [[TMP19]], align 2
1322 // CHECK-32-NEXT: [[TMP23:%.*]] = sext i16 [[TMP22]] to i32
1323 // CHECK-32-NEXT: [[TMP24:%.*]] = call i32 @__kmpc_get_warp_size()
1324 // CHECK-32-NEXT: [[TMP25:%.*]] = trunc i32 [[TMP24]] to i16
1325 // CHECK-32-NEXT: [[TMP26:%.*]] = call i32 @__kmpc_shuffle_int32(i32 [[TMP23]], i16 [[TMP6]], i16 [[TMP25]])
1326 // CHECK-32-NEXT: [[TMP27:%.*]] = trunc i32 [[TMP26]] to i16
1327 // CHECK-32-NEXT: store i16 [[TMP27]], ptr [[DOTOMP_REDUCTION_ELEMENT4]], align 2
1328 // CHECK-32-NEXT: [[TMP28:%.*]] = getelementptr i16, ptr [[TMP19]], i32 1
1329 // CHECK-32-NEXT: [[TMP29:%.*]] = getelementptr i16, ptr [[DOTOMP_REDUCTION_ELEMENT4]], i32 1
1330 // CHECK-32-NEXT: store ptr [[DOTOMP_REDUCTION_ELEMENT4]], ptr [[TMP20]], align 4
1331 // CHECK-32-NEXT: [[TMP30:%.*]] = icmp eq i16 [[TMP7]], 0
1332 // CHECK-32-NEXT: [[TMP31:%.*]] = icmp eq i16 [[TMP7]], 1
1333 // CHECK-32-NEXT: [[TMP32:%.*]] = icmp ult i16 [[TMP5]], [[TMP6]]
1334 // CHECK-32-NEXT: [[TMP33:%.*]] = and i1 [[TMP31]], [[TMP32]]
1335 // CHECK-32-NEXT: [[TMP34:%.*]] = icmp eq i16 [[TMP7]], 2
1336 // CHECK-32-NEXT: [[TMP35:%.*]] = and i16 [[TMP5]], 1
1337 // CHECK-32-NEXT: [[TMP36:%.*]] = icmp eq i16 [[TMP35]], 0
1338 // CHECK-32-NEXT: [[TMP37:%.*]] = and i1 [[TMP34]], [[TMP36]]
1339 // CHECK-32-NEXT: [[TMP38:%.*]] = icmp sgt i16 [[TMP6]], 0
1340 // CHECK-32-NEXT: [[TMP39:%.*]] = and i1 [[TMP37]], [[TMP38]]
1341 // CHECK-32-NEXT: [[TMP40:%.*]] = or i1 [[TMP30]], [[TMP33]]
1342 // CHECK-32-NEXT: [[TMP41:%.*]] = or i1 [[TMP40]], [[TMP39]]
1343 // CHECK-32-NEXT: br i1 [[TMP41]], label [[THEN:%.*]], label [[ELSE:%.*]]
1344 // CHECK-32: then:
1345 // CHECK-32-NEXT: call void @"{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l35_omp_outlined_omp$reduction$reduction_func"(ptr [[TMP4]], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]]) #[[ATTR3]]
1346 // CHECK-32-NEXT: br label [[IFCONT:%.*]]
1347 // CHECK-32: else:
1348 // CHECK-32-NEXT: br label [[IFCONT]]
1349 // CHECK-32: ifcont:
1350 // CHECK-32-NEXT: [[TMP42:%.*]] = icmp eq i16 [[TMP7]], 1
1351 // CHECK-32-NEXT: [[TMP43:%.*]] = icmp uge i16 [[TMP5]], [[TMP6]]
1352 // CHECK-32-NEXT: [[TMP44:%.*]] = and i1 [[TMP42]], [[TMP43]]
1353 // CHECK-32-NEXT: br i1 [[TMP44]], label [[THEN5:%.*]], label [[ELSE6:%.*]]
1354 // CHECK-32: then5:
1355 // CHECK-32-NEXT: [[TMP45:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 0
1356 // CHECK-32-NEXT: [[TMP46:%.*]] = load ptr, ptr [[TMP45]], align 4
1357 // CHECK-32-NEXT: [[TMP47:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i32 0, i32 0
1358 // CHECK-32-NEXT: [[TMP48:%.*]] = load ptr, ptr [[TMP47]], align 4
1359 // CHECK-32-NEXT: [[TMP49:%.*]] = load i32, ptr [[TMP46]], align 4
1360 // CHECK-32-NEXT: store i32 [[TMP49]], ptr [[TMP48]], align 4
1361 // CHECK-32-NEXT: [[TMP50:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 1
1362 // CHECK-32-NEXT: [[TMP51:%.*]] = load ptr, ptr [[TMP50]], align 4
1363 // CHECK-32-NEXT: [[TMP52:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i32 0, i32 1
1364 // CHECK-32-NEXT: [[TMP53:%.*]] = load ptr, ptr [[TMP52]], align 4
1365 // CHECK-32-NEXT: [[TMP54:%.*]] = load i16, ptr [[TMP51]], align 2
1366 // CHECK-32-NEXT: store i16 [[TMP54]], ptr [[TMP53]], align 2
1367 // CHECK-32-NEXT: br label [[IFCONT7:%.*]]
1368 // CHECK-32: else6:
1369 // CHECK-32-NEXT: br label [[IFCONT7]]
1370 // CHECK-32: ifcont7:
1371 // CHECK-32-NEXT: ret void
1374 // CHECK-32-LABEL: define {{[^@]+}}@_omp_reduction_inter_warp_copy_func4
1375 // CHECK-32-SAME: (ptr noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]]) #[[ATTR2]] {
1376 // CHECK-32-NEXT: entry:
1377 // CHECK-32-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
1378 // CHECK-32-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
1379 // CHECK-32-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
1380 // CHECK-32-NEXT: store i32 [[TMP1]], ptr [[DOTADDR1]], align 4
1381 // CHECK-32-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
1382 // CHECK-32-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
1383 // CHECK-32-NEXT: [[NVPTX_LANE_ID:%.*]] = and i32 [[TMP4]], 31
1384 // CHECK-32-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
1385 // CHECK-32-NEXT: [[NVPTX_WARP_ID:%.*]] = ashr i32 [[TMP5]], 5
1386 // CHECK-32-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTADDR]], align 4
1387 // CHECK-32-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
1388 // CHECK-32-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
1389 // CHECK-32-NEXT: [[WARP_MASTER:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
1390 // CHECK-32-NEXT: br i1 [[WARP_MASTER]], label [[THEN:%.*]], label [[ELSE:%.*]]
1391 // CHECK-32: then:
1392 // CHECK-32-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i32 0, i32 0
1393 // CHECK-32-NEXT: [[TMP8:%.*]] = load ptr, ptr [[TMP7]], align 4
1394 // CHECK-32-NEXT: [[TMP9:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
1395 // CHECK-32-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP8]], align 4
1396 // CHECK-32-NEXT: store volatile i32 [[TMP10]], ptr addrspace(3) [[TMP9]], align 4
1397 // CHECK-32-NEXT: br label [[IFCONT:%.*]]
1398 // CHECK-32: else:
1399 // CHECK-32-NEXT: br label [[IFCONT]]
1400 // CHECK-32: ifcont:
1401 // CHECK-32-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
1402 // CHECK-32-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
1403 // CHECK-32-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTADDR1]], align 4
1404 // CHECK-32-NEXT: [[IS_ACTIVE_THREAD:%.*]] = icmp ult i32 [[TMP3]], [[TMP11]]
1405 // CHECK-32-NEXT: br i1 [[IS_ACTIVE_THREAD]], label [[THEN2:%.*]], label [[ELSE3:%.*]]
1406 // CHECK-32: then3:
1407 // CHECK-32-NEXT: [[TMP12:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
1408 // CHECK-32-NEXT: [[TMP13:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i32 0, i32 0
1409 // CHECK-32-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP13]], align 4
1410 // CHECK-32-NEXT: [[TMP15:%.*]] = load volatile i32, ptr addrspace(3) [[TMP12]], align 4
1411 // CHECK-32-NEXT: store i32 [[TMP15]], ptr [[TMP14]], align 4
1412 // CHECK-32-NEXT: br label [[IFCONT4:%.*]]
1413 // CHECK-32: else4:
1414 // CHECK-32-NEXT: br label [[IFCONT4]]
1415 // CHECK-32: ifcont5:
1416 // CHECK-32-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
1417 // CHECK-32-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
1418 // CHECK-32-NEXT: [[WARP_MASTER5:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
1419 // CHECK-32-NEXT: br i1 [[WARP_MASTER5]], label [[THEN6:%.*]], label [[ELSE7:%.*]]
1420 // CHECK-32: then8:
1421 // CHECK-32-NEXT: [[TMP16:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i32 0, i32 1
1422 // CHECK-32-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 4
1423 // CHECK-32-NEXT: [[TMP18:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
1424 // CHECK-32-NEXT: [[TMP19:%.*]] = load i16, ptr [[TMP17]], align 2
1425 // CHECK-32-NEXT: store volatile i16 [[TMP19]], ptr addrspace(3) [[TMP18]], align 2
1426 // CHECK-32-NEXT: br label [[IFCONT8:%.*]]
1427 // CHECK-32: else9:
1428 // CHECK-32-NEXT: br label [[IFCONT8]]
1429 // CHECK-32: ifcont10:
1430 // CHECK-32-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
1431 // CHECK-32-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
1432 // CHECK-32-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTADDR1]], align 4
1433 // CHECK-32-NEXT: [[IS_ACTIVE_THREAD9:%.*]] = icmp ult i32 [[TMP3]], [[TMP20]]
1434 // CHECK-32-NEXT: br i1 [[IS_ACTIVE_THREAD9]], label [[THEN10:%.*]], label [[ELSE11:%.*]]
1435 // CHECK-32: then13:
1436 // CHECK-32-NEXT: [[TMP21:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
1437 // CHECK-32-NEXT: [[TMP22:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i32 0, i32 1
1438 // CHECK-32-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP22]], align 4
1439 // CHECK-32-NEXT: [[TMP24:%.*]] = load volatile i16, ptr addrspace(3) [[TMP21]], align 2
1440 // CHECK-32-NEXT: store i16 [[TMP24]], ptr [[TMP23]], align 2
1441 // CHECK-32-NEXT: br label [[IFCONT12:%.*]]
1442 // CHECK-32: else14:
1443 // CHECK-32-NEXT: br label [[IFCONT12]]
1444 // CHECK-32: ifcont15:
1445 // CHECK-32-NEXT: ret void
1448 // CHECK-32-EX-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l24
1449 // CHECK-32-EX-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[E:%.*]]) #[[ATTR0:[0-9]+]] {
1450 // CHECK-32-EX-NEXT: entry:
1451 // CHECK-32-EX-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
1452 // CHECK-32-EX-NEXT: [[E_ADDR:%.*]] = alloca ptr, align 4
1453 // CHECK-32-EX-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x ptr], align 4
1454 // CHECK-32-EX-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
1455 // CHECK-32-EX-NEXT: store ptr [[E]], ptr [[E_ADDR]], align 4
1456 // CHECK-32-EX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[E_ADDR]], align 4
1457 // CHECK-32-EX-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l24_kernel_environment, ptr [[DYN_PTR]])
1458 // CHECK-32-EX-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
1459 // CHECK-32-EX-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
1460 // CHECK-32-EX: user_code.entry:
1461 // CHECK-32-EX-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1:[0-9]+]])
1462 // CHECK-32-EX-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
1463 // CHECK-32-EX-NEXT: store ptr [[TMP0]], ptr [[TMP3]], align 4
1464 // CHECK-32-EX-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l24_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i32 1)
1465 // CHECK-32-EX-NEXT: call void @__kmpc_target_deinit()
1466 // CHECK-32-EX-NEXT: ret void
1467 // CHECK-32-EX: worker.exit:
1468 // CHECK-32-EX-NEXT: ret void
1471 // CHECK-32-EX-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l24_omp_outlined
1472 // CHECK-32-EX-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[E:%.*]]) #[[ATTR1:[0-9]+]] {
1473 // CHECK-32-EX-NEXT: entry:
1474 // CHECK-32-EX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
1475 // CHECK-32-EX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
1476 // CHECK-32-EX-NEXT: [[E_ADDR:%.*]] = alloca ptr, align 4
1477 // CHECK-32-EX-NEXT: [[E1:%.*]] = alloca double, align 8
1478 // CHECK-32-EX-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 4
1479 // CHECK-32-EX-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
1480 // CHECK-32-EX-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
1481 // CHECK-32-EX-NEXT: store ptr [[E]], ptr [[E_ADDR]], align 4
1482 // CHECK-32-EX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[E_ADDR]], align 4
1483 // CHECK-32-EX-NEXT: store double 0.000000e+00, ptr [[E1]], align 8
1484 // CHECK-32-EX-NEXT: [[TMP1:%.*]] = load double, ptr [[E1]], align 8
1485 // CHECK-32-EX-NEXT: [[ADD:%.*]] = fadd double [[TMP1]], 5.000000e+00
1486 // CHECK-32-EX-NEXT: store double [[ADD]], ptr [[E1]], align 8
1487 // CHECK-32-EX-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
1488 // CHECK-32-EX-NEXT: store ptr [[E1]], ptr [[TMP2]], align 4
1489 // CHECK-32-EX-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_nvptx_parallel_reduce_nowait_v2(ptr @[[GLOB1]], i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @_omp_reduction_shuffle_and_reduce_func, ptr @_omp_reduction_inter_warp_copy_func)
1490 // CHECK-32-EX-NEXT: [[TMP4:%.*]] = icmp eq i32 [[TMP3]], 1
1491 // CHECK-32-EX-NEXT: br i1 [[TMP4]], label [[DOTOMP_REDUCTION_THEN:%.*]], label [[DOTOMP_REDUCTION_DONE:%.*]]
1492 // CHECK-32-EX: .omp.reduction.then:
1493 // CHECK-32-EX-NEXT: [[TMP5:%.*]] = load double, ptr [[TMP0]], align 8
1494 // CHECK-32-EX-NEXT: [[TMP6:%.*]] = load double, ptr [[E1]], align 8
1495 // CHECK-32-EX-NEXT: [[ADD2:%.*]] = fadd double [[TMP5]], [[TMP6]]
1496 // CHECK-32-EX-NEXT: store double [[ADD2]], ptr [[TMP0]], align 8
1497 // CHECK-32-EX-NEXT: br label [[DOTOMP_REDUCTION_DONE]]
1498 // CHECK-32-EX: .omp.reduction.done:
1499 // CHECK-32-EX-NEXT: ret void
1502 // CHECK-32-EX-LABEL: define {{[^@]+}}@_omp_reduction_shuffle_and_reduce_func
1503 // CHECK-32-EX-SAME: (ptr noundef [[TMP0:%.*]], i16 noundef signext [[TMP1:%.*]], i16 noundef signext [[TMP2:%.*]], i16 noundef signext [[TMP3:%.*]]) #[[ATTR2:[0-9]+]] {
1504 // CHECK-32-EX-NEXT: entry:
1505 // CHECK-32-EX-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
1506 // CHECK-32-EX-NEXT: [[DOTADDR1:%.*]] = alloca i16, align 2
1507 // CHECK-32-EX-NEXT: [[DOTADDR2:%.*]] = alloca i16, align 2
1508 // CHECK-32-EX-NEXT: [[DOTADDR3:%.*]] = alloca i16, align 2
1509 // CHECK-32-EX-NEXT: [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST:%.*]] = alloca [1 x ptr], align 4
1510 // CHECK-32-EX-NEXT: [[DOTOMP_REDUCTION_ELEMENT:%.*]] = alloca double, align 8
1511 // CHECK-32-EX-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
1512 // CHECK-32-EX-NEXT: store i16 [[TMP1]], ptr [[DOTADDR1]], align 2
1513 // CHECK-32-EX-NEXT: store i16 [[TMP2]], ptr [[DOTADDR2]], align 2
1514 // CHECK-32-EX-NEXT: store i16 [[TMP3]], ptr [[DOTADDR3]], align 2
1515 // CHECK-32-EX-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR]], align 4
1516 // CHECK-32-EX-NEXT: [[TMP5:%.*]] = load i16, ptr [[DOTADDR1]], align 2
1517 // CHECK-32-EX-NEXT: [[TMP6:%.*]] = load i16, ptr [[DOTADDR2]], align 2
1518 // CHECK-32-EX-NEXT: [[TMP7:%.*]] = load i16, ptr [[DOTADDR3]], align 2
1519 // CHECK-32-EX-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i32 0, i32 0
1520 // CHECK-32-EX-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 4
1521 // CHECK-32-EX-NEXT: [[TMP10:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 0
1522 // CHECK-32-EX-NEXT: [[TMP11:%.*]] = getelementptr double, ptr [[TMP9]], i32 1
1523 // CHECK-32-EX-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP9]], align 8
1524 // CHECK-32-EX-NEXT: [[TMP13:%.*]] = call i32 @__kmpc_get_warp_size()
1525 // CHECK-32-EX-NEXT: [[TMP14:%.*]] = trunc i32 [[TMP13]] to i16
1526 // CHECK-32-EX-NEXT: [[TMP15:%.*]] = call i64 @__kmpc_shuffle_int64(i64 [[TMP12]], i16 [[TMP6]], i16 [[TMP14]])
1527 // CHECK-32-EX-NEXT: store i64 [[TMP15]], ptr [[DOTOMP_REDUCTION_ELEMENT]], align 8
1528 // CHECK-32-EX-NEXT: [[TMP16:%.*]] = getelementptr i64, ptr [[TMP9]], i32 1
1529 // CHECK-32-EX-NEXT: [[TMP17:%.*]] = getelementptr i64, ptr [[DOTOMP_REDUCTION_ELEMENT]], i32 1
1530 // CHECK-32-EX-NEXT: store ptr [[DOTOMP_REDUCTION_ELEMENT]], ptr [[TMP10]], align 4
1531 // CHECK-32-EX-NEXT: [[TMP18:%.*]] = icmp eq i16 [[TMP7]], 0
1532 // CHECK-32-EX-NEXT: [[TMP19:%.*]] = icmp eq i16 [[TMP7]], 1
1533 // CHECK-32-EX-NEXT: [[TMP20:%.*]] = icmp ult i16 [[TMP5]], [[TMP6]]
1534 // CHECK-32-EX-NEXT: [[TMP21:%.*]] = and i1 [[TMP19]], [[TMP20]]
1535 // CHECK-32-EX-NEXT: [[TMP22:%.*]] = icmp eq i16 [[TMP7]], 2
1536 // CHECK-32-EX-NEXT: [[TMP23:%.*]] = and i16 [[TMP5]], 1
1537 // CHECK-32-EX-NEXT: [[TMP24:%.*]] = icmp eq i16 [[TMP23]], 0
1538 // CHECK-32-EX-NEXT: [[TMP25:%.*]] = and i1 [[TMP22]], [[TMP24]]
1539 // CHECK-32-EX-NEXT: [[TMP26:%.*]] = icmp sgt i16 [[TMP6]], 0
1540 // CHECK-32-EX-NEXT: [[TMP27:%.*]] = and i1 [[TMP25]], [[TMP26]]
1541 // CHECK-32-EX-NEXT: [[TMP28:%.*]] = or i1 [[TMP18]], [[TMP21]]
1542 // CHECK-32-EX-NEXT: [[TMP29:%.*]] = or i1 [[TMP28]], [[TMP27]]
1543 // CHECK-32-EX-NEXT: br i1 [[TMP29]], label [[THEN:%.*]], label [[ELSE:%.*]]
1544 // CHECK-32-EX: then:
1545 // CHECK-32-EX-NEXT: call void @"{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l24_omp_outlined_omp$reduction$reduction_func"(ptr [[TMP4]], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]]) #[[ATTR3:[0-9]+]]
1546 // CHECK-32-EX-NEXT: br label [[IFCONT:%.*]]
1547 // CHECK-32-EX: else:
1548 // CHECK-32-EX-NEXT: br label [[IFCONT]]
1549 // CHECK-32-EX: ifcont:
1550 // CHECK-32-EX-NEXT: [[TMP30:%.*]] = icmp eq i16 [[TMP7]], 1
1551 // CHECK-32-EX-NEXT: [[TMP31:%.*]] = icmp uge i16 [[TMP5]], [[TMP6]]
1552 // CHECK-32-EX-NEXT: [[TMP32:%.*]] = and i1 [[TMP30]], [[TMP31]]
1553 // CHECK-32-EX-NEXT: br i1 [[TMP32]], label [[THEN4:%.*]], label [[ELSE5:%.*]]
1554 // CHECK-32-EX: then4:
1555 // CHECK-32-EX-NEXT: [[TMP33:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 0
1556 // CHECK-32-EX-NEXT: [[TMP34:%.*]] = load ptr, ptr [[TMP33]], align 4
1557 // CHECK-32-EX-NEXT: [[TMP35:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i32 0, i32 0
1558 // CHECK-32-EX-NEXT: [[TMP36:%.*]] = load ptr, ptr [[TMP35]], align 4
1559 // CHECK-32-EX-NEXT: [[TMP37:%.*]] = load double, ptr [[TMP34]], align 8
1560 // CHECK-32-EX-NEXT: store double [[TMP37]], ptr [[TMP36]], align 8
1561 // CHECK-32-EX-NEXT: br label [[IFCONT6:%.*]]
1562 // CHECK-32-EX: else5:
1563 // CHECK-32-EX-NEXT: br label [[IFCONT6]]
1564 // CHECK-32-EX: ifcont6:
1565 // CHECK-32-EX-NEXT: ret void
1568 // CHECK-32-EX-LABEL: define {{[^@]+}}@_omp_reduction_inter_warp_copy_func
1569 // CHECK-32-EX-SAME: (ptr noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]]) #[[ATTR2]] {
1570 // CHECK-32-EX-NEXT: entry:
1571 // CHECK-32-EX-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
1572 // CHECK-32-EX-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
1573 // CHECK-32-EX-NEXT: [[DOTCNT_ADDR:%.*]] = alloca i32, align 4
1574 // CHECK-32-EX-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
1575 // CHECK-32-EX-NEXT: store i32 [[TMP1]], ptr [[DOTADDR1]], align 4
1576 // CHECK-32-EX-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
1577 // CHECK-32-EX-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
1578 // CHECK-32-EX-NEXT: [[NVPTX_LANE_ID:%.*]] = and i32 [[TMP4]], 31
1579 // CHECK-32-EX-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
1580 // CHECK-32-EX-NEXT: [[NVPTX_WARP_ID:%.*]] = ashr i32 [[TMP5]], 5
1581 // CHECK-32-EX-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTADDR]], align 4
1582 // CHECK-32-EX-NEXT: store i32 0, ptr [[DOTCNT_ADDR]], align 4
1583 // CHECK-32-EX-NEXT: br label [[PRECOND:%.*]]
1584 // CHECK-32-EX: precond:
1585 // CHECK-32-EX-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCNT_ADDR]], align 4
1586 // CHECK-32-EX-NEXT: [[TMP8:%.*]] = icmp ult i32 [[TMP7]], 2
1587 // CHECK-32-EX-NEXT: br i1 [[TMP8]], label [[BODY:%.*]], label [[EXIT:%.*]]
1588 // CHECK-32-EX: body:
1589 // CHECK-32-EX-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
1590 // CHECK-32-EX-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2:[0-9]+]], i32 [[TMP2]])
1591 // CHECK-32-EX-NEXT: [[WARP_MASTER:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
1592 // CHECK-32-EX-NEXT: br i1 [[WARP_MASTER]], label [[THEN:%.*]], label [[ELSE:%.*]]
1593 // CHECK-32-EX: then:
1594 // CHECK-32-EX-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP6]], i32 0, i32 0
1595 // CHECK-32-EX-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 4
1596 // CHECK-32-EX-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[TMP10]], i32 [[TMP7]]
1597 // CHECK-32-EX-NEXT: [[TMP12:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
1598 // CHECK-32-EX-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP11]], align 4
1599 // CHECK-32-EX-NEXT: store volatile i32 [[TMP13]], ptr addrspace(3) [[TMP12]], align 4
1600 // CHECK-32-EX-NEXT: br label [[IFCONT:%.*]]
1601 // CHECK-32-EX: else:
1602 // CHECK-32-EX-NEXT: br label [[IFCONT]]
1603 // CHECK-32-EX: ifcont:
1604 // CHECK-32-EX-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
1605 // CHECK-32-EX-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
1606 // CHECK-32-EX-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTADDR1]], align 4
1607 // CHECK-32-EX-NEXT: [[IS_ACTIVE_THREAD:%.*]] = icmp ult i32 [[TMP3]], [[TMP14]]
1608 // CHECK-32-EX-NEXT: br i1 [[IS_ACTIVE_THREAD]], label [[THEN2:%.*]], label [[ELSE3:%.*]]
1609 // CHECK-32-EX: then3:
1610 // CHECK-32-EX-NEXT: [[TMP15:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
1611 // CHECK-32-EX-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP6]], i32 0, i32 0
1612 // CHECK-32-EX-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 4
1613 // CHECK-32-EX-NEXT: [[TMP18:%.*]] = getelementptr i32, ptr [[TMP17]], i32 [[TMP7]]
1614 // CHECK-32-EX-NEXT: [[TMP19:%.*]] = load volatile i32, ptr addrspace(3) [[TMP15]], align 4
1615 // CHECK-32-EX-NEXT: store i32 [[TMP19]], ptr [[TMP18]], align 4
1616 // CHECK-32-EX-NEXT: br label [[IFCONT4:%.*]]
1617 // CHECK-32-EX: else4:
1618 // CHECK-32-EX-NEXT: br label [[IFCONT4]]
1619 // CHECK-32-EX: ifcont5:
1620 // CHECK-32-EX-NEXT: [[TMP20:%.*]] = add nsw i32 [[TMP7]], 1
1621 // CHECK-32-EX-NEXT: store i32 [[TMP20]], ptr [[DOTCNT_ADDR]], align 4
1622 // CHECK-32-EX-NEXT: br label [[PRECOND]]
1623 // CHECK-32-EX: exit:
1624 // CHECK-32-EX-NEXT: ret void
1627 // CHECK-32-EX-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l29
1628 // CHECK-32-EX-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[C:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[D:%.*]]) #[[ATTR0]] {
1629 // CHECK-32-EX-NEXT: entry:
1630 // CHECK-32-EX-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
1631 // CHECK-32-EX-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
1632 // CHECK-32-EX-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 4
1633 // CHECK-32-EX-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [2 x ptr], align 4
1634 // CHECK-32-EX-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
1635 // CHECK-32-EX-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
1636 // CHECK-32-EX-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4
1637 // CHECK-32-EX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[C_ADDR]], align 4
1638 // CHECK-32-EX-NEXT: [[TMP1:%.*]] = load ptr, ptr [[D_ADDR]], align 4
1639 // CHECK-32-EX-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l29_kernel_environment, ptr [[DYN_PTR]])
1640 // CHECK-32-EX-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP2]], -1
1641 // CHECK-32-EX-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
1642 // CHECK-32-EX: user_code.entry:
1643 // CHECK-32-EX-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
1644 // CHECK-32-EX-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
1645 // CHECK-32-EX-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 4
1646 // CHECK-32-EX-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
1647 // CHECK-32-EX-NEXT: store ptr [[TMP1]], ptr [[TMP5]], align 4
1648 // CHECK-32-EX-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP3]], i32 1, i32 -1, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l29_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i32 2)
1649 // CHECK-32-EX-NEXT: call void @__kmpc_target_deinit()
1650 // CHECK-32-EX-NEXT: ret void
1651 // CHECK-32-EX: worker.exit:
1652 // CHECK-32-EX-NEXT: ret void
1655 // CHECK-32-EX-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l29_omp_outlined
1656 // CHECK-32-EX-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[C:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[D:%.*]]) #[[ATTR1]] {
1657 // CHECK-32-EX-NEXT: entry:
1658 // CHECK-32-EX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
1659 // CHECK-32-EX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
1660 // CHECK-32-EX-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
1661 // CHECK-32-EX-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 4
1662 // CHECK-32-EX-NEXT: [[C1:%.*]] = alloca i8, align 1
1663 // CHECK-32-EX-NEXT: [[D2:%.*]] = alloca float, align 4
1664 // CHECK-32-EX-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x ptr], align 4
1665 // CHECK-32-EX-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
1666 // CHECK-32-EX-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
1667 // CHECK-32-EX-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
1668 // CHECK-32-EX-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4
1669 // CHECK-32-EX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[C_ADDR]], align 4
1670 // CHECK-32-EX-NEXT: [[TMP1:%.*]] = load ptr, ptr [[D_ADDR]], align 4
1671 // CHECK-32-EX-NEXT: store i8 0, ptr [[C1]], align 1
1672 // CHECK-32-EX-NEXT: store float 1.000000e+00, ptr [[D2]], align 4
1673 // CHECK-32-EX-NEXT: [[TMP2:%.*]] = load i8, ptr [[C1]], align 1
1674 // CHECK-32-EX-NEXT: [[CONV:%.*]] = sext i8 [[TMP2]] to i32
1675 // CHECK-32-EX-NEXT: [[XOR:%.*]] = xor i32 [[CONV]], 2
1676 // CHECK-32-EX-NEXT: [[CONV3:%.*]] = trunc i32 [[XOR]] to i8
1677 // CHECK-32-EX-NEXT: store i8 [[CONV3]], ptr [[C1]], align 1
1678 // CHECK-32-EX-NEXT: [[TMP3:%.*]] = load float, ptr [[D2]], align 4
1679 // CHECK-32-EX-NEXT: [[MUL:%.*]] = fmul float [[TMP3]], 3.300000e+01
1680 // CHECK-32-EX-NEXT: store float [[MUL]], ptr [[D2]], align 4
1681 // CHECK-32-EX-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
1682 // CHECK-32-EX-NEXT: store ptr [[C1]], ptr [[TMP4]], align 4
1683 // CHECK-32-EX-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 1
1684 // CHECK-32-EX-NEXT: store ptr [[D2]], ptr [[TMP5]], align 4
1685 // CHECK-32-EX-NEXT: [[TMP6:%.*]] = call i32 @__kmpc_nvptx_parallel_reduce_nowait_v2(ptr @[[GLOB1]], i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @_omp_reduction_shuffle_and_reduce_func1, ptr @_omp_reduction_inter_warp_copy_func2)
1686 // CHECK-32-EX-NEXT: [[TMP7:%.*]] = icmp eq i32 [[TMP6]], 1
1687 // CHECK-32-EX-NEXT: br i1 [[TMP7]], label [[DOTOMP_REDUCTION_THEN:%.*]], label [[DOTOMP_REDUCTION_DONE:%.*]]
1688 // CHECK-32-EX: .omp.reduction.then:
1689 // CHECK-32-EX-NEXT: [[TMP8:%.*]] = load i8, ptr [[TMP0]], align 1
1690 // CHECK-32-EX-NEXT: [[CONV4:%.*]] = sext i8 [[TMP8]] to i32
1691 // CHECK-32-EX-NEXT: [[TMP9:%.*]] = load i8, ptr [[C1]], align 1
1692 // CHECK-32-EX-NEXT: [[CONV5:%.*]] = sext i8 [[TMP9]] to i32
1693 // CHECK-32-EX-NEXT: [[XOR6:%.*]] = xor i32 [[CONV4]], [[CONV5]]
1694 // CHECK-32-EX-NEXT: [[CONV7:%.*]] = trunc i32 [[XOR6]] to i8
1695 // CHECK-32-EX-NEXT: store i8 [[CONV7]], ptr [[TMP0]], align 1
1696 // CHECK-32-EX-NEXT: [[TMP10:%.*]] = load float, ptr [[TMP1]], align 4
1697 // CHECK-32-EX-NEXT: [[TMP11:%.*]] = load float, ptr [[D2]], align 4
1698 // CHECK-32-EX-NEXT: [[MUL8:%.*]] = fmul float [[TMP10]], [[TMP11]]
1699 // CHECK-32-EX-NEXT: store float [[MUL8]], ptr [[TMP1]], align 4
1700 // CHECK-32-EX-NEXT: br label [[DOTOMP_REDUCTION_DONE]]
1701 // CHECK-32-EX: .omp.reduction.done:
1702 // CHECK-32-EX-NEXT: ret void
1705 // CHECK-32-EX-LABEL: define {{[^@]+}}@_omp_reduction_shuffle_and_reduce_func1
1706 // CHECK-32-EX-SAME: (ptr noundef [[TMP0:%.*]], i16 noundef signext [[TMP1:%.*]], i16 noundef signext [[TMP2:%.*]], i16 noundef signext [[TMP3:%.*]]) #[[ATTR2]] {
1707 // CHECK-32-EX-NEXT: entry:
1708 // CHECK-32-EX-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
1709 // CHECK-32-EX-NEXT: [[DOTADDR1:%.*]] = alloca i16, align 2
1710 // CHECK-32-EX-NEXT: [[DOTADDR2:%.*]] = alloca i16, align 2
1711 // CHECK-32-EX-NEXT: [[DOTADDR3:%.*]] = alloca i16, align 2
1712 // CHECK-32-EX-NEXT: [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST:%.*]] = alloca [2 x ptr], align 4
1713 // CHECK-32-EX-NEXT: [[DOTOMP_REDUCTION_ELEMENT:%.*]] = alloca i8, align 1
1714 // CHECK-32-EX-NEXT: [[DOTOMP_REDUCTION_ELEMENT4:%.*]] = alloca float, align 4
1715 // CHECK-32-EX-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
1716 // CHECK-32-EX-NEXT: store i16 [[TMP1]], ptr [[DOTADDR1]], align 2
1717 // CHECK-32-EX-NEXT: store i16 [[TMP2]], ptr [[DOTADDR2]], align 2
1718 // CHECK-32-EX-NEXT: store i16 [[TMP3]], ptr [[DOTADDR3]], align 2
1719 // CHECK-32-EX-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR]], align 4
1720 // CHECK-32-EX-NEXT: [[TMP5:%.*]] = load i16, ptr [[DOTADDR1]], align 2
1721 // CHECK-32-EX-NEXT: [[TMP6:%.*]] = load i16, ptr [[DOTADDR2]], align 2
1722 // CHECK-32-EX-NEXT: [[TMP7:%.*]] = load i16, ptr [[DOTADDR3]], align 2
1723 // CHECK-32-EX-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i32 0, i32 0
1724 // CHECK-32-EX-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 4
1725 // CHECK-32-EX-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 0
1726 // CHECK-32-EX-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[TMP9]], i32 1
1727 // CHECK-32-EX-NEXT: [[TMP12:%.*]] = load i8, ptr [[TMP9]], align 1
1728 // CHECK-32-EX-NEXT: [[TMP13:%.*]] = sext i8 [[TMP12]] to i32
1729 // CHECK-32-EX-NEXT: [[TMP14:%.*]] = call i32 @__kmpc_get_warp_size()
1730 // CHECK-32-EX-NEXT: [[TMP15:%.*]] = trunc i32 [[TMP14]] to i16
1731 // CHECK-32-EX-NEXT: [[TMP16:%.*]] = call i32 @__kmpc_shuffle_int32(i32 [[TMP13]], i16 [[TMP6]], i16 [[TMP15]])
1732 // CHECK-32-EX-NEXT: [[TMP17:%.*]] = trunc i32 [[TMP16]] to i8
1733 // CHECK-32-EX-NEXT: store i8 [[TMP17]], ptr [[DOTOMP_REDUCTION_ELEMENT]], align 1
1734 // CHECK-32-EX-NEXT: [[TMP18:%.*]] = getelementptr i8, ptr [[TMP9]], i32 1
1735 // CHECK-32-EX-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[DOTOMP_REDUCTION_ELEMENT]], i32 1
1736 // CHECK-32-EX-NEXT: store ptr [[DOTOMP_REDUCTION_ELEMENT]], ptr [[TMP10]], align 4
1737 // CHECK-32-EX-NEXT: [[TMP20:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i32 0, i32 1
1738 // CHECK-32-EX-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP20]], align 4
1739 // CHECK-32-EX-NEXT: [[TMP22:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 1
1740 // CHECK-32-EX-NEXT: [[TMP23:%.*]] = getelementptr float, ptr [[TMP21]], i32 1
1741 // CHECK-32-EX-NEXT: [[TMP24:%.*]] = load i32, ptr [[TMP21]], align 4
1742 // CHECK-32-EX-NEXT: [[TMP25:%.*]] = call i32 @__kmpc_get_warp_size()
1743 // CHECK-32-EX-NEXT: [[TMP26:%.*]] = trunc i32 [[TMP25]] to i16
1744 // CHECK-32-EX-NEXT: [[TMP27:%.*]] = call i32 @__kmpc_shuffle_int32(i32 [[TMP24]], i16 [[TMP6]], i16 [[TMP26]])
1745 // CHECK-32-EX-NEXT: store i32 [[TMP27]], ptr [[DOTOMP_REDUCTION_ELEMENT4]], align 4
1746 // CHECK-32-EX-NEXT: [[TMP28:%.*]] = getelementptr i32, ptr [[TMP21]], i32 1
1747 // CHECK-32-EX-NEXT: [[TMP29:%.*]] = getelementptr i32, ptr [[DOTOMP_REDUCTION_ELEMENT4]], i32 1
1748 // CHECK-32-EX-NEXT: store ptr [[DOTOMP_REDUCTION_ELEMENT4]], ptr [[TMP22]], align 4
1749 // CHECK-32-EX-NEXT: [[TMP30:%.*]] = icmp eq i16 [[TMP7]], 0
1750 // CHECK-32-EX-NEXT: [[TMP31:%.*]] = icmp eq i16 [[TMP7]], 1
1751 // CHECK-32-EX-NEXT: [[TMP32:%.*]] = icmp ult i16 [[TMP5]], [[TMP6]]
1752 // CHECK-32-EX-NEXT: [[TMP33:%.*]] = and i1 [[TMP31]], [[TMP32]]
1753 // CHECK-32-EX-NEXT: [[TMP34:%.*]] = icmp eq i16 [[TMP7]], 2
1754 // CHECK-32-EX-NEXT: [[TMP35:%.*]] = and i16 [[TMP5]], 1
1755 // CHECK-32-EX-NEXT: [[TMP36:%.*]] = icmp eq i16 [[TMP35]], 0
1756 // CHECK-32-EX-NEXT: [[TMP37:%.*]] = and i1 [[TMP34]], [[TMP36]]
1757 // CHECK-32-EX-NEXT: [[TMP38:%.*]] = icmp sgt i16 [[TMP6]], 0
1758 // CHECK-32-EX-NEXT: [[TMP39:%.*]] = and i1 [[TMP37]], [[TMP38]]
1759 // CHECK-32-EX-NEXT: [[TMP40:%.*]] = or i1 [[TMP30]], [[TMP33]]
1760 // CHECK-32-EX-NEXT: [[TMP41:%.*]] = or i1 [[TMP40]], [[TMP39]]
1761 // CHECK-32-EX-NEXT: br i1 [[TMP41]], label [[THEN:%.*]], label [[ELSE:%.*]]
1762 // CHECK-32-EX: then:
1763 // CHECK-32-EX-NEXT: call void @"{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l29_omp_outlined_omp$reduction$reduction_func"(ptr [[TMP4]], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]]) #[[ATTR3]]
1764 // CHECK-32-EX-NEXT: br label [[IFCONT:%.*]]
1765 // CHECK-32-EX: else:
1766 // CHECK-32-EX-NEXT: br label [[IFCONT]]
1767 // CHECK-32-EX: ifcont:
1768 // CHECK-32-EX-NEXT: [[TMP42:%.*]] = icmp eq i16 [[TMP7]], 1
1769 // CHECK-32-EX-NEXT: [[TMP43:%.*]] = icmp uge i16 [[TMP5]], [[TMP6]]
1770 // CHECK-32-EX-NEXT: [[TMP44:%.*]] = and i1 [[TMP42]], [[TMP43]]
1771 // CHECK-32-EX-NEXT: br i1 [[TMP44]], label [[THEN5:%.*]], label [[ELSE6:%.*]]
1772 // CHECK-32-EX: then5:
1773 // CHECK-32-EX-NEXT: [[TMP45:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 0
1774 // CHECK-32-EX-NEXT: [[TMP46:%.*]] = load ptr, ptr [[TMP45]], align 4
1775 // CHECK-32-EX-NEXT: [[TMP47:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i32 0, i32 0
1776 // CHECK-32-EX-NEXT: [[TMP48:%.*]] = load ptr, ptr [[TMP47]], align 4
1777 // CHECK-32-EX-NEXT: [[TMP49:%.*]] = load i8, ptr [[TMP46]], align 1
1778 // CHECK-32-EX-NEXT: store i8 [[TMP49]], ptr [[TMP48]], align 1
1779 // CHECK-32-EX-NEXT: [[TMP50:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 1
1780 // CHECK-32-EX-NEXT: [[TMP51:%.*]] = load ptr, ptr [[TMP50]], align 4
1781 // CHECK-32-EX-NEXT: [[TMP52:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i32 0, i32 1
1782 // CHECK-32-EX-NEXT: [[TMP53:%.*]] = load ptr, ptr [[TMP52]], align 4
1783 // CHECK-32-EX-NEXT: [[TMP54:%.*]] = load float, ptr [[TMP51]], align 4
1784 // CHECK-32-EX-NEXT: store float [[TMP54]], ptr [[TMP53]], align 4
1785 // CHECK-32-EX-NEXT: br label [[IFCONT7:%.*]]
1786 // CHECK-32-EX: else6:
1787 // CHECK-32-EX-NEXT: br label [[IFCONT7]]
1788 // CHECK-32-EX: ifcont7:
1789 // CHECK-32-EX-NEXT: ret void
1792 // CHECK-32-EX-LABEL: define {{[^@]+}}@_omp_reduction_inter_warp_copy_func2
1793 // CHECK-32-EX-SAME: (ptr noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]]) #[[ATTR2]] {
1794 // CHECK-32-EX-NEXT: entry:
1795 // CHECK-32-EX-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
1796 // CHECK-32-EX-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
1797 // CHECK-32-EX-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
1798 // CHECK-32-EX-NEXT: store i32 [[TMP1]], ptr [[DOTADDR1]], align 4
1799 // CHECK-32-EX-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
1800 // CHECK-32-EX-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
1801 // CHECK-32-EX-NEXT: [[NVPTX_LANE_ID:%.*]] = and i32 [[TMP4]], 31
1802 // CHECK-32-EX-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
1803 // CHECK-32-EX-NEXT: [[NVPTX_WARP_ID:%.*]] = ashr i32 [[TMP5]], 5
1804 // CHECK-32-EX-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTADDR]], align 4
1805 // CHECK-32-EX-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
1806 // CHECK-32-EX-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
1807 // CHECK-32-EX-NEXT: [[WARP_MASTER:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
1808 // CHECK-32-EX-NEXT: br i1 [[WARP_MASTER]], label [[THEN:%.*]], label [[ELSE:%.*]]
1809 // CHECK-32-EX: then:
1810 // CHECK-32-EX-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i32 0, i32 0
1811 // CHECK-32-EX-NEXT: [[TMP8:%.*]] = load ptr, ptr [[TMP7]], align 4
1812 // CHECK-32-EX-NEXT: [[TMP9:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
1813 // CHECK-32-EX-NEXT: [[TMP10:%.*]] = load i8, ptr [[TMP8]], align 1
1814 // CHECK-32-EX-NEXT: store volatile i8 [[TMP10]], ptr addrspace(3) [[TMP9]], align 1
1815 // CHECK-32-EX-NEXT: br label [[IFCONT:%.*]]
1816 // CHECK-32-EX: else:
1817 // CHECK-32-EX-NEXT: br label [[IFCONT]]
1818 // CHECK-32-EX: ifcont:
1819 // CHECK-32-EX-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
1820 // CHECK-32-EX-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
1821 // CHECK-32-EX-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTADDR1]], align 4
1822 // CHECK-32-EX-NEXT: [[IS_ACTIVE_THREAD:%.*]] = icmp ult i32 [[TMP3]], [[TMP11]]
1823 // CHECK-32-EX-NEXT: br i1 [[IS_ACTIVE_THREAD]], label [[THEN2:%.*]], label [[ELSE3:%.*]]
1824 // CHECK-32-EX: then3:
1825 // CHECK-32-EX-NEXT: [[TMP12:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
1826 // CHECK-32-EX-NEXT: [[TMP13:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i32 0, i32 0
1827 // CHECK-32-EX-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP13]], align 4
1828 // CHECK-32-EX-NEXT: [[TMP15:%.*]] = load volatile i8, ptr addrspace(3) [[TMP12]], align 1
1829 // CHECK-32-EX-NEXT: store i8 [[TMP15]], ptr [[TMP14]], align 1
1830 // CHECK-32-EX-NEXT: br label [[IFCONT4:%.*]]
1831 // CHECK-32-EX: else4:
1832 // CHECK-32-EX-NEXT: br label [[IFCONT4]]
1833 // CHECK-32-EX: ifcont5:
1834 // CHECK-32-EX-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
1835 // CHECK-32-EX-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
1836 // CHECK-32-EX-NEXT: [[WARP_MASTER5:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
1837 // CHECK-32-EX-NEXT: br i1 [[WARP_MASTER5]], label [[THEN6:%.*]], label [[ELSE7:%.*]]
1838 // CHECK-32-EX: then8:
1839 // CHECK-32-EX-NEXT: [[TMP16:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i32 0, i32 1
1840 // CHECK-32-EX-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 4
1841 // CHECK-32-EX-NEXT: [[TMP18:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
1842 // CHECK-32-EX-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP17]], align 4
1843 // CHECK-32-EX-NEXT: store volatile i32 [[TMP19]], ptr addrspace(3) [[TMP18]], align 4
1844 // CHECK-32-EX-NEXT: br label [[IFCONT8:%.*]]
1845 // CHECK-32-EX: else9:
1846 // CHECK-32-EX-NEXT: br label [[IFCONT8]]
1847 // CHECK-32-EX: ifcont10:
1848 // CHECK-32-EX-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
1849 // CHECK-32-EX-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
1850 // CHECK-32-EX-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTADDR1]], align 4
1851 // CHECK-32-EX-NEXT: [[IS_ACTIVE_THREAD9:%.*]] = icmp ult i32 [[TMP3]], [[TMP20]]
1852 // CHECK-32-EX-NEXT: br i1 [[IS_ACTIVE_THREAD9]], label [[THEN10:%.*]], label [[ELSE11:%.*]]
1853 // CHECK-32-EX: then13:
1854 // CHECK-32-EX-NEXT: [[TMP21:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
1855 // CHECK-32-EX-NEXT: [[TMP22:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i32 0, i32 1
1856 // CHECK-32-EX-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP22]], align 4
1857 // CHECK-32-EX-NEXT: [[TMP24:%.*]] = load volatile i32, ptr addrspace(3) [[TMP21]], align 4
1858 // CHECK-32-EX-NEXT: store i32 [[TMP24]], ptr [[TMP23]], align 4
1859 // CHECK-32-EX-NEXT: br label [[IFCONT12:%.*]]
1860 // CHECK-32-EX: else14:
1861 // CHECK-32-EX-NEXT: br label [[IFCONT12]]
1862 // CHECK-32-EX: ifcont15:
1863 // CHECK-32-EX-NEXT: ret void
1866 // CHECK-32-EX-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l35
1867 // CHECK-32-EX-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[B:%.*]]) #[[ATTR0]] {
1868 // CHECK-32-EX-NEXT: entry:
1869 // CHECK-32-EX-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
1870 // CHECK-32-EX-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
1871 // CHECK-32-EX-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
1872 // CHECK-32-EX-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [2 x ptr], align 4
1873 // CHECK-32-EX-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
1874 // CHECK-32-EX-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
1875 // CHECK-32-EX-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
1876 // CHECK-32-EX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
1877 // CHECK-32-EX-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4
1878 // CHECK-32-EX-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l35_kernel_environment, ptr [[DYN_PTR]])
1879 // CHECK-32-EX-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP2]], -1
1880 // CHECK-32-EX-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
1881 // CHECK-32-EX: user_code.entry:
1882 // CHECK-32-EX-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
1883 // CHECK-32-EX-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
1884 // CHECK-32-EX-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 4
1885 // CHECK-32-EX-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
1886 // CHECK-32-EX-NEXT: store ptr [[TMP1]], ptr [[TMP5]], align 4
1887 // CHECK-32-EX-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP3]], i32 1, i32 -1, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l35_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i32 2)
1888 // CHECK-32-EX-NEXT: call void @__kmpc_target_deinit()
1889 // CHECK-32-EX-NEXT: ret void
1890 // CHECK-32-EX: worker.exit:
1891 // CHECK-32-EX-NEXT: ret void
1894 // CHECK-32-EX-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l35_omp_outlined
1895 // CHECK-32-EX-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[B:%.*]]) #[[ATTR1]] {
1896 // CHECK-32-EX-NEXT: entry:
1897 // CHECK-32-EX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
1898 // CHECK-32-EX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
1899 // CHECK-32-EX-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
1900 // CHECK-32-EX-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
1901 // CHECK-32-EX-NEXT: [[A1:%.*]] = alloca i32, align 4
1902 // CHECK-32-EX-NEXT: [[B2:%.*]] = alloca i16, align 2
1903 // CHECK-32-EX-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x ptr], align 4
1904 // CHECK-32-EX-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
1905 // CHECK-32-EX-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
1906 // CHECK-32-EX-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
1907 // CHECK-32-EX-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
1908 // CHECK-32-EX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
1909 // CHECK-32-EX-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4
1910 // CHECK-32-EX-NEXT: store i32 0, ptr [[A1]], align 4
1911 // CHECK-32-EX-NEXT: store i16 -32768, ptr [[B2]], align 2
1912 // CHECK-32-EX-NEXT: [[TMP2:%.*]] = load i32, ptr [[A1]], align 4
1913 // CHECK-32-EX-NEXT: [[OR:%.*]] = or i32 [[TMP2]], 1
1914 // CHECK-32-EX-NEXT: store i32 [[OR]], ptr [[A1]], align 4
1915 // CHECK-32-EX-NEXT: [[TMP3:%.*]] = load i16, ptr [[B2]], align 2
1916 // CHECK-32-EX-NEXT: [[CONV:%.*]] = sext i16 [[TMP3]] to i32
1917 // CHECK-32-EX-NEXT: [[CMP:%.*]] = icmp sgt i32 99, [[CONV]]
1918 // CHECK-32-EX-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1919 // CHECK-32-EX: cond.true:
1920 // CHECK-32-EX-NEXT: br label [[COND_END:%.*]]
1921 // CHECK-32-EX: cond.false:
1922 // CHECK-32-EX-NEXT: [[TMP4:%.*]] = load i16, ptr [[B2]], align 2
1923 // CHECK-32-EX-NEXT: [[CONV3:%.*]] = sext i16 [[TMP4]] to i32
1924 // CHECK-32-EX-NEXT: br label [[COND_END]]
1925 // CHECK-32-EX: cond.end:
1926 // CHECK-32-EX-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[CONV3]], [[COND_FALSE]] ]
1927 // CHECK-32-EX-NEXT: [[CONV4:%.*]] = trunc i32 [[COND]] to i16
1928 // CHECK-32-EX-NEXT: store i16 [[CONV4]], ptr [[B2]], align 2
1929 // CHECK-32-EX-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
1930 // CHECK-32-EX-NEXT: store ptr [[A1]], ptr [[TMP5]], align 4
1931 // CHECK-32-EX-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 1
1932 // CHECK-32-EX-NEXT: store ptr [[B2]], ptr [[TMP6]], align 4
1933 // CHECK-32-EX-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_nvptx_parallel_reduce_nowait_v2(ptr @[[GLOB1]], i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @_omp_reduction_shuffle_and_reduce_func3, ptr @_omp_reduction_inter_warp_copy_func4)
1934 // CHECK-32-EX-NEXT: [[TMP8:%.*]] = icmp eq i32 [[TMP7]], 1
1935 // CHECK-32-EX-NEXT: br i1 [[TMP8]], label [[DOTOMP_REDUCTION_THEN:%.*]], label [[DOTOMP_REDUCTION_DONE:%.*]]
1936 // CHECK-32-EX: .omp.reduction.then:
1937 // CHECK-32-EX-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP0]], align 4
1938 // CHECK-32-EX-NEXT: [[TMP10:%.*]] = load i32, ptr [[A1]], align 4
1939 // CHECK-32-EX-NEXT: [[OR5:%.*]] = or i32 [[TMP9]], [[TMP10]]
1940 // CHECK-32-EX-NEXT: store i32 [[OR5]], ptr [[TMP0]], align 4
1941 // CHECK-32-EX-NEXT: [[TMP11:%.*]] = load i16, ptr [[TMP1]], align 2
1942 // CHECK-32-EX-NEXT: [[CONV6:%.*]] = sext i16 [[TMP11]] to i32
1943 // CHECK-32-EX-NEXT: [[TMP12:%.*]] = load i16, ptr [[B2]], align 2
1944 // CHECK-32-EX-NEXT: [[CONV7:%.*]] = sext i16 [[TMP12]] to i32
1945 // CHECK-32-EX-NEXT: [[CMP8:%.*]] = icmp sgt i32 [[CONV6]], [[CONV7]]
1946 // CHECK-32-EX-NEXT: br i1 [[CMP8]], label [[COND_TRUE9:%.*]], label [[COND_FALSE10:%.*]]
1947 // CHECK-32-EX: cond.true9:
1948 // CHECK-32-EX-NEXT: [[TMP13:%.*]] = load i16, ptr [[TMP1]], align 2
1949 // CHECK-32-EX-NEXT: br label [[COND_END11:%.*]]
1950 // CHECK-32-EX: cond.false10:
1951 // CHECK-32-EX-NEXT: [[TMP14:%.*]] = load i16, ptr [[B2]], align 2
1952 // CHECK-32-EX-NEXT: br label [[COND_END11]]
1953 // CHECK-32-EX: cond.end11:
1954 // CHECK-32-EX-NEXT: [[COND12:%.*]] = phi i16 [ [[TMP13]], [[COND_TRUE9]] ], [ [[TMP14]], [[COND_FALSE10]] ]
1955 // CHECK-32-EX-NEXT: store i16 [[COND12]], ptr [[TMP1]], align 2
1956 // CHECK-32-EX-NEXT: br label [[DOTOMP_REDUCTION_DONE]]
1957 // CHECK-32-EX: .omp.reduction.done:
1958 // CHECK-32-EX-NEXT: ret void
1961 // CHECK-32-EX-LABEL: define {{[^@]+}}@_omp_reduction_shuffle_and_reduce_func3
1962 // CHECK-32-EX-SAME: (ptr noundef [[TMP0:%.*]], i16 noundef signext [[TMP1:%.*]], i16 noundef signext [[TMP2:%.*]], i16 noundef signext [[TMP3:%.*]]) #[[ATTR2]] {
1963 // CHECK-32-EX-NEXT: entry:
1964 // CHECK-32-EX-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
1965 // CHECK-32-EX-NEXT: [[DOTADDR1:%.*]] = alloca i16, align 2
1966 // CHECK-32-EX-NEXT: [[DOTADDR2:%.*]] = alloca i16, align 2
1967 // CHECK-32-EX-NEXT: [[DOTADDR3:%.*]] = alloca i16, align 2
1968 // CHECK-32-EX-NEXT: [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST:%.*]] = alloca [2 x ptr], align 4
1969 // CHECK-32-EX-NEXT: [[DOTOMP_REDUCTION_ELEMENT:%.*]] = alloca i32, align 4
1970 // CHECK-32-EX-NEXT: [[DOTOMP_REDUCTION_ELEMENT4:%.*]] = alloca i16, align 2
1971 // CHECK-32-EX-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
1972 // CHECK-32-EX-NEXT: store i16 [[TMP1]], ptr [[DOTADDR1]], align 2
1973 // CHECK-32-EX-NEXT: store i16 [[TMP2]], ptr [[DOTADDR2]], align 2
1974 // CHECK-32-EX-NEXT: store i16 [[TMP3]], ptr [[DOTADDR3]], align 2
1975 // CHECK-32-EX-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR]], align 4
1976 // CHECK-32-EX-NEXT: [[TMP5:%.*]] = load i16, ptr [[DOTADDR1]], align 2
1977 // CHECK-32-EX-NEXT: [[TMP6:%.*]] = load i16, ptr [[DOTADDR2]], align 2
1978 // CHECK-32-EX-NEXT: [[TMP7:%.*]] = load i16, ptr [[DOTADDR3]], align 2
1979 // CHECK-32-EX-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i32 0, i32 0
1980 // CHECK-32-EX-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 4
1981 // CHECK-32-EX-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 0
1982 // CHECK-32-EX-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[TMP9]], i32 1
1983 // CHECK-32-EX-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP9]], align 4
1984 // CHECK-32-EX-NEXT: [[TMP13:%.*]] = call i32 @__kmpc_get_warp_size()
1985 // CHECK-32-EX-NEXT: [[TMP14:%.*]] = trunc i32 [[TMP13]] to i16
1986 // CHECK-32-EX-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_shuffle_int32(i32 [[TMP12]], i16 [[TMP6]], i16 [[TMP14]])
1987 // CHECK-32-EX-NEXT: store i32 [[TMP15]], ptr [[DOTOMP_REDUCTION_ELEMENT]], align 4
1988 // CHECK-32-EX-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[TMP9]], i32 1
1989 // CHECK-32-EX-NEXT: [[TMP17:%.*]] = getelementptr i32, ptr [[DOTOMP_REDUCTION_ELEMENT]], i32 1
1990 // CHECK-32-EX-NEXT: store ptr [[DOTOMP_REDUCTION_ELEMENT]], ptr [[TMP10]], align 4
1991 // CHECK-32-EX-NEXT: [[TMP18:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i32 0, i32 1
1992 // CHECK-32-EX-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP18]], align 4
1993 // CHECK-32-EX-NEXT: [[TMP20:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 1
1994 // CHECK-32-EX-NEXT: [[TMP21:%.*]] = getelementptr i16, ptr [[TMP19]], i32 1
1995 // CHECK-32-EX-NEXT: [[TMP22:%.*]] = load i16, ptr [[TMP19]], align 2
1996 // CHECK-32-EX-NEXT: [[TMP23:%.*]] = sext i16 [[TMP22]] to i32
1997 // CHECK-32-EX-NEXT: [[TMP24:%.*]] = call i32 @__kmpc_get_warp_size()
1998 // CHECK-32-EX-NEXT: [[TMP25:%.*]] = trunc i32 [[TMP24]] to i16
1999 // CHECK-32-EX-NEXT: [[TMP26:%.*]] = call i32 @__kmpc_shuffle_int32(i32 [[TMP23]], i16 [[TMP6]], i16 [[TMP25]])
2000 // CHECK-32-EX-NEXT: [[TMP27:%.*]] = trunc i32 [[TMP26]] to i16
2001 // CHECK-32-EX-NEXT: store i16 [[TMP27]], ptr [[DOTOMP_REDUCTION_ELEMENT4]], align 2
2002 // CHECK-32-EX-NEXT: [[TMP28:%.*]] = getelementptr i16, ptr [[TMP19]], i32 1
2003 // CHECK-32-EX-NEXT: [[TMP29:%.*]] = getelementptr i16, ptr [[DOTOMP_REDUCTION_ELEMENT4]], i32 1
2004 // CHECK-32-EX-NEXT: store ptr [[DOTOMP_REDUCTION_ELEMENT4]], ptr [[TMP20]], align 4
2005 // CHECK-32-EX-NEXT: [[TMP30:%.*]] = icmp eq i16 [[TMP7]], 0
2006 // CHECK-32-EX-NEXT: [[TMP31:%.*]] = icmp eq i16 [[TMP7]], 1
2007 // CHECK-32-EX-NEXT: [[TMP32:%.*]] = icmp ult i16 [[TMP5]], [[TMP6]]
2008 // CHECK-32-EX-NEXT: [[TMP33:%.*]] = and i1 [[TMP31]], [[TMP32]]
2009 // CHECK-32-EX-NEXT: [[TMP34:%.*]] = icmp eq i16 [[TMP7]], 2
2010 // CHECK-32-EX-NEXT: [[TMP35:%.*]] = and i16 [[TMP5]], 1
2011 // CHECK-32-EX-NEXT: [[TMP36:%.*]] = icmp eq i16 [[TMP35]], 0
2012 // CHECK-32-EX-NEXT: [[TMP37:%.*]] = and i1 [[TMP34]], [[TMP36]]
2013 // CHECK-32-EX-NEXT: [[TMP38:%.*]] = icmp sgt i16 [[TMP6]], 0
2014 // CHECK-32-EX-NEXT: [[TMP39:%.*]] = and i1 [[TMP37]], [[TMP38]]
2015 // CHECK-32-EX-NEXT: [[TMP40:%.*]] = or i1 [[TMP30]], [[TMP33]]
2016 // CHECK-32-EX-NEXT: [[TMP41:%.*]] = or i1 [[TMP40]], [[TMP39]]
2017 // CHECK-32-EX-NEXT: br i1 [[TMP41]], label [[THEN:%.*]], label [[ELSE:%.*]]
2018 // CHECK-32-EX: then:
2019 // CHECK-32-EX-NEXT: call void @"{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l35_omp_outlined_omp$reduction$reduction_func"(ptr [[TMP4]], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]]) #[[ATTR3]]
2020 // CHECK-32-EX-NEXT: br label [[IFCONT:%.*]]
2021 // CHECK-32-EX: else:
2022 // CHECK-32-EX-NEXT: br label [[IFCONT]]
2023 // CHECK-32-EX: ifcont:
2024 // CHECK-32-EX-NEXT: [[TMP42:%.*]] = icmp eq i16 [[TMP7]], 1
2025 // CHECK-32-EX-NEXT: [[TMP43:%.*]] = icmp uge i16 [[TMP5]], [[TMP6]]
2026 // CHECK-32-EX-NEXT: [[TMP44:%.*]] = and i1 [[TMP42]], [[TMP43]]
2027 // CHECK-32-EX-NEXT: br i1 [[TMP44]], label [[THEN5:%.*]], label [[ELSE6:%.*]]
2028 // CHECK-32-EX: then5:
2029 // CHECK-32-EX-NEXT: [[TMP45:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 0
2030 // CHECK-32-EX-NEXT: [[TMP46:%.*]] = load ptr, ptr [[TMP45]], align 4
2031 // CHECK-32-EX-NEXT: [[TMP47:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i32 0, i32 0
2032 // CHECK-32-EX-NEXT: [[TMP48:%.*]] = load ptr, ptr [[TMP47]], align 4
2033 // CHECK-32-EX-NEXT: [[TMP49:%.*]] = load i32, ptr [[TMP46]], align 4
2034 // CHECK-32-EX-NEXT: store i32 [[TMP49]], ptr [[TMP48]], align 4
2035 // CHECK-32-EX-NEXT: [[TMP50:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 1
2036 // CHECK-32-EX-NEXT: [[TMP51:%.*]] = load ptr, ptr [[TMP50]], align 4
2037 // CHECK-32-EX-NEXT: [[TMP52:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i32 0, i32 1
2038 // CHECK-32-EX-NEXT: [[TMP53:%.*]] = load ptr, ptr [[TMP52]], align 4
2039 // CHECK-32-EX-NEXT: [[TMP54:%.*]] = load i16, ptr [[TMP51]], align 2
2040 // CHECK-32-EX-NEXT: store i16 [[TMP54]], ptr [[TMP53]], align 2
2041 // CHECK-32-EX-NEXT: br label [[IFCONT7:%.*]]
2042 // CHECK-32-EX: else6:
2043 // CHECK-32-EX-NEXT: br label [[IFCONT7]]
2044 // CHECK-32-EX: ifcont7:
2045 // CHECK-32-EX-NEXT: ret void
2048 // CHECK-32-EX-LABEL: define {{[^@]+}}@_omp_reduction_inter_warp_copy_func4
2049 // CHECK-32-EX-SAME: (ptr noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]]) #[[ATTR2]] {
2050 // CHECK-32-EX-NEXT: entry:
2051 // CHECK-32-EX-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
2052 // CHECK-32-EX-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
2053 // CHECK-32-EX-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
2054 // CHECK-32-EX-NEXT: store i32 [[TMP1]], ptr [[DOTADDR1]], align 4
2055 // CHECK-32-EX-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
2056 // CHECK-32-EX-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
2057 // CHECK-32-EX-NEXT: [[NVPTX_LANE_ID:%.*]] = and i32 [[TMP4]], 31
2058 // CHECK-32-EX-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
2059 // CHECK-32-EX-NEXT: [[NVPTX_WARP_ID:%.*]] = ashr i32 [[TMP5]], 5
2060 // CHECK-32-EX-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTADDR]], align 4
2061 // CHECK-32-EX-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
2062 // CHECK-32-EX-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
2063 // CHECK-32-EX-NEXT: [[WARP_MASTER:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
2064 // CHECK-32-EX-NEXT: br i1 [[WARP_MASTER]], label [[THEN:%.*]], label [[ELSE:%.*]]
2065 // CHECK-32-EX: then:
2066 // CHECK-32-EX-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i32 0, i32 0
2067 // CHECK-32-EX-NEXT: [[TMP8:%.*]] = load ptr, ptr [[TMP7]], align 4
2068 // CHECK-32-EX-NEXT: [[TMP9:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
2069 // CHECK-32-EX-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP8]], align 4
2070 // CHECK-32-EX-NEXT: store volatile i32 [[TMP10]], ptr addrspace(3) [[TMP9]], align 4
2071 // CHECK-32-EX-NEXT: br label [[IFCONT:%.*]]
2072 // CHECK-32-EX: else:
2073 // CHECK-32-EX-NEXT: br label [[IFCONT]]
2074 // CHECK-32-EX: ifcont:
2075 // CHECK-32-EX-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
2076 // CHECK-32-EX-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
2077 // CHECK-32-EX-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTADDR1]], align 4
2078 // CHECK-32-EX-NEXT: [[IS_ACTIVE_THREAD:%.*]] = icmp ult i32 [[TMP3]], [[TMP11]]
2079 // CHECK-32-EX-NEXT: br i1 [[IS_ACTIVE_THREAD]], label [[THEN2:%.*]], label [[ELSE3:%.*]]
2080 // CHECK-32-EX: then3:
2081 // CHECK-32-EX-NEXT: [[TMP12:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
2082 // CHECK-32-EX-NEXT: [[TMP13:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i32 0, i32 0
2083 // CHECK-32-EX-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP13]], align 4
2084 // CHECK-32-EX-NEXT: [[TMP15:%.*]] = load volatile i32, ptr addrspace(3) [[TMP12]], align 4
2085 // CHECK-32-EX-NEXT: store i32 [[TMP15]], ptr [[TMP14]], align 4
2086 // CHECK-32-EX-NEXT: br label [[IFCONT4:%.*]]
2087 // CHECK-32-EX: else4:
2088 // CHECK-32-EX-NEXT: br label [[IFCONT4]]
2089 // CHECK-32-EX: ifcont5:
2090 // CHECK-32-EX-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
2091 // CHECK-32-EX-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
2092 // CHECK-32-EX-NEXT: [[WARP_MASTER5:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
2093 // CHECK-32-EX-NEXT: br i1 [[WARP_MASTER5]], label [[THEN6:%.*]], label [[ELSE7:%.*]]
2094 // CHECK-32-EX: then8:
2095 // CHECK-32-EX-NEXT: [[TMP16:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i32 0, i32 1
2096 // CHECK-32-EX-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 4
2097 // CHECK-32-EX-NEXT: [[TMP18:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
2098 // CHECK-32-EX-NEXT: [[TMP19:%.*]] = load i16, ptr [[TMP17]], align 2
2099 // CHECK-32-EX-NEXT: store volatile i16 [[TMP19]], ptr addrspace(3) [[TMP18]], align 2
2100 // CHECK-32-EX-NEXT: br label [[IFCONT8:%.*]]
2101 // CHECK-32-EX: else9:
2102 // CHECK-32-EX-NEXT: br label [[IFCONT8]]
2103 // CHECK-32-EX: ifcont10:
2104 // CHECK-32-EX-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
2105 // CHECK-32-EX-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
2106 // CHECK-32-EX-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTADDR1]], align 4
2107 // CHECK-32-EX-NEXT: [[IS_ACTIVE_THREAD9:%.*]] = icmp ult i32 [[TMP3]], [[TMP20]]
2108 // CHECK-32-EX-NEXT: br i1 [[IS_ACTIVE_THREAD9]], label [[THEN10:%.*]], label [[ELSE11:%.*]]
2109 // CHECK-32-EX: then13:
2110 // CHECK-32-EX-NEXT: [[TMP21:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
2111 // CHECK-32-EX-NEXT: [[TMP22:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i32 0, i32 1
2112 // CHECK-32-EX-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP22]], align 4
2113 // CHECK-32-EX-NEXT: [[TMP24:%.*]] = load volatile i16, ptr addrspace(3) [[TMP21]], align 2
2114 // CHECK-32-EX-NEXT: store i16 [[TMP24]], ptr [[TMP23]], align 2
2115 // CHECK-32-EX-NEXT: br label [[IFCONT12:%.*]]
2116 // CHECK-32-EX: else14:
2117 // CHECK-32-EX-NEXT: br label [[IFCONT12]]
2118 // CHECK-32-EX: ifcont15:
2119 // CHECK-32-EX-NEXT: ret void