1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
2 // Test target codegen - host bc file has to be created first.
3 // RUN: %clang_cc1 -verify -fopenmp -fopenmp-cuda-mode -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc
4 // RUN: %clang_cc1 -verify -fopenmp -fopenmp-cuda-mode -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefix=CHECK-64
5 // RUN: %clang_cc1 -verify -fopenmp -fopenmp-cuda-mode -x c++ -triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm-bc %s -o %t-x86-host.bc
6 // RUN: %clang_cc1 -verify -fopenmp -fopenmp-cuda-mode -x c++ -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix=CHECK-32
7 // RUN: %clang_cc1 -verify -fopenmp -fopenmp-cuda-mode -fexceptions -fcxx-exceptions -x c++ -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix=CHECK-32-EX
8 // expected-no-diagnostics
12 // Check for the data transfer medium in shared memory to transfer the reduction list to the first warp.
14 // Check that the execution mode of all 3 target regions is set to SPMD mode.
24 #pragma omp target parallel reduction(+: e)
29 #pragma omp target parallel reduction(^: c) reduction(*: d)
35 #pragma omp target parallel reduction(|: a) reduction(max: b)
47 a
+= ftemplate
<char>(n
);
52 // define internal void [[PFN]](
57 // Shuffle and reduce function
58 // Condition to reduce
59 // Now check if we should just copy over the remote reduction list
61 // Inter warp copy function
63 // Barrier after copy to shared memory storage medium.
66 // define internal void [[PFN1]](
70 // Shuffle and reduce function
71 // Condition to reduce
72 // Now check if we should just copy over the remote reduction list
74 // Inter warp copy function
76 // Barrier after copy to shared memory storage medium.
79 // Barrier after copy to shared memory storage medium.
82 // define internal void [[PFN2]](
87 // Shuffle and reduce function
88 // Condition to reduce
89 // Now check if we should just copy over the remote reduction list
91 // Inter warp copy function
93 // Barrier after copy to shared memory storage medium.
96 // Barrier after copy to shared memory storage medium.
100 // CHECK-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l24
101 // CHECK-64-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[E:%.*]]) #[[ATTR0:[0-9]+]] {
102 // CHECK-64-NEXT: entry:
103 // CHECK-64-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
104 // CHECK-64-NEXT: [[E_ADDR:%.*]] = alloca ptr, align 8
105 // CHECK-64-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x ptr], align 8
106 // CHECK-64-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
107 // CHECK-64-NEXT: store ptr [[E]], ptr [[E_ADDR]], align 8
108 // CHECK-64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[E_ADDR]], align 8
109 // CHECK-64-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l24_kernel_environment, ptr [[DYN_PTR]])
110 // CHECK-64-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
111 // CHECK-64-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
112 // CHECK-64: user_code.entry:
113 // CHECK-64-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1:[0-9]+]])
114 // CHECK-64-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
115 // CHECK-64-NEXT: store ptr [[TMP0]], ptr [[TMP3]], align 8
116 // CHECK-64-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l24_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i64 1)
117 // CHECK-64-NEXT: call void @__kmpc_target_deinit()
118 // CHECK-64-NEXT: ret void
119 // CHECK-64: worker.exit:
120 // CHECK-64-NEXT: ret void
123 // CHECK-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l24_omp_outlined
124 // CHECK-64-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[E:%.*]]) #[[ATTR1:[0-9]+]] {
125 // CHECK-64-NEXT: entry:
126 // CHECK-64-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
127 // CHECK-64-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
128 // CHECK-64-NEXT: [[E_ADDR:%.*]] = alloca ptr, align 8
129 // CHECK-64-NEXT: [[E1:%.*]] = alloca double, align 8
130 // CHECK-64-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 8
131 // CHECK-64-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
132 // CHECK-64-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
133 // CHECK-64-NEXT: store ptr [[E]], ptr [[E_ADDR]], align 8
134 // CHECK-64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[E_ADDR]], align 8
135 // CHECK-64-NEXT: store double 0.000000e+00, ptr [[E1]], align 8
136 // CHECK-64-NEXT: [[TMP1:%.*]] = load double, ptr [[E1]], align 8
137 // CHECK-64-NEXT: [[ADD:%.*]] = fadd double [[TMP1]], 5.000000e+00
138 // CHECK-64-NEXT: store double [[ADD]], ptr [[E1]], align 8
139 // CHECK-64-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
140 // CHECK-64-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
141 // CHECK-64-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
142 // CHECK-64-NEXT: store ptr [[E1]], ptr [[TMP4]], align 8
143 // CHECK-64-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_nvptx_parallel_reduce_nowait_v2(ptr @[[GLOB1]], i32 [[TMP3]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @_omp_reduction_shuffle_and_reduce_func, ptr @_omp_reduction_inter_warp_copy_func)
144 // CHECK-64-NEXT: [[TMP6:%.*]] = icmp eq i32 [[TMP5]], 1
145 // CHECK-64-NEXT: br i1 [[TMP6]], label [[DOTOMP_REDUCTION_THEN:%.*]], label [[DOTOMP_REDUCTION_DONE:%.*]]
146 // CHECK-64: .omp.reduction.then:
147 // CHECK-64-NEXT: [[TMP7:%.*]] = load double, ptr [[TMP0]], align 8
148 // CHECK-64-NEXT: [[TMP8:%.*]] = load double, ptr [[E1]], align 8
149 // CHECK-64-NEXT: [[ADD2:%.*]] = fadd double [[TMP7]], [[TMP8]]
150 // CHECK-64-NEXT: store double [[ADD2]], ptr [[TMP0]], align 8
151 // CHECK-64-NEXT: call void @__kmpc_nvptx_end_reduce_nowait(i32 [[TMP3]])
152 // CHECK-64-NEXT: br label [[DOTOMP_REDUCTION_DONE]]
153 // CHECK-64: .omp.reduction.done:
154 // CHECK-64-NEXT: ret void
157 // CHECK-64-LABEL: define {{[^@]+}}@_omp_reduction_shuffle_and_reduce_func
158 // CHECK-64-SAME: (ptr noundef [[TMP0:%.*]], i16 noundef signext [[TMP1:%.*]], i16 noundef signext [[TMP2:%.*]], i16 noundef signext [[TMP3:%.*]]) #[[ATTR2:[0-9]+]] {
159 // CHECK-64-NEXT: entry:
160 // CHECK-64-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
161 // CHECK-64-NEXT: [[DOTADDR1:%.*]] = alloca i16, align 2
162 // CHECK-64-NEXT: [[DOTADDR2:%.*]] = alloca i16, align 2
163 // CHECK-64-NEXT: [[DOTADDR3:%.*]] = alloca i16, align 2
164 // CHECK-64-NEXT: [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST:%.*]] = alloca [1 x ptr], align 8
165 // CHECK-64-NEXT: [[DOTOMP_REDUCTION_ELEMENT:%.*]] = alloca double, align 8
166 // CHECK-64-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
167 // CHECK-64-NEXT: store i16 [[TMP1]], ptr [[DOTADDR1]], align 2
168 // CHECK-64-NEXT: store i16 [[TMP2]], ptr [[DOTADDR2]], align 2
169 // CHECK-64-NEXT: store i16 [[TMP3]], ptr [[DOTADDR3]], align 2
170 // CHECK-64-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR]], align 8
171 // CHECK-64-NEXT: [[TMP5:%.*]] = load i16, ptr [[DOTADDR1]], align 2
172 // CHECK-64-NEXT: [[TMP6:%.*]] = load i16, ptr [[DOTADDR2]], align 2
173 // CHECK-64-NEXT: [[TMP7:%.*]] = load i16, ptr [[DOTADDR3]], align 2
174 // CHECK-64-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i64 0, i64 0
175 // CHECK-64-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8
176 // CHECK-64-NEXT: [[TMP10:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 0
177 // CHECK-64-NEXT: [[TMP11:%.*]] = getelementptr double, ptr [[TMP9]], i64 1
178 // CHECK-64-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP9]], align 8
179 // CHECK-64-NEXT: [[TMP13:%.*]] = call i32 @__kmpc_get_warp_size()
180 // CHECK-64-NEXT: [[TMP14:%.*]] = trunc i32 [[TMP13]] to i16
181 // CHECK-64-NEXT: [[TMP15:%.*]] = call i64 @__kmpc_shuffle_int64(i64 [[TMP12]], i16 [[TMP6]], i16 [[TMP14]])
182 // CHECK-64-NEXT: store i64 [[TMP15]], ptr [[DOTOMP_REDUCTION_ELEMENT]], align 8
183 // CHECK-64-NEXT: [[TMP16:%.*]] = getelementptr i64, ptr [[TMP9]], i64 1
184 // CHECK-64-NEXT: [[TMP17:%.*]] = getelementptr i64, ptr [[DOTOMP_REDUCTION_ELEMENT]], i64 1
185 // CHECK-64-NEXT: store ptr [[DOTOMP_REDUCTION_ELEMENT]], ptr [[TMP10]], align 8
186 // CHECK-64-NEXT: [[TMP18:%.*]] = icmp eq i16 [[TMP7]], 0
187 // CHECK-64-NEXT: [[TMP19:%.*]] = icmp eq i16 [[TMP7]], 1
188 // CHECK-64-NEXT: [[TMP20:%.*]] = icmp ult i16 [[TMP5]], [[TMP6]]
189 // CHECK-64-NEXT: [[TMP21:%.*]] = and i1 [[TMP19]], [[TMP20]]
190 // CHECK-64-NEXT: [[TMP22:%.*]] = icmp eq i16 [[TMP7]], 2
191 // CHECK-64-NEXT: [[TMP23:%.*]] = and i16 [[TMP5]], 1
192 // CHECK-64-NEXT: [[TMP24:%.*]] = icmp eq i16 [[TMP23]], 0
193 // CHECK-64-NEXT: [[TMP25:%.*]] = and i1 [[TMP22]], [[TMP24]]
194 // CHECK-64-NEXT: [[TMP26:%.*]] = icmp sgt i16 [[TMP6]], 0
195 // CHECK-64-NEXT: [[TMP27:%.*]] = and i1 [[TMP25]], [[TMP26]]
196 // CHECK-64-NEXT: [[TMP28:%.*]] = or i1 [[TMP18]], [[TMP21]]
197 // CHECK-64-NEXT: [[TMP29:%.*]] = or i1 [[TMP28]], [[TMP27]]
198 // CHECK-64-NEXT: br i1 [[TMP29]], label [[THEN:%.*]], label [[ELSE:%.*]]
200 // CHECK-64-NEXT: call void @"{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l24_omp_outlined_omp$reduction$reduction_func"(ptr [[TMP4]], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]]) #[[ATTR3:[0-9]+]]
201 // CHECK-64-NEXT: br label [[IFCONT:%.*]]
203 // CHECK-64-NEXT: br label [[IFCONT]]
205 // CHECK-64-NEXT: [[TMP30:%.*]] = icmp eq i16 [[TMP7]], 1
206 // CHECK-64-NEXT: [[TMP31:%.*]] = icmp uge i16 [[TMP5]], [[TMP6]]
207 // CHECK-64-NEXT: [[TMP32:%.*]] = and i1 [[TMP30]], [[TMP31]]
208 // CHECK-64-NEXT: br i1 [[TMP32]], label [[THEN4:%.*]], label [[ELSE5:%.*]]
210 // CHECK-64-NEXT: [[TMP33:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 0
211 // CHECK-64-NEXT: [[TMP34:%.*]] = load ptr, ptr [[TMP33]], align 8
212 // CHECK-64-NEXT: [[TMP35:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i64 0, i64 0
213 // CHECK-64-NEXT: [[TMP36:%.*]] = load ptr, ptr [[TMP35]], align 8
214 // CHECK-64-NEXT: [[TMP37:%.*]] = load double, ptr [[TMP34]], align 8
215 // CHECK-64-NEXT: store double [[TMP37]], ptr [[TMP36]], align 8
216 // CHECK-64-NEXT: br label [[IFCONT6:%.*]]
218 // CHECK-64-NEXT: br label [[IFCONT6]]
219 // CHECK-64: ifcont6:
220 // CHECK-64-NEXT: ret void
223 // CHECK-64-LABEL: define {{[^@]+}}@_omp_reduction_inter_warp_copy_func
224 // CHECK-64-SAME: (ptr noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]]) #[[ATTR2]] {
225 // CHECK-64-NEXT: entry:
226 // CHECK-64-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
227 // CHECK-64-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
228 // CHECK-64-NEXT: [[DOTCNT_ADDR:%.*]] = alloca i32, align 4
229 // CHECK-64-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
230 // CHECK-64-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
231 // CHECK-64-NEXT: store i32 [[TMP1]], ptr [[DOTADDR1]], align 4
232 // CHECK-64-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
233 // CHECK-64-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
234 // CHECK-64-NEXT: [[NVPTX_LANE_ID:%.*]] = and i32 [[TMP4]], 31
235 // CHECK-64-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
236 // CHECK-64-NEXT: [[NVPTX_WARP_ID:%.*]] = ashr i32 [[TMP5]], 5
237 // CHECK-64-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTADDR]], align 8
238 // CHECK-64-NEXT: store i32 0, ptr [[DOTCNT_ADDR]], align 4
239 // CHECK-64-NEXT: br label [[PRECOND:%.*]]
240 // CHECK-64: precond:
241 // CHECK-64-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCNT_ADDR]], align 4
242 // CHECK-64-NEXT: [[TMP8:%.*]] = icmp ult i32 [[TMP7]], 2
243 // CHECK-64-NEXT: br i1 [[TMP8]], label [[BODY:%.*]], label [[EXIT:%.*]]
245 // CHECK-64-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2:[0-9]+]], i32 [[TMP2]])
246 // CHECK-64-NEXT: [[WARP_MASTER:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
247 // CHECK-64-NEXT: br i1 [[WARP_MASTER]], label [[THEN:%.*]], label [[ELSE:%.*]]
249 // CHECK-64-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP6]], i64 0, i64 0
250 // CHECK-64-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 8
251 // CHECK-64-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[TMP10]], i32 [[TMP7]]
252 // CHECK-64-NEXT: [[TMP12:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
253 // CHECK-64-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP11]], align 4
254 // CHECK-64-NEXT: store volatile i32 [[TMP13]], ptr addrspace(3) [[TMP12]], align 4
255 // CHECK-64-NEXT: br label [[IFCONT:%.*]]
257 // CHECK-64-NEXT: br label [[IFCONT]]
259 // CHECK-64-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
260 // CHECK-64-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTADDR1]], align 4
261 // CHECK-64-NEXT: [[IS_ACTIVE_THREAD:%.*]] = icmp ult i32 [[TMP3]], [[TMP14]]
262 // CHECK-64-NEXT: br i1 [[IS_ACTIVE_THREAD]], label [[THEN2:%.*]], label [[ELSE3:%.*]]
264 // CHECK-64-NEXT: [[TMP15:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
265 // CHECK-64-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP6]], i64 0, i64 0
266 // CHECK-64-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8
267 // CHECK-64-NEXT: [[TMP18:%.*]] = getelementptr i32, ptr [[TMP17]], i32 [[TMP7]]
268 // CHECK-64-NEXT: [[TMP19:%.*]] = load volatile i32, ptr addrspace(3) [[TMP15]], align 4
269 // CHECK-64-NEXT: store i32 [[TMP19]], ptr [[TMP18]], align 4
270 // CHECK-64-NEXT: br label [[IFCONT4:%.*]]
272 // CHECK-64-NEXT: br label [[IFCONT4]]
273 // CHECK-64: ifcont4:
274 // CHECK-64-NEXT: [[TMP20:%.*]] = add nsw i32 [[TMP7]], 1
275 // CHECK-64-NEXT: store i32 [[TMP20]], ptr [[DOTCNT_ADDR]], align 4
276 // CHECK-64-NEXT: br label [[PRECOND]]
278 // CHECK-64-NEXT: ret void
281 // CHECK-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l29
282 // CHECK-64-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[C:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[D:%.*]]) #[[ATTR0]] {
283 // CHECK-64-NEXT: entry:
284 // CHECK-64-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
285 // CHECK-64-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
286 // CHECK-64-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
287 // CHECK-64-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [2 x ptr], align 8
288 // CHECK-64-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
289 // CHECK-64-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
290 // CHECK-64-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
291 // CHECK-64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[C_ADDR]], align 8
292 // CHECK-64-NEXT: [[TMP1:%.*]] = load ptr, ptr [[D_ADDR]], align 8
293 // CHECK-64-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l29_kernel_environment, ptr [[DYN_PTR]])
294 // CHECK-64-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP2]], -1
295 // CHECK-64-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
296 // CHECK-64: user_code.entry:
297 // CHECK-64-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
298 // CHECK-64-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
299 // CHECK-64-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 8
300 // CHECK-64-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
301 // CHECK-64-NEXT: store ptr [[TMP1]], ptr [[TMP5]], align 8
302 // CHECK-64-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP3]], i32 1, i32 -1, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l29_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i64 2)
303 // CHECK-64-NEXT: call void @__kmpc_target_deinit()
304 // CHECK-64-NEXT: ret void
305 // CHECK-64: worker.exit:
306 // CHECK-64-NEXT: ret void
309 // CHECK-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l29_omp_outlined
310 // CHECK-64-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[C:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[D:%.*]]) #[[ATTR1]] {
311 // CHECK-64-NEXT: entry:
312 // CHECK-64-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
313 // CHECK-64-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
314 // CHECK-64-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
315 // CHECK-64-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
316 // CHECK-64-NEXT: [[C1:%.*]] = alloca i8, align 1
317 // CHECK-64-NEXT: [[D2:%.*]] = alloca float, align 4
318 // CHECK-64-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x ptr], align 8
319 // CHECK-64-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
320 // CHECK-64-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
321 // CHECK-64-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
322 // CHECK-64-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
323 // CHECK-64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[C_ADDR]], align 8
324 // CHECK-64-NEXT: [[TMP1:%.*]] = load ptr, ptr [[D_ADDR]], align 8
325 // CHECK-64-NEXT: store i8 0, ptr [[C1]], align 1
326 // CHECK-64-NEXT: store float 1.000000e+00, ptr [[D2]], align 4
327 // CHECK-64-NEXT: [[TMP2:%.*]] = load i8, ptr [[C1]], align 1
328 // CHECK-64-NEXT: [[CONV:%.*]] = sext i8 [[TMP2]] to i32
329 // CHECK-64-NEXT: [[XOR:%.*]] = xor i32 [[CONV]], 2
330 // CHECK-64-NEXT: [[CONV3:%.*]] = trunc i32 [[XOR]] to i8
331 // CHECK-64-NEXT: store i8 [[CONV3]], ptr [[C1]], align 1
332 // CHECK-64-NEXT: [[TMP3:%.*]] = load float, ptr [[D2]], align 4
333 // CHECK-64-NEXT: [[MUL:%.*]] = fmul float [[TMP3]], 3.300000e+01
334 // CHECK-64-NEXT: store float [[MUL]], ptr [[D2]], align 4
335 // CHECK-64-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
336 // CHECK-64-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4
337 // CHECK-64-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
338 // CHECK-64-NEXT: store ptr [[C1]], ptr [[TMP6]], align 8
339 // CHECK-64-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
340 // CHECK-64-NEXT: store ptr [[D2]], ptr [[TMP7]], align 8
341 // CHECK-64-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_nvptx_parallel_reduce_nowait_v2(ptr @[[GLOB1]], i32 [[TMP5]], i32 2, i64 16, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @_omp_reduction_shuffle_and_reduce_func1, ptr @_omp_reduction_inter_warp_copy_func2)
342 // CHECK-64-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP8]], 1
343 // CHECK-64-NEXT: br i1 [[TMP9]], label [[DOTOMP_REDUCTION_THEN:%.*]], label [[DOTOMP_REDUCTION_DONE:%.*]]
344 // CHECK-64: .omp.reduction.then:
345 // CHECK-64-NEXT: [[TMP10:%.*]] = load i8, ptr [[TMP0]], align 1
346 // CHECK-64-NEXT: [[CONV4:%.*]] = sext i8 [[TMP10]] to i32
347 // CHECK-64-NEXT: [[TMP11:%.*]] = load i8, ptr [[C1]], align 1
348 // CHECK-64-NEXT: [[CONV5:%.*]] = sext i8 [[TMP11]] to i32
349 // CHECK-64-NEXT: [[XOR6:%.*]] = xor i32 [[CONV4]], [[CONV5]]
350 // CHECK-64-NEXT: [[CONV7:%.*]] = trunc i32 [[XOR6]] to i8
351 // CHECK-64-NEXT: store i8 [[CONV7]], ptr [[TMP0]], align 1
352 // CHECK-64-NEXT: [[TMP12:%.*]] = load float, ptr [[TMP1]], align 4
353 // CHECK-64-NEXT: [[TMP13:%.*]] = load float, ptr [[D2]], align 4
354 // CHECK-64-NEXT: [[MUL8:%.*]] = fmul float [[TMP12]], [[TMP13]]
355 // CHECK-64-NEXT: store float [[MUL8]], ptr [[TMP1]], align 4
356 // CHECK-64-NEXT: call void @__kmpc_nvptx_end_reduce_nowait(i32 [[TMP5]])
357 // CHECK-64-NEXT: br label [[DOTOMP_REDUCTION_DONE]]
358 // CHECK-64: .omp.reduction.done:
359 // CHECK-64-NEXT: ret void
362 // CHECK-64-LABEL: define {{[^@]+}}@_omp_reduction_shuffle_and_reduce_func1
363 // CHECK-64-SAME: (ptr noundef [[TMP0:%.*]], i16 noundef signext [[TMP1:%.*]], i16 noundef signext [[TMP2:%.*]], i16 noundef signext [[TMP3:%.*]]) #[[ATTR2]] {
364 // CHECK-64-NEXT: entry:
365 // CHECK-64-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
366 // CHECK-64-NEXT: [[DOTADDR1:%.*]] = alloca i16, align 2
367 // CHECK-64-NEXT: [[DOTADDR2:%.*]] = alloca i16, align 2
368 // CHECK-64-NEXT: [[DOTADDR3:%.*]] = alloca i16, align 2
369 // CHECK-64-NEXT: [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST:%.*]] = alloca [2 x ptr], align 8
370 // CHECK-64-NEXT: [[DOTOMP_REDUCTION_ELEMENT:%.*]] = alloca i8, align 1
371 // CHECK-64-NEXT: [[DOTOMP_REDUCTION_ELEMENT4:%.*]] = alloca float, align 4
372 // CHECK-64-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
373 // CHECK-64-NEXT: store i16 [[TMP1]], ptr [[DOTADDR1]], align 2
374 // CHECK-64-NEXT: store i16 [[TMP2]], ptr [[DOTADDR2]], align 2
375 // CHECK-64-NEXT: store i16 [[TMP3]], ptr [[DOTADDR3]], align 2
376 // CHECK-64-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR]], align 8
377 // CHECK-64-NEXT: [[TMP5:%.*]] = load i16, ptr [[DOTADDR1]], align 2
378 // CHECK-64-NEXT: [[TMP6:%.*]] = load i16, ptr [[DOTADDR2]], align 2
379 // CHECK-64-NEXT: [[TMP7:%.*]] = load i16, ptr [[DOTADDR3]], align 2
380 // CHECK-64-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i64 0, i64 0
381 // CHECK-64-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8
382 // CHECK-64-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 0
383 // CHECK-64-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[TMP9]], i64 1
384 // CHECK-64-NEXT: [[TMP12:%.*]] = load i8, ptr [[TMP9]], align 1
385 // CHECK-64-NEXT: [[TMP13:%.*]] = sext i8 [[TMP12]] to i32
386 // CHECK-64-NEXT: [[TMP14:%.*]] = call i32 @__kmpc_get_warp_size()
387 // CHECK-64-NEXT: [[TMP15:%.*]] = trunc i32 [[TMP14]] to i16
388 // CHECK-64-NEXT: [[TMP16:%.*]] = call i32 @__kmpc_shuffle_int32(i32 [[TMP13]], i16 [[TMP6]], i16 [[TMP15]])
389 // CHECK-64-NEXT: [[TMP17:%.*]] = trunc i32 [[TMP16]] to i8
390 // CHECK-64-NEXT: store i8 [[TMP17]], ptr [[DOTOMP_REDUCTION_ELEMENT]], align 1
391 // CHECK-64-NEXT: [[TMP18:%.*]] = getelementptr i8, ptr [[TMP9]], i64 1
392 // CHECK-64-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[DOTOMP_REDUCTION_ELEMENT]], i64 1
393 // CHECK-64-NEXT: store ptr [[DOTOMP_REDUCTION_ELEMENT]], ptr [[TMP10]], align 8
394 // CHECK-64-NEXT: [[TMP20:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i64 0, i64 1
395 // CHECK-64-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP20]], align 8
396 // CHECK-64-NEXT: [[TMP22:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 1
397 // CHECK-64-NEXT: [[TMP23:%.*]] = getelementptr float, ptr [[TMP21]], i64 1
398 // CHECK-64-NEXT: [[TMP24:%.*]] = load i32, ptr [[TMP21]], align 4
399 // CHECK-64-NEXT: [[TMP25:%.*]] = call i32 @__kmpc_get_warp_size()
400 // CHECK-64-NEXT: [[TMP26:%.*]] = trunc i32 [[TMP25]] to i16
401 // CHECK-64-NEXT: [[TMP27:%.*]] = call i32 @__kmpc_shuffle_int32(i32 [[TMP24]], i16 [[TMP6]], i16 [[TMP26]])
402 // CHECK-64-NEXT: store i32 [[TMP27]], ptr [[DOTOMP_REDUCTION_ELEMENT4]], align 4
403 // CHECK-64-NEXT: [[TMP28:%.*]] = getelementptr i32, ptr [[TMP21]], i64 1
404 // CHECK-64-NEXT: [[TMP29:%.*]] = getelementptr i32, ptr [[DOTOMP_REDUCTION_ELEMENT4]], i64 1
405 // CHECK-64-NEXT: store ptr [[DOTOMP_REDUCTION_ELEMENT4]], ptr [[TMP22]], align 8
406 // CHECK-64-NEXT: [[TMP30:%.*]] = icmp eq i16 [[TMP7]], 0
407 // CHECK-64-NEXT: [[TMP31:%.*]] = icmp eq i16 [[TMP7]], 1
408 // CHECK-64-NEXT: [[TMP32:%.*]] = icmp ult i16 [[TMP5]], [[TMP6]]
409 // CHECK-64-NEXT: [[TMP33:%.*]] = and i1 [[TMP31]], [[TMP32]]
410 // CHECK-64-NEXT: [[TMP34:%.*]] = icmp eq i16 [[TMP7]], 2
411 // CHECK-64-NEXT: [[TMP35:%.*]] = and i16 [[TMP5]], 1
412 // CHECK-64-NEXT: [[TMP36:%.*]] = icmp eq i16 [[TMP35]], 0
413 // CHECK-64-NEXT: [[TMP37:%.*]] = and i1 [[TMP34]], [[TMP36]]
414 // CHECK-64-NEXT: [[TMP38:%.*]] = icmp sgt i16 [[TMP6]], 0
415 // CHECK-64-NEXT: [[TMP39:%.*]] = and i1 [[TMP37]], [[TMP38]]
416 // CHECK-64-NEXT: [[TMP40:%.*]] = or i1 [[TMP30]], [[TMP33]]
417 // CHECK-64-NEXT: [[TMP41:%.*]] = or i1 [[TMP40]], [[TMP39]]
418 // CHECK-64-NEXT: br i1 [[TMP41]], label [[THEN:%.*]], label [[ELSE:%.*]]
420 // CHECK-64-NEXT: call void @"{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l29_omp_outlined_omp$reduction$reduction_func"(ptr [[TMP4]], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]]) #[[ATTR3]]
421 // CHECK-64-NEXT: br label [[IFCONT:%.*]]
423 // CHECK-64-NEXT: br label [[IFCONT]]
425 // CHECK-64-NEXT: [[TMP42:%.*]] = icmp eq i16 [[TMP7]], 1
426 // CHECK-64-NEXT: [[TMP43:%.*]] = icmp uge i16 [[TMP5]], [[TMP6]]
427 // CHECK-64-NEXT: [[TMP44:%.*]] = and i1 [[TMP42]], [[TMP43]]
428 // CHECK-64-NEXT: br i1 [[TMP44]], label [[THEN5:%.*]], label [[ELSE6:%.*]]
430 // CHECK-64-NEXT: [[TMP45:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 0
431 // CHECK-64-NEXT: [[TMP46:%.*]] = load ptr, ptr [[TMP45]], align 8
432 // CHECK-64-NEXT: [[TMP47:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i64 0, i64 0
433 // CHECK-64-NEXT: [[TMP48:%.*]] = load ptr, ptr [[TMP47]], align 8
434 // CHECK-64-NEXT: [[TMP49:%.*]] = load i8, ptr [[TMP46]], align 1
435 // CHECK-64-NEXT: store i8 [[TMP49]], ptr [[TMP48]], align 1
436 // CHECK-64-NEXT: [[TMP50:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 1
437 // CHECK-64-NEXT: [[TMP51:%.*]] = load ptr, ptr [[TMP50]], align 8
438 // CHECK-64-NEXT: [[TMP52:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i64 0, i64 1
439 // CHECK-64-NEXT: [[TMP53:%.*]] = load ptr, ptr [[TMP52]], align 8
440 // CHECK-64-NEXT: [[TMP54:%.*]] = load float, ptr [[TMP51]], align 4
441 // CHECK-64-NEXT: store float [[TMP54]], ptr [[TMP53]], align 4
442 // CHECK-64-NEXT: br label [[IFCONT7:%.*]]
444 // CHECK-64-NEXT: br label [[IFCONT7]]
445 // CHECK-64: ifcont7:
446 // CHECK-64-NEXT: ret void
449 // CHECK-64-LABEL: define {{[^@]+}}@_omp_reduction_inter_warp_copy_func2
450 // CHECK-64-SAME: (ptr noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]]) #[[ATTR2]] {
451 // CHECK-64-NEXT: entry:
452 // CHECK-64-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
453 // CHECK-64-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
454 // CHECK-64-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
455 // CHECK-64-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
456 // CHECK-64-NEXT: store i32 [[TMP1]], ptr [[DOTADDR1]], align 4
457 // CHECK-64-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
458 // CHECK-64-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
459 // CHECK-64-NEXT: [[NVPTX_LANE_ID:%.*]] = and i32 [[TMP4]], 31
460 // CHECK-64-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
461 // CHECK-64-NEXT: [[NVPTX_WARP_ID:%.*]] = ashr i32 [[TMP5]], 5
462 // CHECK-64-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTADDR]], align 8
463 // CHECK-64-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
464 // CHECK-64-NEXT: [[WARP_MASTER:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
465 // CHECK-64-NEXT: br i1 [[WARP_MASTER]], label [[THEN:%.*]], label [[ELSE:%.*]]
467 // CHECK-64-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i64 0, i64 0
468 // CHECK-64-NEXT: [[TMP8:%.*]] = load ptr, ptr [[TMP7]], align 8
469 // CHECK-64-NEXT: [[TMP9:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
470 // CHECK-64-NEXT: [[TMP10:%.*]] = load i8, ptr [[TMP8]], align 1
471 // CHECK-64-NEXT: store volatile i8 [[TMP10]], ptr addrspace(3) [[TMP9]], align 1
472 // CHECK-64-NEXT: br label [[IFCONT:%.*]]
474 // CHECK-64-NEXT: br label [[IFCONT]]
476 // CHECK-64-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
477 // CHECK-64-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTADDR1]], align 4
478 // CHECK-64-NEXT: [[IS_ACTIVE_THREAD:%.*]] = icmp ult i32 [[TMP3]], [[TMP11]]
479 // CHECK-64-NEXT: br i1 [[IS_ACTIVE_THREAD]], label [[THEN2:%.*]], label [[ELSE3:%.*]]
481 // CHECK-64-NEXT: [[TMP12:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
482 // CHECK-64-NEXT: [[TMP13:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i64 0, i64 0
483 // CHECK-64-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP13]], align 8
484 // CHECK-64-NEXT: [[TMP15:%.*]] = load volatile i8, ptr addrspace(3) [[TMP12]], align 1
485 // CHECK-64-NEXT: store i8 [[TMP15]], ptr [[TMP14]], align 1
486 // CHECK-64-NEXT: br label [[IFCONT4:%.*]]
488 // CHECK-64-NEXT: br label [[IFCONT4]]
489 // CHECK-64: ifcont4:
490 // CHECK-64-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
491 // CHECK-64-NEXT: [[WARP_MASTER5:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
492 // CHECK-64-NEXT: br i1 [[WARP_MASTER5]], label [[THEN6:%.*]], label [[ELSE7:%.*]]
494 // CHECK-64-NEXT: [[TMP16:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i64 0, i64 1
495 // CHECK-64-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8
496 // CHECK-64-NEXT: [[TMP18:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
497 // CHECK-64-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP17]], align 4
498 // CHECK-64-NEXT: store volatile i32 [[TMP19]], ptr addrspace(3) [[TMP18]], align 4
499 // CHECK-64-NEXT: br label [[IFCONT8:%.*]]
501 // CHECK-64-NEXT: br label [[IFCONT8]]
502 // CHECK-64: ifcont8:
503 // CHECK-64-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
504 // CHECK-64-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTADDR1]], align 4
505 // CHECK-64-NEXT: [[IS_ACTIVE_THREAD9:%.*]] = icmp ult i32 [[TMP3]], [[TMP20]]
506 // CHECK-64-NEXT: br i1 [[IS_ACTIVE_THREAD9]], label [[THEN10:%.*]], label [[ELSE11:%.*]]
508 // CHECK-64-NEXT: [[TMP21:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
509 // CHECK-64-NEXT: [[TMP22:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i64 0, i64 1
510 // CHECK-64-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP22]], align 8
511 // CHECK-64-NEXT: [[TMP24:%.*]] = load volatile i32, ptr addrspace(3) [[TMP21]], align 4
512 // CHECK-64-NEXT: store i32 [[TMP24]], ptr [[TMP23]], align 4
513 // CHECK-64-NEXT: br label [[IFCONT12:%.*]]
515 // CHECK-64-NEXT: br label [[IFCONT12]]
516 // CHECK-64: ifcont12:
517 // CHECK-64-NEXT: ret void
520 // CHECK-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l35
521 // CHECK-64-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[B:%.*]]) #[[ATTR0]] {
522 // CHECK-64-NEXT: entry:
523 // CHECK-64-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
524 // CHECK-64-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
525 // CHECK-64-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
526 // CHECK-64-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [2 x ptr], align 8
527 // CHECK-64-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
528 // CHECK-64-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
529 // CHECK-64-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
530 // CHECK-64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
531 // CHECK-64-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8
532 // CHECK-64-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l35_kernel_environment, ptr [[DYN_PTR]])
533 // CHECK-64-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP2]], -1
534 // CHECK-64-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
535 // CHECK-64: user_code.entry:
536 // CHECK-64-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
537 // CHECK-64-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
538 // CHECK-64-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 8
539 // CHECK-64-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
540 // CHECK-64-NEXT: store ptr [[TMP1]], ptr [[TMP5]], align 8
541 // CHECK-64-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP3]], i32 1, i32 -1, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l35_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i64 2)
542 // CHECK-64-NEXT: call void @__kmpc_target_deinit()
543 // CHECK-64-NEXT: ret void
544 // CHECK-64: worker.exit:
545 // CHECK-64-NEXT: ret void
548 // CHECK-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l35_omp_outlined
549 // CHECK-64-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[B:%.*]]) #[[ATTR1]] {
550 // CHECK-64-NEXT: entry:
551 // CHECK-64-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
552 // CHECK-64-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
553 // CHECK-64-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
554 // CHECK-64-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
555 // CHECK-64-NEXT: [[A1:%.*]] = alloca i32, align 4
556 // CHECK-64-NEXT: [[B2:%.*]] = alloca i16, align 2
557 // CHECK-64-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x ptr], align 8
558 // CHECK-64-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
559 // CHECK-64-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
560 // CHECK-64-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
561 // CHECK-64-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
562 // CHECK-64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
563 // CHECK-64-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8
564 // CHECK-64-NEXT: store i32 0, ptr [[A1]], align 4
565 // CHECK-64-NEXT: store i16 -32768, ptr [[B2]], align 2
566 // CHECK-64-NEXT: [[TMP2:%.*]] = load i32, ptr [[A1]], align 4
567 // CHECK-64-NEXT: [[OR:%.*]] = or i32 [[TMP2]], 1
568 // CHECK-64-NEXT: store i32 [[OR]], ptr [[A1]], align 4
569 // CHECK-64-NEXT: [[TMP3:%.*]] = load i16, ptr [[B2]], align 2
570 // CHECK-64-NEXT: [[CONV:%.*]] = sext i16 [[TMP3]] to i32
571 // CHECK-64-NEXT: [[CMP:%.*]] = icmp sgt i32 99, [[CONV]]
572 // CHECK-64-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
573 // CHECK-64: cond.true:
574 // CHECK-64-NEXT: br label [[COND_END:%.*]]
575 // CHECK-64: cond.false:
576 // CHECK-64-NEXT: [[TMP4:%.*]] = load i16, ptr [[B2]], align 2
577 // CHECK-64-NEXT: [[CONV3:%.*]] = sext i16 [[TMP4]] to i32
578 // CHECK-64-NEXT: br label [[COND_END]]
579 // CHECK-64: cond.end:
580 // CHECK-64-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[CONV3]], [[COND_FALSE]] ]
581 // CHECK-64-NEXT: [[CONV4:%.*]] = trunc i32 [[COND]] to i16
582 // CHECK-64-NEXT: store i16 [[CONV4]], ptr [[B2]], align 2
583 // CHECK-64-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
584 // CHECK-64-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
585 // CHECK-64-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
586 // CHECK-64-NEXT: store ptr [[A1]], ptr [[TMP7]], align 8
587 // CHECK-64-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
588 // CHECK-64-NEXT: store ptr [[B2]], ptr [[TMP8]], align 8
589 // CHECK-64-NEXT: [[TMP9:%.*]] = call i32 @__kmpc_nvptx_parallel_reduce_nowait_v2(ptr @[[GLOB1]], i32 [[TMP6]], i32 2, i64 16, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @_omp_reduction_shuffle_and_reduce_func3, ptr @_omp_reduction_inter_warp_copy_func4)
590 // CHECK-64-NEXT: [[TMP10:%.*]] = icmp eq i32 [[TMP9]], 1
591 // CHECK-64-NEXT: br i1 [[TMP10]], label [[DOTOMP_REDUCTION_THEN:%.*]], label [[DOTOMP_REDUCTION_DONE:%.*]]
592 // CHECK-64: .omp.reduction.then:
593 // CHECK-64-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP0]], align 4
594 // CHECK-64-NEXT: [[TMP12:%.*]] = load i32, ptr [[A1]], align 4
595 // CHECK-64-NEXT: [[OR5:%.*]] = or i32 [[TMP11]], [[TMP12]]
596 // CHECK-64-NEXT: store i32 [[OR5]], ptr [[TMP0]], align 4
597 // CHECK-64-NEXT: [[TMP13:%.*]] = load i16, ptr [[TMP1]], align 2
598 // CHECK-64-NEXT: [[CONV6:%.*]] = sext i16 [[TMP13]] to i32
599 // CHECK-64-NEXT: [[TMP14:%.*]] = load i16, ptr [[B2]], align 2
600 // CHECK-64-NEXT: [[CONV7:%.*]] = sext i16 [[TMP14]] to i32
601 // CHECK-64-NEXT: [[CMP8:%.*]] = icmp sgt i32 [[CONV6]], [[CONV7]]
602 // CHECK-64-NEXT: br i1 [[CMP8]], label [[COND_TRUE9:%.*]], label [[COND_FALSE10:%.*]]
603 // CHECK-64: cond.true9:
604 // CHECK-64-NEXT: [[TMP15:%.*]] = load i16, ptr [[TMP1]], align 2
605 // CHECK-64-NEXT: br label [[COND_END11:%.*]]
606 // CHECK-64: cond.false10:
607 // CHECK-64-NEXT: [[TMP16:%.*]] = load i16, ptr [[B2]], align 2
608 // CHECK-64-NEXT: br label [[COND_END11]]
609 // CHECK-64: cond.end11:
610 // CHECK-64-NEXT: [[COND12:%.*]] = phi i16 [ [[TMP15]], [[COND_TRUE9]] ], [ [[TMP16]], [[COND_FALSE10]] ]
611 // CHECK-64-NEXT: store i16 [[COND12]], ptr [[TMP1]], align 2
612 // CHECK-64-NEXT: call void @__kmpc_nvptx_end_reduce_nowait(i32 [[TMP6]])
613 // CHECK-64-NEXT: br label [[DOTOMP_REDUCTION_DONE]]
614 // CHECK-64: .omp.reduction.done:
615 // CHECK-64-NEXT: ret void
618 // CHECK-64-LABEL: define {{[^@]+}}@_omp_reduction_shuffle_and_reduce_func3
619 // CHECK-64-SAME: (ptr noundef [[TMP0:%.*]], i16 noundef signext [[TMP1:%.*]], i16 noundef signext [[TMP2:%.*]], i16 noundef signext [[TMP3:%.*]]) #[[ATTR2]] {
620 // CHECK-64-NEXT: entry:
621 // CHECK-64-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
622 // CHECK-64-NEXT: [[DOTADDR1:%.*]] = alloca i16, align 2
623 // CHECK-64-NEXT: [[DOTADDR2:%.*]] = alloca i16, align 2
624 // CHECK-64-NEXT: [[DOTADDR3:%.*]] = alloca i16, align 2
625 // CHECK-64-NEXT: [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST:%.*]] = alloca [2 x ptr], align 8
626 // CHECK-64-NEXT: [[DOTOMP_REDUCTION_ELEMENT:%.*]] = alloca i32, align 4
627 // CHECK-64-NEXT: [[DOTOMP_REDUCTION_ELEMENT4:%.*]] = alloca i16, align 2
628 // CHECK-64-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
629 // CHECK-64-NEXT: store i16 [[TMP1]], ptr [[DOTADDR1]], align 2
630 // CHECK-64-NEXT: store i16 [[TMP2]], ptr [[DOTADDR2]], align 2
631 // CHECK-64-NEXT: store i16 [[TMP3]], ptr [[DOTADDR3]], align 2
632 // CHECK-64-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR]], align 8
633 // CHECK-64-NEXT: [[TMP5:%.*]] = load i16, ptr [[DOTADDR1]], align 2
634 // CHECK-64-NEXT: [[TMP6:%.*]] = load i16, ptr [[DOTADDR2]], align 2
635 // CHECK-64-NEXT: [[TMP7:%.*]] = load i16, ptr [[DOTADDR3]], align 2
636 // CHECK-64-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i64 0, i64 0
637 // CHECK-64-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8
638 // CHECK-64-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 0
639 // CHECK-64-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[TMP9]], i64 1
640 // CHECK-64-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP9]], align 4
641 // CHECK-64-NEXT: [[TMP13:%.*]] = call i32 @__kmpc_get_warp_size()
642 // CHECK-64-NEXT: [[TMP14:%.*]] = trunc i32 [[TMP13]] to i16
643 // CHECK-64-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_shuffle_int32(i32 [[TMP12]], i16 [[TMP6]], i16 [[TMP14]])
644 // CHECK-64-NEXT: store i32 [[TMP15]], ptr [[DOTOMP_REDUCTION_ELEMENT]], align 4
645 // CHECK-64-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[TMP9]], i64 1
646 // CHECK-64-NEXT: [[TMP17:%.*]] = getelementptr i32, ptr [[DOTOMP_REDUCTION_ELEMENT]], i64 1
647 // CHECK-64-NEXT: store ptr [[DOTOMP_REDUCTION_ELEMENT]], ptr [[TMP10]], align 8
648 // CHECK-64-NEXT: [[TMP18:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i64 0, i64 1
649 // CHECK-64-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP18]], align 8
650 // CHECK-64-NEXT: [[TMP20:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 1
651 // CHECK-64-NEXT: [[TMP21:%.*]] = getelementptr i16, ptr [[TMP19]], i64 1
652 // CHECK-64-NEXT: [[TMP22:%.*]] = load i16, ptr [[TMP19]], align 2
653 // CHECK-64-NEXT: [[TMP23:%.*]] = sext i16 [[TMP22]] to i32
654 // CHECK-64-NEXT: [[TMP24:%.*]] = call i32 @__kmpc_get_warp_size()
655 // CHECK-64-NEXT: [[TMP25:%.*]] = trunc i32 [[TMP24]] to i16
656 // CHECK-64-NEXT: [[TMP26:%.*]] = call i32 @__kmpc_shuffle_int32(i32 [[TMP23]], i16 [[TMP6]], i16 [[TMP25]])
657 // CHECK-64-NEXT: [[TMP27:%.*]] = trunc i32 [[TMP26]] to i16
658 // CHECK-64-NEXT: store i16 [[TMP27]], ptr [[DOTOMP_REDUCTION_ELEMENT4]], align 2
659 // CHECK-64-NEXT: [[TMP28:%.*]] = getelementptr i16, ptr [[TMP19]], i64 1
660 // CHECK-64-NEXT: [[TMP29:%.*]] = getelementptr i16, ptr [[DOTOMP_REDUCTION_ELEMENT4]], i64 1
661 // CHECK-64-NEXT: store ptr [[DOTOMP_REDUCTION_ELEMENT4]], ptr [[TMP20]], align 8
662 // CHECK-64-NEXT: [[TMP30:%.*]] = icmp eq i16 [[TMP7]], 0
663 // CHECK-64-NEXT: [[TMP31:%.*]] = icmp eq i16 [[TMP7]], 1
664 // CHECK-64-NEXT: [[TMP32:%.*]] = icmp ult i16 [[TMP5]], [[TMP6]]
665 // CHECK-64-NEXT: [[TMP33:%.*]] = and i1 [[TMP31]], [[TMP32]]
666 // CHECK-64-NEXT: [[TMP34:%.*]] = icmp eq i16 [[TMP7]], 2
667 // CHECK-64-NEXT: [[TMP35:%.*]] = and i16 [[TMP5]], 1
668 // CHECK-64-NEXT: [[TMP36:%.*]] = icmp eq i16 [[TMP35]], 0
669 // CHECK-64-NEXT: [[TMP37:%.*]] = and i1 [[TMP34]], [[TMP36]]
670 // CHECK-64-NEXT: [[TMP38:%.*]] = icmp sgt i16 [[TMP6]], 0
671 // CHECK-64-NEXT: [[TMP39:%.*]] = and i1 [[TMP37]], [[TMP38]]
672 // CHECK-64-NEXT: [[TMP40:%.*]] = or i1 [[TMP30]], [[TMP33]]
673 // CHECK-64-NEXT: [[TMP41:%.*]] = or i1 [[TMP40]], [[TMP39]]
674 // CHECK-64-NEXT: br i1 [[TMP41]], label [[THEN:%.*]], label [[ELSE:%.*]]
676 // CHECK-64-NEXT: call void @"{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l35_omp_outlined_omp$reduction$reduction_func"(ptr [[TMP4]], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]]) #[[ATTR3]]
677 // CHECK-64-NEXT: br label [[IFCONT:%.*]]
679 // CHECK-64-NEXT: br label [[IFCONT]]
681 // CHECK-64-NEXT: [[TMP42:%.*]] = icmp eq i16 [[TMP7]], 1
682 // CHECK-64-NEXT: [[TMP43:%.*]] = icmp uge i16 [[TMP5]], [[TMP6]]
683 // CHECK-64-NEXT: [[TMP44:%.*]] = and i1 [[TMP42]], [[TMP43]]
684 // CHECK-64-NEXT: br i1 [[TMP44]], label [[THEN5:%.*]], label [[ELSE6:%.*]]
686 // CHECK-64-NEXT: [[TMP45:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 0
687 // CHECK-64-NEXT: [[TMP46:%.*]] = load ptr, ptr [[TMP45]], align 8
688 // CHECK-64-NEXT: [[TMP47:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i64 0, i64 0
689 // CHECK-64-NEXT: [[TMP48:%.*]] = load ptr, ptr [[TMP47]], align 8
690 // CHECK-64-NEXT: [[TMP49:%.*]] = load i32, ptr [[TMP46]], align 4
691 // CHECK-64-NEXT: store i32 [[TMP49]], ptr [[TMP48]], align 4
692 // CHECK-64-NEXT: [[TMP50:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 1
693 // CHECK-64-NEXT: [[TMP51:%.*]] = load ptr, ptr [[TMP50]], align 8
694 // CHECK-64-NEXT: [[TMP52:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i64 0, i64 1
695 // CHECK-64-NEXT: [[TMP53:%.*]] = load ptr, ptr [[TMP52]], align 8
696 // CHECK-64-NEXT: [[TMP54:%.*]] = load i16, ptr [[TMP51]], align 2
697 // CHECK-64-NEXT: store i16 [[TMP54]], ptr [[TMP53]], align 2
698 // CHECK-64-NEXT: br label [[IFCONT7:%.*]]
700 // CHECK-64-NEXT: br label [[IFCONT7]]
701 // CHECK-64: ifcont7:
702 // CHECK-64-NEXT: ret void
705 // CHECK-64-LABEL: define {{[^@]+}}@_omp_reduction_inter_warp_copy_func4
706 // CHECK-64-SAME: (ptr noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]]) #[[ATTR2]] {
707 // CHECK-64-NEXT: entry:
708 // CHECK-64-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
709 // CHECK-64-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
710 // CHECK-64-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
711 // CHECK-64-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
712 // CHECK-64-NEXT: store i32 [[TMP1]], ptr [[DOTADDR1]], align 4
713 // CHECK-64-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
714 // CHECK-64-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
715 // CHECK-64-NEXT: [[NVPTX_LANE_ID:%.*]] = and i32 [[TMP4]], 31
716 // CHECK-64-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
717 // CHECK-64-NEXT: [[NVPTX_WARP_ID:%.*]] = ashr i32 [[TMP5]], 5
718 // CHECK-64-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTADDR]], align 8
719 // CHECK-64-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
720 // CHECK-64-NEXT: [[WARP_MASTER:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
721 // CHECK-64-NEXT: br i1 [[WARP_MASTER]], label [[THEN:%.*]], label [[ELSE:%.*]]
723 // CHECK-64-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i64 0, i64 0
724 // CHECK-64-NEXT: [[TMP8:%.*]] = load ptr, ptr [[TMP7]], align 8
725 // CHECK-64-NEXT: [[TMP9:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
726 // CHECK-64-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP8]], align 4
727 // CHECK-64-NEXT: store volatile i32 [[TMP10]], ptr addrspace(3) [[TMP9]], align 4
728 // CHECK-64-NEXT: br label [[IFCONT:%.*]]
730 // CHECK-64-NEXT: br label [[IFCONT]]
732 // CHECK-64-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
733 // CHECK-64-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTADDR1]], align 4
734 // CHECK-64-NEXT: [[IS_ACTIVE_THREAD:%.*]] = icmp ult i32 [[TMP3]], [[TMP11]]
735 // CHECK-64-NEXT: br i1 [[IS_ACTIVE_THREAD]], label [[THEN2:%.*]], label [[ELSE3:%.*]]
737 // CHECK-64-NEXT: [[TMP12:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
738 // CHECK-64-NEXT: [[TMP13:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i64 0, i64 0
739 // CHECK-64-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP13]], align 8
740 // CHECK-64-NEXT: [[TMP15:%.*]] = load volatile i32, ptr addrspace(3) [[TMP12]], align 4
741 // CHECK-64-NEXT: store i32 [[TMP15]], ptr [[TMP14]], align 4
742 // CHECK-64-NEXT: br label [[IFCONT4:%.*]]
744 // CHECK-64-NEXT: br label [[IFCONT4]]
745 // CHECK-64: ifcont4:
746 // CHECK-64-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
747 // CHECK-64-NEXT: [[WARP_MASTER5:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
748 // CHECK-64-NEXT: br i1 [[WARP_MASTER5]], label [[THEN6:%.*]], label [[ELSE7:%.*]]
750 // CHECK-64-NEXT: [[TMP16:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i64 0, i64 1
751 // CHECK-64-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8
752 // CHECK-64-NEXT: [[TMP18:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
753 // CHECK-64-NEXT: [[TMP19:%.*]] = load i16, ptr [[TMP17]], align 2
754 // CHECK-64-NEXT: store volatile i16 [[TMP19]], ptr addrspace(3) [[TMP18]], align 2
755 // CHECK-64-NEXT: br label [[IFCONT8:%.*]]
757 // CHECK-64-NEXT: br label [[IFCONT8]]
758 // CHECK-64: ifcont8:
759 // CHECK-64-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
760 // CHECK-64-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTADDR1]], align 4
761 // CHECK-64-NEXT: [[IS_ACTIVE_THREAD9:%.*]] = icmp ult i32 [[TMP3]], [[TMP20]]
762 // CHECK-64-NEXT: br i1 [[IS_ACTIVE_THREAD9]], label [[THEN10:%.*]], label [[ELSE11:%.*]]
764 // CHECK-64-NEXT: [[TMP21:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
765 // CHECK-64-NEXT: [[TMP22:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i64 0, i64 1
766 // CHECK-64-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP22]], align 8
767 // CHECK-64-NEXT: [[TMP24:%.*]] = load volatile i16, ptr addrspace(3) [[TMP21]], align 2
768 // CHECK-64-NEXT: store i16 [[TMP24]], ptr [[TMP23]], align 2
769 // CHECK-64-NEXT: br label [[IFCONT12:%.*]]
771 // CHECK-64-NEXT: br label [[IFCONT12]]
772 // CHECK-64: ifcont12:
773 // CHECK-64-NEXT: ret void
776 // CHECK-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l24
777 // CHECK-32-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[E:%.*]]) #[[ATTR0:[0-9]+]] {
778 // CHECK-32-NEXT: entry:
779 // CHECK-32-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
780 // CHECK-32-NEXT: [[E_ADDR:%.*]] = alloca ptr, align 4
781 // CHECK-32-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x ptr], align 4
782 // CHECK-32-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
783 // CHECK-32-NEXT: store ptr [[E]], ptr [[E_ADDR]], align 4
784 // CHECK-32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[E_ADDR]], align 4
785 // CHECK-32-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l24_kernel_environment, ptr [[DYN_PTR]])
786 // CHECK-32-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
787 // CHECK-32-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
788 // CHECK-32: user_code.entry:
789 // CHECK-32-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1:[0-9]+]])
790 // CHECK-32-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
791 // CHECK-32-NEXT: store ptr [[TMP0]], ptr [[TMP3]], align 4
792 // CHECK-32-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l24_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i32 1)
793 // CHECK-32-NEXT: call void @__kmpc_target_deinit()
794 // CHECK-32-NEXT: ret void
795 // CHECK-32: worker.exit:
796 // CHECK-32-NEXT: ret void
799 // CHECK-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l24_omp_outlined
800 // CHECK-32-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[E:%.*]]) #[[ATTR1:[0-9]+]] {
801 // CHECK-32-NEXT: entry:
802 // CHECK-32-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
803 // CHECK-32-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
804 // CHECK-32-NEXT: [[E_ADDR:%.*]] = alloca ptr, align 4
805 // CHECK-32-NEXT: [[E1:%.*]] = alloca double, align 8
806 // CHECK-32-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 4
807 // CHECK-32-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
808 // CHECK-32-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
809 // CHECK-32-NEXT: store ptr [[E]], ptr [[E_ADDR]], align 4
810 // CHECK-32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[E_ADDR]], align 4
811 // CHECK-32-NEXT: store double 0.000000e+00, ptr [[E1]], align 8
812 // CHECK-32-NEXT: [[TMP1:%.*]] = load double, ptr [[E1]], align 8
813 // CHECK-32-NEXT: [[ADD:%.*]] = fadd double [[TMP1]], 5.000000e+00
814 // CHECK-32-NEXT: store double [[ADD]], ptr [[E1]], align 8
815 // CHECK-32-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
816 // CHECK-32-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
817 // CHECK-32-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
818 // CHECK-32-NEXT: store ptr [[E1]], ptr [[TMP4]], align 4
819 // CHECK-32-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_nvptx_parallel_reduce_nowait_v2(ptr @[[GLOB1]], i32 [[TMP3]], i32 1, i32 4, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @_omp_reduction_shuffle_and_reduce_func, ptr @_omp_reduction_inter_warp_copy_func)
820 // CHECK-32-NEXT: [[TMP6:%.*]] = icmp eq i32 [[TMP5]], 1
821 // CHECK-32-NEXT: br i1 [[TMP6]], label [[DOTOMP_REDUCTION_THEN:%.*]], label [[DOTOMP_REDUCTION_DONE:%.*]]
822 // CHECK-32: .omp.reduction.then:
823 // CHECK-32-NEXT: [[TMP7:%.*]] = load double, ptr [[TMP0]], align 8
824 // CHECK-32-NEXT: [[TMP8:%.*]] = load double, ptr [[E1]], align 8
825 // CHECK-32-NEXT: [[ADD2:%.*]] = fadd double [[TMP7]], [[TMP8]]
826 // CHECK-32-NEXT: store double [[ADD2]], ptr [[TMP0]], align 8
827 // CHECK-32-NEXT: call void @__kmpc_nvptx_end_reduce_nowait(i32 [[TMP3]])
828 // CHECK-32-NEXT: br label [[DOTOMP_REDUCTION_DONE]]
829 // CHECK-32: .omp.reduction.done:
830 // CHECK-32-NEXT: ret void
833 // CHECK-32-LABEL: define {{[^@]+}}@_omp_reduction_shuffle_and_reduce_func
834 // CHECK-32-SAME: (ptr noundef [[TMP0:%.*]], i16 noundef signext [[TMP1:%.*]], i16 noundef signext [[TMP2:%.*]], i16 noundef signext [[TMP3:%.*]]) #[[ATTR2:[0-9]+]] {
835 // CHECK-32-NEXT: entry:
836 // CHECK-32-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
837 // CHECK-32-NEXT: [[DOTADDR1:%.*]] = alloca i16, align 2
838 // CHECK-32-NEXT: [[DOTADDR2:%.*]] = alloca i16, align 2
839 // CHECK-32-NEXT: [[DOTADDR3:%.*]] = alloca i16, align 2
840 // CHECK-32-NEXT: [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST:%.*]] = alloca [1 x ptr], align 4
841 // CHECK-32-NEXT: [[DOTOMP_REDUCTION_ELEMENT:%.*]] = alloca double, align 8
842 // CHECK-32-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
843 // CHECK-32-NEXT: store i16 [[TMP1]], ptr [[DOTADDR1]], align 2
844 // CHECK-32-NEXT: store i16 [[TMP2]], ptr [[DOTADDR2]], align 2
845 // CHECK-32-NEXT: store i16 [[TMP3]], ptr [[DOTADDR3]], align 2
846 // CHECK-32-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR]], align 4
847 // CHECK-32-NEXT: [[TMP5:%.*]] = load i16, ptr [[DOTADDR1]], align 2
848 // CHECK-32-NEXT: [[TMP6:%.*]] = load i16, ptr [[DOTADDR2]], align 2
849 // CHECK-32-NEXT: [[TMP7:%.*]] = load i16, ptr [[DOTADDR3]], align 2
850 // CHECK-32-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i32 0, i32 0
851 // CHECK-32-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 4
852 // CHECK-32-NEXT: [[TMP10:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 0
853 // CHECK-32-NEXT: [[TMP11:%.*]] = getelementptr double, ptr [[TMP9]], i32 1
854 // CHECK-32-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP9]], align 8
855 // CHECK-32-NEXT: [[TMP13:%.*]] = call i32 @__kmpc_get_warp_size()
856 // CHECK-32-NEXT: [[TMP14:%.*]] = trunc i32 [[TMP13]] to i16
857 // CHECK-32-NEXT: [[TMP15:%.*]] = call i64 @__kmpc_shuffle_int64(i64 [[TMP12]], i16 [[TMP6]], i16 [[TMP14]])
858 // CHECK-32-NEXT: store i64 [[TMP15]], ptr [[DOTOMP_REDUCTION_ELEMENT]], align 8
859 // CHECK-32-NEXT: [[TMP16:%.*]] = getelementptr i64, ptr [[TMP9]], i32 1
860 // CHECK-32-NEXT: [[TMP17:%.*]] = getelementptr i64, ptr [[DOTOMP_REDUCTION_ELEMENT]], i32 1
861 // CHECK-32-NEXT: store ptr [[DOTOMP_REDUCTION_ELEMENT]], ptr [[TMP10]], align 4
862 // CHECK-32-NEXT: [[TMP18:%.*]] = icmp eq i16 [[TMP7]], 0
863 // CHECK-32-NEXT: [[TMP19:%.*]] = icmp eq i16 [[TMP7]], 1
864 // CHECK-32-NEXT: [[TMP20:%.*]] = icmp ult i16 [[TMP5]], [[TMP6]]
865 // CHECK-32-NEXT: [[TMP21:%.*]] = and i1 [[TMP19]], [[TMP20]]
866 // CHECK-32-NEXT: [[TMP22:%.*]] = icmp eq i16 [[TMP7]], 2
867 // CHECK-32-NEXT: [[TMP23:%.*]] = and i16 [[TMP5]], 1
868 // CHECK-32-NEXT: [[TMP24:%.*]] = icmp eq i16 [[TMP23]], 0
869 // CHECK-32-NEXT: [[TMP25:%.*]] = and i1 [[TMP22]], [[TMP24]]
870 // CHECK-32-NEXT: [[TMP26:%.*]] = icmp sgt i16 [[TMP6]], 0
871 // CHECK-32-NEXT: [[TMP27:%.*]] = and i1 [[TMP25]], [[TMP26]]
872 // CHECK-32-NEXT: [[TMP28:%.*]] = or i1 [[TMP18]], [[TMP21]]
873 // CHECK-32-NEXT: [[TMP29:%.*]] = or i1 [[TMP28]], [[TMP27]]
874 // CHECK-32-NEXT: br i1 [[TMP29]], label [[THEN:%.*]], label [[ELSE:%.*]]
876 // CHECK-32-NEXT: call void @"{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l24_omp_outlined_omp$reduction$reduction_func"(ptr [[TMP4]], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]]) #[[ATTR3:[0-9]+]]
877 // CHECK-32-NEXT: br label [[IFCONT:%.*]]
879 // CHECK-32-NEXT: br label [[IFCONT]]
881 // CHECK-32-NEXT: [[TMP30:%.*]] = icmp eq i16 [[TMP7]], 1
882 // CHECK-32-NEXT: [[TMP31:%.*]] = icmp uge i16 [[TMP5]], [[TMP6]]
883 // CHECK-32-NEXT: [[TMP32:%.*]] = and i1 [[TMP30]], [[TMP31]]
884 // CHECK-32-NEXT: br i1 [[TMP32]], label [[THEN4:%.*]], label [[ELSE5:%.*]]
886 // CHECK-32-NEXT: [[TMP33:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 0
887 // CHECK-32-NEXT: [[TMP34:%.*]] = load ptr, ptr [[TMP33]], align 4
888 // CHECK-32-NEXT: [[TMP35:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i32 0, i32 0
889 // CHECK-32-NEXT: [[TMP36:%.*]] = load ptr, ptr [[TMP35]], align 4
890 // CHECK-32-NEXT: [[TMP37:%.*]] = load double, ptr [[TMP34]], align 8
891 // CHECK-32-NEXT: store double [[TMP37]], ptr [[TMP36]], align 8
892 // CHECK-32-NEXT: br label [[IFCONT6:%.*]]
894 // CHECK-32-NEXT: br label [[IFCONT6]]
895 // CHECK-32: ifcont6:
896 // CHECK-32-NEXT: ret void
899 // CHECK-32-LABEL: define {{[^@]+}}@_omp_reduction_inter_warp_copy_func
900 // CHECK-32-SAME: (ptr noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]]) #[[ATTR2]] {
901 // CHECK-32-NEXT: entry:
902 // CHECK-32-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
903 // CHECK-32-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
904 // CHECK-32-NEXT: [[DOTCNT_ADDR:%.*]] = alloca i32, align 4
905 // CHECK-32-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
906 // CHECK-32-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
907 // CHECK-32-NEXT: store i32 [[TMP1]], ptr [[DOTADDR1]], align 4
908 // CHECK-32-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
909 // CHECK-32-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
910 // CHECK-32-NEXT: [[NVPTX_LANE_ID:%.*]] = and i32 [[TMP4]], 31
911 // CHECK-32-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
912 // CHECK-32-NEXT: [[NVPTX_WARP_ID:%.*]] = ashr i32 [[TMP5]], 5
913 // CHECK-32-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTADDR]], align 4
914 // CHECK-32-NEXT: store i32 0, ptr [[DOTCNT_ADDR]], align 4
915 // CHECK-32-NEXT: br label [[PRECOND:%.*]]
916 // CHECK-32: precond:
917 // CHECK-32-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCNT_ADDR]], align 4
918 // CHECK-32-NEXT: [[TMP8:%.*]] = icmp ult i32 [[TMP7]], 2
919 // CHECK-32-NEXT: br i1 [[TMP8]], label [[BODY:%.*]], label [[EXIT:%.*]]
921 // CHECK-32-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2:[0-9]+]], i32 [[TMP2]])
922 // CHECK-32-NEXT: [[WARP_MASTER:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
923 // CHECK-32-NEXT: br i1 [[WARP_MASTER]], label [[THEN:%.*]], label [[ELSE:%.*]]
925 // CHECK-32-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP6]], i32 0, i32 0
926 // CHECK-32-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 4
927 // CHECK-32-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[TMP10]], i32 [[TMP7]]
928 // CHECK-32-NEXT: [[TMP12:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
929 // CHECK-32-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP11]], align 4
930 // CHECK-32-NEXT: store volatile i32 [[TMP13]], ptr addrspace(3) [[TMP12]], align 4
931 // CHECK-32-NEXT: br label [[IFCONT:%.*]]
933 // CHECK-32-NEXT: br label [[IFCONT]]
935 // CHECK-32-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
936 // CHECK-32-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTADDR1]], align 4
937 // CHECK-32-NEXT: [[IS_ACTIVE_THREAD:%.*]] = icmp ult i32 [[TMP3]], [[TMP14]]
938 // CHECK-32-NEXT: br i1 [[IS_ACTIVE_THREAD]], label [[THEN2:%.*]], label [[ELSE3:%.*]]
940 // CHECK-32-NEXT: [[TMP15:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
941 // CHECK-32-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP6]], i32 0, i32 0
942 // CHECK-32-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 4
943 // CHECK-32-NEXT: [[TMP18:%.*]] = getelementptr i32, ptr [[TMP17]], i32 [[TMP7]]
944 // CHECK-32-NEXT: [[TMP19:%.*]] = load volatile i32, ptr addrspace(3) [[TMP15]], align 4
945 // CHECK-32-NEXT: store i32 [[TMP19]], ptr [[TMP18]], align 4
946 // CHECK-32-NEXT: br label [[IFCONT4:%.*]]
948 // CHECK-32-NEXT: br label [[IFCONT4]]
949 // CHECK-32: ifcont4:
950 // CHECK-32-NEXT: [[TMP20:%.*]] = add nsw i32 [[TMP7]], 1
951 // CHECK-32-NEXT: store i32 [[TMP20]], ptr [[DOTCNT_ADDR]], align 4
952 // CHECK-32-NEXT: br label [[PRECOND]]
954 // CHECK-32-NEXT: ret void
957 // CHECK-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l29
958 // CHECK-32-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[C:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[D:%.*]]) #[[ATTR0]] {
959 // CHECK-32-NEXT: entry:
960 // CHECK-32-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
961 // CHECK-32-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
962 // CHECK-32-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 4
963 // CHECK-32-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [2 x ptr], align 4
964 // CHECK-32-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
965 // CHECK-32-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
966 // CHECK-32-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4
967 // CHECK-32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[C_ADDR]], align 4
968 // CHECK-32-NEXT: [[TMP1:%.*]] = load ptr, ptr [[D_ADDR]], align 4
969 // CHECK-32-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l29_kernel_environment, ptr [[DYN_PTR]])
970 // CHECK-32-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP2]], -1
971 // CHECK-32-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
972 // CHECK-32: user_code.entry:
973 // CHECK-32-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
974 // CHECK-32-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
975 // CHECK-32-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 4
976 // CHECK-32-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
977 // CHECK-32-NEXT: store ptr [[TMP1]], ptr [[TMP5]], align 4
978 // CHECK-32-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP3]], i32 1, i32 -1, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l29_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i32 2)
979 // CHECK-32-NEXT: call void @__kmpc_target_deinit()
980 // CHECK-32-NEXT: ret void
981 // CHECK-32: worker.exit:
982 // CHECK-32-NEXT: ret void
985 // CHECK-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l29_omp_outlined
986 // CHECK-32-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[C:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[D:%.*]]) #[[ATTR1]] {
987 // CHECK-32-NEXT: entry:
988 // CHECK-32-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
989 // CHECK-32-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
990 // CHECK-32-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
991 // CHECK-32-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 4
992 // CHECK-32-NEXT: [[C1:%.*]] = alloca i8, align 1
993 // CHECK-32-NEXT: [[D2:%.*]] = alloca float, align 4
994 // CHECK-32-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x ptr], align 4
995 // CHECK-32-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
996 // CHECK-32-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
997 // CHECK-32-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
998 // CHECK-32-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4
999 // CHECK-32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[C_ADDR]], align 4
1000 // CHECK-32-NEXT: [[TMP1:%.*]] = load ptr, ptr [[D_ADDR]], align 4
1001 // CHECK-32-NEXT: store i8 0, ptr [[C1]], align 1
1002 // CHECK-32-NEXT: store float 1.000000e+00, ptr [[D2]], align 4
1003 // CHECK-32-NEXT: [[TMP2:%.*]] = load i8, ptr [[C1]], align 1
1004 // CHECK-32-NEXT: [[CONV:%.*]] = sext i8 [[TMP2]] to i32
1005 // CHECK-32-NEXT: [[XOR:%.*]] = xor i32 [[CONV]], 2
1006 // CHECK-32-NEXT: [[CONV3:%.*]] = trunc i32 [[XOR]] to i8
1007 // CHECK-32-NEXT: store i8 [[CONV3]], ptr [[C1]], align 1
1008 // CHECK-32-NEXT: [[TMP3:%.*]] = load float, ptr [[D2]], align 4
1009 // CHECK-32-NEXT: [[MUL:%.*]] = fmul float [[TMP3]], 3.300000e+01
1010 // CHECK-32-NEXT: store float [[MUL]], ptr [[D2]], align 4
1011 // CHECK-32-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
1012 // CHECK-32-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4
1013 // CHECK-32-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
1014 // CHECK-32-NEXT: store ptr [[C1]], ptr [[TMP6]], align 4
1015 // CHECK-32-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 1
1016 // CHECK-32-NEXT: store ptr [[D2]], ptr [[TMP7]], align 4
1017 // CHECK-32-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_nvptx_parallel_reduce_nowait_v2(ptr @[[GLOB1]], i32 [[TMP5]], i32 2, i32 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @_omp_reduction_shuffle_and_reduce_func1, ptr @_omp_reduction_inter_warp_copy_func2)
1018 // CHECK-32-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP8]], 1
1019 // CHECK-32-NEXT: br i1 [[TMP9]], label [[DOTOMP_REDUCTION_THEN:%.*]], label [[DOTOMP_REDUCTION_DONE:%.*]]
1020 // CHECK-32: .omp.reduction.then:
1021 // CHECK-32-NEXT: [[TMP10:%.*]] = load i8, ptr [[TMP0]], align 1
1022 // CHECK-32-NEXT: [[CONV4:%.*]] = sext i8 [[TMP10]] to i32
1023 // CHECK-32-NEXT: [[TMP11:%.*]] = load i8, ptr [[C1]], align 1
1024 // CHECK-32-NEXT: [[CONV5:%.*]] = sext i8 [[TMP11]] to i32
1025 // CHECK-32-NEXT: [[XOR6:%.*]] = xor i32 [[CONV4]], [[CONV5]]
1026 // CHECK-32-NEXT: [[CONV7:%.*]] = trunc i32 [[XOR6]] to i8
1027 // CHECK-32-NEXT: store i8 [[CONV7]], ptr [[TMP0]], align 1
1028 // CHECK-32-NEXT: [[TMP12:%.*]] = load float, ptr [[TMP1]], align 4
1029 // CHECK-32-NEXT: [[TMP13:%.*]] = load float, ptr [[D2]], align 4
1030 // CHECK-32-NEXT: [[MUL8:%.*]] = fmul float [[TMP12]], [[TMP13]]
1031 // CHECK-32-NEXT: store float [[MUL8]], ptr [[TMP1]], align 4
1032 // CHECK-32-NEXT: call void @__kmpc_nvptx_end_reduce_nowait(i32 [[TMP5]])
1033 // CHECK-32-NEXT: br label [[DOTOMP_REDUCTION_DONE]]
1034 // CHECK-32: .omp.reduction.done:
1035 // CHECK-32-NEXT: ret void
1038 // CHECK-32-LABEL: define {{[^@]+}}@_omp_reduction_shuffle_and_reduce_func1
1039 // CHECK-32-SAME: (ptr noundef [[TMP0:%.*]], i16 noundef signext [[TMP1:%.*]], i16 noundef signext [[TMP2:%.*]], i16 noundef signext [[TMP3:%.*]]) #[[ATTR2]] {
1040 // CHECK-32-NEXT: entry:
1041 // CHECK-32-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
1042 // CHECK-32-NEXT: [[DOTADDR1:%.*]] = alloca i16, align 2
1043 // CHECK-32-NEXT: [[DOTADDR2:%.*]] = alloca i16, align 2
1044 // CHECK-32-NEXT: [[DOTADDR3:%.*]] = alloca i16, align 2
1045 // CHECK-32-NEXT: [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST:%.*]] = alloca [2 x ptr], align 4
1046 // CHECK-32-NEXT: [[DOTOMP_REDUCTION_ELEMENT:%.*]] = alloca i8, align 1
1047 // CHECK-32-NEXT: [[DOTOMP_REDUCTION_ELEMENT4:%.*]] = alloca float, align 4
1048 // CHECK-32-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
1049 // CHECK-32-NEXT: store i16 [[TMP1]], ptr [[DOTADDR1]], align 2
1050 // CHECK-32-NEXT: store i16 [[TMP2]], ptr [[DOTADDR2]], align 2
1051 // CHECK-32-NEXT: store i16 [[TMP3]], ptr [[DOTADDR3]], align 2
1052 // CHECK-32-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR]], align 4
1053 // CHECK-32-NEXT: [[TMP5:%.*]] = load i16, ptr [[DOTADDR1]], align 2
1054 // CHECK-32-NEXT: [[TMP6:%.*]] = load i16, ptr [[DOTADDR2]], align 2
1055 // CHECK-32-NEXT: [[TMP7:%.*]] = load i16, ptr [[DOTADDR3]], align 2
1056 // CHECK-32-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i32 0, i32 0
1057 // CHECK-32-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 4
1058 // CHECK-32-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 0
1059 // CHECK-32-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[TMP9]], i32 1
1060 // CHECK-32-NEXT: [[TMP12:%.*]] = load i8, ptr [[TMP9]], align 1
1061 // CHECK-32-NEXT: [[TMP13:%.*]] = sext i8 [[TMP12]] to i32
1062 // CHECK-32-NEXT: [[TMP14:%.*]] = call i32 @__kmpc_get_warp_size()
1063 // CHECK-32-NEXT: [[TMP15:%.*]] = trunc i32 [[TMP14]] to i16
1064 // CHECK-32-NEXT: [[TMP16:%.*]] = call i32 @__kmpc_shuffle_int32(i32 [[TMP13]], i16 [[TMP6]], i16 [[TMP15]])
1065 // CHECK-32-NEXT: [[TMP17:%.*]] = trunc i32 [[TMP16]] to i8
1066 // CHECK-32-NEXT: store i8 [[TMP17]], ptr [[DOTOMP_REDUCTION_ELEMENT]], align 1
1067 // CHECK-32-NEXT: [[TMP18:%.*]] = getelementptr i8, ptr [[TMP9]], i32 1
1068 // CHECK-32-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[DOTOMP_REDUCTION_ELEMENT]], i32 1
1069 // CHECK-32-NEXT: store ptr [[DOTOMP_REDUCTION_ELEMENT]], ptr [[TMP10]], align 4
1070 // CHECK-32-NEXT: [[TMP20:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i32 0, i32 1
1071 // CHECK-32-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP20]], align 4
1072 // CHECK-32-NEXT: [[TMP22:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 1
1073 // CHECK-32-NEXT: [[TMP23:%.*]] = getelementptr float, ptr [[TMP21]], i32 1
1074 // CHECK-32-NEXT: [[TMP24:%.*]] = load i32, ptr [[TMP21]], align 4
1075 // CHECK-32-NEXT: [[TMP25:%.*]] = call i32 @__kmpc_get_warp_size()
1076 // CHECK-32-NEXT: [[TMP26:%.*]] = trunc i32 [[TMP25]] to i16
1077 // CHECK-32-NEXT: [[TMP27:%.*]] = call i32 @__kmpc_shuffle_int32(i32 [[TMP24]], i16 [[TMP6]], i16 [[TMP26]])
1078 // CHECK-32-NEXT: store i32 [[TMP27]], ptr [[DOTOMP_REDUCTION_ELEMENT4]], align 4
1079 // CHECK-32-NEXT: [[TMP28:%.*]] = getelementptr i32, ptr [[TMP21]], i32 1
1080 // CHECK-32-NEXT: [[TMP29:%.*]] = getelementptr i32, ptr [[DOTOMP_REDUCTION_ELEMENT4]], i32 1
1081 // CHECK-32-NEXT: store ptr [[DOTOMP_REDUCTION_ELEMENT4]], ptr [[TMP22]], align 4
1082 // CHECK-32-NEXT: [[TMP30:%.*]] = icmp eq i16 [[TMP7]], 0
1083 // CHECK-32-NEXT: [[TMP31:%.*]] = icmp eq i16 [[TMP7]], 1
1084 // CHECK-32-NEXT: [[TMP32:%.*]] = icmp ult i16 [[TMP5]], [[TMP6]]
1085 // CHECK-32-NEXT: [[TMP33:%.*]] = and i1 [[TMP31]], [[TMP32]]
1086 // CHECK-32-NEXT: [[TMP34:%.*]] = icmp eq i16 [[TMP7]], 2
1087 // CHECK-32-NEXT: [[TMP35:%.*]] = and i16 [[TMP5]], 1
1088 // CHECK-32-NEXT: [[TMP36:%.*]] = icmp eq i16 [[TMP35]], 0
1089 // CHECK-32-NEXT: [[TMP37:%.*]] = and i1 [[TMP34]], [[TMP36]]
1090 // CHECK-32-NEXT: [[TMP38:%.*]] = icmp sgt i16 [[TMP6]], 0
1091 // CHECK-32-NEXT: [[TMP39:%.*]] = and i1 [[TMP37]], [[TMP38]]
1092 // CHECK-32-NEXT: [[TMP40:%.*]] = or i1 [[TMP30]], [[TMP33]]
1093 // CHECK-32-NEXT: [[TMP41:%.*]] = or i1 [[TMP40]], [[TMP39]]
1094 // CHECK-32-NEXT: br i1 [[TMP41]], label [[THEN:%.*]], label [[ELSE:%.*]]
1096 // CHECK-32-NEXT: call void @"{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l29_omp_outlined_omp$reduction$reduction_func"(ptr [[TMP4]], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]]) #[[ATTR3]]
1097 // CHECK-32-NEXT: br label [[IFCONT:%.*]]
1099 // CHECK-32-NEXT: br label [[IFCONT]]
1100 // CHECK-32: ifcont:
1101 // CHECK-32-NEXT: [[TMP42:%.*]] = icmp eq i16 [[TMP7]], 1
1102 // CHECK-32-NEXT: [[TMP43:%.*]] = icmp uge i16 [[TMP5]], [[TMP6]]
1103 // CHECK-32-NEXT: [[TMP44:%.*]] = and i1 [[TMP42]], [[TMP43]]
1104 // CHECK-32-NEXT: br i1 [[TMP44]], label [[THEN5:%.*]], label [[ELSE6:%.*]]
1106 // CHECK-32-NEXT: [[TMP45:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 0
1107 // CHECK-32-NEXT: [[TMP46:%.*]] = load ptr, ptr [[TMP45]], align 4
1108 // CHECK-32-NEXT: [[TMP47:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i32 0, i32 0
1109 // CHECK-32-NEXT: [[TMP48:%.*]] = load ptr, ptr [[TMP47]], align 4
1110 // CHECK-32-NEXT: [[TMP49:%.*]] = load i8, ptr [[TMP46]], align 1
1111 // CHECK-32-NEXT: store i8 [[TMP49]], ptr [[TMP48]], align 1
1112 // CHECK-32-NEXT: [[TMP50:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 1
1113 // CHECK-32-NEXT: [[TMP51:%.*]] = load ptr, ptr [[TMP50]], align 4
1114 // CHECK-32-NEXT: [[TMP52:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i32 0, i32 1
1115 // CHECK-32-NEXT: [[TMP53:%.*]] = load ptr, ptr [[TMP52]], align 4
1116 // CHECK-32-NEXT: [[TMP54:%.*]] = load float, ptr [[TMP51]], align 4
1117 // CHECK-32-NEXT: store float [[TMP54]], ptr [[TMP53]], align 4
1118 // CHECK-32-NEXT: br label [[IFCONT7:%.*]]
1120 // CHECK-32-NEXT: br label [[IFCONT7]]
1121 // CHECK-32: ifcont7:
1122 // CHECK-32-NEXT: ret void
1125 // CHECK-32-LABEL: define {{[^@]+}}@_omp_reduction_inter_warp_copy_func2
1126 // CHECK-32-SAME: (ptr noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]]) #[[ATTR2]] {
1127 // CHECK-32-NEXT: entry:
1128 // CHECK-32-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
1129 // CHECK-32-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
1130 // CHECK-32-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
1131 // CHECK-32-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
1132 // CHECK-32-NEXT: store i32 [[TMP1]], ptr [[DOTADDR1]], align 4
1133 // CHECK-32-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
1134 // CHECK-32-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
1135 // CHECK-32-NEXT: [[NVPTX_LANE_ID:%.*]] = and i32 [[TMP4]], 31
1136 // CHECK-32-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
1137 // CHECK-32-NEXT: [[NVPTX_WARP_ID:%.*]] = ashr i32 [[TMP5]], 5
1138 // CHECK-32-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTADDR]], align 4
1139 // CHECK-32-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
1140 // CHECK-32-NEXT: [[WARP_MASTER:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
1141 // CHECK-32-NEXT: br i1 [[WARP_MASTER]], label [[THEN:%.*]], label [[ELSE:%.*]]
1143 // CHECK-32-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i32 0, i32 0
1144 // CHECK-32-NEXT: [[TMP8:%.*]] = load ptr, ptr [[TMP7]], align 4
1145 // CHECK-32-NEXT: [[TMP9:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
1146 // CHECK-32-NEXT: [[TMP10:%.*]] = load i8, ptr [[TMP8]], align 1
1147 // CHECK-32-NEXT: store volatile i8 [[TMP10]], ptr addrspace(3) [[TMP9]], align 1
1148 // CHECK-32-NEXT: br label [[IFCONT:%.*]]
1150 // CHECK-32-NEXT: br label [[IFCONT]]
1151 // CHECK-32: ifcont:
1152 // CHECK-32-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
1153 // CHECK-32-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTADDR1]], align 4
1154 // CHECK-32-NEXT: [[IS_ACTIVE_THREAD:%.*]] = icmp ult i32 [[TMP3]], [[TMP11]]
1155 // CHECK-32-NEXT: br i1 [[IS_ACTIVE_THREAD]], label [[THEN2:%.*]], label [[ELSE3:%.*]]
1157 // CHECK-32-NEXT: [[TMP12:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
1158 // CHECK-32-NEXT: [[TMP13:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i32 0, i32 0
1159 // CHECK-32-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP13]], align 4
1160 // CHECK-32-NEXT: [[TMP15:%.*]] = load volatile i8, ptr addrspace(3) [[TMP12]], align 1
1161 // CHECK-32-NEXT: store i8 [[TMP15]], ptr [[TMP14]], align 1
1162 // CHECK-32-NEXT: br label [[IFCONT4:%.*]]
1164 // CHECK-32-NEXT: br label [[IFCONT4]]
1165 // CHECK-32: ifcont4:
1166 // CHECK-32-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
1167 // CHECK-32-NEXT: [[WARP_MASTER5:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
1168 // CHECK-32-NEXT: br i1 [[WARP_MASTER5]], label [[THEN6:%.*]], label [[ELSE7:%.*]]
1170 // CHECK-32-NEXT: [[TMP16:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i32 0, i32 1
1171 // CHECK-32-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 4
1172 // CHECK-32-NEXT: [[TMP18:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
1173 // CHECK-32-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP17]], align 4
1174 // CHECK-32-NEXT: store volatile i32 [[TMP19]], ptr addrspace(3) [[TMP18]], align 4
1175 // CHECK-32-NEXT: br label [[IFCONT8:%.*]]
1177 // CHECK-32-NEXT: br label [[IFCONT8]]
1178 // CHECK-32: ifcont8:
1179 // CHECK-32-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
1180 // CHECK-32-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTADDR1]], align 4
1181 // CHECK-32-NEXT: [[IS_ACTIVE_THREAD9:%.*]] = icmp ult i32 [[TMP3]], [[TMP20]]
1182 // CHECK-32-NEXT: br i1 [[IS_ACTIVE_THREAD9]], label [[THEN10:%.*]], label [[ELSE11:%.*]]
1183 // CHECK-32: then10:
1184 // CHECK-32-NEXT: [[TMP21:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
1185 // CHECK-32-NEXT: [[TMP22:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i32 0, i32 1
1186 // CHECK-32-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP22]], align 4
1187 // CHECK-32-NEXT: [[TMP24:%.*]] = load volatile i32, ptr addrspace(3) [[TMP21]], align 4
1188 // CHECK-32-NEXT: store i32 [[TMP24]], ptr [[TMP23]], align 4
1189 // CHECK-32-NEXT: br label [[IFCONT12:%.*]]
1190 // CHECK-32: else11:
1191 // CHECK-32-NEXT: br label [[IFCONT12]]
1192 // CHECK-32: ifcont12:
1193 // CHECK-32-NEXT: ret void
1196 // CHECK-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l35
1197 // CHECK-32-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[B:%.*]]) #[[ATTR0]] {
1198 // CHECK-32-NEXT: entry:
1199 // CHECK-32-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
1200 // CHECK-32-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
1201 // CHECK-32-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
1202 // CHECK-32-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [2 x ptr], align 4
1203 // CHECK-32-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
1204 // CHECK-32-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
1205 // CHECK-32-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
1206 // CHECK-32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
1207 // CHECK-32-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4
1208 // CHECK-32-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l35_kernel_environment, ptr [[DYN_PTR]])
1209 // CHECK-32-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP2]], -1
1210 // CHECK-32-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
1211 // CHECK-32: user_code.entry:
1212 // CHECK-32-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
1213 // CHECK-32-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
1214 // CHECK-32-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 4
1215 // CHECK-32-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
1216 // CHECK-32-NEXT: store ptr [[TMP1]], ptr [[TMP5]], align 4
1217 // CHECK-32-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP3]], i32 1, i32 -1, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l35_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i32 2)
1218 // CHECK-32-NEXT: call void @__kmpc_target_deinit()
1219 // CHECK-32-NEXT: ret void
1220 // CHECK-32: worker.exit:
1221 // CHECK-32-NEXT: ret void
1224 // CHECK-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l35_omp_outlined
1225 // CHECK-32-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[B:%.*]]) #[[ATTR1]] {
1226 // CHECK-32-NEXT: entry:
1227 // CHECK-32-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
1228 // CHECK-32-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
1229 // CHECK-32-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
1230 // CHECK-32-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
1231 // CHECK-32-NEXT: [[A1:%.*]] = alloca i32, align 4
1232 // CHECK-32-NEXT: [[B2:%.*]] = alloca i16, align 2
1233 // CHECK-32-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x ptr], align 4
1234 // CHECK-32-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
1235 // CHECK-32-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
1236 // CHECK-32-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
1237 // CHECK-32-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
1238 // CHECK-32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
1239 // CHECK-32-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4
1240 // CHECK-32-NEXT: store i32 0, ptr [[A1]], align 4
1241 // CHECK-32-NEXT: store i16 -32768, ptr [[B2]], align 2
1242 // CHECK-32-NEXT: [[TMP2:%.*]] = load i32, ptr [[A1]], align 4
1243 // CHECK-32-NEXT: [[OR:%.*]] = or i32 [[TMP2]], 1
1244 // CHECK-32-NEXT: store i32 [[OR]], ptr [[A1]], align 4
1245 // CHECK-32-NEXT: [[TMP3:%.*]] = load i16, ptr [[B2]], align 2
1246 // CHECK-32-NEXT: [[CONV:%.*]] = sext i16 [[TMP3]] to i32
1247 // CHECK-32-NEXT: [[CMP:%.*]] = icmp sgt i32 99, [[CONV]]
1248 // CHECK-32-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1249 // CHECK-32: cond.true:
1250 // CHECK-32-NEXT: br label [[COND_END:%.*]]
1251 // CHECK-32: cond.false:
1252 // CHECK-32-NEXT: [[TMP4:%.*]] = load i16, ptr [[B2]], align 2
1253 // CHECK-32-NEXT: [[CONV3:%.*]] = sext i16 [[TMP4]] to i32
1254 // CHECK-32-NEXT: br label [[COND_END]]
1255 // CHECK-32: cond.end:
1256 // CHECK-32-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[CONV3]], [[COND_FALSE]] ]
1257 // CHECK-32-NEXT: [[CONV4:%.*]] = trunc i32 [[COND]] to i16
1258 // CHECK-32-NEXT: store i16 [[CONV4]], ptr [[B2]], align 2
1259 // CHECK-32-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
1260 // CHECK-32-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
1261 // CHECK-32-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
1262 // CHECK-32-NEXT: store ptr [[A1]], ptr [[TMP7]], align 4
1263 // CHECK-32-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 1
1264 // CHECK-32-NEXT: store ptr [[B2]], ptr [[TMP8]], align 4
1265 // CHECK-32-NEXT: [[TMP9:%.*]] = call i32 @__kmpc_nvptx_parallel_reduce_nowait_v2(ptr @[[GLOB1]], i32 [[TMP6]], i32 2, i32 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @_omp_reduction_shuffle_and_reduce_func3, ptr @_omp_reduction_inter_warp_copy_func4)
1266 // CHECK-32-NEXT: [[TMP10:%.*]] = icmp eq i32 [[TMP9]], 1
1267 // CHECK-32-NEXT: br i1 [[TMP10]], label [[DOTOMP_REDUCTION_THEN:%.*]], label [[DOTOMP_REDUCTION_DONE:%.*]]
1268 // CHECK-32: .omp.reduction.then:
1269 // CHECK-32-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP0]], align 4
1270 // CHECK-32-NEXT: [[TMP12:%.*]] = load i32, ptr [[A1]], align 4
1271 // CHECK-32-NEXT: [[OR5:%.*]] = or i32 [[TMP11]], [[TMP12]]
1272 // CHECK-32-NEXT: store i32 [[OR5]], ptr [[TMP0]], align 4
1273 // CHECK-32-NEXT: [[TMP13:%.*]] = load i16, ptr [[TMP1]], align 2
1274 // CHECK-32-NEXT: [[CONV6:%.*]] = sext i16 [[TMP13]] to i32
1275 // CHECK-32-NEXT: [[TMP14:%.*]] = load i16, ptr [[B2]], align 2
1276 // CHECK-32-NEXT: [[CONV7:%.*]] = sext i16 [[TMP14]] to i32
1277 // CHECK-32-NEXT: [[CMP8:%.*]] = icmp sgt i32 [[CONV6]], [[CONV7]]
1278 // CHECK-32-NEXT: br i1 [[CMP8]], label [[COND_TRUE9:%.*]], label [[COND_FALSE10:%.*]]
1279 // CHECK-32: cond.true9:
1280 // CHECK-32-NEXT: [[TMP15:%.*]] = load i16, ptr [[TMP1]], align 2
1281 // CHECK-32-NEXT: br label [[COND_END11:%.*]]
1282 // CHECK-32: cond.false10:
1283 // CHECK-32-NEXT: [[TMP16:%.*]] = load i16, ptr [[B2]], align 2
1284 // CHECK-32-NEXT: br label [[COND_END11]]
1285 // CHECK-32: cond.end11:
1286 // CHECK-32-NEXT: [[COND12:%.*]] = phi i16 [ [[TMP15]], [[COND_TRUE9]] ], [ [[TMP16]], [[COND_FALSE10]] ]
1287 // CHECK-32-NEXT: store i16 [[COND12]], ptr [[TMP1]], align 2
1288 // CHECK-32-NEXT: call void @__kmpc_nvptx_end_reduce_nowait(i32 [[TMP6]])
1289 // CHECK-32-NEXT: br label [[DOTOMP_REDUCTION_DONE]]
1290 // CHECK-32: .omp.reduction.done:
1291 // CHECK-32-NEXT: ret void
1294 // CHECK-32-LABEL: define {{[^@]+}}@_omp_reduction_shuffle_and_reduce_func3
1295 // CHECK-32-SAME: (ptr noundef [[TMP0:%.*]], i16 noundef signext [[TMP1:%.*]], i16 noundef signext [[TMP2:%.*]], i16 noundef signext [[TMP3:%.*]]) #[[ATTR2]] {
1296 // CHECK-32-NEXT: entry:
1297 // CHECK-32-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
1298 // CHECK-32-NEXT: [[DOTADDR1:%.*]] = alloca i16, align 2
1299 // CHECK-32-NEXT: [[DOTADDR2:%.*]] = alloca i16, align 2
1300 // CHECK-32-NEXT: [[DOTADDR3:%.*]] = alloca i16, align 2
1301 // CHECK-32-NEXT: [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST:%.*]] = alloca [2 x ptr], align 4
1302 // CHECK-32-NEXT: [[DOTOMP_REDUCTION_ELEMENT:%.*]] = alloca i32, align 4
1303 // CHECK-32-NEXT: [[DOTOMP_REDUCTION_ELEMENT4:%.*]] = alloca i16, align 2
1304 // CHECK-32-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
1305 // CHECK-32-NEXT: store i16 [[TMP1]], ptr [[DOTADDR1]], align 2
1306 // CHECK-32-NEXT: store i16 [[TMP2]], ptr [[DOTADDR2]], align 2
1307 // CHECK-32-NEXT: store i16 [[TMP3]], ptr [[DOTADDR3]], align 2
1308 // CHECK-32-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR]], align 4
1309 // CHECK-32-NEXT: [[TMP5:%.*]] = load i16, ptr [[DOTADDR1]], align 2
1310 // CHECK-32-NEXT: [[TMP6:%.*]] = load i16, ptr [[DOTADDR2]], align 2
1311 // CHECK-32-NEXT: [[TMP7:%.*]] = load i16, ptr [[DOTADDR3]], align 2
1312 // CHECK-32-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i32 0, i32 0
1313 // CHECK-32-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 4
1314 // CHECK-32-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 0
1315 // CHECK-32-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[TMP9]], i32 1
1316 // CHECK-32-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP9]], align 4
1317 // CHECK-32-NEXT: [[TMP13:%.*]] = call i32 @__kmpc_get_warp_size()
1318 // CHECK-32-NEXT: [[TMP14:%.*]] = trunc i32 [[TMP13]] to i16
1319 // CHECK-32-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_shuffle_int32(i32 [[TMP12]], i16 [[TMP6]], i16 [[TMP14]])
1320 // CHECK-32-NEXT: store i32 [[TMP15]], ptr [[DOTOMP_REDUCTION_ELEMENT]], align 4
1321 // CHECK-32-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[TMP9]], i32 1
1322 // CHECK-32-NEXT: [[TMP17:%.*]] = getelementptr i32, ptr [[DOTOMP_REDUCTION_ELEMENT]], i32 1
1323 // CHECK-32-NEXT: store ptr [[DOTOMP_REDUCTION_ELEMENT]], ptr [[TMP10]], align 4
1324 // CHECK-32-NEXT: [[TMP18:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i32 0, i32 1
1325 // CHECK-32-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP18]], align 4
1326 // CHECK-32-NEXT: [[TMP20:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 1
1327 // CHECK-32-NEXT: [[TMP21:%.*]] = getelementptr i16, ptr [[TMP19]], i32 1
1328 // CHECK-32-NEXT: [[TMP22:%.*]] = load i16, ptr [[TMP19]], align 2
1329 // CHECK-32-NEXT: [[TMP23:%.*]] = sext i16 [[TMP22]] to i32
1330 // CHECK-32-NEXT: [[TMP24:%.*]] = call i32 @__kmpc_get_warp_size()
1331 // CHECK-32-NEXT: [[TMP25:%.*]] = trunc i32 [[TMP24]] to i16
1332 // CHECK-32-NEXT: [[TMP26:%.*]] = call i32 @__kmpc_shuffle_int32(i32 [[TMP23]], i16 [[TMP6]], i16 [[TMP25]])
1333 // CHECK-32-NEXT: [[TMP27:%.*]] = trunc i32 [[TMP26]] to i16
1334 // CHECK-32-NEXT: store i16 [[TMP27]], ptr [[DOTOMP_REDUCTION_ELEMENT4]], align 2
1335 // CHECK-32-NEXT: [[TMP28:%.*]] = getelementptr i16, ptr [[TMP19]], i32 1
1336 // CHECK-32-NEXT: [[TMP29:%.*]] = getelementptr i16, ptr [[DOTOMP_REDUCTION_ELEMENT4]], i32 1
1337 // CHECK-32-NEXT: store ptr [[DOTOMP_REDUCTION_ELEMENT4]], ptr [[TMP20]], align 4
1338 // CHECK-32-NEXT: [[TMP30:%.*]] = icmp eq i16 [[TMP7]], 0
1339 // CHECK-32-NEXT: [[TMP31:%.*]] = icmp eq i16 [[TMP7]], 1
1340 // CHECK-32-NEXT: [[TMP32:%.*]] = icmp ult i16 [[TMP5]], [[TMP6]]
1341 // CHECK-32-NEXT: [[TMP33:%.*]] = and i1 [[TMP31]], [[TMP32]]
1342 // CHECK-32-NEXT: [[TMP34:%.*]] = icmp eq i16 [[TMP7]], 2
1343 // CHECK-32-NEXT: [[TMP35:%.*]] = and i16 [[TMP5]], 1
1344 // CHECK-32-NEXT: [[TMP36:%.*]] = icmp eq i16 [[TMP35]], 0
1345 // CHECK-32-NEXT: [[TMP37:%.*]] = and i1 [[TMP34]], [[TMP36]]
1346 // CHECK-32-NEXT: [[TMP38:%.*]] = icmp sgt i16 [[TMP6]], 0
1347 // CHECK-32-NEXT: [[TMP39:%.*]] = and i1 [[TMP37]], [[TMP38]]
1348 // CHECK-32-NEXT: [[TMP40:%.*]] = or i1 [[TMP30]], [[TMP33]]
1349 // CHECK-32-NEXT: [[TMP41:%.*]] = or i1 [[TMP40]], [[TMP39]]
1350 // CHECK-32-NEXT: br i1 [[TMP41]], label [[THEN:%.*]], label [[ELSE:%.*]]
1352 // CHECK-32-NEXT: call void @"{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l35_omp_outlined_omp$reduction$reduction_func"(ptr [[TMP4]], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]]) #[[ATTR3]]
1353 // CHECK-32-NEXT: br label [[IFCONT:%.*]]
1355 // CHECK-32-NEXT: br label [[IFCONT]]
1356 // CHECK-32: ifcont:
1357 // CHECK-32-NEXT: [[TMP42:%.*]] = icmp eq i16 [[TMP7]], 1
1358 // CHECK-32-NEXT: [[TMP43:%.*]] = icmp uge i16 [[TMP5]], [[TMP6]]
1359 // CHECK-32-NEXT: [[TMP44:%.*]] = and i1 [[TMP42]], [[TMP43]]
1360 // CHECK-32-NEXT: br i1 [[TMP44]], label [[THEN5:%.*]], label [[ELSE6:%.*]]
1362 // CHECK-32-NEXT: [[TMP45:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 0
1363 // CHECK-32-NEXT: [[TMP46:%.*]] = load ptr, ptr [[TMP45]], align 4
1364 // CHECK-32-NEXT: [[TMP47:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i32 0, i32 0
1365 // CHECK-32-NEXT: [[TMP48:%.*]] = load ptr, ptr [[TMP47]], align 4
1366 // CHECK-32-NEXT: [[TMP49:%.*]] = load i32, ptr [[TMP46]], align 4
1367 // CHECK-32-NEXT: store i32 [[TMP49]], ptr [[TMP48]], align 4
1368 // CHECK-32-NEXT: [[TMP50:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 1
1369 // CHECK-32-NEXT: [[TMP51:%.*]] = load ptr, ptr [[TMP50]], align 4
1370 // CHECK-32-NEXT: [[TMP52:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i32 0, i32 1
1371 // CHECK-32-NEXT: [[TMP53:%.*]] = load ptr, ptr [[TMP52]], align 4
1372 // CHECK-32-NEXT: [[TMP54:%.*]] = load i16, ptr [[TMP51]], align 2
1373 // CHECK-32-NEXT: store i16 [[TMP54]], ptr [[TMP53]], align 2
1374 // CHECK-32-NEXT: br label [[IFCONT7:%.*]]
1376 // CHECK-32-NEXT: br label [[IFCONT7]]
1377 // CHECK-32: ifcont7:
1378 // CHECK-32-NEXT: ret void
1381 // CHECK-32-LABEL: define {{[^@]+}}@_omp_reduction_inter_warp_copy_func4
1382 // CHECK-32-SAME: (ptr noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]]) #[[ATTR2]] {
1383 // CHECK-32-NEXT: entry:
1384 // CHECK-32-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
1385 // CHECK-32-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
1386 // CHECK-32-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
1387 // CHECK-32-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
1388 // CHECK-32-NEXT: store i32 [[TMP1]], ptr [[DOTADDR1]], align 4
1389 // CHECK-32-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
1390 // CHECK-32-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
1391 // CHECK-32-NEXT: [[NVPTX_LANE_ID:%.*]] = and i32 [[TMP4]], 31
1392 // CHECK-32-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
1393 // CHECK-32-NEXT: [[NVPTX_WARP_ID:%.*]] = ashr i32 [[TMP5]], 5
1394 // CHECK-32-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTADDR]], align 4
1395 // CHECK-32-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
1396 // CHECK-32-NEXT: [[WARP_MASTER:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
1397 // CHECK-32-NEXT: br i1 [[WARP_MASTER]], label [[THEN:%.*]], label [[ELSE:%.*]]
1399 // CHECK-32-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i32 0, i32 0
1400 // CHECK-32-NEXT: [[TMP8:%.*]] = load ptr, ptr [[TMP7]], align 4
1401 // CHECK-32-NEXT: [[TMP9:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
1402 // CHECK-32-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP8]], align 4
1403 // CHECK-32-NEXT: store volatile i32 [[TMP10]], ptr addrspace(3) [[TMP9]], align 4
1404 // CHECK-32-NEXT: br label [[IFCONT:%.*]]
1406 // CHECK-32-NEXT: br label [[IFCONT]]
1407 // CHECK-32: ifcont:
1408 // CHECK-32-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
1409 // CHECK-32-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTADDR1]], align 4
1410 // CHECK-32-NEXT: [[IS_ACTIVE_THREAD:%.*]] = icmp ult i32 [[TMP3]], [[TMP11]]
1411 // CHECK-32-NEXT: br i1 [[IS_ACTIVE_THREAD]], label [[THEN2:%.*]], label [[ELSE3:%.*]]
1413 // CHECK-32-NEXT: [[TMP12:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
1414 // CHECK-32-NEXT: [[TMP13:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i32 0, i32 0
1415 // CHECK-32-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP13]], align 4
1416 // CHECK-32-NEXT: [[TMP15:%.*]] = load volatile i32, ptr addrspace(3) [[TMP12]], align 4
1417 // CHECK-32-NEXT: store i32 [[TMP15]], ptr [[TMP14]], align 4
1418 // CHECK-32-NEXT: br label [[IFCONT4:%.*]]
1420 // CHECK-32-NEXT: br label [[IFCONT4]]
1421 // CHECK-32: ifcont4:
1422 // CHECK-32-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
1423 // CHECK-32-NEXT: [[WARP_MASTER5:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
1424 // CHECK-32-NEXT: br i1 [[WARP_MASTER5]], label [[THEN6:%.*]], label [[ELSE7:%.*]]
1426 // CHECK-32-NEXT: [[TMP16:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i32 0, i32 1
1427 // CHECK-32-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 4
1428 // CHECK-32-NEXT: [[TMP18:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
1429 // CHECK-32-NEXT: [[TMP19:%.*]] = load i16, ptr [[TMP17]], align 2
1430 // CHECK-32-NEXT: store volatile i16 [[TMP19]], ptr addrspace(3) [[TMP18]], align 2
1431 // CHECK-32-NEXT: br label [[IFCONT8:%.*]]
1433 // CHECK-32-NEXT: br label [[IFCONT8]]
1434 // CHECK-32: ifcont8:
1435 // CHECK-32-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
1436 // CHECK-32-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTADDR1]], align 4
1437 // CHECK-32-NEXT: [[IS_ACTIVE_THREAD9:%.*]] = icmp ult i32 [[TMP3]], [[TMP20]]
1438 // CHECK-32-NEXT: br i1 [[IS_ACTIVE_THREAD9]], label [[THEN10:%.*]], label [[ELSE11:%.*]]
1439 // CHECK-32: then10:
1440 // CHECK-32-NEXT: [[TMP21:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
1441 // CHECK-32-NEXT: [[TMP22:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i32 0, i32 1
1442 // CHECK-32-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP22]], align 4
1443 // CHECK-32-NEXT: [[TMP24:%.*]] = load volatile i16, ptr addrspace(3) [[TMP21]], align 2
1444 // CHECK-32-NEXT: store i16 [[TMP24]], ptr [[TMP23]], align 2
1445 // CHECK-32-NEXT: br label [[IFCONT12:%.*]]
1446 // CHECK-32: else11:
1447 // CHECK-32-NEXT: br label [[IFCONT12]]
1448 // CHECK-32: ifcont12:
1449 // CHECK-32-NEXT: ret void
1452 // CHECK-32-EX-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l24
1453 // CHECK-32-EX-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[E:%.*]]) #[[ATTR0:[0-9]+]] {
1454 // CHECK-32-EX-NEXT: entry:
1455 // CHECK-32-EX-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
1456 // CHECK-32-EX-NEXT: [[E_ADDR:%.*]] = alloca ptr, align 4
1457 // CHECK-32-EX-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x ptr], align 4
1458 // CHECK-32-EX-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
1459 // CHECK-32-EX-NEXT: store ptr [[E]], ptr [[E_ADDR]], align 4
1460 // CHECK-32-EX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[E_ADDR]], align 4
1461 // CHECK-32-EX-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l24_kernel_environment, ptr [[DYN_PTR]])
1462 // CHECK-32-EX-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
1463 // CHECK-32-EX-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
1464 // CHECK-32-EX: user_code.entry:
1465 // CHECK-32-EX-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1:[0-9]+]])
1466 // CHECK-32-EX-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
1467 // CHECK-32-EX-NEXT: store ptr [[TMP0]], ptr [[TMP3]], align 4
1468 // CHECK-32-EX-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l24_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i32 1)
1469 // CHECK-32-EX-NEXT: call void @__kmpc_target_deinit()
1470 // CHECK-32-EX-NEXT: ret void
1471 // CHECK-32-EX: worker.exit:
1472 // CHECK-32-EX-NEXT: ret void
1475 // CHECK-32-EX-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l24_omp_outlined
1476 // CHECK-32-EX-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[E:%.*]]) #[[ATTR1:[0-9]+]] {
1477 // CHECK-32-EX-NEXT: entry:
1478 // CHECK-32-EX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
1479 // CHECK-32-EX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
1480 // CHECK-32-EX-NEXT: [[E_ADDR:%.*]] = alloca ptr, align 4
1481 // CHECK-32-EX-NEXT: [[E1:%.*]] = alloca double, align 8
1482 // CHECK-32-EX-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 4
1483 // CHECK-32-EX-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
1484 // CHECK-32-EX-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
1485 // CHECK-32-EX-NEXT: store ptr [[E]], ptr [[E_ADDR]], align 4
1486 // CHECK-32-EX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[E_ADDR]], align 4
1487 // CHECK-32-EX-NEXT: store double 0.000000e+00, ptr [[E1]], align 8
1488 // CHECK-32-EX-NEXT: [[TMP1:%.*]] = load double, ptr [[E1]], align 8
1489 // CHECK-32-EX-NEXT: [[ADD:%.*]] = fadd double [[TMP1]], 5.000000e+00
1490 // CHECK-32-EX-NEXT: store double [[ADD]], ptr [[E1]], align 8
1491 // CHECK-32-EX-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
1492 // CHECK-32-EX-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
1493 // CHECK-32-EX-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
1494 // CHECK-32-EX-NEXT: store ptr [[E1]], ptr [[TMP4]], align 4
1495 // CHECK-32-EX-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_nvptx_parallel_reduce_nowait_v2(ptr @[[GLOB1]], i32 [[TMP3]], i32 1, i32 4, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @_omp_reduction_shuffle_and_reduce_func, ptr @_omp_reduction_inter_warp_copy_func)
1496 // CHECK-32-EX-NEXT: [[TMP6:%.*]] = icmp eq i32 [[TMP5]], 1
1497 // CHECK-32-EX-NEXT: br i1 [[TMP6]], label [[DOTOMP_REDUCTION_THEN:%.*]], label [[DOTOMP_REDUCTION_DONE:%.*]]
1498 // CHECK-32-EX: .omp.reduction.then:
1499 // CHECK-32-EX-NEXT: [[TMP7:%.*]] = load double, ptr [[TMP0]], align 8
1500 // CHECK-32-EX-NEXT: [[TMP8:%.*]] = load double, ptr [[E1]], align 8
1501 // CHECK-32-EX-NEXT: [[ADD2:%.*]] = fadd double [[TMP7]], [[TMP8]]
1502 // CHECK-32-EX-NEXT: store double [[ADD2]], ptr [[TMP0]], align 8
1503 // CHECK-32-EX-NEXT: call void @__kmpc_nvptx_end_reduce_nowait(i32 [[TMP3]])
1504 // CHECK-32-EX-NEXT: br label [[DOTOMP_REDUCTION_DONE]]
1505 // CHECK-32-EX: .omp.reduction.done:
1506 // CHECK-32-EX-NEXT: ret void
1509 // CHECK-32-EX-LABEL: define {{[^@]+}}@_omp_reduction_shuffle_and_reduce_func
1510 // CHECK-32-EX-SAME: (ptr noundef [[TMP0:%.*]], i16 noundef signext [[TMP1:%.*]], i16 noundef signext [[TMP2:%.*]], i16 noundef signext [[TMP3:%.*]]) #[[ATTR2:[0-9]+]] {
1511 // CHECK-32-EX-NEXT: entry:
1512 // CHECK-32-EX-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
1513 // CHECK-32-EX-NEXT: [[DOTADDR1:%.*]] = alloca i16, align 2
1514 // CHECK-32-EX-NEXT: [[DOTADDR2:%.*]] = alloca i16, align 2
1515 // CHECK-32-EX-NEXT: [[DOTADDR3:%.*]] = alloca i16, align 2
1516 // CHECK-32-EX-NEXT: [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST:%.*]] = alloca [1 x ptr], align 4
1517 // CHECK-32-EX-NEXT: [[DOTOMP_REDUCTION_ELEMENT:%.*]] = alloca double, align 8
1518 // CHECK-32-EX-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
1519 // CHECK-32-EX-NEXT: store i16 [[TMP1]], ptr [[DOTADDR1]], align 2
1520 // CHECK-32-EX-NEXT: store i16 [[TMP2]], ptr [[DOTADDR2]], align 2
1521 // CHECK-32-EX-NEXT: store i16 [[TMP3]], ptr [[DOTADDR3]], align 2
1522 // CHECK-32-EX-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR]], align 4
1523 // CHECK-32-EX-NEXT: [[TMP5:%.*]] = load i16, ptr [[DOTADDR1]], align 2
1524 // CHECK-32-EX-NEXT: [[TMP6:%.*]] = load i16, ptr [[DOTADDR2]], align 2
1525 // CHECK-32-EX-NEXT: [[TMP7:%.*]] = load i16, ptr [[DOTADDR3]], align 2
1526 // CHECK-32-EX-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i32 0, i32 0
1527 // CHECK-32-EX-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 4
1528 // CHECK-32-EX-NEXT: [[TMP10:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 0
1529 // CHECK-32-EX-NEXT: [[TMP11:%.*]] = getelementptr double, ptr [[TMP9]], i32 1
1530 // CHECK-32-EX-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP9]], align 8
1531 // CHECK-32-EX-NEXT: [[TMP13:%.*]] = call i32 @__kmpc_get_warp_size()
1532 // CHECK-32-EX-NEXT: [[TMP14:%.*]] = trunc i32 [[TMP13]] to i16
1533 // CHECK-32-EX-NEXT: [[TMP15:%.*]] = call i64 @__kmpc_shuffle_int64(i64 [[TMP12]], i16 [[TMP6]], i16 [[TMP14]])
1534 // CHECK-32-EX-NEXT: store i64 [[TMP15]], ptr [[DOTOMP_REDUCTION_ELEMENT]], align 8
1535 // CHECK-32-EX-NEXT: [[TMP16:%.*]] = getelementptr i64, ptr [[TMP9]], i32 1
1536 // CHECK-32-EX-NEXT: [[TMP17:%.*]] = getelementptr i64, ptr [[DOTOMP_REDUCTION_ELEMENT]], i32 1
1537 // CHECK-32-EX-NEXT: store ptr [[DOTOMP_REDUCTION_ELEMENT]], ptr [[TMP10]], align 4
1538 // CHECK-32-EX-NEXT: [[TMP18:%.*]] = icmp eq i16 [[TMP7]], 0
1539 // CHECK-32-EX-NEXT: [[TMP19:%.*]] = icmp eq i16 [[TMP7]], 1
1540 // CHECK-32-EX-NEXT: [[TMP20:%.*]] = icmp ult i16 [[TMP5]], [[TMP6]]
1541 // CHECK-32-EX-NEXT: [[TMP21:%.*]] = and i1 [[TMP19]], [[TMP20]]
1542 // CHECK-32-EX-NEXT: [[TMP22:%.*]] = icmp eq i16 [[TMP7]], 2
1543 // CHECK-32-EX-NEXT: [[TMP23:%.*]] = and i16 [[TMP5]], 1
1544 // CHECK-32-EX-NEXT: [[TMP24:%.*]] = icmp eq i16 [[TMP23]], 0
1545 // CHECK-32-EX-NEXT: [[TMP25:%.*]] = and i1 [[TMP22]], [[TMP24]]
1546 // CHECK-32-EX-NEXT: [[TMP26:%.*]] = icmp sgt i16 [[TMP6]], 0
1547 // CHECK-32-EX-NEXT: [[TMP27:%.*]] = and i1 [[TMP25]], [[TMP26]]
1548 // CHECK-32-EX-NEXT: [[TMP28:%.*]] = or i1 [[TMP18]], [[TMP21]]
1549 // CHECK-32-EX-NEXT: [[TMP29:%.*]] = or i1 [[TMP28]], [[TMP27]]
1550 // CHECK-32-EX-NEXT: br i1 [[TMP29]], label [[THEN:%.*]], label [[ELSE:%.*]]
1551 // CHECK-32-EX: then:
1552 // CHECK-32-EX-NEXT: call void @"{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l24_omp_outlined_omp$reduction$reduction_func"(ptr [[TMP4]], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]]) #[[ATTR3:[0-9]+]]
1553 // CHECK-32-EX-NEXT: br label [[IFCONT:%.*]]
1554 // CHECK-32-EX: else:
1555 // CHECK-32-EX-NEXT: br label [[IFCONT]]
1556 // CHECK-32-EX: ifcont:
1557 // CHECK-32-EX-NEXT: [[TMP30:%.*]] = icmp eq i16 [[TMP7]], 1
1558 // CHECK-32-EX-NEXT: [[TMP31:%.*]] = icmp uge i16 [[TMP5]], [[TMP6]]
1559 // CHECK-32-EX-NEXT: [[TMP32:%.*]] = and i1 [[TMP30]], [[TMP31]]
1560 // CHECK-32-EX-NEXT: br i1 [[TMP32]], label [[THEN4:%.*]], label [[ELSE5:%.*]]
1561 // CHECK-32-EX: then4:
1562 // CHECK-32-EX-NEXT: [[TMP33:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 0
1563 // CHECK-32-EX-NEXT: [[TMP34:%.*]] = load ptr, ptr [[TMP33]], align 4
1564 // CHECK-32-EX-NEXT: [[TMP35:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i32 0, i32 0
1565 // CHECK-32-EX-NEXT: [[TMP36:%.*]] = load ptr, ptr [[TMP35]], align 4
1566 // CHECK-32-EX-NEXT: [[TMP37:%.*]] = load double, ptr [[TMP34]], align 8
1567 // CHECK-32-EX-NEXT: store double [[TMP37]], ptr [[TMP36]], align 8
1568 // CHECK-32-EX-NEXT: br label [[IFCONT6:%.*]]
1569 // CHECK-32-EX: else5:
1570 // CHECK-32-EX-NEXT: br label [[IFCONT6]]
1571 // CHECK-32-EX: ifcont6:
1572 // CHECK-32-EX-NEXT: ret void
1575 // CHECK-32-EX-LABEL: define {{[^@]+}}@_omp_reduction_inter_warp_copy_func
1576 // CHECK-32-EX-SAME: (ptr noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]]) #[[ATTR2]] {
1577 // CHECK-32-EX-NEXT: entry:
1578 // CHECK-32-EX-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
1579 // CHECK-32-EX-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
1580 // CHECK-32-EX-NEXT: [[DOTCNT_ADDR:%.*]] = alloca i32, align 4
1581 // CHECK-32-EX-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
1582 // CHECK-32-EX-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
1583 // CHECK-32-EX-NEXT: store i32 [[TMP1]], ptr [[DOTADDR1]], align 4
1584 // CHECK-32-EX-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
1585 // CHECK-32-EX-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
1586 // CHECK-32-EX-NEXT: [[NVPTX_LANE_ID:%.*]] = and i32 [[TMP4]], 31
1587 // CHECK-32-EX-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
1588 // CHECK-32-EX-NEXT: [[NVPTX_WARP_ID:%.*]] = ashr i32 [[TMP5]], 5
1589 // CHECK-32-EX-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTADDR]], align 4
1590 // CHECK-32-EX-NEXT: store i32 0, ptr [[DOTCNT_ADDR]], align 4
1591 // CHECK-32-EX-NEXT: br label [[PRECOND:%.*]]
1592 // CHECK-32-EX: precond:
1593 // CHECK-32-EX-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCNT_ADDR]], align 4
1594 // CHECK-32-EX-NEXT: [[TMP8:%.*]] = icmp ult i32 [[TMP7]], 2
1595 // CHECK-32-EX-NEXT: br i1 [[TMP8]], label [[BODY:%.*]], label [[EXIT:%.*]]
1596 // CHECK-32-EX: body:
1597 // CHECK-32-EX-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2:[0-9]+]], i32 [[TMP2]])
1598 // CHECK-32-EX-NEXT: [[WARP_MASTER:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
1599 // CHECK-32-EX-NEXT: br i1 [[WARP_MASTER]], label [[THEN:%.*]], label [[ELSE:%.*]]
1600 // CHECK-32-EX: then:
1601 // CHECK-32-EX-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP6]], i32 0, i32 0
1602 // CHECK-32-EX-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 4
1603 // CHECK-32-EX-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[TMP10]], i32 [[TMP7]]
1604 // CHECK-32-EX-NEXT: [[TMP12:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
1605 // CHECK-32-EX-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP11]], align 4
1606 // CHECK-32-EX-NEXT: store volatile i32 [[TMP13]], ptr addrspace(3) [[TMP12]], align 4
1607 // CHECK-32-EX-NEXT: br label [[IFCONT:%.*]]
1608 // CHECK-32-EX: else:
1609 // CHECK-32-EX-NEXT: br label [[IFCONT]]
1610 // CHECK-32-EX: ifcont:
1611 // CHECK-32-EX-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
1612 // CHECK-32-EX-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTADDR1]], align 4
1613 // CHECK-32-EX-NEXT: [[IS_ACTIVE_THREAD:%.*]] = icmp ult i32 [[TMP3]], [[TMP14]]
1614 // CHECK-32-EX-NEXT: br i1 [[IS_ACTIVE_THREAD]], label [[THEN2:%.*]], label [[ELSE3:%.*]]
1615 // CHECK-32-EX: then2:
1616 // CHECK-32-EX-NEXT: [[TMP15:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
1617 // CHECK-32-EX-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP6]], i32 0, i32 0
1618 // CHECK-32-EX-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 4
1619 // CHECK-32-EX-NEXT: [[TMP18:%.*]] = getelementptr i32, ptr [[TMP17]], i32 [[TMP7]]
1620 // CHECK-32-EX-NEXT: [[TMP19:%.*]] = load volatile i32, ptr addrspace(3) [[TMP15]], align 4
1621 // CHECK-32-EX-NEXT: store i32 [[TMP19]], ptr [[TMP18]], align 4
1622 // CHECK-32-EX-NEXT: br label [[IFCONT4:%.*]]
1623 // CHECK-32-EX: else3:
1624 // CHECK-32-EX-NEXT: br label [[IFCONT4]]
1625 // CHECK-32-EX: ifcont4:
1626 // CHECK-32-EX-NEXT: [[TMP20:%.*]] = add nsw i32 [[TMP7]], 1
1627 // CHECK-32-EX-NEXT: store i32 [[TMP20]], ptr [[DOTCNT_ADDR]], align 4
1628 // CHECK-32-EX-NEXT: br label [[PRECOND]]
1629 // CHECK-32-EX: exit:
1630 // CHECK-32-EX-NEXT: ret void
1633 // CHECK-32-EX-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l29
1634 // CHECK-32-EX-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[C:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[D:%.*]]) #[[ATTR0]] {
1635 // CHECK-32-EX-NEXT: entry:
1636 // CHECK-32-EX-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
1637 // CHECK-32-EX-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
1638 // CHECK-32-EX-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 4
1639 // CHECK-32-EX-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [2 x ptr], align 4
1640 // CHECK-32-EX-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
1641 // CHECK-32-EX-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
1642 // CHECK-32-EX-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4
1643 // CHECK-32-EX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[C_ADDR]], align 4
1644 // CHECK-32-EX-NEXT: [[TMP1:%.*]] = load ptr, ptr [[D_ADDR]], align 4
1645 // CHECK-32-EX-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l29_kernel_environment, ptr [[DYN_PTR]])
1646 // CHECK-32-EX-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP2]], -1
1647 // CHECK-32-EX-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
1648 // CHECK-32-EX: user_code.entry:
1649 // CHECK-32-EX-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
1650 // CHECK-32-EX-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
1651 // CHECK-32-EX-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 4
1652 // CHECK-32-EX-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
1653 // CHECK-32-EX-NEXT: store ptr [[TMP1]], ptr [[TMP5]], align 4
1654 // CHECK-32-EX-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP3]], i32 1, i32 -1, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l29_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i32 2)
1655 // CHECK-32-EX-NEXT: call void @__kmpc_target_deinit()
1656 // CHECK-32-EX-NEXT: ret void
1657 // CHECK-32-EX: worker.exit:
1658 // CHECK-32-EX-NEXT: ret void
1661 // CHECK-32-EX-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l29_omp_outlined
1662 // CHECK-32-EX-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[C:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[D:%.*]]) #[[ATTR1]] {
1663 // CHECK-32-EX-NEXT: entry:
1664 // CHECK-32-EX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
1665 // CHECK-32-EX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
1666 // CHECK-32-EX-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
1667 // CHECK-32-EX-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 4
1668 // CHECK-32-EX-NEXT: [[C1:%.*]] = alloca i8, align 1
1669 // CHECK-32-EX-NEXT: [[D2:%.*]] = alloca float, align 4
1670 // CHECK-32-EX-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x ptr], align 4
1671 // CHECK-32-EX-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
1672 // CHECK-32-EX-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
1673 // CHECK-32-EX-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
1674 // CHECK-32-EX-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4
1675 // CHECK-32-EX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[C_ADDR]], align 4
1676 // CHECK-32-EX-NEXT: [[TMP1:%.*]] = load ptr, ptr [[D_ADDR]], align 4
1677 // CHECK-32-EX-NEXT: store i8 0, ptr [[C1]], align 1
1678 // CHECK-32-EX-NEXT: store float 1.000000e+00, ptr [[D2]], align 4
1679 // CHECK-32-EX-NEXT: [[TMP2:%.*]] = load i8, ptr [[C1]], align 1
1680 // CHECK-32-EX-NEXT: [[CONV:%.*]] = sext i8 [[TMP2]] to i32
1681 // CHECK-32-EX-NEXT: [[XOR:%.*]] = xor i32 [[CONV]], 2
1682 // CHECK-32-EX-NEXT: [[CONV3:%.*]] = trunc i32 [[XOR]] to i8
1683 // CHECK-32-EX-NEXT: store i8 [[CONV3]], ptr [[C1]], align 1
1684 // CHECK-32-EX-NEXT: [[TMP3:%.*]] = load float, ptr [[D2]], align 4
1685 // CHECK-32-EX-NEXT: [[MUL:%.*]] = fmul float [[TMP3]], 3.300000e+01
1686 // CHECK-32-EX-NEXT: store float [[MUL]], ptr [[D2]], align 4
1687 // CHECK-32-EX-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
1688 // CHECK-32-EX-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4
1689 // CHECK-32-EX-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
1690 // CHECK-32-EX-NEXT: store ptr [[C1]], ptr [[TMP6]], align 4
1691 // CHECK-32-EX-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 1
1692 // CHECK-32-EX-NEXT: store ptr [[D2]], ptr [[TMP7]], align 4
1693 // CHECK-32-EX-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_nvptx_parallel_reduce_nowait_v2(ptr @[[GLOB1]], i32 [[TMP5]], i32 2, i32 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @_omp_reduction_shuffle_and_reduce_func1, ptr @_omp_reduction_inter_warp_copy_func2)
1694 // CHECK-32-EX-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP8]], 1
1695 // CHECK-32-EX-NEXT: br i1 [[TMP9]], label [[DOTOMP_REDUCTION_THEN:%.*]], label [[DOTOMP_REDUCTION_DONE:%.*]]
1696 // CHECK-32-EX: .omp.reduction.then:
1697 // CHECK-32-EX-NEXT: [[TMP10:%.*]] = load i8, ptr [[TMP0]], align 1
1698 // CHECK-32-EX-NEXT: [[CONV4:%.*]] = sext i8 [[TMP10]] to i32
1699 // CHECK-32-EX-NEXT: [[TMP11:%.*]] = load i8, ptr [[C1]], align 1
1700 // CHECK-32-EX-NEXT: [[CONV5:%.*]] = sext i8 [[TMP11]] to i32
1701 // CHECK-32-EX-NEXT: [[XOR6:%.*]] = xor i32 [[CONV4]], [[CONV5]]
1702 // CHECK-32-EX-NEXT: [[CONV7:%.*]] = trunc i32 [[XOR6]] to i8
1703 // CHECK-32-EX-NEXT: store i8 [[CONV7]], ptr [[TMP0]], align 1
1704 // CHECK-32-EX-NEXT: [[TMP12:%.*]] = load float, ptr [[TMP1]], align 4
1705 // CHECK-32-EX-NEXT: [[TMP13:%.*]] = load float, ptr [[D2]], align 4
1706 // CHECK-32-EX-NEXT: [[MUL8:%.*]] = fmul float [[TMP12]], [[TMP13]]
1707 // CHECK-32-EX-NEXT: store float [[MUL8]], ptr [[TMP1]], align 4
1708 // CHECK-32-EX-NEXT: call void @__kmpc_nvptx_end_reduce_nowait(i32 [[TMP5]])
1709 // CHECK-32-EX-NEXT: br label [[DOTOMP_REDUCTION_DONE]]
1710 // CHECK-32-EX: .omp.reduction.done:
1711 // CHECK-32-EX-NEXT: ret void
1714 // CHECK-32-EX-LABEL: define {{[^@]+}}@_omp_reduction_shuffle_and_reduce_func1
1715 // CHECK-32-EX-SAME: (ptr noundef [[TMP0:%.*]], i16 noundef signext [[TMP1:%.*]], i16 noundef signext [[TMP2:%.*]], i16 noundef signext [[TMP3:%.*]]) #[[ATTR2]] {
1716 // CHECK-32-EX-NEXT: entry:
1717 // CHECK-32-EX-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
1718 // CHECK-32-EX-NEXT: [[DOTADDR1:%.*]] = alloca i16, align 2
1719 // CHECK-32-EX-NEXT: [[DOTADDR2:%.*]] = alloca i16, align 2
1720 // CHECK-32-EX-NEXT: [[DOTADDR3:%.*]] = alloca i16, align 2
1721 // CHECK-32-EX-NEXT: [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST:%.*]] = alloca [2 x ptr], align 4
1722 // CHECK-32-EX-NEXT: [[DOTOMP_REDUCTION_ELEMENT:%.*]] = alloca i8, align 1
1723 // CHECK-32-EX-NEXT: [[DOTOMP_REDUCTION_ELEMENT4:%.*]] = alloca float, align 4
1724 // CHECK-32-EX-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
1725 // CHECK-32-EX-NEXT: store i16 [[TMP1]], ptr [[DOTADDR1]], align 2
1726 // CHECK-32-EX-NEXT: store i16 [[TMP2]], ptr [[DOTADDR2]], align 2
1727 // CHECK-32-EX-NEXT: store i16 [[TMP3]], ptr [[DOTADDR3]], align 2
1728 // CHECK-32-EX-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR]], align 4
1729 // CHECK-32-EX-NEXT: [[TMP5:%.*]] = load i16, ptr [[DOTADDR1]], align 2
1730 // CHECK-32-EX-NEXT: [[TMP6:%.*]] = load i16, ptr [[DOTADDR2]], align 2
1731 // CHECK-32-EX-NEXT: [[TMP7:%.*]] = load i16, ptr [[DOTADDR3]], align 2
1732 // CHECK-32-EX-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i32 0, i32 0
1733 // CHECK-32-EX-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 4
1734 // CHECK-32-EX-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 0
1735 // CHECK-32-EX-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[TMP9]], i32 1
1736 // CHECK-32-EX-NEXT: [[TMP12:%.*]] = load i8, ptr [[TMP9]], align 1
1737 // CHECK-32-EX-NEXT: [[TMP13:%.*]] = sext i8 [[TMP12]] to i32
1738 // CHECK-32-EX-NEXT: [[TMP14:%.*]] = call i32 @__kmpc_get_warp_size()
1739 // CHECK-32-EX-NEXT: [[TMP15:%.*]] = trunc i32 [[TMP14]] to i16
1740 // CHECK-32-EX-NEXT: [[TMP16:%.*]] = call i32 @__kmpc_shuffle_int32(i32 [[TMP13]], i16 [[TMP6]], i16 [[TMP15]])
1741 // CHECK-32-EX-NEXT: [[TMP17:%.*]] = trunc i32 [[TMP16]] to i8
1742 // CHECK-32-EX-NEXT: store i8 [[TMP17]], ptr [[DOTOMP_REDUCTION_ELEMENT]], align 1
1743 // CHECK-32-EX-NEXT: [[TMP18:%.*]] = getelementptr i8, ptr [[TMP9]], i32 1
1744 // CHECK-32-EX-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[DOTOMP_REDUCTION_ELEMENT]], i32 1
1745 // CHECK-32-EX-NEXT: store ptr [[DOTOMP_REDUCTION_ELEMENT]], ptr [[TMP10]], align 4
1746 // CHECK-32-EX-NEXT: [[TMP20:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i32 0, i32 1
1747 // CHECK-32-EX-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP20]], align 4
1748 // CHECK-32-EX-NEXT: [[TMP22:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 1
1749 // CHECK-32-EX-NEXT: [[TMP23:%.*]] = getelementptr float, ptr [[TMP21]], i32 1
1750 // CHECK-32-EX-NEXT: [[TMP24:%.*]] = load i32, ptr [[TMP21]], align 4
1751 // CHECK-32-EX-NEXT: [[TMP25:%.*]] = call i32 @__kmpc_get_warp_size()
1752 // CHECK-32-EX-NEXT: [[TMP26:%.*]] = trunc i32 [[TMP25]] to i16
1753 // CHECK-32-EX-NEXT: [[TMP27:%.*]] = call i32 @__kmpc_shuffle_int32(i32 [[TMP24]], i16 [[TMP6]], i16 [[TMP26]])
1754 // CHECK-32-EX-NEXT: store i32 [[TMP27]], ptr [[DOTOMP_REDUCTION_ELEMENT4]], align 4
1755 // CHECK-32-EX-NEXT: [[TMP28:%.*]] = getelementptr i32, ptr [[TMP21]], i32 1
1756 // CHECK-32-EX-NEXT: [[TMP29:%.*]] = getelementptr i32, ptr [[DOTOMP_REDUCTION_ELEMENT4]], i32 1
1757 // CHECK-32-EX-NEXT: store ptr [[DOTOMP_REDUCTION_ELEMENT4]], ptr [[TMP22]], align 4
1758 // CHECK-32-EX-NEXT: [[TMP30:%.*]] = icmp eq i16 [[TMP7]], 0
1759 // CHECK-32-EX-NEXT: [[TMP31:%.*]] = icmp eq i16 [[TMP7]], 1
1760 // CHECK-32-EX-NEXT: [[TMP32:%.*]] = icmp ult i16 [[TMP5]], [[TMP6]]
1761 // CHECK-32-EX-NEXT: [[TMP33:%.*]] = and i1 [[TMP31]], [[TMP32]]
1762 // CHECK-32-EX-NEXT: [[TMP34:%.*]] = icmp eq i16 [[TMP7]], 2
1763 // CHECK-32-EX-NEXT: [[TMP35:%.*]] = and i16 [[TMP5]], 1
1764 // CHECK-32-EX-NEXT: [[TMP36:%.*]] = icmp eq i16 [[TMP35]], 0
1765 // CHECK-32-EX-NEXT: [[TMP37:%.*]] = and i1 [[TMP34]], [[TMP36]]
1766 // CHECK-32-EX-NEXT: [[TMP38:%.*]] = icmp sgt i16 [[TMP6]], 0
1767 // CHECK-32-EX-NEXT: [[TMP39:%.*]] = and i1 [[TMP37]], [[TMP38]]
1768 // CHECK-32-EX-NEXT: [[TMP40:%.*]] = or i1 [[TMP30]], [[TMP33]]
1769 // CHECK-32-EX-NEXT: [[TMP41:%.*]] = or i1 [[TMP40]], [[TMP39]]
1770 // CHECK-32-EX-NEXT: br i1 [[TMP41]], label [[THEN:%.*]], label [[ELSE:%.*]]
1771 // CHECK-32-EX: then:
1772 // CHECK-32-EX-NEXT: call void @"{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l29_omp_outlined_omp$reduction$reduction_func"(ptr [[TMP4]], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]]) #[[ATTR3]]
1773 // CHECK-32-EX-NEXT: br label [[IFCONT:%.*]]
1774 // CHECK-32-EX: else:
1775 // CHECK-32-EX-NEXT: br label [[IFCONT]]
1776 // CHECK-32-EX: ifcont:
1777 // CHECK-32-EX-NEXT: [[TMP42:%.*]] = icmp eq i16 [[TMP7]], 1
1778 // CHECK-32-EX-NEXT: [[TMP43:%.*]] = icmp uge i16 [[TMP5]], [[TMP6]]
1779 // CHECK-32-EX-NEXT: [[TMP44:%.*]] = and i1 [[TMP42]], [[TMP43]]
1780 // CHECK-32-EX-NEXT: br i1 [[TMP44]], label [[THEN5:%.*]], label [[ELSE6:%.*]]
1781 // CHECK-32-EX: then5:
1782 // CHECK-32-EX-NEXT: [[TMP45:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 0
1783 // CHECK-32-EX-NEXT: [[TMP46:%.*]] = load ptr, ptr [[TMP45]], align 4
1784 // CHECK-32-EX-NEXT: [[TMP47:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i32 0, i32 0
1785 // CHECK-32-EX-NEXT: [[TMP48:%.*]] = load ptr, ptr [[TMP47]], align 4
1786 // CHECK-32-EX-NEXT: [[TMP49:%.*]] = load i8, ptr [[TMP46]], align 1
1787 // CHECK-32-EX-NEXT: store i8 [[TMP49]], ptr [[TMP48]], align 1
1788 // CHECK-32-EX-NEXT: [[TMP50:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 1
1789 // CHECK-32-EX-NEXT: [[TMP51:%.*]] = load ptr, ptr [[TMP50]], align 4
1790 // CHECK-32-EX-NEXT: [[TMP52:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i32 0, i32 1
1791 // CHECK-32-EX-NEXT: [[TMP53:%.*]] = load ptr, ptr [[TMP52]], align 4
1792 // CHECK-32-EX-NEXT: [[TMP54:%.*]] = load float, ptr [[TMP51]], align 4
1793 // CHECK-32-EX-NEXT: store float [[TMP54]], ptr [[TMP53]], align 4
1794 // CHECK-32-EX-NEXT: br label [[IFCONT7:%.*]]
1795 // CHECK-32-EX: else6:
1796 // CHECK-32-EX-NEXT: br label [[IFCONT7]]
1797 // CHECK-32-EX: ifcont7:
1798 // CHECK-32-EX-NEXT: ret void
1801 // CHECK-32-EX-LABEL: define {{[^@]+}}@_omp_reduction_inter_warp_copy_func2
1802 // CHECK-32-EX-SAME: (ptr noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]]) #[[ATTR2]] {
1803 // CHECK-32-EX-NEXT: entry:
1804 // CHECK-32-EX-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
1805 // CHECK-32-EX-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
1806 // CHECK-32-EX-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
1807 // CHECK-32-EX-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
1808 // CHECK-32-EX-NEXT: store i32 [[TMP1]], ptr [[DOTADDR1]], align 4
1809 // CHECK-32-EX-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
1810 // CHECK-32-EX-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
1811 // CHECK-32-EX-NEXT: [[NVPTX_LANE_ID:%.*]] = and i32 [[TMP4]], 31
1812 // CHECK-32-EX-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
1813 // CHECK-32-EX-NEXT: [[NVPTX_WARP_ID:%.*]] = ashr i32 [[TMP5]], 5
1814 // CHECK-32-EX-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTADDR]], align 4
1815 // CHECK-32-EX-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
1816 // CHECK-32-EX-NEXT: [[WARP_MASTER:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
1817 // CHECK-32-EX-NEXT: br i1 [[WARP_MASTER]], label [[THEN:%.*]], label [[ELSE:%.*]]
1818 // CHECK-32-EX: then:
1819 // CHECK-32-EX-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i32 0, i32 0
1820 // CHECK-32-EX-NEXT: [[TMP8:%.*]] = load ptr, ptr [[TMP7]], align 4
1821 // CHECK-32-EX-NEXT: [[TMP9:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
1822 // CHECK-32-EX-NEXT: [[TMP10:%.*]] = load i8, ptr [[TMP8]], align 1
1823 // CHECK-32-EX-NEXT: store volatile i8 [[TMP10]], ptr addrspace(3) [[TMP9]], align 1
1824 // CHECK-32-EX-NEXT: br label [[IFCONT:%.*]]
1825 // CHECK-32-EX: else:
1826 // CHECK-32-EX-NEXT: br label [[IFCONT]]
1827 // CHECK-32-EX: ifcont:
1828 // CHECK-32-EX-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
1829 // CHECK-32-EX-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTADDR1]], align 4
1830 // CHECK-32-EX-NEXT: [[IS_ACTIVE_THREAD:%.*]] = icmp ult i32 [[TMP3]], [[TMP11]]
1831 // CHECK-32-EX-NEXT: br i1 [[IS_ACTIVE_THREAD]], label [[THEN2:%.*]], label [[ELSE3:%.*]]
1832 // CHECK-32-EX: then2:
1833 // CHECK-32-EX-NEXT: [[TMP12:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
1834 // CHECK-32-EX-NEXT: [[TMP13:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i32 0, i32 0
1835 // CHECK-32-EX-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP13]], align 4
1836 // CHECK-32-EX-NEXT: [[TMP15:%.*]] = load volatile i8, ptr addrspace(3) [[TMP12]], align 1
1837 // CHECK-32-EX-NEXT: store i8 [[TMP15]], ptr [[TMP14]], align 1
1838 // CHECK-32-EX-NEXT: br label [[IFCONT4:%.*]]
1839 // CHECK-32-EX: else3:
1840 // CHECK-32-EX-NEXT: br label [[IFCONT4]]
1841 // CHECK-32-EX: ifcont4:
1842 // CHECK-32-EX-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
1843 // CHECK-32-EX-NEXT: [[WARP_MASTER5:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
1844 // CHECK-32-EX-NEXT: br i1 [[WARP_MASTER5]], label [[THEN6:%.*]], label [[ELSE7:%.*]]
1845 // CHECK-32-EX: then6:
1846 // CHECK-32-EX-NEXT: [[TMP16:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i32 0, i32 1
1847 // CHECK-32-EX-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 4
1848 // CHECK-32-EX-NEXT: [[TMP18:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
1849 // CHECK-32-EX-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP17]], align 4
1850 // CHECK-32-EX-NEXT: store volatile i32 [[TMP19]], ptr addrspace(3) [[TMP18]], align 4
1851 // CHECK-32-EX-NEXT: br label [[IFCONT8:%.*]]
1852 // CHECK-32-EX: else7:
1853 // CHECK-32-EX-NEXT: br label [[IFCONT8]]
1854 // CHECK-32-EX: ifcont8:
1855 // CHECK-32-EX-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
1856 // CHECK-32-EX-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTADDR1]], align 4
1857 // CHECK-32-EX-NEXT: [[IS_ACTIVE_THREAD9:%.*]] = icmp ult i32 [[TMP3]], [[TMP20]]
1858 // CHECK-32-EX-NEXT: br i1 [[IS_ACTIVE_THREAD9]], label [[THEN10:%.*]], label [[ELSE11:%.*]]
1859 // CHECK-32-EX: then10:
1860 // CHECK-32-EX-NEXT: [[TMP21:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
1861 // CHECK-32-EX-NEXT: [[TMP22:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i32 0, i32 1
1862 // CHECK-32-EX-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP22]], align 4
1863 // CHECK-32-EX-NEXT: [[TMP24:%.*]] = load volatile i32, ptr addrspace(3) [[TMP21]], align 4
1864 // CHECK-32-EX-NEXT: store i32 [[TMP24]], ptr [[TMP23]], align 4
1865 // CHECK-32-EX-NEXT: br label [[IFCONT12:%.*]]
1866 // CHECK-32-EX: else11:
1867 // CHECK-32-EX-NEXT: br label [[IFCONT12]]
1868 // CHECK-32-EX: ifcont12:
1869 // CHECK-32-EX-NEXT: ret void
1872 // CHECK-32-EX-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l35
1873 // CHECK-32-EX-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[B:%.*]]) #[[ATTR0]] {
1874 // CHECK-32-EX-NEXT: entry:
1875 // CHECK-32-EX-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
1876 // CHECK-32-EX-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
1877 // CHECK-32-EX-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
1878 // CHECK-32-EX-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [2 x ptr], align 4
1879 // CHECK-32-EX-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
1880 // CHECK-32-EX-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
1881 // CHECK-32-EX-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
1882 // CHECK-32-EX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
1883 // CHECK-32-EX-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4
1884 // CHECK-32-EX-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l35_kernel_environment, ptr [[DYN_PTR]])
1885 // CHECK-32-EX-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP2]], -1
1886 // CHECK-32-EX-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
1887 // CHECK-32-EX: user_code.entry:
1888 // CHECK-32-EX-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
1889 // CHECK-32-EX-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
1890 // CHECK-32-EX-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 4
1891 // CHECK-32-EX-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
1892 // CHECK-32-EX-NEXT: store ptr [[TMP1]], ptr [[TMP5]], align 4
1893 // CHECK-32-EX-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP3]], i32 1, i32 -1, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l35_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i32 2)
1894 // CHECK-32-EX-NEXT: call void @__kmpc_target_deinit()
1895 // CHECK-32-EX-NEXT: ret void
1896 // CHECK-32-EX: worker.exit:
1897 // CHECK-32-EX-NEXT: ret void
1900 // CHECK-32-EX-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l35_omp_outlined
1901 // CHECK-32-EX-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[B:%.*]]) #[[ATTR1]] {
1902 // CHECK-32-EX-NEXT: entry:
1903 // CHECK-32-EX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
1904 // CHECK-32-EX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
1905 // CHECK-32-EX-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
1906 // CHECK-32-EX-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
1907 // CHECK-32-EX-NEXT: [[A1:%.*]] = alloca i32, align 4
1908 // CHECK-32-EX-NEXT: [[B2:%.*]] = alloca i16, align 2
1909 // CHECK-32-EX-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x ptr], align 4
1910 // CHECK-32-EX-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
1911 // CHECK-32-EX-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
1912 // CHECK-32-EX-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
1913 // CHECK-32-EX-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
1914 // CHECK-32-EX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
1915 // CHECK-32-EX-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4
1916 // CHECK-32-EX-NEXT: store i32 0, ptr [[A1]], align 4
1917 // CHECK-32-EX-NEXT: store i16 -32768, ptr [[B2]], align 2
1918 // CHECK-32-EX-NEXT: [[TMP2:%.*]] = load i32, ptr [[A1]], align 4
1919 // CHECK-32-EX-NEXT: [[OR:%.*]] = or i32 [[TMP2]], 1
1920 // CHECK-32-EX-NEXT: store i32 [[OR]], ptr [[A1]], align 4
1921 // CHECK-32-EX-NEXT: [[TMP3:%.*]] = load i16, ptr [[B2]], align 2
1922 // CHECK-32-EX-NEXT: [[CONV:%.*]] = sext i16 [[TMP3]] to i32
1923 // CHECK-32-EX-NEXT: [[CMP:%.*]] = icmp sgt i32 99, [[CONV]]
1924 // CHECK-32-EX-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
1925 // CHECK-32-EX: cond.true:
1926 // CHECK-32-EX-NEXT: br label [[COND_END:%.*]]
1927 // CHECK-32-EX: cond.false:
1928 // CHECK-32-EX-NEXT: [[TMP4:%.*]] = load i16, ptr [[B2]], align 2
1929 // CHECK-32-EX-NEXT: [[CONV3:%.*]] = sext i16 [[TMP4]] to i32
1930 // CHECK-32-EX-NEXT: br label [[COND_END]]
1931 // CHECK-32-EX: cond.end:
1932 // CHECK-32-EX-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[CONV3]], [[COND_FALSE]] ]
1933 // CHECK-32-EX-NEXT: [[CONV4:%.*]] = trunc i32 [[COND]] to i16
1934 // CHECK-32-EX-NEXT: store i16 [[CONV4]], ptr [[B2]], align 2
1935 // CHECK-32-EX-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
1936 // CHECK-32-EX-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
1937 // CHECK-32-EX-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
1938 // CHECK-32-EX-NEXT: store ptr [[A1]], ptr [[TMP7]], align 4
1939 // CHECK-32-EX-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 1
1940 // CHECK-32-EX-NEXT: store ptr [[B2]], ptr [[TMP8]], align 4
1941 // CHECK-32-EX-NEXT: [[TMP9:%.*]] = call i32 @__kmpc_nvptx_parallel_reduce_nowait_v2(ptr @[[GLOB1]], i32 [[TMP6]], i32 2, i32 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @_omp_reduction_shuffle_and_reduce_func3, ptr @_omp_reduction_inter_warp_copy_func4)
1942 // CHECK-32-EX-NEXT: [[TMP10:%.*]] = icmp eq i32 [[TMP9]], 1
1943 // CHECK-32-EX-NEXT: br i1 [[TMP10]], label [[DOTOMP_REDUCTION_THEN:%.*]], label [[DOTOMP_REDUCTION_DONE:%.*]]
1944 // CHECK-32-EX: .omp.reduction.then:
1945 // CHECK-32-EX-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP0]], align 4
1946 // CHECK-32-EX-NEXT: [[TMP12:%.*]] = load i32, ptr [[A1]], align 4
1947 // CHECK-32-EX-NEXT: [[OR5:%.*]] = or i32 [[TMP11]], [[TMP12]]
1948 // CHECK-32-EX-NEXT: store i32 [[OR5]], ptr [[TMP0]], align 4
1949 // CHECK-32-EX-NEXT: [[TMP13:%.*]] = load i16, ptr [[TMP1]], align 2
1950 // CHECK-32-EX-NEXT: [[CONV6:%.*]] = sext i16 [[TMP13]] to i32
1951 // CHECK-32-EX-NEXT: [[TMP14:%.*]] = load i16, ptr [[B2]], align 2
1952 // CHECK-32-EX-NEXT: [[CONV7:%.*]] = sext i16 [[TMP14]] to i32
1953 // CHECK-32-EX-NEXT: [[CMP8:%.*]] = icmp sgt i32 [[CONV6]], [[CONV7]]
1954 // CHECK-32-EX-NEXT: br i1 [[CMP8]], label [[COND_TRUE9:%.*]], label [[COND_FALSE10:%.*]]
1955 // CHECK-32-EX: cond.true9:
1956 // CHECK-32-EX-NEXT: [[TMP15:%.*]] = load i16, ptr [[TMP1]], align 2
1957 // CHECK-32-EX-NEXT: br label [[COND_END11:%.*]]
1958 // CHECK-32-EX: cond.false10:
1959 // CHECK-32-EX-NEXT: [[TMP16:%.*]] = load i16, ptr [[B2]], align 2
1960 // CHECK-32-EX-NEXT: br label [[COND_END11]]
1961 // CHECK-32-EX: cond.end11:
1962 // CHECK-32-EX-NEXT: [[COND12:%.*]] = phi i16 [ [[TMP15]], [[COND_TRUE9]] ], [ [[TMP16]], [[COND_FALSE10]] ]
1963 // CHECK-32-EX-NEXT: store i16 [[COND12]], ptr [[TMP1]], align 2
1964 // CHECK-32-EX-NEXT: call void @__kmpc_nvptx_end_reduce_nowait(i32 [[TMP6]])
1965 // CHECK-32-EX-NEXT: br label [[DOTOMP_REDUCTION_DONE]]
1966 // CHECK-32-EX: .omp.reduction.done:
1967 // CHECK-32-EX-NEXT: ret void
1970 // CHECK-32-EX-LABEL: define {{[^@]+}}@_omp_reduction_shuffle_and_reduce_func3
1971 // CHECK-32-EX-SAME: (ptr noundef [[TMP0:%.*]], i16 noundef signext [[TMP1:%.*]], i16 noundef signext [[TMP2:%.*]], i16 noundef signext [[TMP3:%.*]]) #[[ATTR2]] {
1972 // CHECK-32-EX-NEXT: entry:
1973 // CHECK-32-EX-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
1974 // CHECK-32-EX-NEXT: [[DOTADDR1:%.*]] = alloca i16, align 2
1975 // CHECK-32-EX-NEXT: [[DOTADDR2:%.*]] = alloca i16, align 2
1976 // CHECK-32-EX-NEXT: [[DOTADDR3:%.*]] = alloca i16, align 2
1977 // CHECK-32-EX-NEXT: [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST:%.*]] = alloca [2 x ptr], align 4
1978 // CHECK-32-EX-NEXT: [[DOTOMP_REDUCTION_ELEMENT:%.*]] = alloca i32, align 4
1979 // CHECK-32-EX-NEXT: [[DOTOMP_REDUCTION_ELEMENT4:%.*]] = alloca i16, align 2
1980 // CHECK-32-EX-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
1981 // CHECK-32-EX-NEXT: store i16 [[TMP1]], ptr [[DOTADDR1]], align 2
1982 // CHECK-32-EX-NEXT: store i16 [[TMP2]], ptr [[DOTADDR2]], align 2
1983 // CHECK-32-EX-NEXT: store i16 [[TMP3]], ptr [[DOTADDR3]], align 2
1984 // CHECK-32-EX-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR]], align 4
1985 // CHECK-32-EX-NEXT: [[TMP5:%.*]] = load i16, ptr [[DOTADDR1]], align 2
1986 // CHECK-32-EX-NEXT: [[TMP6:%.*]] = load i16, ptr [[DOTADDR2]], align 2
1987 // CHECK-32-EX-NEXT: [[TMP7:%.*]] = load i16, ptr [[DOTADDR3]], align 2
1988 // CHECK-32-EX-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i32 0, i32 0
1989 // CHECK-32-EX-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 4
1990 // CHECK-32-EX-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 0
1991 // CHECK-32-EX-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[TMP9]], i32 1
1992 // CHECK-32-EX-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP9]], align 4
1993 // CHECK-32-EX-NEXT: [[TMP13:%.*]] = call i32 @__kmpc_get_warp_size()
1994 // CHECK-32-EX-NEXT: [[TMP14:%.*]] = trunc i32 [[TMP13]] to i16
1995 // CHECK-32-EX-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_shuffle_int32(i32 [[TMP12]], i16 [[TMP6]], i16 [[TMP14]])
1996 // CHECK-32-EX-NEXT: store i32 [[TMP15]], ptr [[DOTOMP_REDUCTION_ELEMENT]], align 4
1997 // CHECK-32-EX-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[TMP9]], i32 1
1998 // CHECK-32-EX-NEXT: [[TMP17:%.*]] = getelementptr i32, ptr [[DOTOMP_REDUCTION_ELEMENT]], i32 1
1999 // CHECK-32-EX-NEXT: store ptr [[DOTOMP_REDUCTION_ELEMENT]], ptr [[TMP10]], align 4
2000 // CHECK-32-EX-NEXT: [[TMP18:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i32 0, i32 1
2001 // CHECK-32-EX-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP18]], align 4
2002 // CHECK-32-EX-NEXT: [[TMP20:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 1
2003 // CHECK-32-EX-NEXT: [[TMP21:%.*]] = getelementptr i16, ptr [[TMP19]], i32 1
2004 // CHECK-32-EX-NEXT: [[TMP22:%.*]] = load i16, ptr [[TMP19]], align 2
2005 // CHECK-32-EX-NEXT: [[TMP23:%.*]] = sext i16 [[TMP22]] to i32
2006 // CHECK-32-EX-NEXT: [[TMP24:%.*]] = call i32 @__kmpc_get_warp_size()
2007 // CHECK-32-EX-NEXT: [[TMP25:%.*]] = trunc i32 [[TMP24]] to i16
2008 // CHECK-32-EX-NEXT: [[TMP26:%.*]] = call i32 @__kmpc_shuffle_int32(i32 [[TMP23]], i16 [[TMP6]], i16 [[TMP25]])
2009 // CHECK-32-EX-NEXT: [[TMP27:%.*]] = trunc i32 [[TMP26]] to i16
2010 // CHECK-32-EX-NEXT: store i16 [[TMP27]], ptr [[DOTOMP_REDUCTION_ELEMENT4]], align 2
2011 // CHECK-32-EX-NEXT: [[TMP28:%.*]] = getelementptr i16, ptr [[TMP19]], i32 1
2012 // CHECK-32-EX-NEXT: [[TMP29:%.*]] = getelementptr i16, ptr [[DOTOMP_REDUCTION_ELEMENT4]], i32 1
2013 // CHECK-32-EX-NEXT: store ptr [[DOTOMP_REDUCTION_ELEMENT4]], ptr [[TMP20]], align 4
2014 // CHECK-32-EX-NEXT: [[TMP30:%.*]] = icmp eq i16 [[TMP7]], 0
2015 // CHECK-32-EX-NEXT: [[TMP31:%.*]] = icmp eq i16 [[TMP7]], 1
2016 // CHECK-32-EX-NEXT: [[TMP32:%.*]] = icmp ult i16 [[TMP5]], [[TMP6]]
2017 // CHECK-32-EX-NEXT: [[TMP33:%.*]] = and i1 [[TMP31]], [[TMP32]]
2018 // CHECK-32-EX-NEXT: [[TMP34:%.*]] = icmp eq i16 [[TMP7]], 2
2019 // CHECK-32-EX-NEXT: [[TMP35:%.*]] = and i16 [[TMP5]], 1
2020 // CHECK-32-EX-NEXT: [[TMP36:%.*]] = icmp eq i16 [[TMP35]], 0
2021 // CHECK-32-EX-NEXT: [[TMP37:%.*]] = and i1 [[TMP34]], [[TMP36]]
2022 // CHECK-32-EX-NEXT: [[TMP38:%.*]] = icmp sgt i16 [[TMP6]], 0
2023 // CHECK-32-EX-NEXT: [[TMP39:%.*]] = and i1 [[TMP37]], [[TMP38]]
2024 // CHECK-32-EX-NEXT: [[TMP40:%.*]] = or i1 [[TMP30]], [[TMP33]]
2025 // CHECK-32-EX-NEXT: [[TMP41:%.*]] = or i1 [[TMP40]], [[TMP39]]
2026 // CHECK-32-EX-NEXT: br i1 [[TMP41]], label [[THEN:%.*]], label [[ELSE:%.*]]
2027 // CHECK-32-EX: then:
2028 // CHECK-32-EX-NEXT: call void @"{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l35_omp_outlined_omp$reduction$reduction_func"(ptr [[TMP4]], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]]) #[[ATTR3]]
2029 // CHECK-32-EX-NEXT: br label [[IFCONT:%.*]]
2030 // CHECK-32-EX: else:
2031 // CHECK-32-EX-NEXT: br label [[IFCONT]]
2032 // CHECK-32-EX: ifcont:
2033 // CHECK-32-EX-NEXT: [[TMP42:%.*]] = icmp eq i16 [[TMP7]], 1
2034 // CHECK-32-EX-NEXT: [[TMP43:%.*]] = icmp uge i16 [[TMP5]], [[TMP6]]
2035 // CHECK-32-EX-NEXT: [[TMP44:%.*]] = and i1 [[TMP42]], [[TMP43]]
2036 // CHECK-32-EX-NEXT: br i1 [[TMP44]], label [[THEN5:%.*]], label [[ELSE6:%.*]]
2037 // CHECK-32-EX: then5:
2038 // CHECK-32-EX-NEXT: [[TMP45:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 0
2039 // CHECK-32-EX-NEXT: [[TMP46:%.*]] = load ptr, ptr [[TMP45]], align 4
2040 // CHECK-32-EX-NEXT: [[TMP47:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i32 0, i32 0
2041 // CHECK-32-EX-NEXT: [[TMP48:%.*]] = load ptr, ptr [[TMP47]], align 4
2042 // CHECK-32-EX-NEXT: [[TMP49:%.*]] = load i32, ptr [[TMP46]], align 4
2043 // CHECK-32-EX-NEXT: store i32 [[TMP49]], ptr [[TMP48]], align 4
2044 // CHECK-32-EX-NEXT: [[TMP50:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 1
2045 // CHECK-32-EX-NEXT: [[TMP51:%.*]] = load ptr, ptr [[TMP50]], align 4
2046 // CHECK-32-EX-NEXT: [[TMP52:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i32 0, i32 1
2047 // CHECK-32-EX-NEXT: [[TMP53:%.*]] = load ptr, ptr [[TMP52]], align 4
2048 // CHECK-32-EX-NEXT: [[TMP54:%.*]] = load i16, ptr [[TMP51]], align 2
2049 // CHECK-32-EX-NEXT: store i16 [[TMP54]], ptr [[TMP53]], align 2
2050 // CHECK-32-EX-NEXT: br label [[IFCONT7:%.*]]
2051 // CHECK-32-EX: else6:
2052 // CHECK-32-EX-NEXT: br label [[IFCONT7]]
2053 // CHECK-32-EX: ifcont7:
2054 // CHECK-32-EX-NEXT: ret void
2057 // CHECK-32-EX-LABEL: define {{[^@]+}}@_omp_reduction_inter_warp_copy_func4
2058 // CHECK-32-EX-SAME: (ptr noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]]) #[[ATTR2]] {
2059 // CHECK-32-EX-NEXT: entry:
2060 // CHECK-32-EX-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
2061 // CHECK-32-EX-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
2062 // CHECK-32-EX-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
2063 // CHECK-32-EX-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
2064 // CHECK-32-EX-NEXT: store i32 [[TMP1]], ptr [[DOTADDR1]], align 4
2065 // CHECK-32-EX-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
2066 // CHECK-32-EX-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
2067 // CHECK-32-EX-NEXT: [[NVPTX_LANE_ID:%.*]] = and i32 [[TMP4]], 31
2068 // CHECK-32-EX-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
2069 // CHECK-32-EX-NEXT: [[NVPTX_WARP_ID:%.*]] = ashr i32 [[TMP5]], 5
2070 // CHECK-32-EX-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTADDR]], align 4
2071 // CHECK-32-EX-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
2072 // CHECK-32-EX-NEXT: [[WARP_MASTER:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
2073 // CHECK-32-EX-NEXT: br i1 [[WARP_MASTER]], label [[THEN:%.*]], label [[ELSE:%.*]]
2074 // CHECK-32-EX: then:
2075 // CHECK-32-EX-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i32 0, i32 0
2076 // CHECK-32-EX-NEXT: [[TMP8:%.*]] = load ptr, ptr [[TMP7]], align 4
2077 // CHECK-32-EX-NEXT: [[TMP9:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
2078 // CHECK-32-EX-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP8]], align 4
2079 // CHECK-32-EX-NEXT: store volatile i32 [[TMP10]], ptr addrspace(3) [[TMP9]], align 4
2080 // CHECK-32-EX-NEXT: br label [[IFCONT:%.*]]
2081 // CHECK-32-EX: else:
2082 // CHECK-32-EX-NEXT: br label [[IFCONT]]
2083 // CHECK-32-EX: ifcont:
2084 // CHECK-32-EX-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
2085 // CHECK-32-EX-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTADDR1]], align 4
2086 // CHECK-32-EX-NEXT: [[IS_ACTIVE_THREAD:%.*]] = icmp ult i32 [[TMP3]], [[TMP11]]
2087 // CHECK-32-EX-NEXT: br i1 [[IS_ACTIVE_THREAD]], label [[THEN2:%.*]], label [[ELSE3:%.*]]
2088 // CHECK-32-EX: then2:
2089 // CHECK-32-EX-NEXT: [[TMP12:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
2090 // CHECK-32-EX-NEXT: [[TMP13:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i32 0, i32 0
2091 // CHECK-32-EX-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP13]], align 4
2092 // CHECK-32-EX-NEXT: [[TMP15:%.*]] = load volatile i32, ptr addrspace(3) [[TMP12]], align 4
2093 // CHECK-32-EX-NEXT: store i32 [[TMP15]], ptr [[TMP14]], align 4
2094 // CHECK-32-EX-NEXT: br label [[IFCONT4:%.*]]
2095 // CHECK-32-EX: else3:
2096 // CHECK-32-EX-NEXT: br label [[IFCONT4]]
2097 // CHECK-32-EX: ifcont4:
2098 // CHECK-32-EX-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
2099 // CHECK-32-EX-NEXT: [[WARP_MASTER5:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
2100 // CHECK-32-EX-NEXT: br i1 [[WARP_MASTER5]], label [[THEN6:%.*]], label [[ELSE7:%.*]]
2101 // CHECK-32-EX: then6:
2102 // CHECK-32-EX-NEXT: [[TMP16:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i32 0, i32 1
2103 // CHECK-32-EX-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 4
2104 // CHECK-32-EX-NEXT: [[TMP18:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
2105 // CHECK-32-EX-NEXT: [[TMP19:%.*]] = load i16, ptr [[TMP17]], align 2
2106 // CHECK-32-EX-NEXT: store volatile i16 [[TMP19]], ptr addrspace(3) [[TMP18]], align 2
2107 // CHECK-32-EX-NEXT: br label [[IFCONT8:%.*]]
2108 // CHECK-32-EX: else7:
2109 // CHECK-32-EX-NEXT: br label [[IFCONT8]]
2110 // CHECK-32-EX: ifcont8:
2111 // CHECK-32-EX-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
2112 // CHECK-32-EX-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTADDR1]], align 4
2113 // CHECK-32-EX-NEXT: [[IS_ACTIVE_THREAD9:%.*]] = icmp ult i32 [[TMP3]], [[TMP20]]
2114 // CHECK-32-EX-NEXT: br i1 [[IS_ACTIVE_THREAD9]], label [[THEN10:%.*]], label [[ELSE11:%.*]]
2115 // CHECK-32-EX: then10:
2116 // CHECK-32-EX-NEXT: [[TMP21:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
2117 // CHECK-32-EX-NEXT: [[TMP22:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i32 0, i32 1
2118 // CHECK-32-EX-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP22]], align 4
2119 // CHECK-32-EX-NEXT: [[TMP24:%.*]] = load volatile i16, ptr addrspace(3) [[TMP21]], align 2
2120 // CHECK-32-EX-NEXT: store i16 [[TMP24]], ptr [[TMP23]], align 2
2121 // CHECK-32-EX-NEXT: br label [[IFCONT12:%.*]]
2122 // CHECK-32-EX: else11:
2123 // CHECK-32-EX-NEXT: br label [[IFCONT12]]
2124 // CHECK-32-EX: ifcont12:
2125 // CHECK-32-EX-NEXT: ret void