1 // RUN: mlir-opt -convert-openmp-to-llvm -split-input-file %s | FileCheck %s
2 // RUN: mlir-opt -convert-to-llvm -split-input-file %s | FileCheck %s
// A private func declaration taking `index` arguments must lower to an
// llvm.func taking `i64` (index is converted to the target word size).
4 // CHECK-LABEL: llvm.func @foo(i64, i64)
5 func.func private @foo(index, index)
// Region entry-block arguments inside an omp.critical region are type-converted
// (index -> i64) together with the call that consumes them.
// NOTE(review): interior lines of this test (the omp.critical wrapper and the
// terminators) are elided in this view of the file.
7 // CHECK-LABEL: llvm.func @critical_block_arg
8 func.func @critical_block_arg() {
11 // CHECK-NEXT: ^[[BB0:.*]](%[[ARG1:.*]]: i64, %[[ARG2:.*]]: i64):
12 ^bb0(%arg1: index, %arg2: index):
13 // CHECK-NEXT: llvm.call @foo(%[[ARG1]], %[[ARG2]]) : (i64, i64) -> ()
14 func.call @foo(%arg1, %arg2) : (index, index) -> ()
// omp.critical.declare and its hint clause must survive the conversion
// unchanged, and omp.critical must keep referring to the declared mutex.
22 // CHECK: omp.critical.declare @[[MUTEX:.*]] hint(contended, speculative)
23 omp.critical.declare @mutex hint(contended, speculative)
25 // CHECK: llvm.func @critical_declare
26 func.func @critical_declare() {
27 // CHECK: omp.critical(@[[MUTEX]])
28 omp.critical(@mutex) {
// Block arguments in an omp.master region are converted to i64; uses that
// still require `index` get unrealized_conversion_casts back to index.
36 // CHECK-LABEL: llvm.func @master_block_arg
37 func.func @master_block_arg() {
40 // CHECK-NEXT: ^[[BB0:.*]](%[[ARG1:.*]]: i64, %[[ARG2:.*]]: i64):
41 ^bb0(%arg1: index, %arg2: index):
42 // CHECK-DAG: %[[CAST_ARG1:.*]] = builtin.unrealized_conversion_cast %[[ARG1]] : i64 to index
43 // CHECK-DAG: %[[CAST_ARG2:.*]] = builtin.unrealized_conversion_cast %[[ARG2]] : i64 to index
44 // CHECK-NEXT: "test.payload"(%[[CAST_ARG1]], %[[CAST_ARG2]]) : (index, index) -> ()
45 "test.payload"(%arg1, %arg2) : (index, index) -> ()
// Control-flow (cf.br / cf.cond_br) carrying index block arguments inside an
// omp.parallel region must convert to llvm.br / llvm.cond_br with i64
// arguments, and arith.cmpi must become llvm.icmp.
53 // CHECK-LABEL: llvm.func @branch_loop
54 func.func @branch_loop() {
55 %start = arith.constant 0 : index
56 %end = arith.constant 0 : index
57 // CHECK: omp.parallel
59 // CHECK-NEXT: llvm.br ^[[BB1:.*]](%{{[0-9]+}}, %{{[0-9]+}} : i64, i64
60 cf.br ^bb1(%start, %end : index, index)
61 // CHECK-NEXT: ^[[BB1]](%[[ARG1:[0-9]+]]: i64, %[[ARG2:[0-9]+]]: i64):{{.*}}
62 ^bb1(%0: index, %1: index):
63 // CHECK-NEXT: %[[CMP:[0-9]+]] = llvm.icmp "slt" %[[ARG1]], %[[ARG2]] : i64
64 %2 = arith.cmpi slt, %0, %1 : index
65 // CHECK-NEXT: llvm.cond_br %[[CMP]], ^[[BB2:.*]](%{{[0-9]+}}, %{{[0-9]+}} : i64, i64), ^[[BB3:.*]]
66 cf.cond_br %2, ^bb2(%end, %end : index, index), ^bb3
67 // CHECK-NEXT: ^[[BB2]](%[[ARG3:[0-9]+]]: i64, %[[ARG4:[0-9]+]]: i64):
68 ^bb2(%3: index, %4: index):
69 // CHECK-NEXT: llvm.br ^[[BB1]](%[[ARG3]], %[[ARG4]] : i64, i64)
70 cf.br ^bb1(%3, %4 : index, index)
71 // CHECK-NEXT: ^[[BB3]]:
// omp.wsloop / omp.loop_nest with index bounds: the loop bounds and induction
// variables convert to i64, and casts back to index feed the payload op.
84 // CHECK-LABEL: @wsloop
85 // CHECK: (%[[ARG0:.*]]: i64, %[[ARG1:.*]]: i64, %[[ARG2:.*]]: i64, %[[ARG3:.*]]: i64, %[[ARG4:.*]]: i64, %[[ARG5:.*]]: i64)
86 func.func @wsloop(%arg0: index, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: index) {
87 // CHECK: omp.parallel
89 // CHECK: omp.wsloop {
91 // CHECK: omp.loop_nest (%[[ARG6:.*]], %[[ARG7:.*]]) : i64 = (%[[ARG0]], %[[ARG1]]) to (%[[ARG2]], %[[ARG3]]) step (%[[ARG4]], %[[ARG5]]) {
92 omp.loop_nest (%arg6, %arg7) : index = (%arg0, %arg1) to (%arg2, %arg3) step (%arg4, %arg5) {
93 // CHECK-DAG: %[[CAST_ARG6:.*]] = builtin.unrealized_conversion_cast %[[ARG6]] : i64 to index
94 // CHECK-DAG: %[[CAST_ARG7:.*]] = builtin.unrealized_conversion_cast %[[ARG7]] : i64 to index
95 // CHECK: "test.payload"(%[[CAST_ARG6]], %[[CAST_ARG7]]) : (index, index) -> ()
96 "test.payload"(%arg6, %arg7) : (index, index) -> ()
// omp.atomic.write: arith.constant converts to llvm.mlir.constant; the
// expected output keeps memory_order but does not print hint(none).
107 // CHECK-LABEL: @atomic_write
108 // CHECK: (%[[ARG0:.*]]: !llvm.ptr)
109 // CHECK: %[[VAL0:.*]] = llvm.mlir.constant(1 : i32) : i32
110 // CHECK: omp.atomic.write %[[ARG0]] = %[[VAL0]] memory_order(relaxed) : !llvm.ptr, i32
111 func.func @atomic_write(%a: !llvm.ptr) -> () {
112 %1 = arith.constant 1 : i32
113 omp.atomic.write %a = %1 hint(none) memory_order(relaxed) : !llvm.ptr, i32
// omp.atomic.read: hint and memory_order clauses must be preserved
// (clause printing order may be normalized by the op's assembly format).
119 // CHECK-LABEL: @atomic_read
120 // CHECK: (%[[ARG0:.*]]: !llvm.ptr, %[[ARG1:.*]]: !llvm.ptr)
121 // CHECK: omp.atomic.read %[[ARG1]] = %[[ARG0]] hint(contended) memory_order(acquire) : !llvm.ptr
122 func.func @atomic_read(%a: !llvm.ptr, %b: !llvm.ptr) -> () {
123 omp.atomic.read %b = %a memory_order(acquire) hint(contended) : !llvm.ptr, !llvm.ptr, i32
// omp.atomic.update: arith ops inside the update region convert to their
// llvm dialect equivalents (arith.constant/arith.addi -> llvm constant/add).
// NOTE(review): the region's ^bb0 header, omp.yield, returns, and the global's
// terminator are elided in this view of the file.
129 func.func @atomic_update() {
130 %0 = llvm.mlir.addressof @_QFsEc : !llvm.ptr
131 omp.atomic.update %0 : !llvm.ptr {
133 %1 = arith.constant 1 : i32
134 %2 = arith.addi %arg0, %1 : i32
139 llvm.mlir.global internal @_QFsEc() : i32 {
140 %0 = arith.constant 10 : i32
144 // CHECK-LABEL: @atomic_update
145 // CHECK: %[[GLOBAL_VAR:.*]] = llvm.mlir.addressof @_QFsEc : !llvm.ptr
146 // CHECK: omp.atomic.update %[[GLOBAL_VAR]] : !llvm.ptr {
147 // CHECK: ^bb0(%[[IN_VAL:.*]]: i32):
148 // CHECK: %[[CONST_1:.*]] = llvm.mlir.constant(1 : i32) : i32
149 // CHECK: %[[OUT_VAL:.*]] = llvm.add %[[IN_VAL]], %[[CONST_1]] : i32
150 // CHECK: omp.yield(%[[OUT_VAL]] : i32)
// omp.threadprivate on an !llvm.ptr operand passes through unchanged.
155 // CHECK-LABEL: @threadprivate
156 // CHECK: (%[[ARG0:.*]]: !llvm.ptr)
157 // CHECK: %[[VAL0:.*]] = omp.threadprivate %[[ARG0]] : !llvm.ptr -> !llvm.ptr
158 func.func @threadprivate(%a: !llvm.ptr) -> () {
159 %1 = omp.threadprivate %a : !llvm.ptr -> !llvm.ptr
// Unstructured control flow (cf dialect) carrying index values inside an
// omp.loop_nest body converts to llvm branches over i64.
// NOTE(review): the enclosing omp construct and several basic-block headers
// are elided in this view of the file.
165 // CHECK: llvm.func @loop_nest_block_arg(%[[LOWER:.*]]: i32, %[[UPPER:.*]]: i32, %[[ITER:.*]]: i64) {
167 // CHECK-NEXT: omp.loop_nest (%[[ARG_0:.*]]) : i32 = (%[[LOWER]])
168 // CHECK-SAME: to (%[[UPPER]]) inclusive step (%[[LOWER]]) {
169 // CHECK: llvm.br ^[[BB1:.*]](%[[ITER]] : i64)
170 // CHECK: ^[[BB1]](%[[VAL_0:.*]]: i64):
171 // CHECK: %[[VAL_1:.*]] = llvm.icmp "slt" %[[VAL_0]], %[[ITER]] : i64
172 // CHECK: llvm.cond_br %[[VAL_1]], ^[[BB2:.*]], ^[[BB3:.*]]
174 // CHECK: %[[VAL_2:.*]] = llvm.add %[[VAL_0]], %[[ITER]] : i64
175 // CHECK: llvm.br ^[[BB1]](%[[VAL_2]] : i64)
178 func.func @loop_nest_block_arg(%val : i32, %ub : i32, %i : index) {
180 omp.loop_nest (%arg0) : i32 = (%val) to (%ub) inclusive step (%val) {
181 cf.br ^bb1(%i : index)
183 %1 = arith.cmpi slt, %0, %i : index
184 cf.cond_br %1, ^bb2, ^bb3
186 %2 = arith.addi %0, %i : index
187 cf.br ^bb1(%2 : index)
// omp.task with a depend clause on an !llvm.ptr operand is preserved.
197 // CHECK-LABEL: @task_depend
198 // CHECK: (%[[ARG0:.*]]: !llvm.ptr) {
199 // CHECK: omp.task depend(taskdependin -> %[[ARG0]] : !llvm.ptr) {
200 // CHECK: omp.terminator
202 // CHECK: llvm.return
205 func.func @task_depend(%arg0: !llvm.ptr) {
206 omp.task depend(taskdependin -> %arg0 : !llvm.ptr) {
// omp.target_enter_data / omp.target_exit_data with omp.map.info entries:
// all map clauses (to, from, always, delete, exit_release_or_enter_alloc)
// and capture kinds must pass through the conversion intact.
214 // CHECK-LABEL: @_QPomp_target_data
215 // CHECK: (%[[ARG0:.*]]: !llvm.ptr, %[[ARG1:.*]]: !llvm.ptr, %[[ARG2:.*]]: !llvm.ptr, %[[ARG3:.*]]: !llvm.ptr)
216 // CHECK: %[[MAP0:.*]] = omp.map.info var_ptr(%[[ARG0]] : !llvm.ptr, i32) map_clauses(to) capture(ByRef) -> !llvm.ptr {name = ""}
217 // CHECK: %[[MAP1:.*]] = omp.map.info var_ptr(%[[ARG1]] : !llvm.ptr, i32) map_clauses(to) capture(ByRef) -> !llvm.ptr {name = ""}
218 // CHECK: %[[MAP2:.*]] = omp.map.info var_ptr(%[[ARG2]] : !llvm.ptr, i32) map_clauses(always, exit_release_or_enter_alloc) capture(ByRef) -> !llvm.ptr {name = ""}
219 // CHECK: omp.target_enter_data map_entries(%[[MAP0]], %[[MAP1]], %[[MAP2]] : !llvm.ptr, !llvm.ptr, !llvm.ptr)
220 // CHECK: %[[MAP3:.*]] = omp.map.info var_ptr(%[[ARG0]] : !llvm.ptr, i32) map_clauses(from) capture(ByRef) -> !llvm.ptr {name = ""}
221 // CHECK: %[[MAP4:.*]] = omp.map.info var_ptr(%[[ARG1]] : !llvm.ptr, i32) map_clauses(from) capture(ByRef) -> !llvm.ptr {name = ""}
222 // CHECK: %[[MAP5:.*]] = omp.map.info var_ptr(%[[ARG2]] : !llvm.ptr, i32) map_clauses(exit_release_or_enter_alloc) capture(ByRef) -> !llvm.ptr {name = ""}
223 // CHECK: %[[MAP6:.*]] = omp.map.info var_ptr(%[[ARG3]] : !llvm.ptr, i32) map_clauses(always, delete) capture(ByRef) -> !llvm.ptr {name = ""}
224 // CHECK: omp.target_exit_data map_entries(%[[MAP3]], %[[MAP4]], %[[MAP5]], %[[MAP6]] : !llvm.ptr, !llvm.ptr, !llvm.ptr, !llvm.ptr)
226 llvm.func @_QPomp_target_data(%a : !llvm.ptr, %b : !llvm.ptr, %c : !llvm.ptr, %d : !llvm.ptr) {
227 %0 = omp.map.info var_ptr(%a : !llvm.ptr, i32) map_clauses(to) capture(ByRef) -> !llvm.ptr {name = ""}
228 %1 = omp.map.info var_ptr(%b : !llvm.ptr, i32) map_clauses(to) capture(ByRef) -> !llvm.ptr {name = ""}
229 %2 = omp.map.info var_ptr(%c : !llvm.ptr, i32) map_clauses(always, exit_release_or_enter_alloc) capture(ByRef) -> !llvm.ptr {name = ""}
230 omp.target_enter_data map_entries(%0, %1, %2 : !llvm.ptr, !llvm.ptr, !llvm.ptr) {}
231 %3 = omp.map.info var_ptr(%a : !llvm.ptr, i32) map_clauses(from) capture(ByRef) -> !llvm.ptr {name = ""}
232 %4 = omp.map.info var_ptr(%b : !llvm.ptr, i32) map_clauses(from) capture(ByRef) -> !llvm.ptr {name = ""}
233 %5 = omp.map.info var_ptr(%c : !llvm.ptr, i32) map_clauses(exit_release_or_enter_alloc) capture(ByRef) -> !llvm.ptr {name = ""}
234 %6 = omp.map.info var_ptr(%d : !llvm.ptr, i32) map_clauses(always, delete) capture(ByRef) -> !llvm.ptr {name = ""}
235 omp.target_exit_data map_entries(%3, %4, %5, %6 : !llvm.ptr, !llvm.ptr, !llvm.ptr, !llvm.ptr) {}
// omp.target_data with a region: the map entry survives and ops inside the
// region (already in the llvm dialect) are left as-is.
241 // CHECK-LABEL: @_QPomp_target_data_region
242 // CHECK: (%[[ARG0:.*]]: !llvm.ptr, %[[ARG1:.*]]: !llvm.ptr) {
243 // CHECK: %[[MAP_0:.*]] = omp.map.info var_ptr(%[[ARG0]] : !llvm.ptr, !llvm.array<1024 x i32>) map_clauses(tofrom) capture(ByRef) -> !llvm.ptr {name = ""}
244 // CHECK: omp.target_data map_entries(%[[MAP_0]] : !llvm.ptr) {
245 // CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(10 : i32) : i32
246 // CHECK: llvm.store %[[VAL_1]], %[[ARG1]] : i32, !llvm.ptr
247 // CHECK: omp.terminator
249 // CHECK: llvm.return
251 llvm.func @_QPomp_target_data_region(%a : !llvm.ptr, %i : !llvm.ptr) {
252 %1 = omp.map.info var_ptr(%a : !llvm.ptr, !llvm.array<1024 x i32>) map_clauses(tofrom) capture(ByRef) -> !llvm.ptr {name = ""}
253 omp.target_data map_entries(%1 : !llvm.ptr) {
254 %2 = llvm.mlir.constant(10 : i32) : i32
255 llvm.store %2, %i : i32, !llvm.ptr
// omp.target with thread_limit and map_entries: block arguments bound to the
// map entries (%arg0/%arg1) must be preserved through the conversion.
263 // CHECK-LABEL: llvm.func @_QPomp_target(
264 // CHECK: %[[ARG_0:.*]]: !llvm.ptr,
265 // CHECK: %[[ARG_1:.*]]: !llvm.ptr) {
266 // CHECK: %[[VAL_0:.*]] = llvm.mlir.constant(64 : i32) : i32
267 // CHECK: %[[MAP1:.*]] = omp.map.info var_ptr(%[[ARG_0]] : !llvm.ptr, !llvm.array<1024 x i32>) map_clauses(tofrom) capture(ByRef) -> !llvm.ptr {name = ""}
268 // CHECK: %[[MAP2:.*]] = omp.map.info var_ptr(%[[ARG_1]] : !llvm.ptr, i32) map_clauses(implicit, exit_release_or_enter_alloc) capture(ByCopy) -> !llvm.ptr {name = ""}
269 // CHECK: omp.target thread_limit(%[[VAL_0]] : i32) map_entries(%[[MAP1]] -> %[[BB_ARG0:.*]], %[[MAP2]] -> %[[BB_ARG1:.*]] : !llvm.ptr, !llvm.ptr) {
270 // CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(10 : i32) : i32
271 // CHECK: llvm.store %[[VAL_1]], %[[BB_ARG1]] : i32, !llvm.ptr
272 // CHECK: omp.terminator
274 // CHECK: llvm.return
277 llvm.func @_QPomp_target(%a : !llvm.ptr, %i : !llvm.ptr) {
278 %0 = llvm.mlir.constant(64 : i32) : i32
279 %1 = omp.map.info var_ptr(%a : !llvm.ptr, !llvm.array<1024 x i32>) map_clauses(tofrom) capture(ByRef) -> !llvm.ptr {name = ""}
280 %3 = omp.map.info var_ptr(%i : !llvm.ptr, i32) map_clauses(implicit, exit_release_or_enter_alloc) capture(ByCopy) -> !llvm.ptr {name = ""}
281 omp.target thread_limit(%0 : i32) map_entries(%1 -> %arg0, %3 -> %arg1 : !llvm.ptr, !llvm.ptr) {
282 %2 = llvm.mlir.constant(10 : i32) : i32
283 llvm.store %2, %arg1 : i32, !llvm.ptr
// omp.sections / omp.section containing an unstructured CFG loop: the llvm
// dialect branches inside the section pass through unchanged.
// NOTE(review): the llvm.func header for @_QPsb and several block headers are
// elided in this view of the file.
291 // CHECK-LABEL: @_QPsb
292 // CHECK: omp.sections
293 // CHECK: omp.section
296 // CHECK: llvm.cond_br
298 // CHECK: omp.terminator
299 // CHECK: omp.terminator
300 // CHECK: llvm.return
303 %0 = llvm.mlir.constant(0 : i64) : i64
304 %1 = llvm.mlir.constant(10 : i64) : i64
305 %2 = llvm.mlir.constant(1 : i64) : i64
308 llvm.br ^bb1(%1 : i64)
309 ^bb1(%3: i64): // 2 preds: ^bb0, ^bb2
310 %4 = llvm.icmp "sgt" %3, %0 : i64
311 llvm.cond_br %4, ^bb2, ^bb3
313 %5 = llvm.sub %3, %2 : i64
314 llvm.br ^bb1(%5 : i64)
// omp.declare_reduction (init + combiner regions) and a wsloop that uses it
// through a reduction clause: the declared reduction and the private
// accumulator (%prv) must both survive the conversion.
// NOTE(review): several region headers, yields, and terminators are elided in
// this view of the file.
325 // CHECK: omp.declare_reduction @eqv_reduction : i32 init
326 // CHECK: ^bb0(%{{.*}}: i32):
327 // CHECK: %[[TRUE:.*]] = llvm.mlir.constant(true) : i1
328 // CHECK: %[[TRUE_EXT:.*]] = llvm.zext %[[TRUE]] : i1 to i32
329 // CHECK: omp.yield(%[[TRUE_EXT]] : i32)
330 // CHECK: } combiner {
331 // CHECK: ^bb0(%[[ARG_1:.*]]: i32, %[[ARG_2:.*]]: i32):
332 // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i64) : i32
333 // CHECK: %[[CMP_1:.*]] = llvm.icmp "ne" %[[ARG_1]], %[[ZERO]] : i32
334 // CHECK: %[[CMP_2:.*]] = llvm.icmp "ne" %[[ARG_2]], %[[ZERO]] : i32
335 // CHECK: %[[COMBINE_VAL:.*]] = llvm.icmp "eq" %[[CMP_1]], %[[CMP_2]] : i1
336 // CHECK: %[[COMBINE_VAL_EXT:.*]] = llvm.zext %[[COMBINE_VAL]] : i1 to i32
337 // CHECK: omp.yield(%[[COMBINE_VAL_EXT]] : i32)
338 // CHECK-LABEL: @_QPsimple_reduction
339 // CHECK: %[[RED_ACCUMULATOR:.*]] = llvm.alloca %{{.*}} x i32 {bindc_name = "x", uniq_name = "_QFsimple_reductionEx"} : (i64) -> !llvm.ptr
340 // CHECK: omp.parallel
341 // CHECK: omp.wsloop reduction(@eqv_reduction %{{.+}} -> %[[PRV:.+]] : !llvm.ptr)
342 // CHECK-NEXT: omp.loop_nest {{.*}}{
343 // CHECK: %[[LPRV:.+]] = llvm.load %[[PRV]] : !llvm.ptr -> i32
344 // CHECK: %[[CMP:.+]] = llvm.icmp "eq" %{{.*}}, %[[LPRV]] : i32
345 // CHECK: %[[ZEXT:.+]] = llvm.zext %[[CMP]] : i1 to i32
346 // CHECK: llvm.store %[[ZEXT]], %[[PRV]] : i32, !llvm.ptr
348 // CHECK: omp.terminator
349 // CHECK: llvm.return
351 omp.declare_reduction @eqv_reduction : i32 init {
353 %0 = llvm.mlir.constant(true) : i1
354 %1 = llvm.zext %0 : i1 to i32
357 ^bb0(%arg0: i32, %arg1: i32):
358 %0 = llvm.mlir.constant(0 : i64) : i32
359 %1 = llvm.icmp "ne" %arg0, %0 : i32
360 %2 = llvm.icmp "ne" %arg1, %0 : i32
361 %3 = llvm.icmp "eq" %1, %2 : i1
362 %4 = llvm.zext %3 : i1 to i32
365 llvm.func @_QPsimple_reduction(%arg0: !llvm.ptr {fir.bindc_name = "y"}) {
366 %0 = llvm.mlir.constant(100 : i32) : i32
367 %1 = llvm.mlir.constant(1 : i32) : i32
368 %2 = llvm.mlir.constant(true) : i1
369 %3 = llvm.mlir.constant(1 : i64) : i64
370 %4 = llvm.alloca %3 x i32 {bindc_name = "x", uniq_name = "_QFsimple_reductionEx"} : (i64) -> !llvm.ptr
371 %5 = llvm.zext %2 : i1 to i32
372 llvm.store %5, %4 : i32, !llvm.ptr
374 %6 = llvm.alloca %3 x i32 {adapt.valuebyref, in_type = i32, operandSegmentSizes = array<i32: 0, 0>, pinned} : (i64) -> !llvm.ptr
375 omp.wsloop reduction(@eqv_reduction %4 -> %prv : !llvm.ptr) {
376 omp.loop_nest (%arg1) : i32 = (%1) to (%0) inclusive step (%1) {
377 llvm.store %arg1, %6 : i32, !llvm.ptr
378 %7 = llvm.load %6 : !llvm.ptr -> i32
379 %8 = llvm.sext %7 : i32 to i64
380 %9 = llvm.sub %8, %3 : i64
381 %10 = llvm.getelementptr %arg0[0, %9] : (!llvm.ptr, i64) -> !llvm.ptr, !llvm.array<100 x i32>
382 %11 = llvm.load %10 : !llvm.ptr -> i32
383 %12 = llvm.load %prv : !llvm.ptr -> i32
384 %13 = llvm.icmp "eq" %11, %12 : i32
385 %14 = llvm.zext %13 : i1 to i32
386 llvm.store %14, %prv : i32, !llvm.ptr
// omp.taskgroup containing an unstructured CFG loop and a call: the region
// body passes through and the terminator is kept.
// NOTE(review): the omp.taskgroup wrapper, some block headers, and the
// returns are elided in this view of the file.
397 // CHECK-LABEL: @_QQmain
398 llvm.func @_QQmain() {
399 %0 = llvm.mlir.constant(0 : index) : i64
400 %1 = llvm.mlir.constant(5 : index) : i64
401 %2 = llvm.mlir.constant(1 : index) : i64
402 %3 = llvm.mlir.constant(1 : i64) : i64
403 %4 = llvm.alloca %3 x i32 : (i64) -> !llvm.ptr
404 // CHECK: omp.taskgroup
406 %5 = llvm.trunc %2 : i64 to i32
407 llvm.br ^bb1(%5, %1 : i32, i64)
408 ^bb1(%6: i32, %7: i64): // 2 preds: ^bb0, ^bb2
409 %8 = llvm.icmp "sgt" %7, %0 : i64
410 llvm.cond_br %8, ^bb2, ^bb3
412 llvm.store %6, %4 : i32, !llvm.ptr
415 // CHECK: llvm.call @[[CALL_FUNC:.*]]({{.*}}) :
416 llvm.call @_QFPdo_work(%4) : (!llvm.ptr) -> ()
417 // CHECK: omp.terminator
420 %9 = llvm.load %4 : !llvm.ptr -> i32
421 %10 = llvm.add %9, %5 : i32
422 %11 = llvm.sub %7, %2 : i64
423 llvm.br ^bb1(%10, %11 : i32, i64)
425 llvm.store %6, %4 : i32, !llvm.ptr
426 // CHECK: omp.terminator
431 // CHECK: @[[CALL_FUNC]]
432 llvm.func @_QFPdo_work(%arg0: !llvm.ptr {fir.bindc_name = "i"}) {
// omp.ordered.region containing a CFG loop: arith ops inside the region
// (addi/subi) must be converted while the omp region structure is kept.
// NOTE(review): the llvm.func header for @sub_, the region wrapper, and
// several block headers are elided in this view of the file.
438 // CHECK-LABEL: @sub_
440 %0 = llvm.mlir.constant(0 : index) : i64
441 %1 = llvm.mlir.constant(1 : index) : i64
442 %2 = llvm.mlir.constant(1 : i64) : i64
443 %3 = llvm.alloca %2 x i32 {bindc_name = "i", in_type = i32, operandSegmentSizes = array<i32: 0, 0>, uniq_name = "_QFsubEi"} : (i64) -> !llvm.ptr
444 // CHECK: omp.ordered.region
446 %4 = llvm.trunc %1 : i64 to i32
447 llvm.br ^bb1(%4, %1 : i32, i64)
448 ^bb1(%5: i32, %6: i64): // 2 preds: ^bb0, ^bb2
449 %7 = llvm.icmp "sgt" %6, %0 : i64
450 llvm.cond_br %7, ^bb2, ^bb3
452 llvm.store %5, %3 : i32, !llvm.ptr
453 %8 = llvm.load %3 : !llvm.ptr -> i32
455 %9 = arith.addi %8, %4 : i32
457 %10 = arith.subi %6, %1 : i64
458 llvm.br ^bb1(%9, %10 : i32, i64)
460 llvm.store %5, %3 : i32, !llvm.ptr
461 // CHECK: omp.terminator
// omp.target with omp.map.bounds: bound/stride/start constants and the
// bounds attachment on each omp.map.info must survive the conversion.
469 // CHECK-LABEL: llvm.func @_QPtarget_map_with_bounds(
470 // CHECK: %[[ARG_0:.*]]: !llvm.ptr, %[[ARG_1:.*]]: !llvm.ptr, %[[ARG_2:.*]]: !llvm.ptr) {
471 // CHECK: %[[C_01:.*]] = llvm.mlir.constant(4 : index) : i64
472 // CHECK: %[[C_02:.*]] = llvm.mlir.constant(1 : index) : i64
473 // CHECK: %[[C_03:.*]] = llvm.mlir.constant(1 : index) : i64
474 // CHECK: %[[C_04:.*]] = llvm.mlir.constant(1 : index) : i64
475 // CHECK: %[[BOUNDS0:.*]] = omp.map.bounds lower_bound(%[[C_02]] : i64) upper_bound(%[[C_01]] : i64) stride(%[[C_04]] : i64) start_idx(%[[C_04]] : i64)
476 // CHECK: %[[MAP0:.*]] = omp.map.info var_ptr(%[[ARG_1]] : !llvm.ptr, !llvm.array<10 x i32>) map_clauses(tofrom) capture(ByRef) bounds(%[[BOUNDS0]]) -> !llvm.ptr {name = ""}
477 // CHECK: %[[C_11:.*]] = llvm.mlir.constant(4 : index) : i64
478 // CHECK: %[[C_12:.*]] = llvm.mlir.constant(1 : index) : i64
479 // CHECK: %[[C_13:.*]] = llvm.mlir.constant(1 : index) : i64
480 // CHECK: %[[C_14:.*]] = llvm.mlir.constant(1 : index) : i64
481 // CHECK: %[[BOUNDS1:.*]] = omp.map.bounds lower_bound(%[[C_12]] : i64) upper_bound(%[[C_11]] : i64) stride(%[[C_14]] : i64) start_idx(%[[C_14]] : i64)
482 // CHECK: %[[MAP1:.*]] = omp.map.info var_ptr(%[[ARG_2]] : !llvm.ptr, !llvm.array<10 x i32>) map_clauses(tofrom) capture(ByRef) bounds(%[[BOUNDS1]]) -> !llvm.ptr {name = ""}
483 // CHECK: omp.target map_entries(%[[MAP0]] -> %[[BB_ARG0:.*]], %[[MAP1]] -> %[[BB_ARG1:.*]] : !llvm.ptr, !llvm.ptr) {
484 // CHECK: omp.terminator
486 // CHECK: llvm.return
489 llvm.func @_QPtarget_map_with_bounds(%arg0: !llvm.ptr, %arg1: !llvm.ptr, %arg2: !llvm.ptr) {
490 %0 = llvm.mlir.constant(4 : index) : i64
491 %1 = llvm.mlir.constant(1 : index) : i64
492 %2 = llvm.mlir.constant(1 : index) : i64
493 %3 = llvm.mlir.constant(1 : index) : i64
494 %4 = omp.map.bounds lower_bound(%1 : i64) upper_bound(%0 : i64) stride(%3 : i64) start_idx(%3 : i64)
495 %5 = omp.map.info var_ptr(%arg1 : !llvm.ptr, !llvm.array<10 x i32>) map_clauses(tofrom) capture(ByRef) bounds(%4) -> !llvm.ptr {name = ""}
496 %6 = llvm.mlir.constant(4 : index) : i64
497 %7 = llvm.mlir.constant(1 : index) : i64
498 %8 = llvm.mlir.constant(1 : index) : i64
499 %9 = llvm.mlir.constant(1 : index) : i64
500 %10 = omp.map.bounds lower_bound(%7 : i64) upper_bound(%6 : i64) stride(%9 : i64) start_idx(%9 : i64)
501 %11 = omp.map.info var_ptr(%arg2 : !llvm.ptr, !llvm.array<10 x i32>) map_clauses(tofrom) capture(ByRef) bounds(%10) -> !llvm.ptr {name = ""}
502 omp.target map_entries(%5 -> %arg3, %11 -> %arg4: !llvm.ptr, !llvm.ptr) {
// omp.private declarations: the privatized type is converted (memref ->
// !llvm.struct descriptor, index -> i64) in the op's type, region block
// arguments, and yields.
// NOTE(review): some region headers and the separator between declarations
// are elided in this view of the file.
510 // CHECK: omp.private {type = private} @x.privatizer : !llvm.struct<{{.*}}> alloc {
511 omp.private {type = private} @x.privatizer : memref<?xf32> alloc {
512 // CHECK: ^bb0(%arg0: !llvm.struct<{{.*}}>):
513 ^bb0(%arg0: memref<?xf32>):
514 // CHECK: omp.yield(%arg0 : !llvm.struct<{{.*}}>)
515 omp.yield(%arg0 : memref<?xf32>)
520 // CHECK: omp.private {type = firstprivate} @y.privatizer : i64 alloc {
521 omp.private {type = firstprivate} @y.privatizer : index alloc {
522 // CHECK: ^bb0(%arg0: i64):
524 // CHECK: omp.yield(%arg0 : i64)
525 omp.yield(%arg0 : index)
528 // CHECK: ^bb0(%arg0: i64, %arg1: i64):
529 ^bb0(%arg0: index, %arg1: index):
530 // CHECK: omp.yield(%arg0 : i64)
531 omp.yield(%arg0 : index)
// omp.cancel and omp.cancellation_point pass through with their
// cancellation construct kind unchanged.
536 // CHECK-LABEL: llvm.func @omp_cancel_cancellation_point()
537 func.func @omp_cancel_cancellation_point() -> () {
539 // CHECK: omp.cancel cancellation_construct_type(parallel)
540 omp.cancel cancellation_construct_type(parallel)
541 // CHECK: omp.cancellation_point cancellation_construct_type(parallel)
542 omp.cancellation_point cancellation_construct_type(parallel)
// omp.distribute: the dist_schedule_chunk_size operand converts index -> i64.
550 // CHECK-LABEL: llvm.func @omp_distribute(
551 // CHECK-SAME: %[[ARG0:.*]]: i64)
552 func.func @omp_distribute(%arg0 : index) -> () {
553 // CHECK: omp.distribute dist_schedule_static dist_schedule_chunk_size(%[[ARG0]] : i64) {
554 omp.distribute dist_schedule_static dist_schedule_chunk_size(%arg0 : index) {
555 omp.loop_nest (%iv) : index = (%arg0) to (%arg0) step (%arg0) {
// omp.teams: memref operands of the allocate clause convert to the llvm
// memref-descriptor struct type.
564 // CHECK-LABEL: llvm.func @omp_teams(
565 // CHECK-SAME: %[[ARG0:.*]]: !llvm.ptr, %[[ARG1:.*]]: !llvm.ptr, %[[ARG2:.*]]: i64)
566 func.func @omp_teams(%arg0 : memref<i32>) -> () {
567 // CHECK: omp.teams allocate(%{{.*}} : !llvm.struct<(ptr, ptr, i64)> -> %{{.*}} : !llvm.struct<(ptr, ptr, i64)>)
568 omp.teams allocate(%arg0 : memref<i32> -> %arg0 : memref<i32>) {
// omp.ordered with depend_vec: the index depend operand converts to i64 and
// the doacross_num_loops attribute is preserved.
576 // CHECK-LABEL: llvm.func @omp_ordered(
577 // CHECK-SAME: %[[ARG0:.*]]: i64)
578 func.func @omp_ordered(%arg0 : index) -> () {
579 omp.wsloop ordered(1) {
580 omp.loop_nest (%iv) : index = (%arg0) to (%arg0) step (%arg0) {
581 // CHECK: omp.ordered depend_vec(%[[ARG0]] : i64) {doacross_num_loops = 1 : i64}
582 omp.ordered depend_vec(%arg0 : index) {doacross_num_loops = 1 : i64}
// omp.taskloop: the allocate clause's memref operands convert to the llvm
// struct type, the loop_nest converts index -> i64, and a cast back to index
// feeds the payload op.
591 // CHECK-LABEL: @omp_taskloop(
592 // CHECK-SAME: %[[ARG0:.*]]: i64, %[[ARG1:.*]]: !llvm.ptr, %[[ARG2:.*]]: !llvm.ptr, %[[ARG3:.*]]: i64)
593 func.func @omp_taskloop(%arg0: index, %arg1 : memref<i32>) {
594 // CHECK: omp.parallel {
596 // CHECK: omp.taskloop allocate(%{{.*}} : !llvm.struct<(ptr, ptr, i64)> -> %{{.*}} : !llvm.struct<(ptr, ptr, i64)>) {
597 omp.taskloop allocate(%arg1 : memref<i32> -> %arg1 : memref<i32>) {
598 // CHECK: omp.loop_nest (%[[IV:.*]]) : i64 = (%[[ARG0]]) to (%[[ARG0]]) step (%[[ARG0]]) {
599 omp.loop_nest (%iv) : index = (%arg0) to (%arg0) step (%arg0) {
600 // CHECK-DAG: %[[CAST_IV:.*]] = builtin.unrealized_conversion_cast %[[IV]] : i64 to index
601 // CHECK: "test.payload"(%[[CAST_IV]]) : (index) -> ()
602 "test.payload"(%iv) : (index) -> ()