// Test code-gen for `omp.parallel` ops with delayed privatizers (i.e. using
// `omp.private` ops).

// RUN: mlir-translate -mlir-to-llvmir -split-input-file %s | FileCheck %s
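// Note: delayed privatization defers materializing private copies out of the
// frontend and into this translation: the `private` clause on each
// `omp.parallel` below references an `omp.private` privatizer op, and the
// privatizer's region is inlined into the outlined parallel function while
// lowering to LLVM IR.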
llvm.func @parallel_op_1_private(%arg0: !llvm.ptr) {
  omp.parallel private(@x.privatizer %arg0 -> %arg2 : !llvm.ptr) {
    %0 = llvm.load %arg2 : !llvm.ptr -> f32
    omp.terminator
  }
  llvm.return
}
// CHECK-LABEL: @parallel_op_1_private
// CHECK-SAME: (ptr %[[ORIG:.*]]) {
// CHECK: %[[OMP_PAR_ARG:.*]] = alloca { ptr }, align 8
// CHECK: %[[ORIG_GEP:.*]] = getelementptr { ptr }, ptr %[[OMP_PAR_ARG]], i32 0, i32 0
// CHECK: store ptr %[[ORIG]], ptr %[[ORIG_GEP]], align 8
// CHECK: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @1, i32 1, ptr @parallel_op_1_private..omp_par, ptr %[[OMP_PAR_ARG]])

// CHECK-LABEL: void @parallel_op_1_private..omp_par
// CHECK-SAME: (ptr noalias %{{.*}}, ptr noalias %{{.*}}, ptr %[[ARG:.*]])
// CHECK: %[[ORIG_PTR_PTR:.*]] = getelementptr { ptr }, ptr %[[ARG]], i32 0, i32 0
// CHECK: %[[ORIG_PTR:.*]] = load ptr, ptr %[[ORIG_PTR_PTR]], align 8

// Check that the privatizer alloc region was inlined properly.
// CHECK: %[[PRIV_ALLOC:.*]] = alloca float, align 4
// CHECK: %[[ORIG_VAL:.*]] = load float, ptr %[[ORIG_PTR]], align 4
// CHECK: store float %[[ORIG_VAL]], ptr %[[PRIV_ALLOC]], align 4

// Check that the privatized value is used (rather than the original one).
// CHECK: load float, ptr %[[PRIV_ALLOC]], align 4
llvm.func @parallel_op_2_privates(%arg0: !llvm.ptr, %arg1: !llvm.ptr) {
  omp.parallel private(@x.privatizer %arg0 -> %arg2, @y.privatizer %arg1 -> %arg3 : !llvm.ptr, !llvm.ptr) {
    %0 = llvm.load %arg2 : !llvm.ptr -> f32
    %1 = llvm.load %arg3 : !llvm.ptr -> i32
    omp.terminator
  }
  llvm.return
}
// CHECK-LABEL: @parallel_op_2_privates
// CHECK-SAME: (ptr %[[ORIG1:.*]], ptr %[[ORIG2:.*]]) {
// CHECK: %[[OMP_PAR_ARG:.*]] = alloca { ptr, ptr }, align 8
// CHECK: %[[ORIG1_GEP:.*]] = getelementptr { ptr, ptr }, ptr %[[OMP_PAR_ARG]], i32 0, i32 0
// CHECK: store ptr %[[ORIG1]], ptr %[[ORIG1_GEP]], align 8
// CHECK: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @1, i32 1, ptr @parallel_op_2_privates..omp_par, ptr %[[OMP_PAR_ARG]])

// CHECK-LABEL: void @parallel_op_2_privates..omp_par
// CHECK-SAME: (ptr noalias %{{.*}}, ptr noalias %{{.*}}, ptr %[[ARG:.*]])
// CHECK: %[[ORIG1_PTR_PTR:.*]] = getelementptr { ptr, ptr }, ptr %[[ARG]], i32 0, i32 0
// CHECK: %[[ORIG1_PTR:.*]] = load ptr, ptr %[[ORIG1_PTR_PTR]], align 8
// CHECK: %[[ORIG2_PTR_PTR:.*]] = getelementptr { ptr, ptr }, ptr %[[ARG]], i32 0, i32 1
// CHECK: %[[ORIG2_PTR:.*]] = load ptr, ptr %[[ORIG2_PTR_PTR]], align 8

// Check that the privatizer alloc region was inlined properly.
// CHECK: %[[PRIV1_ALLOC:.*]] = alloca float, align 4
// CHECK: %[[ORIG1_VAL:.*]] = load float, ptr %[[ORIG1_PTR]], align 4
// CHECK: store float %[[ORIG1_VAL]], ptr %[[PRIV1_ALLOC]], align 4
// CHECK: %[[PRIV2_ALLOC:.*]] = alloca i32, align 4
// CHECK: %[[ORIG2_VAL:.*]] = load i32, ptr %[[ORIG2_PTR]], align 4
// CHECK: store i32 %[[ORIG2_VAL]], ptr %[[PRIV2_ALLOC]], align 4

// Check that the privatized value is used (rather than the original one).
// CHECK: load float, ptr %[[PRIV1_ALLOC]], align 4
// CHECK: load i32, ptr %[[PRIV2_ALLOC]], align 4
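// Privatizers shared by the two tests above. Each `alloc` region receives the
// address of the original variable and yields the address of a freshly
// allocated private copy, initialized from the original value.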
omp.private {type = private} @x.privatizer : !llvm.ptr alloc {
^bb0(%arg0: !llvm.ptr):
  %c1 = llvm.mlir.constant(1 : i32) : i32
  %0 = llvm.alloca %c1 x f32 : (i32) -> !llvm.ptr
  %1 = llvm.load %arg0 : !llvm.ptr -> f32
  llvm.store %1, %0 : f32, !llvm.ptr
  omp.yield(%0 : !llvm.ptr)
}

omp.private {type = private} @y.privatizer : !llvm.ptr alloc {
^bb0(%arg0: !llvm.ptr):
  %c1 = llvm.mlir.constant(1 : i32) : i32
  %0 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr
  %1 = llvm.load %arg0 : !llvm.ptr -> i32
  llvm.store %1, %0 : i32, !llvm.ptr
  omp.yield(%0 : !llvm.ptr)
}

// -----
llvm.func @parallel_op_private_multi_block(%arg0: !llvm.ptr) {
  omp.parallel private(@multi_block.privatizer %arg0 -> %arg2 : !llvm.ptr) {
    %0 = llvm.load %arg2 : !llvm.ptr -> f32
    omp.terminator
  }
  llvm.return
}
// CHECK-LABEL: define internal void @parallel_op_private_multi_block..omp_par
// CHECK: omp.par.entry:
// CHECK: %[[ORIG_PTR_PTR:.*]] = getelementptr { ptr }, ptr %{{.*}}, i32 0, i32 0
// CHECK: %[[ORIG_PTR:.*]] = load ptr, ptr %[[ORIG_PTR_PTR]], align 8
// CHECK: br label %omp.private.latealloc

// CHECK: omp.private.latealloc:
// CHECK: br label %[[PRIV_BB1:.*]]

// Check contents of the first block in the `alloc` region.
// CHECK: [[PRIV_BB1]]:
// CHECK-NEXT: %[[PRIV_ALLOC:.*]] = alloca float, align 4
// CHECK-NEXT: br label %[[PRIV_BB2:.*]]

// Check contents of the second block in the `alloc` region.
// CHECK: [[PRIV_BB2]]:
// CHECK-NEXT: %[[ORIG_PTR2:.*]] = phi ptr [ %[[ORIG_PTR]], %[[PRIV_BB1]] ]
// CHECK-NEXT: %[[PRIV_ALLOC2:.*]] = phi ptr [ %[[PRIV_ALLOC]], %[[PRIV_BB1]] ]
// CHECK-NEXT: %[[ORIG_VAL:.*]] = load float, ptr %[[ORIG_PTR2]], align 4
// CHECK-NEXT: store float %[[ORIG_VAL]], ptr %[[PRIV_ALLOC2]], align 4
// CHECK-NEXT: br label %[[PRIV_CONT:.*]]

// Check that the privatizer's continuation block yields the private clone's
// address.
// CHECK: [[PRIV_CONT]]:
// CHECK-NEXT: %[[PRIV_ALLOC3:.*]] = phi ptr [ %[[PRIV_ALLOC2]], %[[PRIV_BB2]] ]
// CHECK-NEXT: br label %[[PAR_REG:.*]]

// Check that the body of the parallel region loads from the private clone.
// CHECK: [[PAR_REG]]:
// CHECK: %{{.*}} = load float, ptr %[[PRIV_ALLOC3]], align 4
omp.private {type = private} @multi_block.privatizer : !llvm.ptr alloc {
^bb0(%arg0: !llvm.ptr):
  %c1 = llvm.mlir.constant(1 : i32) : i32
  %0 = llvm.alloca %c1 x f32 : (i32) -> !llvm.ptr
  llvm.br ^bb1(%arg0, %0 : !llvm.ptr, !llvm.ptr)

^bb1(%arg1: !llvm.ptr, %arg2: !llvm.ptr):
  %1 = llvm.load %arg1 : !llvm.ptr -> f32
  llvm.store %1, %arg2 : f32, !llvm.ptr
  omp.yield(%arg2 : !llvm.ptr)
}
// Tests the fix for Fujitsu test suite test 0007_0019.f90: the
// `llvm.mlir.addressof` op needs access to its parent module when lowering
// from the LLVM dialect to LLVM IR. If such an op is used inside an
// `omp.private` op instance that was not created/cloned inside the module, we
// would get a segfault from trying to access a null pointer.
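// The privatizer below exercises exactly that: its `alloc` region takes the
// address of the global `@_QQfoo`, which can only be resolved when the cloned
// `omp.private` op lives inside the module being translated.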
// CHECK-LABEL: define internal void @lower_region_with_addressof..omp_par
// CHECK: omp.par.region:
// CHECK: br label %[[PAR_REG_BEG:.*]]
// CHECK: [[PAR_REG_BEG]]:
// CHECK: call void @bar(ptr getelementptr (double, ptr @_QQfoo, i64 111))
// CHECK: call void @bar(ptr getelementptr (double, ptr @_QQfoo, i64 222))
llvm.func @lower_region_with_addressof() {
  %0 = llvm.mlir.constant(1 : i64) : i64
  %1 = llvm.alloca %0 x f64 {bindc_name = "u1"} : (i64) -> !llvm.ptr
  omp.parallel private(@_QFlower_region_with_addressof_privatizer %1 -> %arg0 : !llvm.ptr) {
    %c0 = llvm.mlir.constant(111 : i64) : i64
    %2 = llvm.getelementptr %arg0[%c0] : (!llvm.ptr, i64) -> !llvm.ptr, f64
    llvm.call @bar(%2) : (!llvm.ptr) -> ()

    %c1 = llvm.mlir.constant(222 : i64) : i64
    %3 = llvm.mlir.addressof @_QQfoo : !llvm.ptr
    %4 = llvm.getelementptr %3[%c1] : (!llvm.ptr, i64) -> !llvm.ptr, f64
    llvm.call @bar(%4) : (!llvm.ptr) -> ()
    omp.terminator
  }
  llvm.return
}
omp.private {type = private} @_QFlower_region_with_addressof_privatizer : !llvm.ptr alloc {
^bb0(%arg0: !llvm.ptr):
  %0 = llvm.mlir.addressof @_QQfoo : !llvm.ptr
  omp.yield(%0 : !llvm.ptr)
}

llvm.mlir.global linkonce constant @_QQfoo() {addr_space = 0 : i32} : !llvm.array<3 x i8> {
  %0 = llvm.mlir.constant("foo") : !llvm.array<3 x i8>
  llvm.return %0 : !llvm.array<3 x i8>
}

llvm.func @bar(!llvm.ptr)

// -----
// Tests the fix for Fujitsu test suite test 0275_0032.f90. The MLIR-to-LLVM-IR
// translation logic assumed that reduction arguments to an `omp.parallel` op
// are always the last set of arguments to the op. That assumption is wrong
// since private args come afterwards. This test verifies the fix that accesses
// the different sets of args properly.
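// In the `omp.parallel` op below, the region's entry-block arguments are thus
// ordered with the reduction argument (%arg0) before the private argument
// (%arg1), even though the `private` clause is written first; the CHECK-DAG
// lines verify that each argument gets its own allocation.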
// CHECK-LABEL: define internal void @private_and_reduction_..omp_par
// CHECK-DAG: %[[PRV_ALLOC:.*]] = alloca float, i64 1, align 4
// CHECK-DAG: %[[RED_ALLOC:.*]] = alloca { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]] }, i64 1, align 8

// CHECK: omp.par.region:
// CHECK: br label %[[PAR_REG_BEG:.*]]
// CHECK: [[PAR_REG_BEG]]:
// CHECK-NEXT: %{{.*}} = load { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]] }, ptr %[[RED_ALLOC]], align 8
// CHECK-NEXT: store float 8.000000e+00, ptr %[[PRV_ALLOC]], align 4
llvm.func @private_and_reduction_() attributes {fir.internal_name = "_QPprivate_and_reduction", frame_pointer = #llvm.framePointerKind<all>, target_cpu = "x86-64"} {
  %0 = llvm.mlir.constant(1 : i64) : i64
  %1 = llvm.alloca %0 x !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> : (i64) -> !llvm.ptr
  %2 = llvm.alloca %0 x f32 {bindc_name = "to_priv"} : (i64) -> !llvm.ptr
  omp.parallel private(@privatizer.part %2 -> %arg1 : !llvm.ptr) reduction(byref @reducer.part %1 -> %arg0 : !llvm.ptr) {
    %3 = llvm.load %arg0 : !llvm.ptr -> !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
    %4 = llvm.mlir.constant(8.000000e+00 : f32) : f32
    llvm.store %4, %arg1 : f32, !llvm.ptr
    omp.terminator
  }
  llvm.return
}
omp.private {type = private} @privatizer.part : !llvm.ptr alloc {
^bb0(%arg0: !llvm.ptr):
  %0 = llvm.mlir.constant(1 : i64) : i64
  %1 = llvm.alloca %0 x f32 {bindc_name = "to_priv", pinned} : (i64) -> !llvm.ptr
  omp.yield(%1 : !llvm.ptr)
}

omp.declare_reduction @reducer.part : !llvm.ptr alloc {
  %0 = llvm.mlir.constant(1 : i64) : i64
  %1 = llvm.alloca %0 x !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> : (i64) -> !llvm.ptr
  omp.yield(%1 : !llvm.ptr)
} init {
^bb0(%mold: !llvm.ptr, %alloc: !llvm.ptr):
  omp.yield(%alloc : !llvm.ptr)
} combiner {
^bb0(%arg0: !llvm.ptr, %arg1: !llvm.ptr):
  omp.yield(%arg0 : !llvm.ptr)
} cleanup {
^bb0(%arg0: !llvm.ptr):
  omp.yield
}

// -----
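// Tests delayed firstprivatization of a value that is addressed through a GEP
// into a raw byte array (as produced for Fortran EQUIVALENCE storage): the
// host value is read through the computed address and copied into the private
// allocation.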
llvm.func @_QPequivalence() {
  %0 = llvm.mlir.constant(1 : i64) : i64
  %1 = llvm.alloca %0 x !llvm.array<4 x i8> : (i64) -> !llvm.ptr
  %2 = llvm.mlir.constant(0 : index) : i64
  %3 = llvm.getelementptr %1[0, %2] : (!llvm.ptr, i64) -> !llvm.ptr, !llvm.array<4 x i8>
  omp.parallel private(@_QFequivalenceEx_firstprivate_ptr_f32 %3 -> %arg0 : !llvm.ptr) {
    %4 = llvm.mlir.constant(3.140000e+00 : f32) : f32
    llvm.store %4, %arg0 : f32, !llvm.ptr
    omp.terminator
  }
  llvm.return
}
omp.private {type = firstprivate} @_QFequivalenceEx_firstprivate_ptr_f32 : !llvm.ptr alloc {
^bb0(%arg0: !llvm.ptr):
  %0 = llvm.mlir.constant(1 : i64) : i64
  %1 = llvm.alloca %0 x f32 {bindc_name = "x", pinned} : (i64) -> !llvm.ptr
  omp.yield(%1 : !llvm.ptr)
} copy {
^bb0(%arg0: !llvm.ptr, %arg1: !llvm.ptr):
  %0 = llvm.load %arg0 : !llvm.ptr -> f32
  llvm.store %0, %arg1 : f32, !llvm.ptr
  omp.yield(%arg1 : !llvm.ptr)
}
// CHECK: define internal void @_QPequivalence..omp_par
// CHECK-NOT: define {{.*}} @{{.*}}
// CHECK: %[[PRIV_ALLOC:.*]] = alloca float, i64 1, align 4
// CHECK: %[[HOST_VAL:.*]] = load float, ptr %{{.*}}, align 4
// Test that we initialize the firstprivate variable.
// CHECK: store float %[[HOST_VAL]], ptr %[[PRIV_ALLOC]], align 4
// Test that we inlined the body of the parallel region.
// CHECK: store float 0x{{.*}}, ptr %[[PRIV_ALLOC]], align 4