1 /*---------------------------------------------------------------------------*\
3 \\ / F ield | OpenFOAM: The Open Source CFD Toolbox
5 \\ / A nd | Copyright held by original author
7 -------------------------------------------------------------------------------
9 This file is part of OpenFOAM.
11 OpenFOAM is free software; you can redistribute it and/or modify it
12 under the terms of the GNU General Public License as published by the
13 Free Software Foundation; either version 2 of the License, or (at your
14 option) any later version.
16 OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
17 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
21 You should have received a copy of the GNU General Public License
22 along with OpenFOAM; if not, write to the Free Software Foundation,
23 Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
27 \*---------------------------------------------------------------------------*/
29 #include "tetPolyMeshCellDecomp.H"
31 #include "PstreamCombineReduceOps.H"
32 #include "processorTetPolyPatchCellDecomp.H"
33 #include "globalTetPolyPatchCellDecomp.H"
35 #include "labelIOList.H"
36 #include "globalMeshData.H"
40 // * * * * * * * * * * * * * Private Member Functions * * * * * * * * * * * //

// NOTE(review): this chunk is a sparse extraction -- the original file's own
// line numbers (left column) jump, so the template header, the operator()
// signature and the loop/brace structure of this class are not fully visible.

42 // Combiner functor used with combineReduce below: merges per-processor
// SLLists of objects (here: global edge labels) into one duplicate-free
// global list.
44 class globalPatchCombine

// Presumed operator() parameters: the accumulated global list (in/out) and
// this processor's local list (read-only) -- signature line not in view.
50 SLList<Type>& globalObjects,
51 const SLList<Type>& myObjects

54 // For each of my points check whether it exists in the global
55 // points list; if not, add it to the global points

// Outer loop: walk this processor's objects (const iteration).
59 typename SLList<Type>::const_iterator myObjectsIter =
61 myObjectsIter != myObjects.end();

65 const Type& curMyType = myObjectsIter();

// Inner loop: linear search of the global list for a duplicate.
71 typename SLList<Type>::iterator globalObjectsIter =
72 globalObjects.begin();
73 globalObjectsIter != globalObjects.end();

77 if (globalObjectsIter() == curMyType)

// Not found in the global list: append (keeps the global list unique).
86 globalObjects.append(curMyType);
93 void Foam::tetPolyMeshCellDecomp::addParallelPointPatch()

// NOTE(review): sparse extraction -- the original file's line numbers (left
// column) jump throughout this function, so loop headers, braces, IOobject
// arguments and several statements are missing from this view. Comments below
// describe only what the visible lines establish.

96 // The processor point patch will be added if processor boundaries
97 // exist in the case. If the mesh with processor boundaries is
98 // not created during a parallel run (e.g. decomposePar), the
99 // addressing will be dummy. HJ, 19/Mar/2002

// Only build the global patch when the mesh carries parallel global data.
101 if (mesh_.globalData().parallel())

103 // This cannot be done if I do not know global point/face/cell mapping;
104 // at the very least I need it on all parallel patches. Having in mind
105 // the added expense of reading/writing of this data, the global
106 // mapping files will still be used. I can do the globally
107 // shared points because they carry their own global labels,
108 // but not for cut edges shared by several processors.

// Stage 1: read decomposition addressing (local -> global labels) from disk.
113 Info<< "void tetPolyMeshCellDecomp::addParallelPointPatch() : "
114 << "Reading pointProcAddressing." << endl;

117 labelIOList pointProcAddressing
121 "pointProcAddressing",
122 mesh_.time().findInstance(operator()().meshDir(), "faces"),

132 Info<< "void tetPolyMeshCellDecomp::addParallelPointPatch() : "
133 << "Reading cellProcAddressing." << endl;

136 labelIOList cellProcAddressing
140 "cellProcAddressing",
141 mesh_.time().findInstance(operator()().meshDir(), "faces"),

// Cell-decomposition tet meshes append cell centres after the mesh points,
// so global cell labels are offset past all global points.
149 const label globalCellOffset = mesh_.globalData().nTotalPoints();

151 // Add the cell centres to the lookup list
152 label oldSize = pointProcAddressing.size();
153 pointProcAddressing.setSize(oldSize + cellProcAddressing.size());

155 forAll (cellProcAddressing, cellI)
157 pointProcAddressing[oldSize + cellI] =
158 cellProcAddressing[cellI] + globalCellOffset;

161 // Get the list of local parallel processor points, their addressing
163 const labelList& localParPoints =
164 mesh_.globalData().sharedPointLabels();

168 // Get the list of local parallel processor edges
169 const edgeList& localParEdges = parallelEdges();

// Stage 2: express each local parallel edge in global point labels.
171 // Extract global numbers for each of the parallel edges
172 SLList<edge> globalParEdges;

174 forAll (localParEdges, edgeI)
176 globalParEdges.append
180 pointProcAddressing[localParEdges[edgeI].start()],
181 pointProcAddressing[localParEdges[edgeI].end()]

186 // Create the local-to-global edge mapping
// -1 sentinel: any entry still negative after the search below is an error.
187 labelList localEdgeMapping(localParEdges.size(), -1);

189 if (Pstream::parRun())
191 // Create a global list of edges by reduction from all processors
// Uses the globalPatchCombine functor defined above to merge the lists
// without duplicates.
192 combineReduce(globalParEdges, globalPatchCombine<edge>());

194 // Find out which of the parallel edges are local. For
195 // easier search indexing make a plain list out of the
196 // singly linked list of global edges
197 edgeList ge(globalParEdges);

// Linear match of each local edge (in global numbering) against ge.
// Loop over geI and construction of curGlobal are not visible here.
199 forAll (localParEdges, edgeI)
204 pointProcAddressing[localParEdges[edgeI].start()],
205 pointProcAddressing[localParEdges[edgeI].end()]
210 if (curGlobal == ge[geI])
212 localEdgeMapping[edgeI] = geI;

// Sanity check: every local parallel edge must have found a global match.
222 if (localEdgeMapping.size() > 0)
224 if (min(localEdgeMapping) < 0)
228 "void Foam::tetPolyMeshCellDecomp::"
229 "addParallelPointPatch()"
230 ) << "Error in parallel points patch: edges"
231 << abort(FatalError);

// Stage 3: cut edges (edges with only one end on a shared point).
236 // Do parallel cut edges

239 // Parallel cut edges come in two flavours: the ones local to
240 // the processor and the ones shared between several
241 // processors. The first lot cause no trouble, but for the
242 // second we need to make sure that only one processor
243 // multiplies out the global sum part of the matrix.
244 // At the same time, the local part of the product needs to be
245 // done on every processor separately. This is catered for by
247 // 1) Local edges will be calculated here for every patch.
248 // The list needs to be complete.
249 // 2) As the lists are calculated, a global list of cut edges
250 // is assembled. The first time the edge is added into the
251 // global list, it is accepted for global sum multiplication.
252 // If the same global edge is found, the contribution to the
253 // global sum is blocked.

255 // Count the maximum number of edges coming from the patch
256 label maxEdgesOnPatch = 0;

258 // For every local point get a list of edges
259 forAll (localParPoints, pointI)
261 maxEdgesOnPatch += nEdgesForPoint(localParPoints[pointI]);

// Over-sized scratch list; trimmed to nCutEdges below.
264 edgeList localCutEdges(maxEdgesOnPatch);

267 // Go through all the local points and get all the edges coming
268 // from that point. Check if the edge is local, if not, it is cut

// Edge endpoints come from the ldu addressing of the tet point matrix.
270 const unallocLabelList& L = lduAddr().lowerAddr();
271 const unallocLabelList& U = lduAddr().upperAddr();

273 forAll (localParPoints, pointI)
275 const label curPoint = localParPoints[pointI];

277 labelList curEdges = edgesForPoint(curPoint);

279 forAll (curEdges, edgeI)
281 edge e(L[curEdges[edgeI]], U[curEdges[edgeI]]);

// Pick the end of the edge that is not curPoint (declaration of
// otherEnd and the else-branch are not visible in this extraction).
284 if (e.start() == curPoint)
290 otherEnd = e.start();

293 // If the other end is not a local point, this is a cut edge
296 forAll (localParPoints, compI)
298 if (localParPoints[compI] == otherEnd)

307 // This is a cut edge. Add it to the list
// nCutEdges counter is declared/incremented on lines missing from view.
308 localCutEdges[nCutEdges] = e;

314 // Reset the size of the local cut edge list
315 localCutEdges.setSize(nCutEdges);

317 // Create a masking array. For the edges that do not contribute
318 // to the global product, the mask will be set to zero
319 scalarField localCutEdgeMask(localCutEdges.size(), 1);

// Stage 4: sequential master -> slave -> slave token-passing so that each
// globally shared cut edge is "claimed" by exactly one processor.
321 if (Pstream::parRun())

324 // Every processor goes through all of its edges and creates the
325 // global version of the edge. If such an edge does not
326 // already exist, it is added to the list, if it does, its
327 // mask is set to zero (somebody is already doing the
// (comment continuation missing from this extraction)

329 if (Pstream::master())
331 // Master's mask is always one. Add all edges to the list
332 SLList<edge> globalCutEdges;

334 forAll (localCutEdges, edgeI)
336 globalCutEdges.append
340 pointProcAddressing[localCutEdges[edgeI].start()],
341 pointProcAddressing[localCutEdges[edgeI].end()]

346 // Send the list to the first slave
347 OPstream toFirstSlave(Pstream::blocking, Pstream::firstSlave());
348 toFirstSlave << globalCutEdges;

352 // Slave. Read the list from the previous slave, do
353 // local processing and send it to the next slave

// Loop over the slave rank numbers; only the matching rank acts.
356 int slave = Pstream::firstSlave();
357 slave <= Pstream::lastSlave();

361 if (Pstream::myProcNo() == slave)
364 int receiveFrom = slave - 1;
365 int sendTo = slave + 1;

367 if (Pstream::myProcNo() == Pstream::firstSlave())
369 // Receive from master
370 receiveFrom = Pstream::masterNo();

// Blocking receive of the accumulated global cut-edge list.
373 IPstream rf(Pstream::blocking, receiveFrom);
374 SLList<edge> globalCutEdges(rf);

376 // Check local cut edges against the list
377 forAll (localCutEdges, edgeI)
// Construction of the global edge e is split across missing lines here.
382 [localCutEdges[edgeI].start()],
383 pointProcAddressing[localCutEdges[edgeI].end()]

390 SLList<edge>::iterator gEdgeIter =
391 globalCutEdges.begin();
392 gEdgeIter != globalCutEdges.end();

396 if (gEdgeIter() == e)

405 // Edge already exists. Set mask to zero
// Another processor already owns this edge's global-sum contribution.
406 localCutEdgeMask[edgeI] = 0;

410 // Edge not found. Add it to the list
411 globalCutEdges.append(e);

415 // Re-transmit the list to the next processor
416 if (slave < Pstream::lastSlave())
418 OPstream passOnEdges(Pstream::blocking, sendTo);
419 passOnEdges << globalCutEdges;

// Stage 5: append the global tet point patch to the boundary.
428 Info<< "void tetPolyMeshCellDecomp::addParallelPointPatch() : "
429 << "Adding globalTetPolyPatchCellDecomp."
430 << " nGlobalPoints: "
431 << mesh_.globalData().nGlobalPoints() << endl;

434 // Add the processor point patch
435 boundary_.setSize(boundary_.size() + 1);

// boundary_.set(...) call presumed; remaining constructor arguments are
// missing from this extraction.
439 boundary_.size() - 1,
440 new globalTetPolyPatchCellDecomp
442 mesh_.globalData().nGlobalPoints(),
444 mesh_.globalData().sharedPointAddr(),
445 globalParEdges.size(),
458 // ************************************************************************* //