1 /*---------------------------------------------------------------------------*\
3 \\ / F ield | cfMesh: A library for mesh generation
5 \\ / A nd | Author: Franjo Juretic (franjo.juretic@c-fields.com)
6 \\/ M anipulation | Copyright (C) Creative Fields, Ltd.
7 -------------------------------------------------------------------------------
9 This file is part of cfMesh.
11 cfMesh is free software; you can redistribute it and/or modify it
12 under the terms of the GNU General Public License as published by the
13 Free Software Foundation; either version 3 of the License, or (at your
14 option) any later version.
16 cfMesh is distributed in the hope that it will be useful, but WITHOUT
17 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 for more details.
21 You should have received a copy of the GNU General Public License
22 along with cfMesh. If not, see <http://www.gnu.org/licenses/>.
26 \*---------------------------------------------------------------------------*/
28 #include "meshOctreeModifier.H"
36 //#define OCTREETiming
37 //#define DEBUGBalancing
39 # ifdef DEBUGBalancing
43 // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
48 // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
49 // Private member functions
51 void meshOctreeModifier::loadDistribution(const direction usedType)
// Purpose: re-balances octree leaf boxes across MPI processes so that every
// process ends up with roughly the same number of "weighted" leaves.  A leaf
// contributes to the weight only if its cubeType() matches the usedType
// bitmask.  Migrated leaves are shipped as meshOctreeCubeBasic coordinates,
// the local tree is purged of moved boxes and re-refined from the received
// coordinates, and the inter-processor communication pattern is rebuilt.
//
// param usedType: bitmask (direction) of cube types that count towards the
//                 per-processor load.
//
// NOTE(review): this listing is incomplete — the embedded original line
// numbers jump (53->57, 68->74, 150->153, 222->234, ...), so braces, loop
// headers, the OPstream construction and some statements are missing here.
// Comments below describe only what the visible lines demonstrate; anything
// depending on the missing lines is hedged.
//
//- serial escape: with no neighbouring processors there is nothing to balance
53 if( octree_.neiProcs().size() == 0 )
// The repeated returnReduce(1, sumOp<label>()) calls appear to serve as
// global synchronisation points so the omp_get_wtime() stamps time the
// slowest process — TODO confirm (in the full source they are presumably
// inside the OCTREETiming guard declared at the top of the file).
57 returnReduce(1, sumOp<label>());
58 const scalar startTime = omp_get_wtime();
60 returnReduce(1, sumOp<label>());
61 const scalar t1 = omp_get_wtime();
62 Info << "Creation of list of leaves lasted " << t1-startTime << endl;
//- local leaves of the octree; globalLeafWeight[leafI] first holds the
//- running count of weighted leaves on this process (later offset to a
//- global index), or -1 for leaves that carry no weight
65 const LongList<meshOctreeCube*>& leaves = octree_.leaves_;
67 label localNumWeighs(0);
68 labelList globalLeafWeight(leaves.size());
//- (loop header missing from this listing) leaves whose type matches
//- usedType get the next local weight index; non-matching leaves are
//- marked with -1
74 if( leaves[leafI]->cubeType() & usedType )
76 globalLeafWeight[leafI] = localNumWeighs;
81 globalLeafWeight[leafI] = -1;
89 globalLeafWeight[leafI] = localNumWeighs;
95 returnReduce(1, sumOp<label>());
96 const scalar t2 = omp_get_wtime();
97 Info << "Creation of global leaf weights lasted " << t2-t1 << endl;
//- total weight over all processes and the ideal per-process share
100 const label totalNumWeights = returnReduce(localNumWeighs, sumOp<label>());
101 const label nWeightsPerProcessor = totalNumWeights / Pstream::nProcs();
103 //- check if balancing should be performed
104 //- the tolerance is set to 5% difference in the number of boxes
105 //- from the ideal one
106 label doBalancing(0);
//- relative imbalance of this process against the ideal share (the
//- enclosing comparison is truncated in this listing)
110 scalar(localNumWeighs - nWeightsPerProcessor) /
//- any single process over the tolerance triggers balancing everywhere
116 reduce(doBalancing, maxOp<label>());
118 if( doBalancing == 0 )
121 Info << "Distributing load between processors" << endl;
123 //- start calculating new partitions
124 //- find global labels of the leaf boxes
//- gather+scatter makes every process see all per-process weights;
//- doBalancing is then reused as this process's global weight offset
//- (sum of the weights held by lower-ranked processes)
127 labelList procWeights(Pstream::nProcs());
128 procWeights[Pstream::myProcNo()] = localNumWeighs;
129 Pstream::gatherList(procWeights);
130 Pstream::scatterList(procWeights);
132 for(label procI=0;procI<Pstream::myProcNo();++procI)
133 doBalancing += procWeights[procI];
//- shift local weight indices into the global numbering (skip -1 markers)
135 forAll(globalLeafWeight, lI)
137 if( globalLeafWeight[lI] != -1 )
138 globalLeafWeight[lI] += doBalancing;
141 //- leaf boxes which are not in the range for the current processor
142 //- shall be migrated to other processors
143 std::map<label, labelLongList> leavesToSend;
145 bool oneRemainingBox(false);
146 forAll(globalLeafWeight, leafI)
148 if( globalLeafWeight[leafI] == -1 )
//- presumably ensures at least one box stays on this process — TODO confirm
//- against the full source; the surrounding statements are omitted here
150 if( !oneRemainingBox && (leafI == leaves.size() -1) )
//- destination rank derived from the global weight index (expression
//- truncated; visibly combined with further arguments in the full source)
153 const label newProc =
156 globalLeafWeight[leafI] / nWeightsPerProcessor,
//- leaves assigned to another rank are queued for sending and tagged with
//- their new owner
160 if( newProc != Pstream::myProcNo() )
162 leavesToSend[newProc].append(leafI);
163 leaves[leafI]->setProcNo(newProc);
165 # ifdef DEBUGBalancing
166 if( leaves[leafI]->hasContainedElements() )
167 Serr << Pstream::myProcNo() << "Deleting a DATA cube "
168 << leaves[leafI]->coordinates() << " data is "
169 << leaves[leafI]->containedElements() << endl;
174 oneRemainingBox = true;
179 returnReduce(1, sumOp<label>());
180 const scalar t3 = omp_get_wtime();
181 Info << "Completed assignment of leaves to processors in " << t3-t2 << endl;
184 //- send the information to other processors
185 //- all processors shall receive a list containing the same information
186 //- each processor informs which other processors shall receive data from
188 labelListList sendToProcesssors(Pstream::nProcs());
189 sendToProcesssors[Pstream::myProcNo()].setSize(leavesToSend.size());
//- record the destination ranks this process sends to (the for-loop around
//- this iterator is truncated in this listing)
193 std::map<label, labelLongList>::const_iterator it=leavesToSend.begin();
194 it!=leavesToSend.end();
197 sendToProcesssors[Pstream::myProcNo()][counter++] = it->first;
199 Pstream::gatherList(sendToProcesssors);
200 Pstream::scatterList(sendToProcesssors);
//- after the exchange every process scans the full table to learn which
//- ranks will be sending data to it
202 labelHashSet receiveFrom;
203 forAll(sendToProcesssors, procI)
204 forAll(sendToProcesssors[procI], neiI)
205 if( sendToProcesssors[procI][neiI] == Pstream::myProcNo() )
206 receiveFrom.insert(procI);
208 //- send the coordinates of the boxes to other processors
209 const labelList& sendToProcs = sendToProcesssors[Pstream::myProcNo()];
210 forAll(sendToProcs, i)
212 const label procI = sendToProcs[i];
//- pack the leaves queued for rank procI as lightweight cube descriptors
214 List<meshOctreeCubeBasic> sendCoordinates
216 leavesToSend[procI].size()
219 forAll(leavesToSend[procI], lI)
221 const meshOctreeCube& oc = *leaves[leavesToSend[procI][lI]];
222 sendCoordinates[lI] =
//- OPstream construction is truncated here; byteSize() presumably sizes the
//- outgoing buffer for the blocking send — confirm against the full source
234 sendCoordinates.byteSize()
237 toOtherProc << sendCoordinates;
240 //- receive data sent from other processors
241 LongList<meshOctreeCubeBasic> migratedCubes;
242 forAllConstIter(labelHashSet, receiveFrom, iter)
244 List<meshOctreeCubeBasic> mc;
246 IPstream fromOtherProc(Pstream::blocking, iter.key());
//- append the received cubes to the migrated-cubes list (the copy loop
//- header is omitted from this listing)
250 label currSize = migratedCubes.size();
251 migratedCubes.setSize(currSize+mc.size());
254 migratedCubes[currSize] = mc[mcI];
260 returnReduce(1, sumOp<label>());
261 const scalar t4 = omp_get_wtime();
262 Info << "Data exchange lasted " << t4-t3 << endl;
265 //- delete cubes which have been moved to other processors
266 octree_.initialCubePtr_->purgeProcessorCubes(Pstream::myProcNo());
269 returnReduce(1, sumOp<label>());
270 const scalar t5 = omp_get_wtime();
271 Info << "Purging lasted " << t5-t4 << endl;
274 //- create boxes from the received coordinates
275 forAll(migratedCubes, mcI)
277 refineTreeForCoordinates
279 migratedCubes[mcI].coordinates(),
281 migratedCubes[mcI].cubeType()
//- rebuild the leaf list now that the tree contains the migrated boxes
285 createListOfLeaves();
288 returnReduce(1, sumOp<label>());
289 const scalar t6 = omp_get_wtime();
290 Info << "Tree refinement lasted " << t6-t5 << endl;
293 //- update the communication pattern
294 updateCommunicationPattern();
297 returnReduce(1, sumOp<label>());
298 const scalar endTime = omp_get_wtime();
299 Info << "Updating of communication pattern lasted " << endTime-t6 << endl;
300 Info << "Time for load balancing is " << endTime-startTime << endl;
303 Info << "Finished distributing load between processors" << endl;
306 // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
308 } // End namespace Foam
310 // ************************************************************************* //