/*---------------------------------------------------------------------------*\
  =========                 |
  \\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox
   \\    /   O peration     |
    \\  /    A nd           | Copyright (C) 2011 OpenFOAM Foundation
     \\/     M anipulation  |
-------------------------------------------------------------------------------
License
    This file is part of OpenFOAM.

    OpenFOAM is free software: you can redistribute it and/or modify it
    under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    for more details.

    You should have received a copy of the GNU General Public License
    along with OpenFOAM.  If not, see <http://www.gnu.org/licenses/>.

\*---------------------------------------------------------------------------*/

#include "mpi.h"

#include "UPstream.H"
#include "PstreamReduceOps.H"
#include "OSspecific.H"
#include "PstreamGlobals.H"

#include <cstdlib>

// Map the OpenFOAM scalar onto the matching MPI datatype
#if defined(WM_SP)
#   define MPI_SCALAR MPI_FLOAT
#elif defined(WM_DP)
#   define MPI_SCALAR MPI_DOUBLE
#endif
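
// Note (illustrative): MPI_SCALAR keeps MPI transfers consistent with the
// compile-time precision of Foam::scalar, so a call such as
//
//     MPI_Allreduce(&x, &sum, 1, MPI_SCALAR, MPI_SUM, MPI_COMM_WORLD);
//
// moves float data in single-precision (WM_SP) builds and double data in
// double-precision (WM_DP) builds.
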
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

// Valid parallel options vary between implementations, but flag common ones.
// If they are not removed by MPI_Init(), the subsequent argument processing
// will notice that they are wrong.
void Foam::UPstream::addValidParOptions(HashTable<string>& validParOptions)
{
    validParOptions.insert("np", "");
    validParOptions.insert("p4pg", "PI file");
    validParOptions.insert("p4wd", "directory");
    validParOptions.insert("p4amslave", "");
    validParOptions.insert("p4yourname", "hostname");
    validParOptions.insert("GAMMANP", "number of instances");
    validParOptions.insert("machinefile", "machine file");
}
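
// Illustrative note (not from the original sources): registering these names
// means that arguments an MPI launcher may leave on argv, for example the
// "-p4pg <PI file> -p4wd <directory>" pair appended by the old MPICH/p4
// device, are tolerated by the argList option checking instead of being
// rejected as unknown options.
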
bool Foam::UPstream::init(int& argc, char**& argv)
{
    MPI_Init(&argc, &argv);

    int numprocs;
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &myProcNo_);

    if (debug)
    {
        Pout<< "UPstream::init : initialised with numProcs:" << numprocs
            << " myProcNo:" << myProcNo_ << endl;
    }

    if (numprocs <= 1)
    {
        FatalErrorIn("UPstream::init(int& argc, char**& argv)")
            << "bool UPstream::init(int& argc, char**& argv) : "
               "attempt to run parallel on 1 processor"
            << Foam::abort(FatalError);
    }

    procIDs_.setSize(numprocs);

    forAll(procIDs_, procNo)
    {
        procIDs_[procNo] = procNo;
    }

    setParRun();

    // Attach an MPI buffer for buffered sends, sized by MPI_BUFFER_SIZE
    string bufferSizeName = getEnv("MPI_BUFFER_SIZE");

    if (bufferSizeName.size())
    {
        int bufferSize = atoi(bufferSizeName.c_str());

        if (bufferSize)
        {
            MPI_Buffer_attach(new char[bufferSize], bufferSize);
        }
    }
    else
    {
        FatalErrorIn("UPstream::init(int& argc, char**& argv)")
            << "UPstream::init(int& argc, char**& argv) : "
            << "environment variable MPI_BUFFER_SIZE not defined"
            << Foam::abort(FatalError);
    }

    int processorNameLen;
    char processorName[MPI_MAX_PROCESSOR_NAME];

    MPI_Get_processor_name(processorName, &processorNameLen);

    //signal(SIGABRT, stop);

    // Now that nprocs is known construct communication tables.
    initCommunicationSchedule();

    return true;
}
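
// Usage sketch (illustrative only, not part of this file's interface): a
// solver does not normally call UPstream::init() itself; constructing the
// argList with the -parallel option triggers it, e.g.
//
//     int main(int argc, char *argv[])
//     {
//         Foam::argList args(argc, argv);  // runs UPstream::init() when
//                                          // started with -parallel
//         ...
//     }
//
// so MPI_Init() happens exactly once per process before any Pstream traffic.
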
void Foam::UPstream::exit(int errnum)
{
    if (debug)
    {
        Pout<< "UPstream::exit." << endl;
    }

    // Detach the buffer attached in init()
    int size;
    char* buff;
    MPI_Buffer_detach(&buff, &size);

    if (PstreamGlobals::outstandingRequests_.size())
    {
        label n = PstreamGlobals::outstandingRequests_.size();
        PstreamGlobals::outstandingRequests_.clear();

        WarningIn("UPstream::exit(int)")
            << "There are still " << n << " outstanding MPI_Requests." << endl
            << "This means that your code exited before doing a"
            << " UPstream::waitRequests()." << endl
            << "This should not happen for a normal code exit."
            << endl;
    }

    // Clean shutdown on a zero exit code, otherwise abort the whole job
    if (errnum == 0)
    {
        MPI_Finalize();
        ::exit(errnum);
    }
    else
    {
        MPI_Abort(MPI_COMM_WORLD, errnum);
    }
}

void Foam::UPstream::abort()
{
    MPI_Abort(MPI_COMM_WORLD, 1);
}

void Foam::reduce(scalar& Value, const sumOp<scalar>& bop)
{
    if (UPstream::debug)
    {
        Pout<< "Foam::reduce : value:" << Value << endl;
    }

    if (!UPstream::parRun())
    {
        return;
    }

    if (UPstream::nProcs() <= UPstream::nProcsSimpleSum)
    {
        // Linear gather: the master receives every slave value and combines
        // them with bop
        if (UPstream::master())
        {
            for
            (
                int slave=UPstream::firstSlave();
                slave<=UPstream::lastSlave();
                slave++
            )
            {
                scalar value;

                if
                (
                    MPI_Recv
                    (
                        &value, 1, MPI_SCALAR,
                        UPstream::procID(slave),
                        UPstream::msgType(), MPI_COMM_WORLD, MPI_STATUS_IGNORE
                    )
                )
                {
                    FatalErrorIn
                    (
                        "reduce(scalar& Value, const sumOp<scalar>& sumOp)"
                    )   << "MPI_Recv failed"
                        << Foam::abort(FatalError);
                }

                Value = bop(Value, value);
            }
        }
        else
        {
            // Slaves send their local value to the master
            if
            (
                MPI_Send
                (
                    &Value, 1, MPI_SCALAR,
                    UPstream::procID(UPstream::masterNo()),
                    UPstream::msgType(), MPI_COMM_WORLD
                )
            )
            {
                FatalErrorIn
                (
                    "reduce(scalar& Value, const sumOp<scalar>& sumOp)"
                )   << "MPI_Send failed"
                    << Foam::abort(FatalError);
            }
        }

        // Linear scatter: the master sends the reduced value back to every
        // slave
        if (UPstream::master())
        {
            for
            (
                int slave=UPstream::firstSlave();
                slave<=UPstream::lastSlave();
                slave++
            )
            {
                if
                (
                    MPI_Send
                    (
                        &Value, 1, MPI_SCALAR,
                        UPstream::procID(slave),
                        UPstream::msgType(), MPI_COMM_WORLD
                    )
                )
                {
                    FatalErrorIn
                    (
                        "reduce(scalar& Value, const sumOp<scalar>& sumOp)"
                    )   << "MPI_Send failed"
                        << Foam::abort(FatalError);
                }
            }
        }
        else
        {
            if
            (
                MPI_Recv
                (
                    &Value, 1, MPI_SCALAR,
                    UPstream::procID(UPstream::masterNo()),
                    UPstream::msgType(), MPI_COMM_WORLD, MPI_STATUS_IGNORE
                )
            )
            {
                FatalErrorIn
                (
                    "reduce(scalar& Value, const sumOp<scalar>& sumOp)"
                )   << "MPI_Recv failed"
                    << Foam::abort(FatalError);
            }
        }
    }
    else
    {
        // For larger numbers of processors use the MPI collective
        scalar sum;
        MPI_Allreduce(&Value, &sum, 1, MPI_SCALAR, MPI_SUM, MPI_COMM_WORLD);
        Value = sum;

        /*
        // Older manual reduction over a binary tree of processors,
        // retained for reference

        int myProcNo = UPstream::myProcNo();
        int nProcs = UPstream::nProcs();

        //
        // receive from children
        //
        int level = 1;
        int thisLevelOffset = 2;
        int childLevelOffset = thisLevelOffset/2;
        int childProcId = 0;

        while
        (
            (childLevelOffset < nProcs)
         && (myProcNo % thisLevelOffset) == 0
        )
        {
            childProcId = myProcNo + childLevelOffset;

            scalar value;

            if (childProcId < nProcs)
            {
                if
                (
                    MPI_Recv
                    (
                        &value, 1, MPI_SCALAR,
                        UPstream::procID(childProcId),
                        UPstream::msgType(), MPI_COMM_WORLD, MPI_STATUS_IGNORE
                    )
                )
                {
                    FatalErrorIn
                    (
                        "reduce(scalar& Value, const sumOp<scalar>& sumOp)"
                    )   << "MPI_Recv failed"
                        << Foam::abort(FatalError);
                }

                Value = bop(Value, value);
            }

            level++;
            thisLevelOffset <<= 1;
            childLevelOffset = thisLevelOffset/2;
        }

        //
        // send and receive from parent
        //
        if (!UPstream::master())
        {
            int parentId = myProcNo - (myProcNo % thisLevelOffset);

            if
            (
                MPI_Send
                (
                    &Value, 1, MPI_SCALAR,
                    UPstream::procID(parentId),
                    UPstream::msgType(), MPI_COMM_WORLD
                )
            )
            {
                FatalErrorIn
                (
                    "reduce(scalar& Value, const sumOp<scalar>& sumOp)"
                )   << "MPI_Send failed"
                    << Foam::abort(FatalError);
            }

            if
            (
                MPI_Recv
                (
                    &Value, 1, MPI_SCALAR,
                    UPstream::procID(parentId),
                    UPstream::msgType(), MPI_COMM_WORLD, MPI_STATUS_IGNORE
                )
            )
            {
                FatalErrorIn
                (
                    "reduce(scalar& Value, const sumOp<scalar>& sumOp)"
                )   << "MPI_Recv failed"
                    << Foam::abort(FatalError);
            }
        }

        //
        // distribute to my children
        //
        level--;
        thisLevelOffset >>= 1;
        childLevelOffset = thisLevelOffset/2;

        while (level > 0)
        {
            childProcId = myProcNo + childLevelOffset;

            if (childProcId < nProcs)
            {
                if
                (
                    MPI_Send
                    (
                        &Value, 1, MPI_SCALAR,
                        UPstream::procID(childProcId),
                        UPstream::msgType(), MPI_COMM_WORLD
                    )
                )
                {
                    FatalErrorIn
                    (
                        "reduce(scalar& Value, const sumOp<scalar>& sumOp)"
                    )   << "MPI_Send failed"
                        << Foam::abort(FatalError);
                }
            }

            level--;
            thisLevelOffset >>= 1;
            childLevelOffset = thisLevelOffset/2;
        }
        */
    }

    if (UPstream::debug)
    {
        Pout<< "Foam::reduce : reduced value:" << Value << endl;
    }
}
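
// Usage sketch (illustrative only): solver-level code reduces a local scalar
// across all processors through this interface, e.g.
//
//     scalar myValue = ...;              // processor-local contribution
//     reduce(myValue, sumOp<scalar>());  // myValue becomes the global sum
//                                        // on every processor
//
// which ends up in the linear gather/scatter or MPI_Allreduce code above.
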
void Foam::UPstream::waitRequests()
{
    if (debug)
    {
        Pout<< "UPstream::waitRequests : starting wait for "
            << PstreamGlobals::outstandingRequests_.size()
            << " outstanding requests." << endl;
    }

    if (PstreamGlobals::outstandingRequests_.size())
    {
        if
        (
            MPI_Waitall
            (
                PstreamGlobals::outstandingRequests_.size(),
                PstreamGlobals::outstandingRequests_.begin(),
                MPI_STATUSES_IGNORE
            )
        )
        {
            FatalErrorIn
            (
                "UPstream::waitRequests()"
            )   << "MPI_Waitall returned with error" << Foam::endl;
        }

        PstreamGlobals::outstandingRequests_.clear();
    }

    if (debug)
    {
        Pout<< "UPstream::waitRequests : finished wait." << endl;
    }
}

bool Foam::UPstream::finishedRequest(const label i)
{
    if (debug)
    {
        Pout<< "UPstream::finishedRequest : starting wait for request:" << i
            << endl;
    }

    if (i >= PstreamGlobals::outstandingRequests_.size())
    {
        FatalErrorIn
        (
            "UPstream::finishedRequest(const label)"
        )   << "There are " << PstreamGlobals::outstandingRequests_.size()
            << " outstanding send requests and you are asking for i=" << i
            << endl
            << "Maybe you are mixing blocking/non-blocking comms?"
            << Foam::abort(FatalError);
    }

    int flag;
    MPI_Test
    (
        &PstreamGlobals::outstandingRequests_[i],
        &flag,
        MPI_STATUS_IGNORE
    );

    if (debug)
    {
        Pout<< "UPstream::finishedRequest : finished wait for request:" << i
            << endl;
    }

    return flag != 0;
}
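
// Usage sketch (illustrative only): non-blocking Pstream transfers record
// their MPI_Request handles in PstreamGlobals::outstandingRequests_, and the
// caller must wait on them before touching the transfer buffers again, e.g.
//
//     // ... start non-blocking sends/receives (UPstream::nonBlocking) ...
//     UPstream::waitRequests();      // block until every request completes
//
//     // or poll one request index without blocking:
//     // if (UPstream::finishedRequest(i)) { /* buffer i may be reused */ }
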

// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

// ************************************************************************* //