src/Pstream/mpi/UPstream.C (OpenFOAM-2.0.x)
/*---------------------------------------------------------------------------*\
  =========                 |
  \\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox
   \\    /   O peration     |
    \\  /    A nd           | Copyright (C) 2011 OpenFOAM Foundation
     \\/     M anipulation  |
-------------------------------------------------------------------------------
License
    This file is part of OpenFOAM.

    OpenFOAM is free software: you can redistribute it and/or modify it
    under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    for more details.

    You should have received a copy of the GNU General Public License
    along with OpenFOAM.  If not, see <http://www.gnu.org/licenses/>.

\*---------------------------------------------------------------------------*/

#include "mpi.h"

#include "UPstream.H"
#include "PstreamReduceOps.H"
#include "OSspecific.H"
#include "PstreamGlobals.H"

#include <cstring>
#include <cstdlib>
#include <csignal>

#if defined(WM_SP)
#   define MPI_SCALAR MPI_FLOAT
#elif defined(WM_DP)
#   define MPI_SCALAR MPI_DOUBLE
#endif

// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //


// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

// NOTE:
// Valid parallel options vary between MPI implementations, but flag the
// common ones here. If MPI_Init() does not remove them, the subsequent
// argument processing will notice that they are wrong.
void Foam::UPstream::addValidParOptions(HashTable<string>& validParOptions)
{
    validParOptions.insert("np", "");
    validParOptions.insert("p4pg", "PI file");
    validParOptions.insert("p4wd", "directory");
    validParOptions.insert("p4amslave", "");
    validParOptions.insert("p4yourname", "hostname");
    validParOptions.insert("GAMMANP", "number of instances");
    validParOptions.insert("machinefile", "machine file");
}


bool Foam::UPstream::init(int& argc, char**& argv)
{
    MPI_Init(&argc, &argv);

    int numprocs;
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &myProcNo_);

    if (debug)
    {
        Pout<< "UPstream::init : initialised with numProcs:" << numprocs
            << " myProcNo:" << myProcNo_ << endl;
    }

    if (numprocs <= 1)
    {
        FatalErrorIn("UPstream::init(int& argc, char**& argv)")
            << "bool UPstream::init(int& argc, char**& argv) : "
               "attempt to run parallel on 1 processor"
            << Foam::abort(FatalError);
    }

    procIDs_.setSize(numprocs);

    forAll(procIDs_, procNo)
    {
        procIDs_[procNo] = procNo;
    }

    setParRun();
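
    // Attach a send buffer for MPI buffered sends; its size is taken from
    // the MPI_BUFFER_SIZE environment variable. SGI MPI handles its own
    // buffering, so the attach is skipped there.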
#   ifndef SGIMPI
    string bufferSizeName = getEnv("MPI_BUFFER_SIZE");

    if (bufferSizeName.size())
    {
        int bufferSize = atoi(bufferSizeName.c_str());

        if (bufferSize)
        {
            MPI_Buffer_attach(new char[bufferSize], bufferSize);
        }
    }
    else
    {
        FatalErrorIn("UPstream::init(int& argc, char**& argv)")
            << "UPstream::init(int& argc, char**& argv) : "
            << "environment variable MPI_BUFFER_SIZE not defined"
            << Foam::abort(FatalError);
    }
#   endif

    int processorNameLen;
    char processorName[MPI_MAX_PROCESSOR_NAME];

    MPI_Get_processor_name(processorName, &processorNameLen);

    //signal(SIGABRT, stop);

    // Now that nprocs is known construct communication tables.
    initCommunicationSchedule();

    return true;
}


void Foam::UPstream::exit(int errnum)
{
    if (debug)
    {
        Pout<< "UPstream::exit." << endl;
    }
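
    // Detach and free the send buffer that was attached in init()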
#   ifndef SGIMPI
    int size;
    char* buff;
    MPI_Buffer_detach(&buff, &size);
    delete[] buff;
#   endif

    if (PstreamGlobals::outstandingRequests_.size())
    {
        label n = PstreamGlobals::outstandingRequests_.size();
        PstreamGlobals::outstandingRequests_.clear();

        WarningIn("UPstream::exit(int)")
            << "There are still " << n << " outstanding MPI_Requests." << endl
            << "This means that your code exited before calling"
            << " UPstream::waitRequests()." << endl
            << "This should not happen for a normal code exit."
            << endl;
    }

    if (errnum == 0)
    {
        MPI_Finalize();
        ::exit(errnum);
    }
    else
    {
        MPI_Abort(MPI_COMM_WORLD, errnum);
    }
}


void Foam::UPstream::abort()
{
    MPI_Abort(MPI_COMM_WORLD, 1);
}


void Foam::reduce(scalar& Value, const sumOp<scalar>& bop)
{
    if (Pstream::debug)
    {
        Pout<< "Foam::reduce : value:" << Value << endl;
    }

    if (!UPstream::parRun())
    {
        return;
    }
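
    // For small process counts gather and scatter explicitly via the master;
    // above nProcsSimpleSum fall back to a single MPI_Allreduce.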
    if (UPstream::nProcs() <= UPstream::nProcsSimpleSum)
    {
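        // Gather: the master receives a value from every slave and
        // accumulates it with bop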
        if (UPstream::master())
        {
            for
            (
                int slave=UPstream::firstSlave();
                slave<=UPstream::lastSlave();
                slave++
            )
            {
                scalar value;

                if
                (
                    MPI_Recv
                    (
                        &value,
                        1,
                        MPI_SCALAR,
                        UPstream::procID(slave),
                        UPstream::msgType(),
                        MPI_COMM_WORLD,
                        MPI_STATUS_IGNORE
                    )
                )
                {
                    FatalErrorIn
                    (
                        "reduce(scalar& Value, const sumOp<scalar>& sumOp)"
                    )   << "MPI_Recv failed"
                        << Foam::abort(FatalError);
                }

                Value = bop(Value, value);
            }
        }
        else
        {
            if
            (
                MPI_Send
                (
                    &Value,
                    1,
                    MPI_SCALAR,
                    UPstream::procID(UPstream::masterNo()),
                    UPstream::msgType(),
                    MPI_COMM_WORLD
                )
            )
            {
                FatalErrorIn
                (
                    "reduce(scalar& Value, const sumOp<scalar>& sumOp)"
                )   << "MPI_Send failed"
                    << Foam::abort(FatalError);
            }
        }
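
        // Scatter: the master sends the reduced value back to every slave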
        if (UPstream::master())
        {
            for
            (
                int slave=UPstream::firstSlave();
                slave<=UPstream::lastSlave();
                slave++
            )
            {
                if
                (
                    MPI_Send
                    (
                        &Value,
                        1,
                        MPI_SCALAR,
                        UPstream::procID(slave),
                        UPstream::msgType(),
                        MPI_COMM_WORLD
                    )
                )
                {
                    FatalErrorIn
                    (
                        "reduce(scalar& Value, const sumOp<scalar>& sumOp)"
                    )   << "MPI_Send failed"
                        << Foam::abort(FatalError);
                }
            }
        }
        else
        {
            if
            (
                MPI_Recv
                (
                    &Value,
                    1,
                    MPI_SCALAR,
                    UPstream::procID(UPstream::masterNo()),
                    UPstream::msgType(),
                    MPI_COMM_WORLD,
                    MPI_STATUS_IGNORE
                )
            )
            {
                FatalErrorIn
                (
                    "reduce(scalar& Value, const sumOp<scalar>& sumOp)"
                )   << "MPI_Recv failed"
                    << Foam::abort(FatalError);
            }
        }
    }
    else
    {
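        // Let MPI perform the summation across all processors in one call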
        scalar sum;
        MPI_Allreduce(&Value, &sum, 1, MPI_SCALAR, MPI_SUM, MPI_COMM_WORLD);
        Value = sum;

        /*
        int myProcNo = UPstream::myProcNo();
        int nProcs = UPstream::nProcs();

        //
        // receive from children
        //
        int level = 1;
        int thisLevelOffset = 2;
        int childLevelOffset = thisLevelOffset/2;
        int childProcId = 0;

        while
        (
            (childLevelOffset < nProcs)
         && (myProcNo % thisLevelOffset) == 0
        )
        {
            childProcId = myProcNo + childLevelOffset;

            scalar value;

            if (childProcId < nProcs)
            {
                if
                (
                    MPI_Recv
                    (
                        &value,
                        1,
                        MPI_SCALAR,
                        UPstream::procID(childProcId),
                        UPstream::msgType(),
                        MPI_COMM_WORLD,
                        MPI_STATUS_IGNORE
                    )
                )
                {
                    FatalErrorIn
                    (
                        "reduce(scalar& Value, const sumOp<scalar>& sumOp)"
                    )   << "MPI_Recv failed"
                        << Foam::abort(FatalError);
                }

                Value = bop(Value, value);
            }

            level++;
            thisLevelOffset <<= 1;
            childLevelOffset = thisLevelOffset/2;
        }

        //
        // send and receive from parent
        //
        if (!UPstream::master())
        {
            int parentId = myProcNo - (myProcNo % thisLevelOffset);

            if
            (
                MPI_Send
                (
                    &Value,
                    1,
                    MPI_SCALAR,
                    UPstream::procID(parentId),
                    UPstream::msgType(),
                    MPI_COMM_WORLD
                )
            )
            {
                FatalErrorIn
                (
                    "reduce(scalar& Value, const sumOp<scalar>& sumOp)"
                )   << "MPI_Send failed"
                    << Foam::abort(FatalError);
            }

            if
            (
                MPI_Recv
                (
                    &Value,
                    1,
                    MPI_SCALAR,
                    UPstream::procID(parentId),
                    UPstream::msgType(),
                    MPI_COMM_WORLD,
                    MPI_STATUS_IGNORE
                )
            )
            {
                FatalErrorIn
                (
                    "reduce(scalar& Value, const sumOp<scalar>& sumOp)"
                )   << "MPI_Recv failed"
                    << Foam::abort(FatalError);
            }
        }


        //
        // distribute to my children
        //
        level--;
        thisLevelOffset >>= 1;
        childLevelOffset = thisLevelOffset/2;

        while (level > 0)
        {
            childProcId = myProcNo + childLevelOffset;

            if (childProcId < nProcs)
            {
                if
                (
                    MPI_Send
                    (
                        &Value,
                        1,
                        MPI_SCALAR,
                        UPstream::procID(childProcId),
                        UPstream::msgType(),
                        MPI_COMM_WORLD
                    )
                )
                {
                    FatalErrorIn
                    (
                        "reduce(scalar& Value, const sumOp<scalar>& sumOp)"
                    )   << "MPI_Send failed"
                        << Foam::abort(FatalError);
                }
            }

            level--;
            thisLevelOffset >>= 1;
            childLevelOffset = thisLevelOffset/2;
        }
        */
    }

    if (Pstream::debug)
    {
        Pout<< "Foam::reduce : reduced value:" << Value << endl;
    }
}


void Foam::UPstream::waitRequests()
{
    if (debug)
    {
        Pout<< "UPstream::waitRequests : starting wait for "
            << PstreamGlobals::outstandingRequests_.size()
            << " outstanding requests." << endl;
    }
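
    // Block until every outstanding non-blocking request has completed,
    // then clear the list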
    if (PstreamGlobals::outstandingRequests_.size())
    {
        if
        (
            MPI_Waitall
            (
                PstreamGlobals::outstandingRequests_.size(),
                PstreamGlobals::outstandingRequests_.begin(),
                MPI_STATUSES_IGNORE
            )
        )
        {
            FatalErrorIn
            (
                "UPstream::waitRequests()"
            )   << "MPI_Waitall returned with error" << Foam::endl;
        }

        PstreamGlobals::outstandingRequests_.clear();
    }

    if (debug)
    {
        Pout<< "UPstream::waitRequests : finished wait." << endl;
    }
}


bool Foam::UPstream::finishedRequest(const label i)
{
    if (debug)
    {
        Pout<< "UPstream::finishedRequest : checking request:" << i
            << endl;
    }

    if (i >= PstreamGlobals::outstandingRequests_.size())
    {
        FatalErrorIn
        (
            "UPstream::finishedRequest(const label)"
        )   << "There are " << PstreamGlobals::outstandingRequests_.size()
            << " outstanding send requests and you are asking for i=" << i
            << nl
            << "Maybe you are mixing blocking/non-blocking comms?"
            << Foam::abort(FatalError);
    }
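
    // MPI_Test is non-blocking: flag is set non-zero if request i has
    // completed, without waiting for it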
    int flag;
    MPI_Test
    (
        &PstreamGlobals::outstandingRequests_[i],
        &flag,
        MPI_STATUS_IGNORE
    );

    if (debug)
    {
        Pout<< "UPstream::finishedRequest : finished check for request:" << i
            << endl;
    }

    return flag != 0;
}


// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

// ************************************************************************* //