OpenFOAM-2.0.x: src/OpenFOAM/meshes/ProcessorTopology/ProcessorTopology.C
/*---------------------------------------------------------------------------*\
  =========                 |
  \\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox
   \\    /   O peration     |
    \\  /    A nd           | Copyright (C) 2011 OpenFOAM Foundation
     \\/     M anipulation  |
-------------------------------------------------------------------------------
License
    This file is part of OpenFOAM.

    OpenFOAM is free software: you can redistribute it and/or modify it
    under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    for more details.

    You should have received a copy of the GNU General Public License
    along with OpenFOAM.  If not, see <http://www.gnu.org/licenses/>.

\*---------------------------------------------------------------------------*/

#include "ProcessorTopology.H"
#include "ListOps.H"
#include "Pstream.H"
#include "commSchedule.H"
#include "boolList.H"

// * * * * * * * * * * * * * Private Member Functions  * * * * * * * * * * * //

template<class Patch, class ProcPatch>
Foam::labelList Foam::ProcessorTopology<Patch, ProcPatch>::procNeighbours
(
    const PtrList<Patch>& patches
)
{
    // Determine number of processor neighbours and max neighbour id.
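    //
    // For example: if this processor has processor patches to processors
    // 2 and 5, the returned list is {2, 5} and procPatchMap_[2] and
    // procPatchMap_[5] are set below to the indices of those patches.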

    label nNeighbours = 0;

    label maxNb = 0;

    boolList isNeighbourProc(Pstream::nProcs(), false);

    forAll(patches, patchi)
    {
        const Patch& patch = patches[patchi];

        if (isA<ProcPatch>(patch))
        {
            const ProcPatch& procPatch =
                refCast<const ProcPatch>(patch);

            label pNeighbProcNo = procPatch.neighbProcNo();

            if (!isNeighbourProc[pNeighbProcNo])
            {
                nNeighbours++;

                maxNb = max(maxNb, procPatch.neighbProcNo());

                isNeighbourProc[pNeighbProcNo] = true;
            }
        }
    }

    labelList neighbours(nNeighbours, -1);

    nNeighbours = 0;

    forAll(isNeighbourProc, procI)
    {
        if (isNeighbourProc[procI])
        {
            neighbours[nNeighbours++] = procI;
        }
    }

    procPatchMap_.setSize(maxNb + 1);
    procPatchMap_ = -1;

    forAll(patches, patchi)
    {
        const Patch& patch = patches[patchi];

        if (isA<ProcPatch>(patch))
        {
            const ProcPatch& procPatch =
                refCast<const ProcPatch>(patch);

            // Construct reverse map
            procPatchMap_[procPatch.neighbProcNo()] = patchi;
        }
    }

    return neighbours;
}


// * * * * * * * * * * * * * * * * Constructors  * * * * * * * * * * * * * * //

// Construct from components
template<class Patch, class ProcPatch>
Foam::ProcessorTopology<Patch, ProcPatch>::ProcessorTopology
(
    const PtrList<Patch>& patches
)
:
    labelListList(Pstream::nProcs()),
    patchSchedule_(2*patches.size())
{
    if (Pstream::parRun())
    {
        // Fill my 'slot' with my neighbours
        operator[](Pstream::myProcNo()) = procNeighbours(patches);

        // Distribute to all processors
        Pstream::gatherList(*this);
        Pstream::scatterList(*this);
    }
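
    // After the gather/scatter above every processor holds the complete
    // processor-processor connection table, so the communication schedule
    // below can be built consistently on all processors.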

    if (Pstream::parRun() && Pstream::defaultCommsType == Pstream::scheduled)
    {
        label patchEvali = 0;

        // 1. All non-processor patches
        // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~

        forAll(patches, patchi)
        {
            if (!isA<ProcPatch>(patches[patchi]))
            {
                patchSchedule_[patchEvali].patch = patchi;
                patchSchedule_[patchEvali++].init = true;
                patchSchedule_[patchEvali].patch = patchi;
                patchSchedule_[patchEvali++].init = false;
            }
        }

        // 2. All processor patches
        // ~~~~~~~~~~~~~~~~~~~~~~~~

        // Determine the schedule for all. Insert processor pair once
        // to determine the schedule. Each processor pair stands for both
        // send and receive.
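        //
        // For example, with three processors connected 0-1 and 1-2 the pair
        // list becomes {(0 1), (1 2)}; each pair appears once, keyed on its
        // lower-numbered processor (procI < nbrs[i] below).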

        label nComms = 0;
        forAll(*this, procI)
        {
            nComms += operator[](procI).size();
        }
        DynamicList<labelPair> comms(nComms);

        forAll(*this, procI)
        {
            const labelList& nbrs = operator[](procI);

            forAll(nbrs, i)
            {
                if (procI < nbrs[i])
                {
                    comms.append(labelPair(procI, nbrs[i]));
                }
            }
        }
        comms.shrink();

        // Determine a schedule.
        labelList mySchedule
        (
            commSchedule
            (
                Pstream::nProcs(),
                comms
            ).procSchedule()[Pstream::myProcNo()]
        );
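
        // mySchedule lists, in execution order, the indices into comms of
        // the exchanges this processor takes part in.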

        forAll(mySchedule, iter)
        {
            label commI = mySchedule[iter];

            // Get the other processor
            label nb = comms[commI][0];
            if (nb == Pstream::myProcNo())
            {
                nb = comms[commI][1];
            }

            label patchi = procPatchMap_[nb];
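
            // The two sides of a pair add their init/evaluate entries in
            // opposite order, so that one side's initEvaluate (typically the
            // send) is matched by the other side's evaluate (typically the
            // receive).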
            if (Pstream::myProcNo() > nb)
            {
                patchSchedule_[patchEvali].patch = patchi;
                patchSchedule_[patchEvali++].init = true;
                patchSchedule_[patchEvali].patch = patchi;
                patchSchedule_[patchEvali++].init = false;
            }
            else
            {
                patchSchedule_[patchEvali].patch = patchi;
                patchSchedule_[patchEvali++].init = false;
                patchSchedule_[patchEvali].patch = patchi;
                patchSchedule_[patchEvali++].init = true;
            }
        }
    }
    else
    {
        label patchEvali = 0;

        // 1. All non-processor patches
        // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~

        // Evaluate directly after initEvaluate. They could be separated,
        // as long as they are not intermingled with processor patches,
        // since then e.g. any reduction (parallel traffic) would interfere
        // with the processor swaps.

        forAll(patches, patchi)
        {
            if (!isA<ProcPatch>(patches[patchi]))
            {
                patchSchedule_[patchEvali].patch = patchi;
                patchSchedule_[patchEvali++].init = true;
                patchSchedule_[patchEvali].patch = patchi;
                patchSchedule_[patchEvali++].init = false;
            }
        }

        // 2. All processor patches
        // ~~~~~~~~~~~~~~~~~~~~~~~~

        // 2a. initEvaluate
        forAll(patches, patchi)
        {
            if (isA<ProcPatch>(patches[patchi]))
            {
                patchSchedule_[patchEvali].patch = patchi;
                patchSchedule_[patchEvali++].init = true;
            }
        }

        // 2b. evaluate
        forAll(patches, patchi)
        {
            if (isA<ProcPatch>(patches[patchi]))
            {
                patchSchedule_[patchEvali].patch = patchi;
                patchSchedule_[patchEvali++].init = false;
            }
        }
    }
}
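

// Usage sketch (illustrative only; assumes the common instantiation with
// polyPatch/processorPolyPatch and a patch list such as mesh.boundaryMesh()):
//
//     ProcessorTopology<polyPatch, processorPolyPatch> topo(mesh.boundaryMesh());
//
//     // Processor numbers neighbouring this processor:
//     const labelList& myNbrs = topo[Pstream::myProcNo()];
//
//     // Order in which to initEvaluate/evaluate the boundary patches:
//     const lduSchedule& schedule = topo.patchSchedule();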


// ************************************************************************* //