src/gromacs/domdec/collect.cpp
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2018,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief Implements functions to collect state data to the master rank.
 *
 * \author Berk Hess <hess@kth.se>
 *
 * \ingroup module_domdec
 */
#include "gmxpre.h"

#include "collect.h"

#include "config.h"

#include "gromacs/domdec/domdec_network.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdtypes/state.h"
#include "gromacs/utility/fatalerror.h"

#include "atomdistribution.h"
#include "distribute.h"
#include "domdec_internal.h"
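
/*! \brief Gathers the global atom-group indices and home-atom counts of
 * all domains on the master rank, so the master knows which atoms each
 * rank currently owns. Does nothing when the master's cached distribution
 * (master_cg_ddp_count) is already up to date for this ddpCount.
 */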
static void dd_collect_cg(gmx_domdec_t*            dd,
                          const int                ddpCount,
                          const int                ddpCountCgGl,
                          gmx::ArrayRef<const int> localCGNumbers)
{
    if (ddpCount == dd->comm->master_cg_ddp_count)
    {
        /* The master has the correct distribution */
        return;
    }

    gmx::ArrayRef<const int> atomGroups;
    int                      nat_home = 0;

    if (ddpCount == dd->ddp_count)
    {
        /* The local state and DD are in sync, use the DD indices */
        atomGroups = gmx::constArrayRefFromArray(dd->globalAtomGroupIndices.data(), dd->ncg_home);
        nat_home   = dd->comm->atomRanges.numHomeAtoms();
    }
    else if (ddpCountCgGl == ddpCount)
    {
        /* The DD is out of sync with the local state, but we have stored
         * the cg indices with the local state, so we can use those.
         */
        atomGroups = localCGNumbers;
        nat_home   = atomGroups.size();
    }
    else
    {
        gmx_incons(
                "Attempted to collect a vector for a state for which the charge group distribution "
                "is unknown");
    }

    AtomDistribution* ma = dd->ma.get();

    /* Collect the charge group and atom counts on the master */
    int localBuffer[2] = { static_cast<int>(atomGroups.size()), nat_home };
    dd_gather(dd, 2 * sizeof(int), localBuffer, DDMASTER(dd) ? ma->intBuffer.data() : nullptr);

    if (DDMASTER(dd))
    {
        int groupOffset = 0;
        for (int rank = 0; rank < dd->nnodes; rank++)
        {
            auto& domainGroups = ma->domainGroups[rank];
            int   numGroups    = ma->intBuffer[2 * rank];
            domainGroups.atomGroups =
                    gmx::constArrayRefFromArray(ma->atomGroups.data() + groupOffset, numGroups);
            domainGroups.numAtoms = ma->intBuffer[2 * rank + 1];
            groupOffset += numGroups;
        }

        if (debug)
        {
            fprintf(debug, "Initial charge group distribution: ");
            for (int rank = 0; rank < dd->nnodes; rank++)
            {
                fprintf(debug, " %td", ma->domainGroups[rank].atomGroups.ssize());
            }
            fprintf(debug, "\n");
        }

        /* Make byte counts and indices: reuse intBuffer so that its first
         * nnodes entries hold the receive byte count of each rank and the
         * next nnodes entries hold the byte displacement of each rank's
         * block in the receive buffer.
         */
        int offset = 0;
        for (int rank = 0; rank < dd->nnodes; rank++)
        {
            int numGroups                    = ma->domainGroups[rank].atomGroups.size();
            ma->intBuffer[rank]              = numGroups * sizeof(int);
            ma->intBuffer[dd->nnodes + rank] = offset * sizeof(int);
            offset += numGroups;
        }
    }

    /* Collect the charge group indices on the master */
    dd_gatherv(dd, atomGroups.size() * sizeof(int), atomGroups.data(),
               DDMASTER(dd) ? ma->intBuffer.data() : nullptr,
               DDMASTER(dd) ? ma->intBuffer.data() + dd->nnodes : nullptr,
               DDMASTER(dd) ? ma->atomGroups.data() : nullptr);

    dd->comm->master_cg_ddp_count = ddpCount;
}
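
/*! \brief Collects a local rvec vector on the master via point-to-point
 * MPI_Send/MPI_Recv from each rank; used by dd_collect_vec() when the
 * rank count is at most c_maxNumRanksUseSendRecvForScatterAndGather.
 */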
static void dd_collect_vec_sendrecv(gmx_domdec_t*                  dd,
                                    gmx::ArrayRef<const gmx::RVec> lv,
                                    gmx::ArrayRef<gmx::RVec>       v)
{
    if (!DDMASTER(dd))
    {
#if GMX_MPI
        const int numHomeAtoms = dd->comm->atomRanges.numHomeAtoms();
        MPI_Send(const_cast<void*>(static_cast<const void*>(lv.data())),
                 numHomeAtoms * sizeof(rvec), MPI_BYTE, dd->masterrank, dd->rank, dd->mpi_comm_all);
#endif
    }
    else
    {
        AtomDistribution& ma = *dd->ma;

        /* Copy the master's own home atoms directly into the global vector */
        int rank      = dd->masterrank;
        int localAtom = 0;
        for (const int& globalAtom : ma.domainGroups[rank].atomGroups)
        {
            copy_rvec(lv[localAtom++], v[globalAtom]);
        }

        /* Receive the home atoms of every other rank and scatter them
         * to their global locations.
         */
        for (int rank = 0; rank < dd->nnodes; rank++)
        {
            if (rank != dd->rank)
            {
                const auto& domainGroups = ma.domainGroups[rank];

                GMX_RELEASE_ASSERT(v.data() != ma.rvecBuffer.data(),
                                   "We need different communication and return buffers");

                /* When we send/recv instead of scatter/gather, we might need
                 * to increase the communication buffer size here.
                 */
                if (static_cast<size_t>(domainGroups.numAtoms) > ma.rvecBuffer.size())
                {
                    ma.rvecBuffer.resize(domainGroups.numAtoms);
                }

#if GMX_MPI
                MPI_Recv(ma.rvecBuffer.data(), domainGroups.numAtoms * sizeof(rvec), MPI_BYTE,
                         rank, rank, dd->mpi_comm_all, MPI_STATUS_IGNORE);
#endif
                int localAtom = 0;
                for (const int& globalAtom : domainGroups.atomGroups)
                {
                    copy_rvec(ma.rvecBuffer[localAtom++], v[globalAtom]);
                }
            }
        }
    }
}
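
/*! \brief Collects a local rvec vector on the master via a single
 * collective gatherv; used by dd_collect_vec() for larger rank counts.
 */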
static void dd_collect_vec_gatherv(gmx_domdec_t*                  dd,
                                   gmx::ArrayRef<const gmx::RVec> lv,
                                   gmx::ArrayRef<gmx::RVec>       v)
{
    int* recvCounts    = nullptr;
    int* displacements = nullptr;

    if (DDMASTER(dd))
    {
        get_commbuffer_counts(dd->ma.get(), &recvCounts, &displacements);
    }

    const int numHomeAtoms = dd->comm->atomRanges.numHomeAtoms();
    dd_gatherv(dd, numHomeAtoms * sizeof(rvec), lv.data(), recvCounts, displacements,
               DDMASTER(dd) ? dd->ma->rvecBuffer.data() : nullptr);

    if (DDMASTER(dd))
    {
        const AtomDistribution& ma = *dd->ma;

        /* Copy the received blocks to each rank's global atom locations */
        int bufferAtom = 0;
        for (int rank = 0; rank < dd->nnodes; rank++)
        {
            const auto& domainGroups = ma.domainGroups[rank];

            for (const int& globalAtom : domainGroups.atomGroups)
            {
                copy_rvec(ma.rvecBuffer[bufferAtom++], v[globalAtom]);
            }
        }
    }
}
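
/*! \brief Gathers the local vector of every rank into the global vector on
 * the master, refreshing the master's atom distribution first if needed,
 * then choosing between send/recv and gatherv based on the rank count.
 */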
void dd_collect_vec(gmx_domdec_t*                  dd,
                    const int                      ddpCount,
                    const int                      ddpCountCgGl,
                    gmx::ArrayRef<const int>       localCGNumbers,
                    gmx::ArrayRef<const gmx::RVec> localVector,
                    gmx::ArrayRef<gmx::RVec>       globalVector)
{
    dd_collect_cg(dd, ddpCount, ddpCountCgGl, localCGNumbers);

    if (dd->nnodes <= c_maxNumRanksUseSendRecvForScatterAndGather)
    {
        dd_collect_vec_sendrecv(dd, localVector, globalVector);
    }
    else
    {
        dd_collect_vec_gatherv(dd, localVector, globalVector);
    }
}
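
/*! \brief Collects the full local state on the master rank: the global
 * quantities (box, coupling variables, lambdas) are copied directly on the
 * master, and the per-atom vectors flagged in state_local->flags are
 * gathered with dd_collect_vec().
 */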
void dd_collect_state(gmx_domdec_t* dd, const t_state* state_local, t_state* state)
{
    int nh = state_local->nhchainlength;

    if (DDMASTER(dd))
    {
        GMX_RELEASE_ASSERT(state->nhchainlength == nh,
                           "The global and local Nose-Hoover chain lengths should match");

        for (int i = 0; i < efptNR; i++)
        {
            state->lambda[i] = state_local->lambda[i];
        }
        state->fep_state = state_local->fep_state;
        state->veta      = state_local->veta;
        state->vol0      = state_local->vol0;
        copy_mat(state_local->box, state->box);
        copy_mat(state_local->boxv, state->boxv);
        copy_mat(state_local->svir_prev, state->svir_prev);
        copy_mat(state_local->fvir_prev, state->fvir_prev);
        copy_mat(state_local->pres_prev, state->pres_prev);

        for (int i = 0; i < state_local->ngtc; i++)
        {
            for (int j = 0; j < nh; j++)
            {
                state->nosehoover_xi[i * nh + j]  = state_local->nosehoover_xi[i * nh + j];
                state->nosehoover_vxi[i * nh + j] = state_local->nosehoover_vxi[i * nh + j];
            }
            state->therm_integral[i] = state_local->therm_integral[i];
        }
        for (int i = 0; i < state_local->nnhpres; i++)
        {
            for (int j = 0; j < nh; j++)
            {
                state->nhpres_xi[i * nh + j]  = state_local->nhpres_xi[i * nh + j];
                state->nhpres_vxi[i * nh + j] = state_local->nhpres_vxi[i * nh + j];
            }
        }
        state->baros_integral     = state_local->baros_integral;
        state->pull_com_prev_step = state_local->pull_com_prev_step;
    }
    if (state_local->flags & (1 << estX))
    {
        auto globalXRef = state ? state->x : gmx::ArrayRef<gmx::RVec>();
        dd_collect_vec(dd, state_local->ddp_count, state_local->ddp_count_cg_gl, state_local->cg_gl,
                       state_local->x, globalXRef);
    }
    if (state_local->flags & (1 << estV))
    {
        auto globalVRef = state ? state->v : gmx::ArrayRef<gmx::RVec>();
        dd_collect_vec(dd, state_local->ddp_count, state_local->ddp_count_cg_gl, state_local->cg_gl,
                       state_local->v, globalVRef);
    }
    if (state_local->flags & (1 << estCGP))
    {
        auto globalCgpRef = state ? state->cg_p : gmx::ArrayRef<gmx::RVec>();
        dd_collect_vec(dd, state_local->ddp_count, state_local->ddp_count_cg_gl, state_local->cg_gl,
                       state_local->cg_p, globalCgpRef);
    }
}