/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2018,2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief Implements atom distribution functions.
 *
 * \author Berk Hess <hess@kth.se>
 * \ingroup module_domdec
 */
#include "gmxpre.h"

#include "distribute.h"

#include "config.h"

#include <vector>

#include "gromacs/domdec/domdec_network.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/df_history.h"
#include "gromacs/mdtypes/state.h"
#include "gromacs/topology/topology.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/logger.h"

#include "atomdistribution.h"
#include "cellsizes.h"
#include "domdec_internal.h"
#include "utility.h"
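
/*! \brief Distributes a global vector over the ranks using blocking point-to-point sends.
 *
 * On the master rank, packs the elements of \p globalVec for each domain
 * in atom-group order into a temporary buffer and sends each domain its
 * part; the master's own part is copied directly into \p localVec. On
 * non-master ranks, receives the home-atom elements into \p localVec.
 */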
static void distributeVecSendrecv(gmx_domdec_t                   *dd,
                                  gmx::ArrayRef<const gmx::RVec>  globalVec,
                                  gmx::ArrayRef<gmx::RVec>        localVec)
{
    if (DDMASTER(dd))
    {
        const t_block &atomGroups = dd->comm->cgs_gl;

        std::vector<gmx::RVec> buffer;

        for (int rank = 0; rank < dd->nnodes; rank++)
        {
            if (rank != dd->rank)
            {
                const auto &domainGroups = dd->ma->domainGroups[rank];

                buffer.resize(domainGroups.numAtoms);

                int localAtom = 0;
                for (const int &g : domainGroups.atomGroups)
                {
                    for (int globalAtom = atomGroups.index[g]; globalAtom < atomGroups.index[g + 1]; globalAtom++)
                    {
                        buffer[localAtom++] = globalVec[globalAtom];
                    }
                }
                GMX_RELEASE_ASSERT(localAtom == domainGroups.numAtoms, "The number of copied atoms should match the atom count of the domain");

#if GMX_MPI
                MPI_Send(buffer.data(), domainGroups.numAtoms*sizeof(gmx::RVec), MPI_BYTE,
                         rank, rank, dd->mpi_comm_all);
#endif
            }
        }

        const auto &domainGroups = dd->ma->domainGroups[dd->masterrank];
        int         localAtom    = 0;
        for (const int &g : domainGroups.atomGroups)
        {
            for (int globalAtom = atomGroups.index[g]; globalAtom < atomGroups.index[g + 1]; globalAtom++)
            {
                localVec[localAtom++] = globalVec[globalAtom];
            }
        }
    }
    else
    {
#if GMX_MPI
        int numHomeAtoms = dd->comm->atomRanges.numHomeAtoms();
        MPI_Recv(localVec.data(), numHomeAtoms*sizeof(gmx::RVec), MPI_BYTE, dd->masterrank,
                 MPI_ANY_TAG, dd->mpi_comm_all, MPI_STATUS_IGNORE);
#endif
    }
}
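
/*! \brief Distributes a global vector over the ranks using a scatterv operation.
 *
 * On the master rank, packs \p globalVec for all domains contiguously, in
 * atom-group order, into the rvec buffer of AtomDistribution; every rank
 * then receives its home-atom elements in \p localVec.
 */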
static void distributeVecScatterv(gmx_domdec_t                   *dd,
                                  gmx::ArrayRef<const gmx::RVec>  globalVec,
                                  gmx::ArrayRef<gmx::RVec>        localVec)
{
    int *sendCounts    = nullptr;
    int *displacements = nullptr;

    if (DDMASTER(dd))
    {
        AtomDistribution &ma = *dd->ma;

        get_commbuffer_counts(&ma, &sendCounts, &displacements);

        const t_block            &atomGroups = dd->comm->cgs_gl;

        gmx::ArrayRef<gmx::RVec>  buffer    = ma.rvecBuffer;
        int                       localAtom = 0;
        for (int rank = 0; rank < dd->nnodes; rank++)
        {
            const auto &domainGroups = ma.domainGroups[rank];
            for (const int &g : domainGroups.atomGroups)
            {
                for (int globalAtom = atomGroups.index[g]; globalAtom < atomGroups.index[g + 1]; globalAtom++)
                {
                    buffer[localAtom++] = globalVec[globalAtom];
                }
            }
        }
    }

    int numHomeAtoms = dd->comm->atomRanges.numHomeAtoms();
    dd_scatterv(dd, sendCounts, displacements,
                DDMASTER(dd) ? dd->ma->rvecBuffer.data() : nullptr,
                numHomeAtoms*sizeof(gmx::RVec), localVec.data());
}
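
/*! \brief Distributes a global vector over all ranks.
 *
 * Uses point-to-point sends for small rank counts, where they are
 * presumably cheaper than a collective scatter, and scatterv otherwise.
 */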
static void distributeVec(gmx_domdec_t                   *dd,
                          gmx::ArrayRef<const gmx::RVec>  globalVec,
                          gmx::ArrayRef<gmx::RVec>        localVec)
{
    if (dd->nnodes <= c_maxNumRanksUseSendRecvForScatterAndGather)
    {
        distributeVecSendrecv(dd, globalVec, localVec);
    }
    else
    {
        distributeVecScatterv(dd, globalVec, localVec);
    }
}
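
/*! \brief Broadcasts the free-energy df_history struct from the master rank to all ranks. */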
static void dd_distribute_dfhist(gmx_domdec_t *dd, df_history_t *dfhist)
{
    if (dfhist == nullptr)
    {
        return;
    }

    dd_bcast(dd, sizeof(int), &dfhist->bEquil);
    dd_bcast(dd, sizeof(int), &dfhist->nlambda);
    dd_bcast(dd, sizeof(real), &dfhist->wl_delta);

    if (dfhist->nlambda > 0)
    {
        int nlam = dfhist->nlambda;
        dd_bcast(dd, sizeof(int)*nlam, dfhist->n_at_lam);
        dd_bcast(dd, sizeof(real)*nlam, dfhist->wl_histo);
        dd_bcast(dd, sizeof(real)*nlam, dfhist->sum_weights);
        dd_bcast(dd, sizeof(real)*nlam, dfhist->sum_dg);
        dd_bcast(dd, sizeof(real)*nlam, dfhist->sum_minvar);
        dd_bcast(dd, sizeof(real)*nlam, dfhist->sum_variance);

        for (int i = 0; i < nlam; i++)
        {
            dd_bcast(dd, sizeof(real)*nlam, dfhist->accum_p[i]);
            dd_bcast(dd, sizeof(real)*nlam, dfhist->accum_m[i]);
            dd_bcast(dd, sizeof(real)*nlam, dfhist->accum_p2[i]);
            dd_bcast(dd, sizeof(real)*nlam, dfhist->accum_m2[i]);
            dd_bcast(dd, sizeof(real)*nlam, dfhist->Tij[i]);
            dd_bcast(dd, sizeof(real)*nlam, dfhist->Tij_empirical[i]);
        }
    }
}
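
/*! \brief Distributes the global state to the local state on all ranks.
 *
 * Copies the global, non-atom state entries on the master rank, broadcasts
 * them to all ranks, and then distributes the per-atom vectors (x, v, cg_p)
 * that are flagged as present in the state.
 */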
static void dd_distribute_state(gmx_domdec_t *dd,
                                const t_state *state, t_state *state_local,
                                PaddedVector<gmx::RVec> *f)
{
    int nh = state_local->nhchainlength;

    if (DDMASTER(dd))
    {
        GMX_RELEASE_ASSERT(state->nhchainlength == nh, "The global and local Nose-Hoover chain lengths should match");

        for (int i = 0; i < efptNR; i++)
        {
            state_local->lambda[i] = state->lambda[i];
        }
        state_local->fep_state = state->fep_state;
        state_local->veta      = state->veta;
        state_local->vol0      = state->vol0;
        copy_mat(state->box, state_local->box);
        copy_mat(state->box_rel, state_local->box_rel);
        copy_mat(state->boxv, state_local->boxv);
        copy_mat(state->svir_prev, state_local->svir_prev);
        copy_mat(state->fvir_prev, state_local->fvir_prev);
        if (state->dfhist != nullptr)
        {
            copy_df_history(state_local->dfhist, state->dfhist);
        }
        for (int i = 0; i < state_local->ngtc; i++)
        {
            for (int j = 0; j < nh; j++)
            {
                state_local->nosehoover_xi[i*nh+j]  = state->nosehoover_xi[i*nh+j];
                state_local->nosehoover_vxi[i*nh+j] = state->nosehoover_vxi[i*nh+j];
            }
            state_local->therm_integral[i] = state->therm_integral[i];
        }
        for (int i = 0; i < state_local->nnhpres; i++)
        {
            for (int j = 0; j < nh; j++)
            {
                state_local->nhpres_xi[i*nh+j]  = state->nhpres_xi[i*nh+j];
                state_local->nhpres_vxi[i*nh+j] = state->nhpres_vxi[i*nh+j];
            }
        }
        state_local->baros_integral = state->baros_integral;
    }
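    /* The block above filled state_local only on the master rank; now
     * broadcast those entries to all ranks, using state_local as the
     * buffer on every rank.
     */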
    dd_bcast(dd, ((efptNR)*sizeof(real)), state_local->lambda.data());
    dd_bcast(dd, sizeof(int), &state_local->fep_state);
    dd_bcast(dd, sizeof(real), &state_local->veta);
    dd_bcast(dd, sizeof(real), &state_local->vol0);
    dd_bcast(dd, sizeof(state_local->box), state_local->box);
    dd_bcast(dd, sizeof(state_local->box_rel), state_local->box_rel);
    dd_bcast(dd, sizeof(state_local->boxv), state_local->boxv);
    dd_bcast(dd, sizeof(state_local->svir_prev), state_local->svir_prev);
    dd_bcast(dd, sizeof(state_local->fvir_prev), state_local->fvir_prev);
    dd_bcast(dd, ((state_local->ngtc*nh)*sizeof(double)), state_local->nosehoover_xi.data());
    dd_bcast(dd, ((state_local->ngtc*nh)*sizeof(double)), state_local->nosehoover_vxi.data());
    dd_bcast(dd, state_local->ngtc*sizeof(double), state_local->therm_integral.data());
    dd_bcast(dd, ((state_local->nnhpres*nh)*sizeof(double)), state_local->nhpres_xi.data());
    dd_bcast(dd, ((state_local->nnhpres*nh)*sizeof(double)), state_local->nhpres_vxi.data());

    /* communicate df_history -- required for restarting from checkpoint */
    dd_distribute_dfhist(dd, state_local->dfhist);

    dd_resize_state(state_local, f, dd->comm->atomRanges.numHomeAtoms());

    if (state_local->flags & (1 << estX))
    {
        distributeVec(dd, DDMASTER(dd) ? state->x : gmx::ArrayRef<const gmx::RVec>(), state_local->x);
    }
    if (state_local->flags & (1 << estV))
    {
        distributeVec(dd, DDMASTER(dd) ? state->v : gmx::ArrayRef<const gmx::RVec>(), state_local->v);
    }
    if (state_local->flags & (1 << estCGP))
    {
        distributeVec(dd, DDMASTER(dd) ? state->cg_p : gmx::ArrayRef<const gmx::RVec>(), state_local->cg_p);
    }
}
/*! \brief Computes and returns the domain index for the given atom group.
 *
 * Also updates the coordinates in \p pos for PBC, when necessary.
 */
static inline int
computeAtomGroupDomainIndex(const gmx_domdec_t                         &dd,
                            const gmx_ddbox_t                          &ddbox,
                            const matrix                               &triclinicCorrectionMatrix,
                            gmx::ArrayRef < const std::vector < real>>  cellBoundaries,
                            int                                         atomBegin,
                            int                                         atomEnd,
                            const matrix                                box,
                            rvec                                       *pos)
{
    /* Set the reference location cg_cm for assigning the group */
    rvec cog;
    int  numAtoms = atomEnd - atomBegin;
    if (numAtoms == 1)
    {
        copy_rvec(pos[atomBegin], cog);
    }
    else
    {
        real invNumAtoms = 1/static_cast<real>(numAtoms);

        clear_rvec(cog);
        for (int a = atomBegin; a < atomEnd; a++)
        {
            rvec_inc(cog, pos[a]);
        }
        for (int d = 0; d < DIM; d++)
        {
            cog[d] *= invNumAtoms;
        }
    }
    /* Put the charge group in the box and determine the cell index ind */
    ivec ind;
    for (int d = DIM - 1; d >= 0; d--)
    {
        real pos_d = cog[d];
        if (d < dd.npbcdim)
        {
            bool bScrew = (dd.bScrewPBC && d == XX);
            if (ddbox.tric_dir[d] && dd.nc[d] > 1)
            {
                /* Use triclinic coordinates for this dimension */
                for (int j = d + 1; j < DIM; j++)
                {
                    pos_d += cog[j]*triclinicCorrectionMatrix[j][d];
                }
            }
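            /* Fold the center of geometry and the atom positions back into
             * the box along this dimension. With screw PBC, a shift over
             * the x boundary is combined with a rotation that mirrors the
             * y and z coordinates.
             */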
            while (pos_d >= box[d][d])
            {
                pos_d -= box[d][d];
                rvec_dec(cog, box[d]);
                if (bScrew)
                {
                    cog[YY] = box[YY][YY] - cog[YY];
                    cog[ZZ] = box[ZZ][ZZ] - cog[ZZ];
                }
                for (int a = atomBegin; a < atomEnd; a++)
                {
                    rvec_dec(pos[a], box[d]);
                    if (bScrew)
                    {
                        pos[a][YY] = box[YY][YY] - pos[a][YY];
                        pos[a][ZZ] = box[ZZ][ZZ] - pos[a][ZZ];
                    }
                }
            }
            while (pos_d < 0)
            {
                pos_d += box[d][d];
                rvec_inc(cog, box[d]);
                if (bScrew)
                {
                    cog[YY] = box[YY][YY] - cog[YY];
                    cog[ZZ] = box[ZZ][ZZ] - cog[ZZ];
                }
                for (int a = atomBegin; a < atomEnd; a++)
                {
                    rvec_inc(pos[a], box[d]);
                    if (bScrew)
                    {
                        pos[a][YY] = box[YY][YY] - pos[a][YY];
                        pos[a][ZZ] = box[ZZ][ZZ] - pos[a][ZZ];
                    }
                }
            }
        }
        /* This could be done more efficiently */
        ind[d] = 0;
        while (ind[d] + 1 < dd.nc[d] && pos_d >= cellBoundaries[d][ind[d] + 1])
        {
            ind[d]++;
        }
    }

    return dd_index(dd.nc, ind);
}
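
/*! \brief Determines for each rank which atom groups it will own.
 *
 * Returns a vector with, for each rank, the list of global atom-group
 * indices assigned to that rank's domain, and updates the per-domain
 * atom counts in AtomDistribution. Also logs statistics of the
 * distribution over the domains.
 */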
static std::vector < std::vector < int>>
getAtomGroupDistribution(const gmx::MDLogger &mdlog,
                         const gmx_mtop_t &mtop,
                         const matrix box, const gmx_ddbox_t &ddbox,
                         rvec pos[],
                         gmx_domdec_t *dd)
{
    AtomDistribution &ma = *dd->ma;

    /* Clear the count */
    for (int rank = 0; rank < dd->nnodes; rank++)
    {
        ma.domainGroups[rank].numAtoms = 0;
    }

    matrix triclinicCorrectionMatrix;
    make_tric_corr_matrix(dd->npbcdim, box, triclinicCorrectionMatrix);

    ivec       npulse;
    const auto cellBoundaries =
        set_dd_cell_sizes_slb(dd, &ddbox, setcellsizeslbMASTER, npulse);

    const t_block *cgs     = &dd->comm->cgs_gl;
    const int     *cgindex = cgs->index;

    std::vector < std::vector < int>> indices(dd->nnodes);
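
    /* With update groups, atoms are assigned to domains per update group,
     * so that atoms whose updates are coupled (e.g. through constraints)
     * stay in the same domain; otherwise assignment is per charge group.
     */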
    if (dd->comm->useUpdateGroups)
    {
        int atomOffset = 0;
        for (const gmx_molblock_t &molblock : mtop.molblock)
        {
            const auto &updateGrouping = dd->comm->updateGroupingPerMoleculetype[molblock.type];

            for (int mol = 0; mol < molblock.nmol; mol++)
            {
                for (int g = 0; g < updateGrouping.numBlocks(); g++)
                {
                    const auto &block       = updateGrouping.block(g);
                    const int   atomBegin   = atomOffset + block.begin();
                    const int   atomEnd     = atomOffset + block.end();
                    const int   domainIndex =
                        computeAtomGroupDomainIndex(*dd, ddbox, triclinicCorrectionMatrix,
                                                    cellBoundaries,
                                                    atomBegin, atomEnd, box,
                                                    pos);

                    for (int atomIndex : block)
                    {
                        indices[domainIndex].push_back(atomOffset + atomIndex);
                    }
                    ma.domainGroups[domainIndex].numAtoms += block.size();
                }

                atomOffset += updateGrouping.fullRange().end();
            }
        }

        GMX_RELEASE_ASSERT(atomOffset == mtop.natoms, "Should distribute all atoms");
    }
    else
    {
        /* Compute the center of geometry for all atoms or charge groups */
        for (int icg = 0; icg < cgs->nr; icg++)
        {
            int domainIndex =
                computeAtomGroupDomainIndex(*dd, ddbox, triclinicCorrectionMatrix,
                                            cellBoundaries,
                                            cgindex[icg], cgindex[icg + 1], box,
                                            pos);

            indices[domainIndex].push_back(icg);
            ma.domainGroups[domainIndex].numAtoms += cgindex[icg + 1] - cgindex[icg];
        }
    }

    {
        // Use double for the sums to avoid natoms^2 overflowing
        // (65537^2 > 2^32)
        int    nat_sum, nat_min, nat_max;
        double nat2_sum;

        nat_sum  = 0;
        nat2_sum = 0;
        nat_min  = ma.domainGroups[0].numAtoms;
        nat_max  = ma.domainGroups[0].numAtoms;
        for (int rank = 0; rank < dd->nnodes; rank++)
        {
            int numAtoms  = ma.domainGroups[rank].numAtoms;
            nat_sum      += numAtoms;
            // convert to double to avoid integer overflows when squaring
            nat2_sum     += gmx::square(double(numAtoms));
            nat_min       = std::min(nat_min, numAtoms);
            nat_max       = std::max(nat_max, numAtoms);
        }
        nat_sum  /= dd->nnodes;
        nat2_sum /= dd->nnodes;

        GMX_LOG(mdlog.info).appendTextFormatted(
                "Atom distribution over %d domains: av %d stddev %d min %d max %d",
                dd->nnodes,
                nat_sum,
                gmx::roundToInt(std::sqrt(nat2_sum - gmx::square(static_cast<double>(nat_sum)))),
                nat_min, nat_max);
    }

    return indices;
}
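
/*! \brief Distributes the atom groups over the domains.
 *
 * On the master rank, computes the atom-group distribution and scatters
 * the group counts, atom counts and global atom-group indices; every rank
 * then sets up its home atom-group range accordingly.
 */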
static void distributeAtomGroups(const gmx::MDLogger &mdlog,
                                 gmx_domdec_t *dd,
                                 const gmx_mtop_t &mtop,
                                 const matrix box, const gmx_ddbox_t *ddbox,
                                 rvec pos[])
{
    AtomDistribution *ma = dd->ma.get();
    int              *ibuf, buf2[2] = { 0, 0 };
    gmx_bool          bMaster = DDMASTER(dd);

    std::vector < std::vector < int>> groupIndices;

    if (bMaster)
    {
        GMX_ASSERT(box && pos, "box or pos not set on master");

        if (dd->bScrewPBC)
        {
            check_screw_box(box);
        }

        groupIndices = getAtomGroupDistribution(mdlog, mtop, box, *ddbox, pos, dd);

        for (int rank = 0; rank < dd->nnodes; rank++)
        {
            ma->intBuffer[rank*2]     = groupIndices[rank].size();
            ma->intBuffer[rank*2 + 1] = ma->domainGroups[rank].numAtoms;
        }
        ibuf = ma->intBuffer.data();
    }
    else
    {
        ibuf = nullptr;
    }
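    /* Each rank receives two counts: the number of home atom groups and
     * the number of home atoms.
     */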
    dd_scatter(dd, 2*sizeof(int), ibuf, buf2);

    dd->ncg_home = buf2[0];
    dd->comm->atomRanges.setEnd(DDAtomRanges::Type::Home, buf2[1]);
    dd->globalAtomGroupIndices.resize(dd->ncg_home);
    dd->globalAtomIndices.resize(dd->comm->atomRanges.numHomeAtoms());

    if (bMaster)
    {
        ma->atomGroups.clear();

        int groupOffset = 0;
        for (int rank = 0; rank < dd->nnodes; rank++)
        {
            ma->intBuffer[rank]              = groupIndices[rank].size()*sizeof(int);
            ma->intBuffer[dd->nnodes + rank] = groupOffset*sizeof(int);

            ma->atomGroups.insert(ma->atomGroups.end(),
                                  groupIndices[rank].begin(), groupIndices[rank].end());

            ma->domainGroups[rank].atomGroups = gmx::constArrayRefFromArray(ma->atomGroups.data() + groupOffset, groupIndices[rank].size());

            groupOffset += groupIndices[rank].size();
        }
    }

    dd_scatterv(dd,
                bMaster ? ma->intBuffer.data() : nullptr,
                bMaster ? ma->intBuffer.data() + dd->nnodes : nullptr,
                bMaster ? ma->atomGroups.data() : nullptr,
                dd->ncg_home*sizeof(int), dd->globalAtomGroupIndices.data());

    if (debug)
    {
        fprintf(debug, "Home charge groups:\n");
        for (int i = 0; i < dd->ncg_home; i++)
        {
            fprintf(debug, " %d", dd->globalAtomGroupIndices[i]);
            if (i % 10 == 9)
            {
                fprintf(debug, "\n");
            }
        }
        fprintf(debug, "\n");
    }
}
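
/* Distributes the atom groups and then the state; interface declared in distribute.h. */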
void distributeState(const gmx::MDLogger &mdlog,
                     gmx_domdec_t *dd,
                     const gmx_mtop_t &mtop,
                     t_state *state_global,
                     const gmx_ddbox_t &ddbox,
                     t_state *state_local,
                     PaddedVector<gmx::RVec> *f)
{
    rvec *xGlobal = (DDMASTER(dd) ? state_global->x.rvec_array() : nullptr);

    distributeAtomGroups(mdlog, dd, mtop,
                         DDMASTER(dd) ? state_global->box : nullptr,
                         &ddbox, xGlobal);

    dd_distribute_state(dd, state_global, state_local, f);
}