/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2008-2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief This file defines functions for (mostly) the domdec module
 * to use MPI functionality
 *
 * \author Berk Hess <hess@kth.se>
 *
 * \ingroup module_domdec
 */
#include "gmxpre.h"

#include "domdec_network.h"

#include "config.h"

#include <cstring>

#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/utility/gmxmpi.h"

#include "domdec_internal.h"


/*! \brief Returns the MPI rank of the domain decomposition master rank */
#define DDMASTERRANK(dd) ((dd)->masterrank)


/*! \brief Move data of type \p T in the communication region one cell along
 * the domain decomposition
 *
 * Moves in the dimension indexed by ddDimensionIndex, either forward
 * (direction=dddirForward) or backward (direction=dddirBackward).
 */
template<typename T>
void ddSendrecv(const struct gmx_domdec_t* dd,
                int                        ddDimensionIndex,
                int                        direction,
                T*                         sendBuffer,
                int                        numElementsToSend,
                T*                         receiveBuffer,
                int                        numElementsToReceive)
{
#if GMX_MPI
    int sendRank    = dd->neighbor[ddDimensionIndex][direction == dddirForward ? 0 : 1];
    int receiveRank = dd->neighbor[ddDimensionIndex][direction == dddirForward ? 1 : 0];

    constexpr int mpiTag = 0;
    MPI_Status    mpiStatus;
    if (numElementsToSend > 0 && numElementsToReceive > 0)
    {
        MPI_Sendrecv(sendBuffer, numElementsToSend * sizeof(T), MPI_BYTE, sendRank, mpiTag,
                     receiveBuffer, numElementsToReceive * sizeof(T), MPI_BYTE, receiveRank,
                     mpiTag, dd->mpi_comm_all, &mpiStatus);
    }
    else if (numElementsToSend > 0)
    {
        MPI_Send(sendBuffer, numElementsToSend * sizeof(T), MPI_BYTE, sendRank, mpiTag, dd->mpi_comm_all);
    }
    else if (numElementsToReceive > 0)
    {
        MPI_Recv(receiveBuffer, numElementsToReceive * sizeof(T), MPI_BYTE, receiveRank, mpiTag,
                 dd->mpi_comm_all, &mpiStatus);
    }
#else  // GMX_MPI
    GMX_UNUSED_VALUE(dd);
    GMX_UNUSED_VALUE(ddDimensionIndex);
    GMX_UNUSED_VALUE(direction);
    GMX_UNUSED_VALUE(sendBuffer);
    GMX_UNUSED_VALUE(numElementsToSend);
    GMX_UNUSED_VALUE(receiveBuffer);
    GMX_UNUSED_VALUE(numElementsToReceive);
#endif // GMX_MPI
}

//! Specialization of extern template for int
template void ddSendrecv(const gmx_domdec_t*, int, int, int*, int, int*, int);
//! Specialization of extern template for real
template void ddSendrecv(const gmx_domdec_t*, int, int, real*, int, real*, int);
//! Specialization of extern template for rvec
template void ddSendrecv(const gmx_domdec_t*, int, int, rvec*, int, rvec*, int);
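
/* ArrayRef flavor of ddSendrecv(); forwards to the pointer/size overload above,
 * using the sizes of the two views as the send and receive element counts */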
template<typename T>
void ddSendrecv(const gmx_domdec_t* dd,
                int                 ddDimensionIndex,
                int                 direction,
                gmx::ArrayRef<T>    sendBuffer,
                gmx::ArrayRef<T>    receiveBuffer)
{
    ddSendrecv(dd, ddDimensionIndex, direction, sendBuffer.data(), sendBuffer.size(),
               receiveBuffer.data(), receiveBuffer.size());
}

//! Specialization of extern template for int
template void ddSendrecv(const gmx_domdec_t*, int, int, gmx::ArrayRef<int>, gmx::ArrayRef<int>);
//! Specialization of extern template for real
template void ddSendrecv(const gmx_domdec_t*, int, int, gmx::ArrayRef<real>, gmx::ArrayRef<real>);
//! Specialization of extern template for gmx::RVec
template void ddSendrecv(const gmx_domdec_t*, int, int, gmx::ArrayRef<gmx::RVec>, gmx::ArrayRef<gmx::RVec>);
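
/* Moves rvec data both forward and backward along DD dimension index ddimind in a single
 * call: buf_s_fw is sent to the forward neighbor while buf_r_fw is received from the
 * backward neighbor, and correspondingly for the _bw buffers */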
void dd_sendrecv2_rvec(const struct gmx_domdec_t gmx_unused* dd,
                       int gmx_unused ddimind,
                       rvec gmx_unused* buf_s_fw,
                       int gmx_unused n_s_fw,
                       rvec gmx_unused* buf_r_fw,
                       int gmx_unused n_r_fw,
                       rvec gmx_unused* buf_s_bw,
                       int gmx_unused n_s_bw,
                       rvec gmx_unused* buf_r_bw,
                       int gmx_unused n_r_bw)
{
#if GMX_MPI
    int         rank_fw, rank_bw, nreq;
    MPI_Request req[4];
    MPI_Status  stat[4];

    rank_fw = dd->neighbor[ddimind][0];
    rank_bw = dd->neighbor[ddimind][1];

    if (!dd->comm->ddSettings.useSendRecv2)
    {
        /* Try to send and receive in two directions simultaneously.
         * Should be faster, especially on machines
         * with full 3D communication networks.
         * However, it could be that communication libraries are
         * optimized for MPI_Sendrecv and non-blocking MPI calls
         * are slower.
         * SendRecv2 can be turned on with the env.var. GMX_DD_SENDRECV2
         */
        nreq = 0;
        if (n_r_fw)
        {
            MPI_Irecv(buf_r_fw[0], n_r_fw * sizeof(rvec), MPI_BYTE, rank_bw, 0, dd->mpi_comm_all,
                      &req[nreq++]);
        }
        if (n_r_bw)
        {
            MPI_Irecv(buf_r_bw[0], n_r_bw * sizeof(rvec), MPI_BYTE, rank_fw, 1, dd->mpi_comm_all,
                      &req[nreq++]);
        }
        if (n_s_fw)
        {
            MPI_Isend(buf_s_fw[0], n_s_fw * sizeof(rvec), MPI_BYTE, rank_fw, 0, dd->mpi_comm_all,
                      &req[nreq++]);
        }
        if (n_s_bw)
        {
            MPI_Isend(buf_s_bw[0], n_s_bw * sizeof(rvec), MPI_BYTE, rank_bw, 1, dd->mpi_comm_all,
                      &req[nreq++]);
        }
        if (nreq)
        {
            MPI_Waitall(nreq, req, stat); //NOLINT(clang-analyzer-optin.mpi.MPI-Checker)
        }
    }
    else
    {
        /* Communicate in two ordered phases.
         * This is slower, even on a dual-core Opteron cluster
         * with a single full-duplex network connection per machine.
         */
        /* Forward */
        MPI_Sendrecv(buf_s_fw[0], n_s_fw * sizeof(rvec), MPI_BYTE, rank_fw, 0, buf_r_fw[0],
                     n_r_fw * sizeof(rvec), MPI_BYTE, rank_bw, 0, dd->mpi_comm_all, &stat[0]);
        /* Backward */
        MPI_Sendrecv(buf_s_bw[0], n_s_bw * sizeof(rvec), MPI_BYTE, rank_bw, 0, buf_r_bw[0],
                     n_r_bw * sizeof(rvec), MPI_BYTE, rank_fw, 0, dd->mpi_comm_all, &stat[0]);
    }
#endif
}
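
/* Broadcasts nbytes of data from the DD master rank to all ranks in dd->mpi_comm_all;
 * a no-op when there is only a single rank */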
void dd_bcast(const gmx_domdec_t gmx_unused* dd, int gmx_unused nbytes, void gmx_unused* data)
{
#if GMX_MPI
    if (dd->nnodes > 1)
    {
        MPI_Bcast(data, nbytes, MPI_BYTE, DDMASTERRANK(dd), dd->mpi_comm_all);
    }
#endif
}
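
/* Broadcast as in dd_bcast(), but the master rank first copies src to dest, so that
 * afterwards all ranks, including the master, hold the data in dest */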
void dd_bcastc(const gmx_domdec_t* dd, int nbytes, void* src, void* dest)
{
    if (DDMASTER(dd) || dd->nnodes == 1)
    {
        memcpy(dest, src, nbytes);
    }
#if GMX_MPI
    if (dd->nnodes > 1)
    {
        MPI_Bcast(dest, nbytes, MPI_BYTE, DDMASTERRANK(dd), dd->mpi_comm_all);
    }
#endif
}
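
/* Scatters nbytes per rank from src on the DD master rank to dest on every rank */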
void dd_scatter(const gmx_domdec_t gmx_unused* dd, int gmx_unused nbytes, const void gmx_unused* src, void* dest)
{
#if GMX_MPI
    if (dd->nnodes > 1)
    {
        /* Some MPI implementations don't specify const */
        MPI_Scatter(const_cast<void*>(src), nbytes, MPI_BYTE, dest, nbytes, MPI_BYTE,
                    DDMASTERRANK(dd), dd->mpi_comm_all);
    }
    else
#endif
    {
        /* 1 rank, either we copy everything, or dest=src: nothing to do */
        if (dest != src)
        {
            memcpy(dest, src, nbytes);
        }
    }
}
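
/* Gathers nbytes from src on every rank into dest on the DD master rank */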
void dd_gather(const gmx_domdec_t gmx_unused* dd,
               int gmx_unused nbytes,
               const void gmx_unused* src,
               void gmx_unused* dest)
{
#if GMX_MPI
    /* Some MPI implementations don't specify const */
    MPI_Gather(const_cast<void*>(src), nbytes, MPI_BYTE, dest, nbytes, MPI_BYTE, DDMASTERRANK(dd),
               dd->mpi_comm_all);
#endif
}
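
/* Variable-count scatter: rank i receives scounts[i] bytes, taken at byte offset disps[i]
 * of sbuf on the DD master rank, into its rbuf */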
void dd_scatterv(const gmx_domdec_t gmx_unused* dd,
                 int gmx_unused* scounts,
                 int gmx_unused* disps,
                 const void*     sbuf,
                 int             rcount,
                 void*           rbuf)
{
#if GMX_MPI
    int dum;

    if (dd->nnodes > 1)
    {
        if (rcount == 0)
        {
            /* MPI does not allow NULL pointers */
            rbuf = &dum;
        }
        /* Some MPI implementations don't specify const */
        MPI_Scatterv(const_cast<void*>(sbuf), scounts, disps, MPI_BYTE, rbuf, rcount, MPI_BYTE,
                     DDMASTERRANK(dd), dd->mpi_comm_all);
    }
    else
#endif
    {
        /* 1 rank, either we copy everything, or rbuf=sbuf: nothing to do */
        if (rbuf != sbuf)
        {
            memcpy(rbuf, sbuf, rcount);
        }
    }
}
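
/* Variable-count gather: scount bytes from sbuf on each rank are collected into rbuf on
 * the DD master rank, at byte offsets disps with byte counts rcounts */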
void dd_gatherv(const gmx_domdec_t gmx_unused* dd,
                int gmx_unused scount,
                const void gmx_unused* sbuf,
                int gmx_unused* rcounts,
                int gmx_unused* disps,
                void gmx_unused* rbuf)
{
#if GMX_MPI
    int dum;

    if (scount == 0)
    {
        /* MPI does not allow NULL pointers */
        sbuf = &dum;
    }
    /* Some MPI implementations don't specify const */
    MPI_Gatherv(const_cast<void*>(sbuf), scount, MPI_BYTE, rbuf, rcounts, disps, MPI_BYTE,
                DDMASTERRANK(dd), dd->mpi_comm_all);
#endif
}