Properly finalize MPI on mdrun -version. Fixes #1313
[gromacs.git] / include / types / commrec.h
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team,
 * check out http://www.gromacs.org for more information.
 * Copyright (c) 2012,2013, by the GROMACS development team, led by
 * David van der Spoel, Berk Hess, Erik Lindahl, and including many
 * others, as listed in the AUTHORS file in the top-level source
 * directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#ifndef _commrec_h
#define _commrec_h

#ifdef GMX_LIB_MPI
#include <mpi.h>
#else
#ifdef GMX_THREAD_MPI
#include "../thread_mpi/tmpi.h"
#include "../thread_mpi/mpi_bindings.h"
#else
typedef void* MPI_Comm;
typedef void* MPI_Request;
typedef void* MPI_Group;
#endif
#endif

#include "idef.h"

#ifdef __cplusplus
extern "C" {
#endif
#define DD_MAXZONE  8
#define DD_MAXIZONE 4

typedef struct gmx_domdec_master *gmx_domdec_master_p_t;

typedef struct {
    int  j0;     /* j-zone start               */
    int  j1;     /* j-zone end                 */
    int  cg1;    /* i-charge-group end         */
    int  jcg0;   /* j-charge-group start       */
    int  jcg1;   /* j-charge-group end         */
    ivec shift0; /* Minimum shifts to consider */
    ivec shift1; /* Maximum shifts to consider */
} gmx_domdec_ns_ranges_t;

typedef struct {
    rvec x0;    /* Zone lower corner in triclinic coordinates         */
    rvec x1;    /* Zone upper corner in triclinic coordinates         */
    rvec bb_x0; /* Zone bounding box lower corner in Cartesian coords */
    rvec bb_x1; /* Zone bounding box upper corner in Cartesian coords */
} gmx_domdec_zone_size_t;

typedef struct {
    /* The number of zones including the home zone */
    int                    n;
    /* The shift of the zones with respect to the home zone */
    ivec                   shift[DD_MAXZONE];
    /* The charge group boundaries for the zones */
    int                    cg_range[DD_MAXZONE+1];
    /* The number of neighbor search zones with i-particles */
    int                    nizone;
    /* The neighbor search charge group ranges for each i-zone */
    gmx_domdec_ns_ranges_t izone[DD_MAXIZONE];
    /* Boundaries of the zones */
    gmx_domdec_zone_size_t size[DD_MAXZONE];
    /* The cg density of the home zone */
    real                   dens_zone0;
} gmx_domdec_zones_t;
typedef struct gmx_ga2la *gmx_ga2la_t;

typedef struct gmx_hash *gmx_hash_t;

typedef struct gmx_reverse_top *gmx_reverse_top_p_t;

typedef struct gmx_domdec_constraints *gmx_domdec_constraints_p_t;

typedef struct gmx_domdec_specat_comm *gmx_domdec_specat_comm_p_t;

typedef struct gmx_domdec_comm *gmx_domdec_comm_p_t;

typedef struct gmx_pme_comm_n_box *gmx_pme_comm_n_box_p_t;
typedef struct {
    int  npbcdim;
    int  nboundeddim;
    rvec box0;
    rvec box_size;
    /* Tells if the box is skewed for each of the three Cartesian directions */
    ivec tric_dir;
    rvec skew_fac;
    /* Orthogonal vectors for triclinic cells, Cartesian index */
    rvec v[DIM][DIM];
    /* Normal vectors for the cell walls */
    rvec normal[DIM];
} gmx_ddbox_t;
typedef struct {
    /* These buffers are used as destination buffers if MPI_IN_PLACE isn't
       supported. */
    int             *ibuf; /* for ints */
    int              ibuf_alloc;

    gmx_large_int_t *libuf;
    int              libuf_alloc;

    float           *fbuf; /* for floats */
    int              fbuf_alloc;

    double          *dbuf; /* for doubles */
    int              dbuf_alloc;
} mpi_in_place_buf_t;
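
/* Illustrative sketch only, not part of this header: how a reduction helper
 * could fall back on the buffers above when MPI_IN_PLACE is not available.
 * The helper name (sum_ints_sketch), the MPI_IN_PLACE_EXISTS guard and the
 * srenew() call are assumptions made for this example; the real summation
 * routines live in network.c.
 *
 *   static void sum_ints_sketch(int nr, int r[], mpi_in_place_buf_t *buf,
 *                               MPI_Comm comm)
 *   {
 *   #ifdef MPI_IN_PLACE_EXISTS
 *       // reduce directly into r, no scratch buffer needed
 *       MPI_Allreduce(MPI_IN_PLACE, r, nr, MPI_INT, MPI_SUM, comm);
 *   #else
 *       int i;
 *       // grow the int scratch buffer on demand
 *       if (nr > buf->ibuf_alloc)
 *       {
 *           buf->ibuf_alloc = nr;
 *           srenew(buf->ibuf, buf->ibuf_alloc);
 *       }
 *       // copy the input aside, then reduce from the copy back into r
 *       for (i = 0; i < nr; i++)
 *       {
 *           buf->ibuf[i] = r[i];
 *       }
 *       MPI_Allreduce(buf->ibuf, r, nr, MPI_INT, MPI_SUM, comm);
 *   #endif
 *   }
 */
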
typedef struct {
    /* The DD particle-particle nodes only */
    /* The communication setup within the all-communicator is
     * defined in dd->comm in domdec.c
     */
    int                    nnodes;
    MPI_Comm               mpi_comm_all;
    /* Use MPI_Sendrecv communication instead of non-blocking calls */
    gmx_bool               bSendRecv2;
    /* The local DD cell index and rank */
    ivec                   ci;
    int                    rank;
    ivec                   master_ci;
    int                    masterrank;
    /* Communication with the PME only nodes */
    int                    pme_nodeid;
    gmx_bool               pme_receive_vir_ener;
    gmx_pme_comm_n_box_p_t cnb;
    int                    nreq_pme;
    MPI_Request            req_pme[4];

    /* The communication setup, identical for each cell, Cartesian index */
    ivec                   nc;
    int                    ndim;
    ivec                   dim; /* indexed by 0 to ndim */
    gmx_bool               bGridJump;

    /* PBC from dim 0 to npbcdim */
    int                    npbcdim;

    /* Screw PBC? */
    gmx_bool               bScrewPBC;

    /* Forward and backward neighboring cells, indexed by 0 to ndim */
    int                    neighbor[DIM][2];

    /* Only available on the master node */
    gmx_domdec_master_p_t  ma;

    /* Are there inter charge group constraints */
    gmx_bool               bInterCGcons;
    gmx_bool               bInterCGsettles;

    /* Global atom number to interaction list */
    gmx_reverse_top_p_t    reverse_top;
    int                    nbonded_global;
    int                    nbonded_local;

    /* The number of inter charge-group exclusions */
    int                    n_intercg_excl;

    /* Vsite stuff */
    gmx_hash_t                 ga2la_vsite;
    gmx_domdec_specat_comm_p_t vsite_comm;

    /* Constraint stuff */
    gmx_domdec_constraints_p_t constraints;
    gmx_domdec_specat_comm_p_t constraint_comm;

    /* The local to global charge group index and local cg to local atom index */
    int  ncg_home;
    int  ncg_tot;
    int *index_gl;
    int *cgindex;
    int  cg_nalloc;
    /* Local atom to local cg index, only for special cases */
    int *la2lc;
    int  la2lc_nalloc;

    /* The number of home atoms */
    int  nat_home;
    /* The total number of atoms: home and received zones */
    int  nat_tot;
    /* Index from the local atoms to the global atoms */
    int *gatindex;
    int  gatindex_nalloc;

    /* Global atom number to local atom number list */
    gmx_ga2la_t ga2la;

    /* Communication stuff */
    gmx_domdec_comm_p_t comm;

    /* The partitioning count, to keep track of the state */
    gmx_large_int_t ddp_count;

    /* gmx_pme_recv_f buffer */
    int   pme_recv_f_alloc;
    rvec *pme_recv_f_buf;

} gmx_domdec_t;
typedef struct gmx_partdec *gmx_partdec_p_t;

typedef struct {
    int       nsim;
    int       sim;
    MPI_Group mpi_group_masters;
    MPI_Comm  mpi_comm_masters;
    /* These buffers are used as destination buffers if MPI_IN_PLACE isn't
       supported. */
    mpi_in_place_buf_t *mpb;
} gmx_multisim_t;
#define DUTY_PP  (1<<0)
#define DUTY_PME (1<<1)
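
/* Illustrative only: duty is used as a bit mask (see t_commrec below), so a
 * rank doing both particle-particle and PME work carries
 * (DUTY_PP | DUTY_PME), and a dedicated PME rank can be detected with e.g.
 *
 *   if (!(cr->duty & DUTY_PP))
 *   {
 *       // this rank handles only PME work
 *   }
 */
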
typedef struct {
    int      bUse;       /* Use the intra/inter-node communicators below     */
    MPI_Comm comm_intra; /* Communicator within this physical node           */
    int      rank_intra; /* Our rank within comm_intra                       */
    MPI_Comm comm_inter; /* Communicator between nodes, via the node masters */

} gmx_nodecomm_t;
typedef struct {
    /* The nodeids in one sim are numbered sequentially from 0.
     * All communication within a simulation should happen
     * in mpi_comm_mysim, or its subset mpi_comm_mygroup.
     */
    int sim_nodeid, nnodes, npmenodes;

    /* thread numbers: */
    /* Not used yet: int threadid, nthreads; */
    /* The nodeid in the PP/PME, PP or PME group */
    int      nodeid;
    MPI_Comm mpi_comm_mysim;
    MPI_Comm mpi_comm_mygroup;

    /* MPI ranks within a physical node for hardware access */
    int nrank_intranode;    /* nr of ranks on this physical node              */
    int rank_intranode;     /* our rank on this physical node                 */
    int nrank_pp_intranode; /* as nrank_intranode, for particle-particle only */
    int rank_pp_intranode;  /* as rank_intranode, for particle-particle only  */

    gmx_nodecomm_t nc;

    /* For domain decomposition */
    gmx_domdec_t *dd;

    /* For particle decomposition */
    gmx_partdec_p_t pd;

    /* The duties of this node, see the defines above */
    int duty;

    gmx_multisim_t *ms;

    /* These buffers are used as destination buffers if MPI_IN_PLACE isn't
       supported. */
    mpi_in_place_buf_t *mpb;
} t_commrec;
#define MASTERNODE(cr)     (((cr)->nodeid == 0) || !PAR(cr))
/* #define MASTERTHREAD(cr) ((cr)->threadid == 0) */
/* #define MASTER(cr)       (MASTERNODE(cr) && MASTERTHREAD(cr)) */
#define MASTER(cr)         MASTERNODE(cr)
#define SIMMASTER(cr)      ((MASTER(cr) && ((cr)->duty & DUTY_PP)) || !PAR(cr))
#define NODEPAR(cr)        ((cr)->nnodes > 1)
/* #define THREADPAR(cr)    ((cr)->nthreads > 1) */
/* #define PAR(cr)          (NODEPAR(cr) || THREADPAR(cr)) */
#define PAR(cr)            NODEPAR(cr)
#define RANK(cr, nodeid)   (nodeid)
#define MASTERRANK(cr)     (0)

#define DOMAINDECOMP(cr)   (((cr)->dd != NULL) && PAR(cr))
#define DDMASTER(dd)       ((dd)->rank == (dd)->masterrank)

#define PARTDECOMP(cr)     ((cr)->pd != NULL)

#define MULTISIM(cr)       ((cr)->ms)
#define MSRANK(ms, nodeid) (nodeid)
#define MASTERSIM(ms)      ((ms)->sim == 0)

/* The master of all (the node that prints the remaining run time etc.) */
#define MULTIMASTER(cr)    (SIMMASTER(cr) && (!MULTISIM(cr) || MASTERSIM((cr)->ms)))
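
/* Illustrative only: a typical pattern for using the rank-role macros above.
 * The I/O and broadcast steps named here are assumptions for the example,
 * not a contract of this header.
 *
 *   if (SIMMASTER(cr))
 *   {
 *       // read input; done by one PP rank per simulation
 *   }
 *   if (PAR(cr))
 *   {
 *       // broadcast the data over mpi_comm_mygroup to the other ranks
 *   }
 *   if (MULTIMASTER(cr))
 *   {
 *       // print status output once across all coupled simulations
 *   }
 */
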
#ifdef __cplusplus
}
#endif

#endif