Replace AI, AJ, .. and C0, C1, C2 macros with functions
[gromacs/AngularHB.git] / src / programs / mdrun / runner.cpp
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2011,2012,2013,2014,2015, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

#include "gmxpre.h"

#include "config.h"
#include <assert.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>

#include <algorithm>

#include "gromacs/domdec/domdec.h"
#include "gromacs/essentialdynamics/edsam.h"
#include "gromacs/ewald/pme.h"
#include "gromacs/fileio/tpxio.h"
#include "gromacs/gmxlib/gpu_utils/gpu_utils.h"
#include "gromacs/legacyheaders/checkpoint.h"
#include "gromacs/legacyheaders/constr.h"
#include "gromacs/legacyheaders/copyrite.h"
#include "gromacs/legacyheaders/disre.h"
#include "gromacs/legacyheaders/force.h"
#include "gromacs/legacyheaders/gmx_detect_hardware.h"
#include "gromacs/legacyheaders/gmx_omp_nthreads.h"
#include "gromacs/legacyheaders/gmx_thread_affinity.h"
#include "gromacs/legacyheaders/inputrec.h"
#include "gromacs/legacyheaders/main.h"
#include "gromacs/legacyheaders/md_logging.h"
#include "gromacs/legacyheaders/md_support.h"
#include "gromacs/legacyheaders/mdatoms.h"
#include "gromacs/legacyheaders/mdrun.h"
#include "gromacs/legacyheaders/names.h"
#include "gromacs/legacyheaders/network.h"
#include "gromacs/legacyheaders/oenv.h"
#include "gromacs/legacyheaders/orires.h"
#include "gromacs/legacyheaders/qmmm.h"
#include "gromacs/legacyheaders/sighandler.h"
#include "gromacs/legacyheaders/txtdump.h"
#include "gromacs/legacyheaders/typedefs.h"
#include "gromacs/math/calculate-ewald-splitting-coefficient.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdlib/calc_verletbuf.h"
#include "gromacs/mdlib/nbnxn_search.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/pulling/pull.h"
#include "gromacs/pulling/pull_rotation.h"
#include "gromacs/swap/swapcoords.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/topology/mtop_util.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/smalloc.h"

#include "deform.h"
#include "membed.h"
#include "repl_ex.h"
#include "resource-division.h"

#ifdef GMX_FAHCORE
#include "corewrap.h"
#endif
typedef struct {
    gmx_integrator_t *func;
} gmx_intp_t;

/* The array should match the eI array in include/types/enums.h */
const gmx_intp_t    integrator[eiNR] = { {do_md}, {do_steep}, {do_cg}, {do_md}, {do_md}, {do_nm}, {do_lbfgs}, {do_tpi}, {do_tpi}, {do_md}, {do_md}, {do_md}};
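/* Illustrative note (not part of the original source): the table above is
 * indexed by the integrator enum read from the .mdp/.tpr, and the run is
 * later dispatched simply as
 *
 *     integrator[inputrec->eI].func(fplog, cr, nfile, fnm, ...);
 *
 * which is why md, bd, the sd variants and velocity Verlet all map to
 * do_md(), while the minimizers, normal modes and TPI get their own entries.
 */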
gmx_int64_t         deform_init_init_step_tpx;
matrix              deform_init_box_tpx;
tMPI_Thread_mutex_t deform_init_box_mutex = TMPI_THREAD_MUTEX_INITIALIZER;


#ifdef GMX_THREAD_MPI
/* The minimum number of atoms per tMPI thread. With fewer atoms than this,
 * the number of threads will get lowered.
 */
#define MIN_ATOMS_PER_MPI_THREAD    90
#define MIN_ATOMS_PER_GPU           900
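/* Illustrative example (assumption: these limits are applied per rank when
 * hw_opt->nthreads_tmpi is chosen, see get_nthreads_mpi() in
 * resource-division.cpp): for a 27000-atom system, thread-MPI would start at
 * most 27000/90 = 300 ranks, and at most 27000/900 = 30 ranks when the
 * non-bonded work runs on GPUs, lowering the thread count below the hardware
 * thread count if necessary.
 */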
struct mdrunner_arglist
{
    gmx_hw_opt_t    hw_opt;
    FILE           *fplog;
    t_commrec      *cr;
    int             nfile;
    const t_filenm *fnm;
    output_env_t    oenv;
    gmx_bool        bVerbose;
    gmx_bool        bCompact;
    int             nstglobalcomm;
    ivec            ddxyz;
    int             dd_node_order;
    real            rdd;
    real            rconstr;
    const char     *dddlb_opt;
    real            dlb_scale;
    const char     *ddcsx;
    const char     *ddcsy;
    const char     *ddcsz;
    const char     *nbpu_opt;
    int             nstlist_cmdline;
    gmx_int64_t     nsteps_cmdline;
    int             nstepout;
    int             resetstep;
    int             nmultisim;
    int             repl_ex_nst;
    int             repl_ex_nex;
    int             repl_ex_seed;
    real            pforce;
    real            cpt_period;
    real            max_hours;
    int             imdport;
    unsigned long   Flags;
};
/* The function used for spawning threads. Extracts the mdrunner()
   arguments from its one argument and calls mdrunner(), after making
   a commrec. */
static void mdrunner_start_fn(void *arg)
{
    struct mdrunner_arglist *mda = (struct mdrunner_arglist*)arg;
    struct mdrunner_arglist  mc  = *mda; /* copy the arg list to make sure
                                            that it's thread-local. This doesn't
                                            copy pointed-to items, of course,
                                            but those are all const. */
    t_commrec *cr;                       /* we need a local version of this */
    FILE      *fplog = NULL;
    t_filenm  *fnm;

    fnm = dup_tfn(mc.nfile, mc.fnm);

    cr = reinitialize_commrec_for_this_thread(mc.cr);

    if (MASTER(cr))
    {
        fplog = mc.fplog;
    }

    mdrunner(&mc.hw_opt, fplog, cr, mc.nfile, fnm, mc.oenv,
             mc.bVerbose, mc.bCompact, mc.nstglobalcomm,
             mc.ddxyz, mc.dd_node_order, mc.rdd,
             mc.rconstr, mc.dddlb_opt, mc.dlb_scale,
             mc.ddcsx, mc.ddcsy, mc.ddcsz,
             mc.nbpu_opt, mc.nstlist_cmdline,
             mc.nsteps_cmdline, mc.nstepout, mc.resetstep,
             mc.nmultisim, mc.repl_ex_nst, mc.repl_ex_nex, mc.repl_ex_seed, mc.pforce,
             mc.cpt_period, mc.max_hours, mc.imdport, mc.Flags);
}
/* called by mdrunner() to start a specific number of threads (including
   the main thread) for thread-parallel runs. This in turn calls mdrunner()
   for each thread.
   All options besides nthreads are the same as for mdrunner(). */
static t_commrec *mdrunner_start_threads(gmx_hw_opt_t *hw_opt,
                                         FILE *fplog, t_commrec *cr, int nfile,
                                         const t_filenm fnm[], const output_env_t oenv, gmx_bool bVerbose,
                                         gmx_bool bCompact, int nstglobalcomm,
                                         ivec ddxyz, int dd_node_order, real rdd, real rconstr,
                                         const char *dddlb_opt, real dlb_scale,
                                         const char *ddcsx, const char *ddcsy, const char *ddcsz,
                                         const char *nbpu_opt, int nstlist_cmdline,
                                         gmx_int64_t nsteps_cmdline,
                                         int nstepout, int resetstep,
                                         int nmultisim, int repl_ex_nst, int repl_ex_nex, int repl_ex_seed,
                                         real pforce, real cpt_period, real max_hours,
                                         unsigned long Flags)
{
    int                      ret;
    struct mdrunner_arglist *mda;
    t_commrec               *crn; /* the new commrec */
    t_filenm                *fnmn;

    /* first check whether we even need to start tMPI */
    if (hw_opt->nthreads_tmpi < 2)
    {
        return cr;
    }

    /* a few small, one-time, almost unavoidable memory leaks: */
    snew(mda, 1);
    fnmn = dup_tfn(nfile, fnm);

    /* fill the data structure to pass as void pointer to thread start fn */
    /* hw_opt contains pointers, which should all be NULL at this stage */
    mda->hw_opt          = *hw_opt;
    mda->fplog           = fplog;
    mda->cr              = cr;
    mda->nfile           = nfile;
    mda->fnm             = fnmn;
    mda->oenv            = oenv;
    mda->bVerbose        = bVerbose;
    mda->bCompact        = bCompact;
    mda->nstglobalcomm   = nstglobalcomm;
    mda->ddxyz[XX]       = ddxyz[XX];
    mda->ddxyz[YY]       = ddxyz[YY];
    mda->ddxyz[ZZ]       = ddxyz[ZZ];
    mda->dd_node_order   = dd_node_order;
    mda->rdd             = rdd;
    mda->rconstr         = rconstr;
    mda->dddlb_opt       = dddlb_opt;
    mda->dlb_scale       = dlb_scale;
    mda->ddcsx           = ddcsx;
    mda->ddcsy           = ddcsy;
    mda->ddcsz           = ddcsz;
    mda->nbpu_opt        = nbpu_opt;
    mda->nstlist_cmdline = nstlist_cmdline;
    mda->nsteps_cmdline  = nsteps_cmdline;
    mda->nstepout        = nstepout;
    mda->resetstep       = resetstep;
    mda->nmultisim       = nmultisim;
    mda->repl_ex_nst     = repl_ex_nst;
    mda->repl_ex_nex     = repl_ex_nex;
    mda->repl_ex_seed    = repl_ex_seed;
    mda->pforce          = pforce;
    mda->cpt_period      = cpt_period;
    mda->max_hours       = max_hours;
    mda->Flags           = Flags;

    /* now spawn new threads that start mdrunner_start_fn(), while
       the main thread returns, we set thread affinity later */
    ret = tMPI_Init_fn(TRUE, hw_opt->nthreads_tmpi, TMPI_AFFINITY_NONE,
                       mdrunner_start_fn, (void*)(mda) );
    if (ret != TMPI_SUCCESS)
    {
        return NULL;
    }

    crn = reinitialize_commrec_for_this_thread(cr);
    return crn;
}

#endif /* GMX_THREAD_MPI */
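/* Orientation note (summary of the flow in this file, not original source
 * text): mdrunner() below first detects the hardware and decides
 * hw_opt->nthreads_tmpi; when more than one thread-MPI rank is wanted it
 * calls mdrunner_start_threads(), which spawns the extra threads via
 * tMPI_Init_fn(). Each spawned thread enters mdrunner_start_fn() and re-runs
 * mdrunner() with its own commrec, so everything after the spawn point
 * executes once per rank.
 */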
/* We determine the extra cost of the non-bonded kernels compared to
 * a reference nstlist value of 10 (which is the default in grompp).
 */
static const int    nbnxnReferenceNstlist = 10;
/* The values to try when switching */
const int           nstlist_try[] = { 20, 25, 40 };
#define NNSTL  sizeof(nstlist_try)/sizeof(nstlist_try[0])
/* Increase nstlist until the non-bonded cost increases more than listfac_ok,
 * but never more than listfac_max.
 * A standard (protein+)water system at 300K with PME ewald_rtol=1e-5
 * needs 1.28 at rcoulomb=0.9 and 1.24 at rcoulomb=1.0 to get to nstlist=40.
 * Note that both CPU and GPU factors are conservative. Performance should
 * not go down due to this tuning, except with a relatively slow GPU.
 * On the other hand, at medium/high parallelization or with fast GPUs
 * nstlist will not be increased enough to reach optimal performance.
 */
/* CPU: pair-search is about a factor 1.5 slower than the non-bonded kernel */
static const float  nbnxn_cpu_listfac_ok    = 1.05;
static const float  nbnxn_cpu_listfac_max   = 1.09;
/* GPU: pair-search is a factor 1.5-3 slower than the non-bonded kernel */
static const float  nbnxn_gpu_listfac_ok    = 1.20;
static const float  nbnxn_gpu_listfac_max   = 1.30;
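/* Worked example (illustrative numbers only): increase_nstlist() below turns
 * these pair-list cost factors into a maximum allowed buffer radius with a
 * cube-root scaling, since the list cost grows roughly with the buffered
 * volume:
 *
 *     rlist_ok = (rlist_ref + rlist_inc)*pow(listfac_ok, 1/3.0) - rlist_inc
 *
 * e.g. with rlist_ref = 1.0 nm, rlist_inc = 0.1 nm and the GPU factor 1.20,
 * rlist_ok = 1.1*1.0627 - 0.1 ~= 1.07 nm.
 */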
/* Try to increase nstlist when using the Verlet cut-off scheme */
static void increase_nstlist(FILE *fp, t_commrec *cr,
                             t_inputrec *ir, int nstlist_cmdline,
                             const gmx_mtop_t *mtop, matrix box,
                             gmx_bool bGPU)
{
    float                  listfac_ok, listfac_max;
    int                    nstlist_orig, nstlist_prev;
    verletbuf_list_setup_t ls;
    real                   rlistWithReferenceNstlist, rlist_inc, rlist_ok, rlist_max;
    real                   rlist_new, rlist_prev;
    size_t                 nstlist_ind = 0;
    t_state                state_tmp;
    gmx_bool               bBox, bDD, bCont;
    const char            *nstl_gpu = "\nFor optimal performance with a GPU nstlist (now %d) should be larger.\nThe optimum depends on your CPU and GPU resources.\nYou might want to try several nstlist values.\n";
    const char            *nve_err  = "Can not increase nstlist because an NVE ensemble is used";
    const char            *vbd_err  = "Can not increase nstlist because verlet-buffer-tolerance is not set or used";
    const char            *box_err  = "Can not increase nstlist because the box is too small";
    const char            *dd_err   = "Can not increase nstlist because of domain decomposition limitations";
    char                   buf[STRLEN];
    const float            oneThird = 1.0f / 3.0f;

    if (nstlist_cmdline <= 0)
    {
        if (ir->nstlist == 1)
        {
            /* The user probably set nstlist=1 for a reason,
             * don't mess with the settings.
             */
            return;
        }

        if (fp != NULL && bGPU && ir->nstlist < nstlist_try[0])
        {
            fprintf(fp, nstl_gpu, ir->nstlist);
        }
        nstlist_ind = 0;
        while (nstlist_ind < NNSTL && ir->nstlist >= nstlist_try[nstlist_ind])
        {
            nstlist_ind++;
        }
        if (nstlist_ind == NNSTL)
        {
            /* There are no larger nstlist value to try */
            return;
        }
    }

    if (EI_MD(ir->eI) && ir->etc == etcNO)
    {
        if (MASTER(cr))
        {
            fprintf(stderr, "%s\n", nve_err);
        }
        if (fp != NULL)
        {
            fprintf(fp, "%s\n", nve_err);
        }

        return;
    }

    if (ir->verletbuf_tol == 0 && bGPU)
    {
        gmx_fatal(FARGS, "You are using an old tpr file with a GPU, please generate a new tpr file with an up to date version of grompp");
    }

    if (ir->verletbuf_tol < 0)
    {
        if (MASTER(cr))
        {
            fprintf(stderr, "%s\n", vbd_err);
        }
        if (fp != NULL)
        {
            fprintf(fp, "%s\n", vbd_err);
        }

        return;
    }

    if (bGPU)
    {
        listfac_ok  = nbnxn_gpu_listfac_ok;
        listfac_max = nbnxn_gpu_listfac_max;
    }
    else
    {
        listfac_ok  = nbnxn_cpu_listfac_ok;
        listfac_max = nbnxn_cpu_listfac_max;
    }

    nstlist_orig = ir->nstlist;
    if (nstlist_cmdline > 0)
    {
        if (fp)
        {
            sprintf(buf, "Getting nstlist=%d from command line option",
                    nstlist_cmdline);
        }
        ir->nstlist = nstlist_cmdline;
    }

    verletbuf_get_list_setup(TRUE, bGPU, &ls);

    /* Allow rlist to make the list a given factor larger than the list
     * would be with the reference value for nstlist (10).
     */
    nstlist_prev = ir->nstlist;
    ir->nstlist  = nbnxnReferenceNstlist;
    calc_verlet_buffer_size(mtop, det(box), ir, -1, &ls, NULL,
                            &rlistWithReferenceNstlist);
    ir->nstlist  = nstlist_prev;

    /* Determine the pair list size increase due to zero interactions */
    rlist_inc = nbnxn_get_rlist_effective_inc(ls.cluster_size_j,
                                              mtop->natoms/det(box));
    rlist_ok  = (rlistWithReferenceNstlist + rlist_inc)*pow(listfac_ok, oneThird) - rlist_inc;
    rlist_max = (rlistWithReferenceNstlist + rlist_inc)*pow(listfac_max, oneThird) - rlist_inc;
    if (debug)
    {
        fprintf(debug, "nstlist tuning: rlist_inc %.3f rlist_ok %.3f rlist_max %.3f\n",
                rlist_inc, rlist_ok, rlist_max);
    }

    nstlist_prev = nstlist_orig;
    rlist_prev   = ir->rlist;
    do
    {
        if (nstlist_cmdline <= 0)
        {
            ir->nstlist = nstlist_try[nstlist_ind];
        }

        /* Set the pair-list buffer size in ir */
        calc_verlet_buffer_size(mtop, det(box), ir, -1, &ls, NULL, &rlist_new);

        /* Does rlist fit in the box? */
        bBox = (sqr(rlist_new) < max_cutoff2(ir->ePBC, box));
        bDD  = TRUE;
        if (bBox && DOMAINDECOMP(cr))
        {
            /* Check if rlist fits in the domain decomposition */
            if (inputrec2nboundeddim(ir) < DIM)
            {
                gmx_incons("Changing nstlist with domain decomposition and unbounded dimensions is not implemented yet");
            }
            copy_mat(box, state_tmp.box);
            bDD = change_dd_cutoff(cr, &state_tmp, ir, rlist_new);
        }

        if (debug)
        {
            fprintf(debug, "nstlist %d rlist %.3f bBox %d bDD %d\n",
                    ir->nstlist, rlist_new, bBox, bDD);
        }

        bCont = FALSE;

        if (nstlist_cmdline <= 0)
        {
            if (bBox && bDD && rlist_new <= rlist_max)
            {
                /* Increase nstlist */
                nstlist_prev = ir->nstlist;
                rlist_prev   = rlist_new;
                bCont        = (nstlist_ind+1 < NNSTL && rlist_new < rlist_ok);
            }
            else
            {
                /* Stick with the previous nstlist */
                ir->nstlist = nstlist_prev;
                rlist_new   = rlist_prev;
                bBox        = TRUE;
                bDD         = TRUE;
            }
        }

        nstlist_ind++;
    }
    while (bCont);

    if (!bBox || !bDD)
    {
        gmx_warning(!bBox ? box_err : dd_err);
        if (fp != NULL)
        {
            fprintf(fp, "\n%s\n", !bBox ? box_err : dd_err);
        }
        ir->nstlist = nstlist_orig;
    }
    else if (ir->nstlist != nstlist_orig || rlist_new != ir->rlist)
    {
        sprintf(buf, "Changing nstlist from %d to %d, rlist from %g to %g",
                nstlist_orig, ir->nstlist,
                ir->rlist, rlist_new);
        if (MASTER(cr))
        {
            fprintf(stderr, "%s\n\n", buf);
        }
        if (fp != NULL)
        {
            fprintf(fp, "%s\n\n", buf);
        }
        ir->rlist     = rlist_new;
        ir->rlistlong = rlist_new;
    }
}
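/* Example of the resulting log message when the tuning succeeds (values are
 * illustrative): "Changing nstlist from 10 to 25, rlist from 1 to 1.06".
 * If the box or the domain decomposition cannot accommodate the larger
 * buffer, nstlist is restored to its original value instead.
 */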
static void prepare_verlet_scheme(FILE             *fplog,
                                  t_commrec        *cr,
                                  t_inputrec       *ir,
                                  int               nstlist_cmdline,
                                  const gmx_mtop_t *mtop,
                                  matrix            box,
                                  gmx_bool          bUseGPU)
{
    /* For NVE simulations, we will retain the initial list buffer */
    if (ir->verletbuf_tol > 0 && !(EI_MD(ir->eI) && ir->etc == etcNO))
    {
        /* Update the Verlet buffer size for the current run setup */
        verletbuf_list_setup_t ls;
        real                   rlist_new;

        /* Here we assume SIMD-enabled kernels are being used. But as currently
         * calc_verlet_buffer_size gives the same results for 4x8 and 4x4
         * and 4x2 gives a larger buffer than 4x4, this is ok.
         */
        verletbuf_get_list_setup(TRUE, bUseGPU, &ls);

        calc_verlet_buffer_size(mtop, det(box), ir, -1, &ls, NULL, &rlist_new);

        if (rlist_new != ir->rlist)
        {
            if (fplog != NULL)
            {
                fprintf(fplog, "\nChanging rlist from %g to %g for non-bonded %dx%d atom kernels\n\n",
                        ir->rlist, rlist_new,
                        ls.cluster_size_i, ls.cluster_size_j);
            }
            ir->rlist     = rlist_new;
            ir->rlistlong = rlist_new;
        }
    }

    if (nstlist_cmdline > 0 && (!EI_DYNAMICS(ir->eI) || ir->verletbuf_tol <= 0))
    {
        gmx_fatal(FARGS, "Can not set nstlist without %s",
                  !EI_DYNAMICS(ir->eI) ? "dynamics" : "verlet-buffer-tolerance");
    }

    if (EI_DYNAMICS(ir->eI))
    {
        /* Set or try nstlist values */
        increase_nstlist(fplog, cr, ir, nstlist_cmdline, mtop, box, bUseGPU);
    }
}
/* Override the value in inputrec with value passed on the command line (if any) */
static void override_nsteps_cmdline(FILE            *fplog,
                                    gmx_int64_t      nsteps_cmdline,
                                    t_inputrec      *ir,
                                    const t_commrec *cr)
{
    assert(ir);
    assert(cr);

    /* override with anything else than the default -2 */
    if (nsteps_cmdline > -2)
    {
        char sbuf_steps[STEPSTRSIZE];
        char sbuf_msg[STRLEN];

        ir->nsteps = nsteps_cmdline;
        if (EI_DYNAMICS(ir->eI) && nsteps_cmdline != -1)
        {
            sprintf(sbuf_msg, "Overriding nsteps with value passed on the command line: %s steps, %.3g ps",
                    gmx_step_str(nsteps_cmdline, sbuf_steps),
                    fabs(nsteps_cmdline*ir->delta_t));
        }
        else
        {
            sprintf(sbuf_msg, "Overriding nsteps with value passed on the command line: %s steps",
                    gmx_step_str(nsteps_cmdline, sbuf_steps));
        }

        md_print_warn(cr, fplog, "%s\n", sbuf_msg);
    }
    else if (nsteps_cmdline < -2)
    {
        gmx_fatal(FARGS, "Invalid nsteps value passed on the command line: %" GMX_PRId64,
                  nsteps_cmdline);
    }
    /* Do nothing if nsteps_cmdline == -2 */
}
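/* Summary of the -nsteps convention handled above (derived from the checks in
 * override_nsteps_cmdline(), stated here for clarity):
 *   -2        : keep the nsteps value from the .tpr file (the mdrun default)
 *   -1        : run without a step limit (no time estimate is printed)
 *    0 or more: override the .tpr value with this number of steps
 * Anything below -2 is rejected with a fatal error.
 */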
594 int mdrunner(gmx_hw_opt_t *hw_opt,
595 FILE *fplog, t_commrec *cr, int nfile,
596 const t_filenm fnm[], const output_env_t oenv, gmx_bool bVerbose,
597 gmx_bool bCompact, int nstglobalcomm,
598 ivec ddxyz, int dd_node_order, real rdd, real rconstr,
599 const char *dddlb_opt, real dlb_scale,
600 const char *ddcsx, const char *ddcsy, const char *ddcsz,
601 const char *nbpu_opt, int nstlist_cmdline,
602 gmx_int64_t nsteps_cmdline, int nstepout, int resetstep,
603 int gmx_unused nmultisim, int repl_ex_nst, int repl_ex_nex,
604 int repl_ex_seed, real pforce, real cpt_period, real max_hours,
605 int imdport, unsigned long Flags)
607 gmx_bool bForceUseGPU, bTryUseGPU, bRerunMD, bCantUseGPU;
608 t_inputrec *inputrec;
609 t_state *state = NULL;
610 matrix box;
611 gmx_ddbox_t ddbox = {0};
612 int npme_major, npme_minor;
613 t_nrnb *nrnb;
614 gmx_mtop_t *mtop = NULL;
615 t_mdatoms *mdatoms = NULL;
616 t_forcerec *fr = NULL;
617 t_fcdata *fcd = NULL;
618 real ewaldcoeff_q = 0;
619 real ewaldcoeff_lj = 0;
620 struct gmx_pme_t **pmedata = NULL;
621 gmx_vsite_t *vsite = NULL;
622 gmx_constr_t constr;
623 int nChargePerturbed = -1, nTypePerturbed = 0, status;
624 gmx_wallcycle_t wcycle;
625 gmx_bool bReadEkin;
626 gmx_walltime_accounting_t walltime_accounting = NULL;
627 int rc;
628 gmx_int64_t reset_counters;
629 gmx_edsam_t ed = NULL;
630 int nthreads_pme = 1;
631 int nthreads_pp = 1;
632 gmx_membed_t membed = NULL;
633 gmx_hw_info_t *hwinfo = NULL;
634 /* The master rank decides early on bUseGPU and broadcasts this later */
635 gmx_bool bUseGPU = FALSE;
637 /* CAUTION: threads may be started later on in this function, so
638 cr doesn't reflect the final parallel state right now */
639 snew(inputrec, 1);
640 snew(mtop, 1);
642 if (Flags & MD_APPENDFILES)
644 fplog = NULL;
647 bRerunMD = (Flags & MD_RERUN);
648 bForceUseGPU = (strncmp(nbpu_opt, "gpu", 3) == 0);
649 bTryUseGPU = (strncmp(nbpu_opt, "auto", 4) == 0) || bForceUseGPU;
650 /* Rerun execution time is dominated by I/O and pair search, so
651 * GPUs are not very useful, plus they do not support more than
652 * one energy group. Don't select them when they can't be used,
653 * unless the user requested it, then fatal_error is called later.
655 * TODO it would be nice to notify the user that if this check
656 * causes GPUs not to be used that this is what is happening, and
657 * why, but that will be easier to do after some future
658 * cleanup. */
659 bCantUseGPU = bRerunMD && (inputrec->opts.ngener > 1);
660 bTryUseGPU = bTryUseGPU && !(bCantUseGPU && !bForceUseGPU);
662 /* Detect hardware, gather information. This is an operation that is
663 * global for this process (MPI rank). */
664 hwinfo = gmx_detect_hardware(fplog, cr, bTryUseGPU);
666 gmx_print_detected_hardware(fplog, cr, hwinfo);
668 if (fplog != NULL)
670 /* Print references after all software/hardware printing */
671 please_cite(fplog, "Pall2015");
672 please_cite(fplog, "Pronk2013");
673 please_cite(fplog, "Hess2008b");
674 please_cite(fplog, "Spoel2005a");
675 please_cite(fplog, "Lindahl2001a");
676 please_cite(fplog, "Berendsen95a");
679 snew(state, 1);
680 if (SIMMASTER(cr))
682 /* Read (nearly) all data required for the simulation */
683 read_tpx_state(ftp2fn(efTPR, nfile, fnm), inputrec, state, NULL, mtop);
685 if (inputrec->cutoff_scheme == ecutsVERLET)
687 /* Here the master rank decides if all ranks will use GPUs */
688 bUseGPU = (hwinfo->gpu_info.n_dev_compatible > 0 ||
689 getenv("GMX_EMULATE_GPU") != NULL);
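        /* Note (assumption based on the variable name and common GROMACS usage):
         * setting the GMX_EMULATE_GPU environment variable makes the Verlet
         * GPU code path run in CPU emulation, which is why it is treated the
         * same as having a compatible GPU here. */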
691 /* TODO add GPU kernels for this and replace this check by:
692 * (bUseGPU && (ir->vdwtype == evdwPME &&
693 * ir->ljpme_combination_rule == eljpmeLB))
694 * update the message text and the content of nbnxn_acceleration_supported.
696 if (bUseGPU &&
697 !nbnxn_acceleration_supported(fplog, cr, inputrec, bUseGPU))
699 /* Fallback message printed by nbnxn_acceleration_supported */
700 if (bForceUseGPU)
702 gmx_fatal(FARGS, "GPU acceleration requested, but not supported with the given input settings");
704 bUseGPU = FALSE;
707 prepare_verlet_scheme(fplog, cr,
708 inputrec, nstlist_cmdline, mtop, state->box,
709 bUseGPU);
711 else
713 if (nstlist_cmdline > 0)
715 gmx_fatal(FARGS, "Can not set nstlist with the group cut-off scheme");
718 if (hwinfo->gpu_info.n_dev_compatible > 0)
720 md_print_warn(cr, fplog,
721 "NOTE: GPU(s) found, but the current simulation can not use GPUs\n"
722 " To use a GPU, set the mdp option: cutoff-scheme = Verlet\n");
725 if (bForceUseGPU)
727 gmx_fatal(FARGS, "GPU requested, but can't be used without cutoff-scheme=Verlet");
730 #ifdef GMX_TARGET_BGQ
731 md_print_warn(cr, fplog,
732 "NOTE: There is no SIMD implementation of the group scheme kernels on\n"
733 " BlueGene/Q. You will observe better performance from using the\n"
734 " Verlet cut-off scheme.\n");
735 #endif
738 if (inputrec->eI == eiSD2)
740 md_print_warn(cr, fplog, "The stochastic dynamics integrator %s is deprecated, since\n"
741 "it is slower than integrator %s and is slightly less accurate\n"
742 "with constraints. Use the %s integrator.",
743 ei_names[inputrec->eI], ei_names[eiSD1], ei_names[eiSD1]);
747 /* Check and update the hardware options for internal consistency */
748 check_and_update_hw_opt_1(hw_opt, SIMMASTER(cr));
750 /* Early check for externally set process affinity. */
751 gmx_check_thread_affinity_set(fplog, cr,
752 hw_opt, hwinfo->nthreads_hw_avail, FALSE);
753 if (SIMMASTER(cr))
756 #ifdef GMX_THREAD_MPI
757 if (cr->npmenodes > 0 && hw_opt->nthreads_tmpi <= 0)
759 gmx_fatal(FARGS, "You need to explicitly specify the number of MPI threads (-ntmpi) when using separate PME ranks");
761 #endif
763 if (hw_opt->nthreads_omp_pme != hw_opt->nthreads_omp &&
764 cr->npmenodes <= 0)
766 gmx_fatal(FARGS, "You need to explicitly specify the number of PME ranks (-npme) when using different number of OpenMP threads for PP and PME ranks");
770 #ifdef GMX_THREAD_MPI
771 if (SIMMASTER(cr))
773 /* Since the master knows the cut-off scheme, update hw_opt for this.
774 * This is done later for normal MPI and also once more with tMPI
775 * for all tMPI ranks.
777 check_and_update_hw_opt_2(hw_opt, inputrec->cutoff_scheme);
779 /* NOW the threads will be started: */
780 hw_opt->nthreads_tmpi = get_nthreads_mpi(hwinfo,
781 hw_opt,
782 inputrec, mtop,
783 cr, fplog);
785 if (hw_opt->nthreads_tmpi > 1)
787 t_commrec *cr_old = cr;
788 /* now start the threads. */
789 cr = mdrunner_start_threads(hw_opt, fplog, cr_old, nfile, fnm,
790 oenv, bVerbose, bCompact, nstglobalcomm,
791 ddxyz, dd_node_order, rdd, rconstr,
792 dddlb_opt, dlb_scale, ddcsx, ddcsy, ddcsz,
793 nbpu_opt, nstlist_cmdline,
794 nsteps_cmdline, nstepout, resetstep, nmultisim,
795 repl_ex_nst, repl_ex_nex, repl_ex_seed, pforce,
796 cpt_period, max_hours,
797 Flags);
798 /* the main thread continues here with a new cr. We don't deallocate
799 the old cr because other threads may still be reading it. */
800 if (cr == NULL)
802 gmx_comm("Failed to spawn threads");
806 #endif
807 /* END OF CAUTION: cr is now reliable */
809 /* g_membed initialisation *
810 * Because we change the mtop, init_membed is called before the init_parallel *
811 * (in case we ever want to make it run in parallel) */
812 if (opt2bSet("-membed", nfile, fnm))
814 if (MASTER(cr))
816 fprintf(stderr, "Initializing membed");
818 membed = init_membed(fplog, nfile, fnm, mtop, inputrec, state, cr, &cpt_period);
821 if (PAR(cr))
823 /* now broadcast everything to the non-master nodes/threads: */
824 init_parallel(cr, inputrec, mtop);
826 /* The master rank decided on the use of GPUs,
827 * broadcast this information to all ranks.
829 gmx_bcast_sim(sizeof(bUseGPU), &bUseGPU, cr);
832 if (fplog != NULL)
834 pr_inputrec(fplog, 0, "Input Parameters", inputrec, FALSE);
835 fprintf(fplog, "\n");
838 /* now make sure the state is initialized and propagated */
839 set_state_entries(state, inputrec);
841 /* A parallel command line option consistency check that we can
842 only do after any threads have started. */
843 if (!PAR(cr) &&
844 (ddxyz[XX] > 1 || ddxyz[YY] > 1 || ddxyz[ZZ] > 1 || cr->npmenodes > 0))
846 gmx_fatal(FARGS,
847 "The -dd or -npme option request a parallel simulation, "
848 #ifndef GMX_MPI
849 "but %s was compiled without threads or MPI enabled"
850 #else
851 #ifdef GMX_THREAD_MPI
852 "but the number of threads (option -nt) is 1"
853 #else
854 "but %s was not started through mpirun/mpiexec or only one rank was requested through mpirun/mpiexec"
855 #endif
856 #endif
857 , output_env_get_program_display_name(oenv)
861 if (bRerunMD &&
862 (EI_ENERGY_MINIMIZATION(inputrec->eI) || eiNM == inputrec->eI))
864 gmx_fatal(FARGS, "The .mdp file specified an energy mininization or normal mode algorithm, and these are not compatible with mdrun -rerun");
867 if (can_use_allvsall(inputrec, TRUE, cr, fplog) && DOMAINDECOMP(cr))
869 gmx_fatal(FARGS, "All-vs-all loops do not work with domain decomposition, use a single MPI rank");
872 if (!(EEL_PME(inputrec->coulombtype) || EVDW_PME(inputrec->vdwtype)))
874 if (cr->npmenodes > 0)
876 gmx_fatal_collective(FARGS, cr, NULL,
877 "PME-only ranks are requested, but the system does not use PME for electrostatics or LJ");
880 cr->npmenodes = 0;
883 if (bUseGPU && cr->npmenodes < 0)
885 /* With GPUs we don't automatically use PME-only ranks. PME ranks can
886 * improve performance with many threads per GPU, since our OpenMP
887 * scaling is bad, but it's difficult to automate the setup.
889 cr->npmenodes = 0;
892 #ifdef GMX_FAHCORE
893 if (MASTER(cr))
895 fcRegisterSteps(inputrec->nsteps, inputrec->init_step);
897 #endif
899 /* NMR restraints must be initialized before load_checkpoint,
900 * since with time averaging the history is added to t_state.
901 * For proper consistency check we therefore need to extend
902 * t_state here.
903 * So the PME-only nodes (if present) will also initialize
904 * the distance restraints.
906 snew(fcd, 1);
908 /* This needs to be called before read_checkpoint to extend the state */
909 init_disres(fplog, mtop, inputrec, cr, fcd, state, repl_ex_nst > 0);
911 init_orires(fplog, mtop, state->x, inputrec, cr, &(fcd->orires),
912 state);
914 if (DEFORM(*inputrec))
916 /* Store the deform reference box before reading the checkpoint */
917 if (SIMMASTER(cr))
919 copy_mat(state->box, box);
921 if (PAR(cr))
923 gmx_bcast(sizeof(box), box, cr);
925 /* Because we do not have the update struct available yet
926 * in which the reference values should be stored,
927 * we store them temporarily in static variables.
928 * This should be thread safe, since they are only written once
929 * and with identical values.
931 tMPI_Thread_mutex_lock(&deform_init_box_mutex);
932 deform_init_init_step_tpx = inputrec->init_step;
933 copy_mat(box, deform_init_box_tpx);
934 tMPI_Thread_mutex_unlock(&deform_init_box_mutex);
937 if (opt2bSet("-cpi", nfile, fnm))
939 /* Check if checkpoint file exists before doing continuation.
940 * This way we can use identical input options for the first and subsequent runs...
942 if (gmx_fexist_master(opt2fn_master("-cpi", nfile, fnm, cr), cr) )
944 load_checkpoint(opt2fn_master("-cpi", nfile, fnm, cr), &fplog,
945 cr, ddxyz,
946 inputrec, state, &bReadEkin,
947 (Flags & MD_APPENDFILES),
948 (Flags & MD_APPENDFILESSET));
950 if (bReadEkin)
952 Flags |= MD_READ_EKIN;
957 if (MASTER(cr) && (Flags & MD_APPENDFILES))
959 gmx_log_open(ftp2fn(efLOG, nfile, fnm), cr,
960 Flags, &fplog);
963 /* override nsteps with value from cmdline */
964 override_nsteps_cmdline(fplog, nsteps_cmdline, inputrec, cr);
966 if (SIMMASTER(cr))
968 copy_mat(state->box, box);
971 if (PAR(cr))
973 gmx_bcast(sizeof(box), box, cr);
976 /* Essential dynamics */
977 if (opt2bSet("-ei", nfile, fnm))
979 /* Open input and output files, allocate space for ED data structure */
980 ed = ed_open(mtop->natoms, &state->edsamstate, nfile, fnm, Flags, oenv, cr);
983 if (PAR(cr) && !(EI_TPI(inputrec->eI) ||
984 inputrec->eI == eiNM))
986 cr->dd = init_domain_decomposition(fplog, cr, Flags, ddxyz, rdd, rconstr,
987 dddlb_opt, dlb_scale,
988 ddcsx, ddcsy, ddcsz,
989 mtop, inputrec,
990 box, state->x,
991 &ddbox, &npme_major, &npme_minor);
993 make_dd_communicators(fplog, cr, dd_node_order);
995 /* Set overallocation to avoid frequent reallocation of arrays */
996 set_over_alloc_dd(TRUE);
998 else
1000 /* PME, if used, is done on all nodes with 1D decomposition */
1001 cr->npmenodes = 0;
1002 cr->duty = (DUTY_PP | DUTY_PME);
1003 npme_major = 1;
1004 npme_minor = 1;
1006 if (inputrec->ePBC == epbcSCREW)
1008 gmx_fatal(FARGS,
1009 "pbc=%s is only implemented with domain decomposition",
1010 epbc_names[inputrec->ePBC]);
1014 if (PAR(cr))
1016 /* After possible communicator splitting in make_dd_communicators.
1017 * we can set up the intra/inter node communication.
1019 gmx_setup_nodecomm(fplog, cr);
1022 /* Initialize per-physical-node MPI process/thread ID and counters. */
1023 gmx_init_intranode_counters(cr);
1024 #ifdef GMX_MPI
1025 if (MULTISIM(cr))
1027 md_print_info(cr, fplog,
1028 "This is simulation %d out of %d running as a composite GROMACS\n"
1029 "multi-simulation job. Setup for this simulation:\n\n",
1030 cr->ms->sim, cr->ms->nsim);
1032 md_print_info(cr, fplog, "Using %d MPI %s\n",
1033 cr->nnodes,
1034 #ifdef GMX_THREAD_MPI
1035 cr->nnodes == 1 ? "thread" : "threads"
1036 #else
1037 cr->nnodes == 1 ? "process" : "processes"
1038 #endif
1040 fflush(stderr);
1041 #endif
1043 /* Check and update hw_opt for the cut-off scheme */
1044 check_and_update_hw_opt_2(hw_opt, inputrec->cutoff_scheme);
1046 gmx_omp_nthreads_init(fplog, cr,
1047 hwinfo->nthreads_hw_avail,
1048 hw_opt->nthreads_omp,
1049 hw_opt->nthreads_omp_pme,
1050 (cr->duty & DUTY_PP) == 0,
1051 inputrec->cutoff_scheme == ecutsVERLET);
1053 #ifndef NDEBUG
1054 if (integrator[inputrec->eI].func != do_tpi &&
1055 inputrec->cutoff_scheme == ecutsVERLET)
1057 gmx_feenableexcept();
1059 #endif
1061 if (bUseGPU)
1063 /* Select GPU id's to use */
1064 gmx_select_gpu_ids(fplog, cr, &hwinfo->gpu_info, bForceUseGPU,
1065 &hw_opt->gpu_opt);
1067 else
1069 /* Ignore (potentially) manually selected GPUs */
1070 hw_opt->gpu_opt.n_dev_use = 0;
1073 /* check consistency across ranks of things like SIMD
1074 * support and number of GPUs selected */
1075 gmx_check_hw_runconf_consistency(fplog, hwinfo, cr, hw_opt, bUseGPU);
1077 /* Now that we know the setup is consistent, check for efficiency */
1078 check_resource_division_efficiency(hwinfo, hw_opt, Flags & MD_NTOMPSET,
1079 cr, fplog);
1081 if (DOMAINDECOMP(cr))
1083 /* When we share GPUs over ranks, we need to know this for the DLB */
1084 dd_setup_dlb_resource_sharing(cr, hwinfo, hw_opt);
1087 /* getting number of PP/PME threads
1088 PME: env variable should be read only on one node to make sure it is
1089 identical everywhere;
1091 /* TODO nthreads_pp is only used for pinning threads.
1092 * This is a temporary solution until we have a hw topology library.
1094 nthreads_pp = gmx_omp_nthreads_get(emntNonbonded);
1095 nthreads_pme = gmx_omp_nthreads_get(emntPME);
1097 wcycle = wallcycle_init(fplog, resetstep, cr, nthreads_pp, nthreads_pme);
1099 if (PAR(cr))
1101 /* Master synchronizes its value of reset_counters with all nodes
1102 * including PME only nodes */
1103 reset_counters = wcycle_get_reset_counters(wcycle);
1104 gmx_bcast_sim(sizeof(reset_counters), &reset_counters, cr);
1105 wcycle_set_reset_counters(wcycle, reset_counters);
1108 snew(nrnb, 1);
1109 if (cr->duty & DUTY_PP)
1111 bcast_state(cr, state);
1113 /* Initiate forcerecord */
1114 fr = mk_forcerec();
1115 fr->hwinfo = hwinfo;
1116 fr->gpu_opt = &hw_opt->gpu_opt;
1117 init_forcerec(fplog, oenv, fr, fcd, inputrec, mtop, cr, box,
1118 opt2fn("-table", nfile, fnm),
1119 opt2fn("-tabletf", nfile, fnm),
1120 opt2fn("-tablep", nfile, fnm),
1121 opt2fn("-tableb", nfile, fnm),
1122 nbpu_opt,
1123 FALSE,
1124 pforce);
1126 /* version for PCA_NOT_READ_NODE (see md.c) */
1127 /*init_forcerec(fplog,fr,fcd,inputrec,mtop,cr,box,FALSE,
1128 "nofile","nofile","nofile","nofile",FALSE,pforce);
1131 /* Initialize QM-MM */
1132 if (fr->bQMMM)
1134 init_QMMMrec(cr, mtop, inputrec, fr);
1137 /* Initialize the mdatoms structure.
1138 * mdatoms is not filled with atom data,
1139 * as this can not be done now with domain decomposition.
1141 mdatoms = init_mdatoms(fplog, mtop, inputrec->efep != efepNO);
1143 /* Initialize the virtual site communication */
1144 vsite = init_vsite(mtop, cr, FALSE);
1146 calc_shifts(box, fr->shift_vec);
1148 /* With periodic molecules the charge groups should be whole at start up
1149 * and the virtual sites should not be far from their proper positions.
1151 if (!inputrec->bContinuation && MASTER(cr) &&
1152 !(inputrec->ePBC != epbcNONE && inputrec->bPeriodicMols))
1154 /* Make molecules whole at start of run */
1155 if (fr->ePBC != epbcNONE)
1157 do_pbc_first_mtop(fplog, inputrec->ePBC, box, mtop, state->x);
1159 if (vsite)
1161 /* Correct initial vsite positions are required
1162 * for the initial distribution in the domain decomposition
1163 * and for the initial shell prediction.
1165 construct_vsites_mtop(vsite, mtop, state->x);
1169 if (EEL_PME(fr->eeltype) || EVDW_PME(fr->vdwtype))
1171 ewaldcoeff_q = fr->ewaldcoeff_q;
1172 ewaldcoeff_lj = fr->ewaldcoeff_lj;
1173 pmedata = &fr->pmedata;
1175 else
1177 pmedata = NULL;
1180 else
1182 /* This is a PME only node */
1184 /* We don't need the state */
1185 done_state(state);
1187 ewaldcoeff_q = calc_ewaldcoeff_q(inputrec->rcoulomb, inputrec->ewald_rtol);
1188 ewaldcoeff_lj = calc_ewaldcoeff_lj(inputrec->rvdw, inputrec->ewald_rtol_lj);
1189 snew(pmedata, 1);
1192 if (hw_opt->thread_affinity != threadaffOFF)
1194 /* Before setting affinity, check whether the affinity has changed
1195 * - which indicates that probably the OpenMP library has changed it
1196 * since we first checked).
1198 gmx_check_thread_affinity_set(fplog, cr,
1199 hw_opt, hwinfo->nthreads_hw_avail, TRUE);
1201 /* Set the CPU affinity */
1202 gmx_set_thread_affinity(fplog, cr, hw_opt, hwinfo);
1205 /* Initiate PME if necessary,
1206 * either on all nodes or on dedicated PME nodes only. */
1207 if (EEL_PME(inputrec->coulombtype) || EVDW_PME(inputrec->vdwtype))
1209 if (mdatoms)
1211 nChargePerturbed = mdatoms->nChargePerturbed;
1212 if (EVDW_PME(inputrec->vdwtype))
1214 nTypePerturbed = mdatoms->nTypePerturbed;
1217 if (cr->npmenodes > 0)
1219 /* The PME only nodes need to know nChargePerturbed(FEP on Q) and nTypePerturbed(FEP on LJ)*/
1220 gmx_bcast_sim(sizeof(nChargePerturbed), &nChargePerturbed, cr);
1221 gmx_bcast_sim(sizeof(nTypePerturbed), &nTypePerturbed, cr);
1224 if (cr->duty & DUTY_PME)
1226 status = gmx_pme_init(pmedata, cr, npme_major, npme_minor, inputrec,
1227 mtop ? mtop->natoms : 0, nChargePerturbed, nTypePerturbed,
1228 (Flags & MD_REPRODUCIBLE), nthreads_pme);
1229 if (status != 0)
1231 gmx_fatal(FARGS, "Error %d initializing PME", status);
1237 if (integrator[inputrec->eI].func == do_md)
1239 /* Turn on signal handling on all nodes */
1241 * (A user signal from the PME nodes (if any)
1242 * is communicated to the PP nodes.
1244 signal_handler_install();
1247 if (cr->duty & DUTY_PP)
1249 /* Assumes uniform use of the number of OpenMP threads */
1250 walltime_accounting = walltime_accounting_init(gmx_omp_nthreads_get(emntDefault));
1252 if (inputrec->bPull)
1254 /* Initialize pull code */
1255 inputrec->pull_work =
1256 init_pull(fplog, inputrec->pull, inputrec, nfile, fnm,
1257 mtop, cr, oenv, inputrec->fepvals->init_lambda,
1258 EI_DYNAMICS(inputrec->eI) && MASTER(cr), Flags);
1261 if (inputrec->bRot)
1263 /* Initialize enforced rotation code */
1264 init_rot(fplog, inputrec, nfile, fnm, cr, state->x, box, mtop, oenv,
1265 bVerbose, Flags);
1268 if (inputrec->eSwapCoords != eswapNO)
1270 /* Initialize ion swapping code */
1271 init_swapcoords(fplog, bVerbose, inputrec, opt2fn_master("-swap", nfile, fnm, cr),
1272 mtop, state->x, state->box, &state->swapstate, cr, oenv, Flags);
1275 constr = init_constraints(fplog, mtop, inputrec, ed, state, cr);
1277 if (DOMAINDECOMP(cr))
1279 GMX_RELEASE_ASSERT(fr, "fr was NULL while cr->duty was DUTY_PP");
1280 dd_init_bondeds(fplog, cr->dd, mtop, vsite, inputrec,
1281 Flags & MD_DDBONDCHECK, fr->cginfo_mb);
1283 set_dd_parameters(fplog, cr->dd, dlb_scale, inputrec, &ddbox);
1285 setup_dd_grid(fplog, cr->dd);
1288 /* Now do whatever the user wants us to do (how flexible...) */
1289 integrator[inputrec->eI].func(fplog, cr, nfile, fnm,
1290 oenv, bVerbose, bCompact,
1291 nstglobalcomm,
1292 vsite, constr,
1293 nstepout, inputrec, mtop,
1294 fcd, state,
1295 mdatoms, nrnb, wcycle, ed, fr,
1296 repl_ex_nst, repl_ex_nex, repl_ex_seed,
1297 membed,
1298 cpt_period, max_hours,
1299 imdport,
1300 Flags,
1301 walltime_accounting);
1303 if (inputrec->bPull)
1305 finish_pull(inputrec->pull_work);
1308 if (inputrec->bRot)
1310 finish_rot(inputrec->rot);
1314 else
1316 GMX_RELEASE_ASSERT(pmedata, "pmedata was NULL while cr->duty was not DUTY_PP");
1317 /* do PME only */
1318 walltime_accounting = walltime_accounting_init(gmx_omp_nthreads_get(emntPME));
1319 gmx_pmeonly(*pmedata, cr, nrnb, wcycle, walltime_accounting, ewaldcoeff_q, ewaldcoeff_lj, inputrec);
1322 wallcycle_stop(wcycle, ewcRUN);
1324 /* Finish up, write some stuff
1325 * if rerunMD, don't write last frame again
1327 finish_run(fplog, cr,
1328 inputrec, nrnb, wcycle, walltime_accounting,
1329 fr ? fr->nbv : NULL,
1330 EI_DYNAMICS(inputrec->eI) && !MULTISIM(cr));
1333 /* Free GPU memory and context */
1334 free_gpu_resources(fr, cr, &hwinfo->gpu_info, fr ? fr->gpu_opt : NULL);
1336 if (opt2bSet("-membed", nfile, fnm))
1338 sfree(membed);
1341 gmx_hardware_info_free(hwinfo);
1343 /* Does what it says */
1344 print_date_and_time(fplog, cr->nodeid, "Finished mdrun", gmx_gettime());
1345 walltime_accounting_destroy(walltime_accounting);
1347 /* Close logfile already here if we were appending to it */
1348 if (MASTER(cr) && (Flags & MD_APPENDFILES))
1350 gmx_log_close(fplog);
1353 rc = (int)gmx_get_stop_condition();
1355 done_ed(&ed);
1357 #ifdef GMX_THREAD_MPI
1358 /* we need to join all threads. The sub-threads join when they
1359 exit this function, but the master thread needs to be told to
1360 wait for that. */
1361 if (PAR(cr) && MASTER(cr))
1363 tMPI_Finalize();
1365 #endif
    return rc;
}