/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2011,2012,2013,2014,2015,2016,2017, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief Implements the MD runner routine calling all integrators.
 *
 * \author David van der Spoel <david.vanderspoel@icm.uu.se>
 *
 * \ingroup module_mdlib
 */
#include "gromacs/commandline/filenm.h"
#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/ewald/pme.h"
#include "gromacs/fileio/checkpoint.h"
#include "gromacs/fileio/oenv.h"
#include "gromacs/fileio/tpxio.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gpu_utils/gpu_utils.h"
#include "gromacs/hardware/cpuinfo.h"
#include "gromacs/hardware/detecthardware.h"
#include "gromacs/hardware/hardwareassign.h"
#include "gromacs/hardware/printhardware.h"
#include "gromacs/listed-forces/disre.h"
#include "gromacs/listed-forces/orires.h"
#include "gromacs/math/calculate-ewald-splitting-coefficient.h"
#include "gromacs/math/functions.h"
#include "gromacs/math/utilities.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdlib/calc_verletbuf.h"
#include "gromacs/mdlib/constr.h"
#include "gromacs/mdlib/force.h"
#include "gromacs/mdlib/forcerec.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdlib/integrator.h"
#include "gromacs/mdlib/main.h"
#include "gromacs/mdlib/md_support.h"
#include "gromacs/mdlib/mdatoms.h"
#include "gromacs/mdlib/mdrun.h"
#include "gromacs/mdlib/minimize.h"
#include "gromacs/mdlib/nbnxn_search.h"
#include "gromacs/mdlib/nbnxn_tuning.h"
#include "gromacs/mdlib/qmmm.h"
#include "gromacs/mdlib/sighandler.h"
#include "gromacs/mdlib/sim_util.h"
#include "gromacs/mdlib/tpi.h"
#include "gromacs/mdrunutility/mdmodules.h"
#include "gromacs/mdrunutility/threadaffinity.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/edsamhistory.h"
#include "gromacs/mdtypes/energyhistory.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/mdtypes/observableshistory.h"
#include "gromacs/mdtypes/state.h"
#include "gromacs/mdtypes/swaphistory.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/pulling/pull.h"
#include "gromacs/pulling/pull_rotation.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/topology/mtop_util.h"
#include "gromacs/trajectory/trajectoryframe.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/filestream.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/loggerbuilder.h"
#include "gromacs/utility/pleasecite.h"
#include "gromacs/utility/programcontext.h"
#include "gromacs/utility/smalloc.h"

#include "resource-division.h"

#include "corewrap.h"
//! First step used in pressure scaling
gmx_int64_t         deform_init_init_step_tpx;
//! Initial box for pressure scaling
matrix              deform_init_box_tpx;
//! MPI variable for use in pressure scaling
tMPI_Thread_mutex_t deform_init_box_mutex = TMPI_THREAD_MUTEX_INITIALIZER;
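
/* These file-scope variables act as a temporary hand-off for the box-deform
 * reference values: mdrunner() below stores inputrec->init_step and the
 * reference box here under deform_init_box_mutex, because the update struct
 * that should eventually hold them does not exist yet at that point. They are
 * written once, with identical values on every thread-MPI rank, which is what
 * makes this thread safe.
 */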
/* The minimum number of atoms per tMPI thread. With fewer atoms than this,
 * the number of threads will get lowered.
 */
#define MIN_ATOMS_PER_MPI_THREAD    90
#define MIN_ATOMS_PER_GPU           900
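
/* Rough sketch of how these limits are typically applied when choosing the
 * number of thread-MPI ranks (the actual logic lives in get_nthreads_mpi(),
 * declared in resource-division.h, and may differ in detail):
 *
 *   nrank = std::min(nrank, mtop->natoms/MIN_ATOMS_PER_MPI_THREAD);
 *   // and with GPUs, keep at least MIN_ATOMS_PER_GPU atoms per device
 *
 * This is an illustration of intent, not the literal code.
 */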
void Mdrunner::reinitializeOnSpawnedThread()
{
    // TODO This duplication is formally necessary if any thread might
    // modify any memory in fnm or the pointers it contains. If the
    // contents are ever provably const, then we can remove this
    // allocation (and memory leak).
    // TODO This should probably become part of a copy constructor for
    // Mdrunner.
    fnm = dup_tfn(nfile, fnm);

    cr  = reinitialize_commrec_for_this_thread(cr);

    if (!MASTER(cr))
    {
        // Only the master rank writes to the log files
        fplog = nullptr;
    }
}
/*! \brief The callback used for running on spawned threads.
 *
 * Obtains the pointer to the master mdrunner object from the one
 * argument permitted to the thread-launch API call, copies it to make
 * a new runner for this thread, reinitializes necessary data, and
 * proceeds to the simulation. */
static void mdrunner_start_fn(void *arg)
{
    try
    {
        auto masterMdrunner = reinterpret_cast<const gmx::Mdrunner *>(arg);
        /* copy the arg list to make sure that it's thread-local. This
           doesn't copy pointed-to items, of course, but those are all
           const. */
        gmx::Mdrunner mdrunner = *masterMdrunner;
        mdrunner.reinitializeOnSpawnedThread();
        mdrunner.mdrunner();
    }
    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
}
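
/* The GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR macro above closes the try block:
 * any exception escaping the spawned thread's work is converted into a GROMACS
 * fatal error instead of propagating into the thread-MPI layer.
 */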
/*! \brief Start thread-MPI threads.
 *
 * Called by mdrunner() to start a specific number of threads
 * (including the main thread) for thread-parallel runs. This in turn
 * calls mdrunner() for each thread. All options are the same as for
 * mdrunner(). */
t_commrec *Mdrunner::spawnThreads(int numThreadsToLaunch)
{
    /* first check whether we even need to start tMPI */
    if (numThreadsToLaunch < 2)
    {
        return cr;
    }

    gmx::Mdrunner spawnedMdrunner = *this;
    // TODO This duplication is formally necessary if any thread might
    // modify any memory in fnm or the pointers it contains. If the
    // contents are ever provably const, then we can remove this
    // allocation (and memory leak).
    // TODO This should probably become part of a copy constructor for
    // Mdrunner.
    spawnedMdrunner.fnm = dup_tfn(this->nfile, fnm);

    /* now spawn new threads that start mdrunner_start_fn(), while
       the main thread returns, we set thread affinity later */
    if (tMPI_Init_fn(TRUE, numThreadsToLaunch, TMPI_AFFINITY_NONE,
                     mdrunner_start_fn, static_cast<void*>(&spawnedMdrunner)) != TMPI_SUCCESS)
    {
        GMX_THROW(gmx::InternalError("Failed to spawn thread-MPI threads"));
    }

    return reinitialize_commrec_for_this_thread(cr);
}

#endif      /* GMX_THREAD_MPI */
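
/* Typical thread-MPI start-up sequence in mdrunner(), shown only as an
 * orientation sketch for how spawnThreads() is used further below:
 *
 *   hw_opt.nthreads_tmpi = get_nthreads_mpi(hwinfo, ...);  // decide rank count
 *   cr = spawnThreads(hw_opt.nthreads_tmpi);               // master gets a fresh commrec
 *   // each spawned thread enters mdrunner_start_fn(), copies the runner and
 *   // runs Mdrunner::mdrunner() on its own thread-local copy
 */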
/*! \brief Initialize variables for Verlet scheme simulation */
static void prepare_verlet_scheme(FILE               *fplog,
                                  t_commrec          *cr,
                                  t_inputrec         *ir,
                                  int                 nstlist_cmdline,
                                  const gmx_mtop_t   *mtop,
                                  matrix              box,
                                  bool                makeGpuPairList,
                                  const gmx::CpuInfo &cpuinfo)
{
    /* For NVE simulations, we will retain the initial list buffer */
    if (EI_DYNAMICS(ir->eI) &&
        ir->verletbuf_tol > 0 &&
        !(EI_MD(ir->eI) && ir->etc == etcNO))
    {
        /* Update the Verlet buffer size for the current run setup */
        verletbuf_list_setup_t ls;
        real                   rlist_new;

        /* Here we assume SIMD-enabled kernels are being used. But as currently
         * calc_verlet_buffer_size gives the same results for 4x8 and 4x4
         * and 4x2 gives a larger buffer than 4x4, this is ok.
         */
        verletbuf_get_list_setup(true, makeGpuPairList, &ls);

        calc_verlet_buffer_size(mtop, det(box), ir, ir->nstlist, ir->nstlist - 1, -1, &ls, nullptr, &rlist_new);

        if (rlist_new != ir->rlist)
        {
            if (fplog != nullptr)
            {
                fprintf(fplog, "\nChanging rlist from %g to %g for non-bonded %dx%d atom kernels\n\n",
                        ir->rlist, rlist_new,
                        ls.cluster_size_i, ls.cluster_size_j);
            }
            ir->rlist = rlist_new;
        }
    }

    if (nstlist_cmdline > 0 && (!EI_DYNAMICS(ir->eI) || ir->verletbuf_tol <= 0))
    {
        gmx_fatal(FARGS, "Can not set nstlist without %s",
                  !EI_DYNAMICS(ir->eI) ? "dynamics" : "verlet-buffer-tolerance");
    }

    if (EI_DYNAMICS(ir->eI))
    {
        /* Set or try nstlist values */
        increaseNstlist(fplog, cr, ir, nstlist_cmdline, mtop, box, makeGpuPairList, cpuinfo);
    }
}
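
/* Summary of the flow above: with verlet-buffer-tolerance > 0 the pair-list
 * cut-off rlist is not taken verbatim from the .mdp, but recomputed from the
 * drift tolerance for the actual nstlist and kernel cluster geometry, and
 * increaseNstlist() may afterwards try larger nstlist values (attractive on
 * GPUs, where longer-lived pair lists amortize the search cost). Illustrative
 * (hypothetical) numbers: with nstlist = 10 and rvdw = rcoulomb = 1.0 nm,
 * calc_verlet_buffer_size() might return an rlist_new of roughly 1.1 nm, and
 * ir->rlist is then overwritten with that value.
 */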
/*! \brief Override the nsteps value in inputrec
 *
 * with value passed on the command line (if any)
 */
static void override_nsteps_cmdline(const gmx::MDLogger &mdlog,
                                    gmx_int64_t          nsteps_cmdline,
                                    t_inputrec          *ir)
{
    /* override with anything else than the default -2 */
    if (nsteps_cmdline > -2)
    {
        char sbuf_steps[STEPSTRSIZE];
        char sbuf_msg[STRLEN];

        ir->nsteps = nsteps_cmdline;
        if (EI_DYNAMICS(ir->eI) && nsteps_cmdline != -1)
        {
            sprintf(sbuf_msg, "Overriding nsteps with value passed on the command line: %s steps, %.3g ps",
                    gmx_step_str(nsteps_cmdline, sbuf_steps),
                    fabs(nsteps_cmdline*ir->delta_t));
        }
        else
        {
            sprintf(sbuf_msg, "Overriding nsteps with value passed on the command line: %s steps",
                    gmx_step_str(nsteps_cmdline, sbuf_steps));
        }

        GMX_LOG(mdlog.warning).asParagraph().appendText(sbuf_msg);
    }
    else if (nsteps_cmdline < -2)
    {
        gmx_fatal(FARGS, "Invalid nsteps value passed on the command line: %" GMX_PRId64,
                  nsteps_cmdline);
    }
    /* Do nothing if nsteps_cmdline == -2 */
}
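
/* Sentinel semantics of nsteps_cmdline as handled above:
 *   -2 (the default) : keep ir->nsteps from the .tpr file, do nothing
 *   -1               : run without a step limit (no ps estimate is logged)
 *   >= 0             : override ir->nsteps with the given number of steps
 *   < -2             : rejected with a fatal error
 */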
//! Halt the run if there are inconsistencies between user choices to run with GPUs and/or hardware detection.
static void exitIfCannotForceGpuRun(bool requirePhysicalGpu,
                                    bool emulateGpu,
                                    bool useVerletScheme,
                                    bool compatibleGpusFound)
{
    /* Was GPU acceleration either explicitly (-nb gpu) or implicitly
     * (gpu ID passed) requested? */
    if (!requirePhysicalGpu)
    {
        return;
    }

    if (GMX_GPU == GMX_GPU_NONE)
    {
        gmx_fatal(FARGS, "GPU acceleration requested, but %s was compiled without GPU support!",
                  gmx::getProgramContext().displayName());
    }

    if (emulateGpu)
    {
        gmx_fatal(FARGS, "GPU emulation cannot be requested together with GPU acceleration!");
    }

    if (!useVerletScheme)
    {
        gmx_fatal(FARGS, "GPU acceleration requested, but can't be used without cutoff-scheme=Verlet");
    }

    if (!compatibleGpusFound)
    {
        gmx_fatal(FARGS, "GPU acceleration requested, but no compatible GPUs were detected.");
    }
}
/*! \brief Return whether GPU acceleration is useful with the given settings.
 *
 * If not, logs a message about falling back to CPU code. */
static bool gpuAccelerationIsUseful(const MDLogger   &mdlog,
                                    const t_inputrec *ir,
                                    bool              doRerun)
{
    if (doRerun && ir->opts.ngener > 1)
    {
        /* Rerun execution time is dominated by I/O and pair search,
         * so GPUs are not very useful, plus they do not support more
         * than one energy group. If the user requested GPUs
         * explicitly, a fatal error is given later. With non-reruns,
         * we fall back to a single whole-of-system energy group
         * (which runs much faster than a multiple-energy-groups
         * implementation would), and issue a note in the .log
         * file. Users can re-run if they want the information. */
        GMX_LOG(mdlog.warning).asParagraph().appendText("Multiple energy groups is not implemented for GPUs, so is not useful for this rerun, so falling back to the CPU");
        return false;
    }

    return true;
}
//! \brief Return the correct integrator function.
static integrator_t *my_integrator(unsigned int ei)
{
    if (!EI_DYNAMICS(ei))
    {
        GMX_THROW(APIError("do_md integrator would be called for a non-dynamical integrator"));
    }

    GMX_THROW(APIError("do_tpi integrator would be called for a non-TPI integrator"));

    GMX_THROW(NotImplementedError("SD2 integrator has been removed"));

    GMX_THROW(APIError("Non existing integrator selected"));
}
//! Initializes the logger for mdrun.
static gmx::LoggerOwner buildLogger(FILE *fplog, const t_commrec *cr)
{
    gmx::LoggerBuilder builder;
    if (fplog != nullptr)
    {
        builder.addTargetFile(gmx::MDLogger::LogLevel::Info, fplog);
    }
    if (cr == nullptr || SIMMASTER(cr))
    {
        builder.addTargetStream(gmx::MDLogger::LogLevel::Warning,
                                &gmx::TextOutputFile::standardError());
    }
    return builder.build();
}
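
/* Resulting logger layout: messages at Info level and above go to the .log
 * file whenever one is open on this rank, while Warning level and above is
 * additionally echoed to stderr, but only on the simulation master (or before
 * a commrec exists at all).
 */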
int Mdrunner::mdrunner()
{
    gmx_ddbox_t               ddbox = {0};
    int                       npme_major, npme_minor;
    gmx_mtop_t               *mtop          = nullptr;
    t_mdatoms                *mdatoms       = nullptr;
    t_forcerec               *fr            = nullptr;
    t_fcdata                 *fcd           = nullptr;
    real                      ewaldcoeff_q  = 0;
    real                      ewaldcoeff_lj = 0;
    struct gmx_pme_t        **pmedata       = nullptr;
    gmx_vsite_t              *vsite         = nullptr;

    int                       nChargePerturbed = -1, nTypePerturbed = 0, status;
    gmx_wallcycle_t           wcycle;
    gmx_walltime_accounting_t walltime_accounting = nullptr;

    gmx_int64_t               reset_counters;
    int                       nthreads_pme = 1;
    gmx_membed_t             *membed       = nullptr;
    gmx_hw_info_t            *hwinfo       = nullptr;

    /* CAUTION: threads may be started later on in this function, so
       cr doesn't reflect the final parallel state right now */
    gmx::MDModules            mdModules;
    t_inputrec                inputrecInstance;
    t_inputrec               *inputrec = &inputrecInstance;
    if (mdrunOptions.continuationOptions.appendFiles)
    {
        fplog = nullptr;
    }

    bool doMembed = opt2bSet("-membed", nfile, fnm);
    bool doRerun  = mdrunOptions.rerun;

    /* Handle GPU-related user options. Later, we check consistency
     * with things like whether support is compiled, or tMPI thread
     * count. */
    bool emulateGpu  = getenv("GMX_EMULATE_GPU") != nullptr;
    bool forceUseCpu = (strncmp(nbpu_opt, "cpu", 3) == 0);
    if (!hw_opt.gpuIdTaskAssignment.empty() && forceUseCpu)
    {
        gmx_fatal(FARGS, "GPU IDs were specified, and short-ranged interactions were assigned to the CPU. Make no more than one of these choices.");
    }
    bool forceUsePhysicalGpu = (strncmp(nbpu_opt, "gpu", 3) == 0) || !hw_opt.gpuIdTaskAssignment.empty();
    bool tryUsePhysicalGpu   = (strncmp(nbpu_opt, "auto", 4) == 0) && hw_opt.gpuIdTaskAssignment.empty() && !emulateGpu;
    GMX_RELEASE_ASSERT(!(forceUsePhysicalGpu && tryUsePhysicalGpu), "Must either force use of "
                       "GPUs for short-ranged interactions, or try to use them, not both.");
    // Here we assume that SIMMASTER(cr) does not change even after the
    // threads are started.
    gmx::LoggerOwner logOwner(buildLogger(fplog, cr));
    gmx::MDLogger    mdlog(logOwner.logger());

    hwinfo = gmx_detect_hardware(mdlog, cr);

    gmx_print_detected_hardware(fplog, cr, mdlog, hwinfo);

    if (fplog != nullptr)
    {
        /* Print references after all software/hardware printing */
        please_cite(fplog, "Abraham2015");
        please_cite(fplog, "Pall2015");
        please_cite(fplog, "Pronk2013");
        please_cite(fplog, "Hess2008b");
        please_cite(fplog, "Spoel2005a");
        please_cite(fplog, "Lindahl2001a");
        please_cite(fplog, "Berendsen95a");
    }
    // TODO state should be nullptr on non-master ranks, and perhaps call it globalState
    std::unique_ptr<t_state> stateInstance = std::unique_ptr<t_state>(new t_state);
    t_state *                state         = stateInstance.get();

    /* Read (nearly) all data required for the simulation */
    read_tpx_state(ftp2fn(efTPR, nfile, fnm), inputrec, state, mtop);

    exitIfCannotForceGpuRun(forceUsePhysicalGpu,
                            emulateGpu,
                            inputrec->cutoff_scheme == ecutsVERLET,
                            compatibleGpusFound(hwinfo->gpu_info));
    if (inputrec->cutoff_scheme == ecutsVERLET)
    {
        /* TODO This logic could run later, e.g. before -npme -1
           is handled. If inputrec has already been communicated,
           then the resulting tryUsePhysicalGpu does not need to
           be communicated. */
        if ((tryUsePhysicalGpu || forceUsePhysicalGpu) &&
            !gpuAccelerationIsUseful(mdlog, inputrec, doRerun))
        {
            /* Fallback message printed by gpuAccelerationIsUseful() */
            if (forceUsePhysicalGpu)
            {
                gmx_fatal(FARGS, "GPU acceleration requested, but not supported with the given input settings");
            }
            tryUsePhysicalGpu = false;
        }
    }
    else
    {
        if (nstlist_cmdline > 0)
        {
            gmx_fatal(FARGS, "Can not set nstlist with the group cut-off scheme");
        }

        if (compatibleGpusFound(hwinfo->gpu_info))
        {
            GMX_LOG(mdlog.warning).asParagraph().appendText(
                    "NOTE: GPU(s) found, but the current simulation can not use GPUs\n"
                    "      To use a GPU, set the mdp option: cutoff-scheme = Verlet");
        }
        tryUsePhysicalGpu = false;
    }
    bool nonbondedOnGpu = (tryUsePhysicalGpu || forceUsePhysicalGpu) && compatibleGpusFound(hwinfo->gpu_info);
    /* Check and update the hardware options for internal consistency */
    check_and_update_hw_opt_1(&hw_opt, cr, domdecOptions.numPmeRanks);

    /* Early check for externally set process affinity. */
    gmx_check_thread_affinity_set(mdlog, cr,
                                  &hw_opt, hwinfo->nthreads_hw_avail, FALSE);

    if (domdecOptions.numPmeRanks > 0 && hw_opt.nthreads_tmpi <= 0)
    {
        gmx_fatal(FARGS, "You need to explicitly specify the number of MPI threads (-ntmpi) when using separate PME ranks");
    }

    /* Since the master knows the cut-off scheme, update hw_opt for this.
     * This is done later for normal MPI and also once more with tMPI
     * for all tMPI ranks.
     */
    check_and_update_hw_opt_2(&hw_opt, inputrec->cutoff_scheme);

    /* Determine how many thread-MPI ranks to start.
     *
     * TODO Over-writing the user-supplied value here does
     * prevent any possible subsequent checks from working
     * correctly. */
    hw_opt.nthreads_tmpi = get_nthreads_mpi(hwinfo,
                                            domdecOptions.numPmeRanks,

    // Now start the threads for thread MPI.
    cr = spawnThreads(hw_opt.nthreads_tmpi);
    /* The main thread continues here with a new cr. We don't deallocate
       the old cr because other threads may still be reading it. */
    // TODO Both master and spawned threads call dup_tfn and
    // reinitialize_commrec_for_this_thread. Find a way to express
    // this better.
    /* END OF CAUTION: cr is now reliable */

    /* now broadcast everything to the non-master nodes/threads: */
    init_parallel(cr, inputrec, mtop);

    gmx_bcast_sim(sizeof(nonbondedOnGpu), &nonbondedOnGpu, cr);

    // TODO: Error handling
    mdModules.assignOptionsToModules(*inputrec->params, nullptr);

    if (fplog != nullptr)
    {
        pr_inputrec(fplog, 0, "Input Parameters", inputrec, FALSE);
        fprintf(fplog, "\n");
    }
    /* now make sure the state is initialized and propagated */
    set_state_entries(state, inputrec);

    /* A parallel command line option consistency check that we can
       only do after any threads have started. */
    if (!PAR(cr) && (domdecOptions.numCells[XX] > 1 ||
                     domdecOptions.numCells[YY] > 1 ||
                     domdecOptions.numCells[ZZ] > 1 ||
                     domdecOptions.numPmeRanks > 0))
    {
629 "The -dd or -npme option request a parallel simulation, "
631 "but %s was compiled without threads or MPI enabled"
634 "but the number of MPI-threads (option -ntmpi) is not set or is 1"
636 "but %s was not started through mpirun/mpiexec or only one rank was requested through mpirun/mpiexec"
639 , output_env_get_program_display_name(oenv
)
    if (doRerun &&
        (EI_ENERGY_MINIMIZATION(inputrec->eI) || eiNM == inputrec->eI))
    {
        gmx_fatal(FARGS, "The .mdp file specified an energy minimization or normal mode algorithm, and these are not compatible with mdrun -rerun");
    }

    if (can_use_allvsall(inputrec, TRUE, cr, fplog) && DOMAINDECOMP(cr))
    {
        gmx_fatal(FARGS, "All-vs-all loops do not work with domain decomposition, use a single MPI rank");
    }
    if (!(EEL_PME(inputrec->coulombtype) || EVDW_PME(inputrec->vdwtype)))
    {
        if (domdecOptions.numPmeRanks > 0)
        {
            gmx_fatal_collective(FARGS, cr->mpi_comm_mysim, MASTER(cr),
                                 "PME-only ranks are requested, but the system does not use PME for electrostatics or LJ");
        }

        domdecOptions.numPmeRanks = 0;
    }

    if (nonbondedOnGpu && domdecOptions.numPmeRanks < 0)
    {
        /* With GPUs we don't automatically use PME-only ranks. PME ranks can
         * improve performance with many threads per GPU, since our OpenMP
         * scaling is bad, but it's difficult to automate the setup.
         */
        domdecOptions.numPmeRanks = 0;
    }
    fcRegisterSteps(inputrec->nsteps, inputrec->init_step);
    /* NMR restraints must be initialized before load_checkpoint,
     * since with time averaging the history is added to t_state.
     * For proper consistency check we therefore need to extend
     * t_state here.
     * So the PME-only nodes (if present) will also initialize
     * the distance restraints.
     */

    /* This needs to be called before read_checkpoint to extend the state */
    init_disres(fplog, mtop, inputrec, cr, fcd, state, replExParams.exchangeInterval > 0);

    init_orires(fplog, mtop, as_rvec_array(state->x.data()), inputrec, cr, &(fcd->orires),
    if (inputrecDeform(inputrec))
    {
        /* Store the deform reference box before reading the checkpoint */
        copy_mat(state->box, box);

        gmx_bcast(sizeof(box), box, cr);

        /* Because we do not have the update struct available yet
         * in which the reference values should be stored,
         * we store them temporarily in static variables.
         * This should be thread safe, since they are only written once
         * and with identical values.
         */
        tMPI_Thread_mutex_lock(&deform_init_box_mutex);
        deform_init_init_step_tpx = inputrec->init_step;
        copy_mat(box, deform_init_box_tpx);
        tMPI_Thread_mutex_unlock(&deform_init_box_mutex);
    }

    ObservablesHistory observablesHistory = {};
    ContinuationOptions &continuationOptions = mdrunOptions.continuationOptions;

    if (continuationOptions.startedFromCheckpoint)
    {
        /* Check if checkpoint file exists before doing continuation.
         * This way we can use identical input options for the first and subsequent runs...
         */
        gmx_bool bReadEkin;

        load_checkpoint(opt2fn_master("-cpi", nfile, fnm, cr), &fplog,
                        cr, domdecOptions.numCells,
                        inputrec, state, &bReadEkin, &observablesHistory,
                        continuationOptions.appendFiles,
                        continuationOptions.appendFilesOptionSet,
                        mdrunOptions.reproducible);

        if (bReadEkin)
        {
            continuationOptions.haveReadEkin = true;
        }
    }

    if (SIMMASTER(cr) && continuationOptions.appendFiles)
    {
        gmx_log_open(ftp2fn(efLOG, nfile, fnm), cr,
                     continuationOptions.appendFiles, &fplog);
        logOwner = buildLogger(fplog, nullptr);
        mdlog    = logOwner.logger();
    }
    /* override nsteps with value set on the command line */
    override_nsteps_cmdline(mdlog, mdrunOptions.numStepsCommandline, inputrec);

    copy_mat(state->box, box);

    gmx_bcast(sizeof(box), box, cr);

    /* Update rlist and nstlist. */
    if (inputrec->cutoff_scheme == ecutsVERLET)
    {
        prepare_verlet_scheme(fplog, cr, inputrec, nstlist_cmdline, mtop,
                              box, nonbondedOnGpu || emulateGpu, *hwinfo->cpuInfo);
    }
    if (PAR(cr) && !(EI_TPI(inputrec->eI) ||
                     inputrec->eI == eiNM))
    {
        cr->dd = init_domain_decomposition(fplog, cr, domdecOptions, mdrunOptions,
                                           box, as_rvec_array(state->x.data()),
                                           &ddbox, &npme_major, &npme_minor);
        // Note that local state still does not exist yet.
    }
    else
    {
        /* PME, if used, is done on all nodes with 1D decomposition */
        cr->duty = (DUTY_PP | DUTY_PME);

        if (inputrec->ePBC == epbcSCREW)
        {
            gmx_fatal(FARGS,
                      "pbc=%s is only implemented with domain decomposition",
                      epbc_names[inputrec->ePBC]);
        }
    }
    /* After possible communicator splitting in make_dd_communicators,
     * we can set up the intra/inter node communication.
     */
    gmx_setup_nodecomm(fplog, cr);

    /* Initialize per-physical-node MPI process/thread ID and counters. */
    gmx_init_intranode_counters(cr);

    GMX_LOG(mdlog.warning).asParagraph().appendTextFormatted(
            "This is simulation %d out of %d running as a composite GROMACS\n"
            "multi-simulation job. Setup for this simulation:\n",
            cr->ms->sim, cr->ms->nsim);
    GMX_LOG(mdlog.warning).appendTextFormatted(
            "Using %d MPI %s\n",
            cr->nnodes,
#if GMX_THREAD_MPI
            cr->nnodes == 1 ? "thread" : "threads"
#else
            cr->nnodes == 1 ? "process" : "processes"
#endif
            );
    /* Check and update hw_opt for the cut-off scheme */
    check_and_update_hw_opt_2(&hw_opt, inputrec->cutoff_scheme);

    /* Check and update hw_opt for the number of MPI ranks */
    check_and_update_hw_opt_3(&hw_opt);

    gmx_omp_nthreads_init(mdlog, cr,
                          hwinfo->nthreads_hw_avail,
                          hw_opt.nthreads_omp,
                          hw_opt.nthreads_omp_pme,
                          (cr->duty & DUTY_PP) == 0,
                          inputrec->cutoff_scheme == ecutsVERLET);

    if (EI_TPI(inputrec->eI) &&
        inputrec->cutoff_scheme == ecutsVERLET)
    {
        gmx_feenableexcept();
    }
    // Contains the ID of the GPU used by each PP rank on this node,
    // indexed by that rank. Empty if no GPUs are selected for use on
    // this node.
    std::vector<int> gpuTaskAssignment;

    /* Currently the DD code assigns duty to ranks that can
     * include PP work that currently can be executed on a single
     * GPU, if present and compatible. This has to be coordinated
     * across PP ranks on a node, with possible multiple devices
     * or sharing devices on a node, either from the user
     * selection, or automatically. */
    bool rankCanUseGpu = cr->duty & DUTY_PP;
    gpuTaskAssignment = mapPpRanksToGpus(rankCanUseGpu, cr, hwinfo->gpu_info, hw_opt);

    reportGpuUsage(mdlog, hwinfo->gpu_info, !hw_opt.gpuIdTaskAssignment.empty(),
                   gpuTaskAssignment, cr->nrank_pp_intranode, cr->nnodes > 1);

    if (!gpuTaskAssignment.empty())
    {
        GMX_RELEASE_ASSERT(cr->nrank_pp_intranode == static_cast<int>(gpuTaskAssignment.size()),
                           "The number of PP ranks on each node must equal the number of GPU tasks used on each node");
    }
    /* Prevent other ranks from continuing after an issue was found
     * and reported as a fatal error.
     *
     * TODO This function implements a barrier so that MPI runtimes
     * can organize an orderly shutdown if one of the ranks has had to
     * issue a fatal error in various code already run. When we have
     * MPI-aware error handling and reporting, this should be
     * reconsidered.
     */
    MPI_Barrier(cr->mpi_comm_mysim);

    /* Now that we know the setup is consistent, check for efficiency */
    check_resource_division_efficiency(hwinfo, hw_opt.nthreads_tot, !gpuTaskAssignment.empty(), mdrunOptions.ntompOptionIsSet,
    gmx_device_info_t *shortRangedDeviceInfo = nullptr;
    int                shortRangedDeviceId   = -1;
    if (cr->duty & DUTY_PP)
    {
        if (!gpuTaskAssignment.empty())
        {
            shortRangedDeviceId   = gpuTaskAssignment[cr->rank_pp_intranode];
            shortRangedDeviceInfo = getDeviceInfo(hwinfo->gpu_info, shortRangedDeviceId);
        }
    }

    if (DOMAINDECOMP(cr))
    {
        /* When we share GPUs over ranks, we need to know this for the DLB */
        dd_setup_dlb_resource_sharing(cr, shortRangedDeviceId);
    }
    /* getting number of PP/PME threads
       PME: env variable should be read only on one node to make sure it is
       identical everywhere;
     */
    nthreads_pme = gmx_omp_nthreads_get(emntPME);

    wcycle = wallcycle_init(fplog, mdrunOptions.timingOptions.resetStep, cr);

    /* Master synchronizes its value of reset_counters with all nodes
     * including PME only nodes */
    reset_counters = wcycle_get_reset_counters(wcycle);
    gmx_bcast_sim(sizeof(reset_counters), &reset_counters, cr);
    wcycle_set_reset_counters(wcycle, reset_counters);
    // Membrane embedding must be initialized before we call init_forcerec()
    if (doMembed)
    {
        fprintf(stderr, "Initializing membed");

        /* Note that membed cannot work in parallel because mtop is
         * changed here. Fix this if we ever want to make it run with
         * more than one rank.
         */
        membed = init_membed(fplog, nfile, fnm, mtop, inputrec, state, cr, &mdrunOptions.checkpointOptions.period);
    }
    if (cr->duty & DUTY_PP)
    {
        bcast_state(cr, state);

        /* Initiate forcerecord */
        fr                 = mk_forcerec();
        fr->forceProviders = mdModules.initForceProviders();
        init_forcerec(fplog, mdlog, fr, fcd,
                      inputrec, mtop, cr, box,
                      opt2fn("-table", nfile, fnm),
                      opt2fn("-tablep", nfile, fnm),
                      getFilenm("-tableb", nfile, fnm),
                      shortRangedDeviceInfo,
        /* Initialize QM-MM */
        init_QMMMrec(cr, mtop, inputrec, fr);

        /* Initialize the mdatoms structure.
         * mdatoms is not filled with atom data,
         * as this can not be done now with domain decomposition.
         */
        mdatoms = init_mdatoms(fplog, mtop, inputrec->efep != efepNO);

        /* Initialize the virtual site communication */
        vsite = init_vsite(mtop, cr, FALSE);

        calc_shifts(box, fr->shift_vec);
        /* With periodic molecules the charge groups should be whole at start up
         * and the virtual sites should not be far from their proper positions.
         */
        if (!inputrec->bContinuation && MASTER(cr) &&
            !(inputrec->ePBC != epbcNONE && inputrec->bPeriodicMols))
        {
            /* Make molecules whole at start of run */
            if (fr->ePBC != epbcNONE)
            {
                do_pbc_first_mtop(fplog, inputrec->ePBC, box, mtop, as_rvec_array(state->x.data()));
            }

            /* Correct initial vsite positions are required
             * for the initial distribution in the domain decomposition
             * and for the initial shell prediction.
             */
            construct_vsites_mtop(vsite, mtop, as_rvec_array(state->x.data()));
        }
        if (EEL_PME(fr->eeltype) || EVDW_PME(fr->vdwtype))
        {
            ewaldcoeff_q  = fr->ewaldcoeff_q;
            ewaldcoeff_lj = fr->ewaldcoeff_lj;
            pmedata       = &fr->pmedata;
        }
    }
    else
    {
        /* This is a PME only node */

        /* We don't need the state */
        stateInstance.reset();

        ewaldcoeff_q  = calc_ewaldcoeff_q(inputrec->rcoulomb, inputrec->ewald_rtol);
        ewaldcoeff_lj = calc_ewaldcoeff_lj(inputrec->rvdw, inputrec->ewald_rtol_lj);
    }
    if (hw_opt.thread_affinity != threadaffOFF)
    {
        /* Before setting affinity, check whether the affinity has changed
         * - which indicates that probably the OpenMP library has changed it
         * since we first checked.
         */
        gmx_check_thread_affinity_set(mdlog, cr,
                                      &hw_opt, hwinfo->nthreads_hw_avail, TRUE);

        int nthread_local;
        /* threads on this MPI process or TMPI thread */
        if (cr->duty & DUTY_PP)
        {
            nthread_local = gmx_omp_nthreads_get(emntNonbonded);
        }
        else
        {
            nthread_local = gmx_omp_nthreads_get(emntPME);
        }

        /* Set the CPU affinity */
        gmx_set_thread_affinity(mdlog, cr, &hw_opt, *hwinfo->hardwareTopology,
                                nthread_local, nullptr);
    }
    /* Initiate PME if necessary,
     * either on all nodes or on dedicated PME nodes only. */
    if (EEL_PME(inputrec->coulombtype) || EVDW_PME(inputrec->vdwtype))
    {
        nChargePerturbed = mdatoms->nChargePerturbed;
        if (EVDW_PME(inputrec->vdwtype))
        {
            nTypePerturbed = mdatoms->nTypePerturbed;
        }

        if (cr->npmenodes > 0)
        {
            /* The PME only nodes need to know nChargePerturbed(FEP on Q) and nTypePerturbed(FEP on LJ)*/
            gmx_bcast_sim(sizeof(nChargePerturbed), &nChargePerturbed, cr);
            gmx_bcast_sim(sizeof(nTypePerturbed), &nTypePerturbed, cr);
        }

        if (cr->duty & DUTY_PME)
        {
            try
            {
                status = gmx_pme_init(pmedata, cr, npme_major, npme_minor, inputrec,
                                      mtop ? mtop->natoms : 0, nChargePerturbed, nTypePerturbed,
                                      mdrunOptions.reproducible,
                                      ewaldcoeff_q, ewaldcoeff_lj,
                                      nthreads_pme);
            }
            GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;

            if (status != 0)
            {
                gmx_fatal(FARGS, "Error %d initializing PME", status);
            }
        }
    }
    if (EI_DYNAMICS(inputrec->eI))
    {
        /* Turn on signal handling on all nodes */
        /*
         * (A user signal from the PME nodes (if any)
         * is communicated to the PP nodes.)
         */
        signal_handler_install();
    }
    if (cr->duty & DUTY_PP)
    {
        /* Assumes uniform use of the number of OpenMP threads */
        walltime_accounting = walltime_accounting_init(gmx_omp_nthreads_get(emntDefault));

        if (inputrec->bPull)
        {
            /* Initialize pull code */
            inputrec->pull_work =
                init_pull(fplog, inputrec->pull, inputrec, nfile, fnm,
                          mtop, cr, oenv, inputrec->fepvals->init_lambda,
                          EI_DYNAMICS(inputrec->eI) && MASTER(cr),
                          continuationOptions);
        }

        /* Initialize enforced rotation code */
        init_rot(fplog, inputrec, nfile, fnm, cr, as_rvec_array(state->x.data()), state->box, mtop, oenv,

        /* Let init_constraints know whether we have essential dynamics constraints.
         * TODO: inputrec should tell us whether we use an algorithm, not a file option or the checkpoint
         */
        bool doEdsam = (opt2fn_null("-ei", nfile, fnm) != nullptr || observablesHistory.edsamHistory);

        constr = init_constraints(fplog, mtop, inputrec, doEdsam, cr);
        if (DOMAINDECOMP(cr))
        {
            GMX_RELEASE_ASSERT(fr, "fr was NULL while cr->duty was DUTY_PP");
            /* This call is not included in init_domain_decomposition mainly
             * because fr->cginfo_mb is set later.
             */
            dd_init_bondeds(fplog, cr->dd, mtop, vsite, inputrec,
                            domdecOptions.checkBondedInteractions,
                            fr->cginfo_mb);
        }

        /* Now do whatever the user wants us to do (how flexible...) */
        my_integrator(inputrec->eI) (fplog, cr, mdlog, nfile, fnm,
                                     mdModules.outputProvider(),
                                     fcd, state, &observablesHistory,
                                     mdatoms, nrnb, wcycle, fr,
                                     walltime_accounting);
        finish_rot(inputrec->rot);

        if (inputrec->bPull)
        {
            finish_pull(inputrec->pull_work);
        }
    }
    else
    {
        GMX_RELEASE_ASSERT(pmedata, "pmedata was NULL while cr->duty was not DUTY_PP");

        walltime_accounting = walltime_accounting_init(gmx_omp_nthreads_get(emntPME));
        gmx_pmeonly(*pmedata, cr, nrnb, wcycle, walltime_accounting, ewaldcoeff_q, ewaldcoeff_lj, inputrec);
    }
    wallcycle_stop(wcycle, ewcRUN);

    /* Finish up, write some stuff
     * if rerunMD, don't write last frame again
     */
    finish_run(fplog, mdlog, cr,
               inputrec, nrnb, wcycle, walltime_accounting,
               fr ? fr->nbv : nullptr,
               EI_DYNAMICS(inputrec->eI) && !MULTISIM(cr));

    gmx_pme_destroy(*pmedata); // TODO: pmedata is always a single element list, refactor

    /* Free GPU memory and context */
    free_gpu_resources(fr, cr, shortRangedDeviceInfo);

    free_membed(membed);

    gmx_hardware_info_free(hwinfo);

    /* Does what it says */
    print_date_and_time(fplog, cr->nodeid, "Finished mdrun", gmx_gettime());
    walltime_accounting_destroy(walltime_accounting);

    /* Close logfile already here if we were appending to it */
    if (MASTER(cr) && continuationOptions.appendFiles)
    {
        gmx_log_close(fplog);
    }

    rc = (int)gmx_get_stop_condition();

    /* we need to join all threads. The sub-threads join when they
       exit this function, but the master thread needs to be told to
       wait for that. */
    if (PAR(cr) && MASTER(cr))