Cleaned up ewaldcoeff for PME-only ranks
[gromacs.git] / src / programs / mdrun / runner.cpp
blob 7ac7975338efb5434c88a11698e48b7588a7a0c6
1 /*
2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5 * Copyright (c) 2001-2004, The GROMACS development team.
6 * Copyright (c) 2011,2012,2013,2014,2015,2016,2017, by the GROMACS development team, led by
7 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
8 * and including many others, as listed in the AUTHORS file in the
9 * top-level source directory and at http://www.gromacs.org.
11 * GROMACS is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public License
13 * as published by the Free Software Foundation; either version 2.1
14 * of the License, or (at your option) any later version.
16 * GROMACS is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with GROMACS; if not, see
23 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
24 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
26 * If you want to redistribute modifications to GROMACS, please
27 * consider that scientific software is very special. Version
28 * control is crucial - bugs must be traceable. We will be happy to
29 * consider code for inclusion in the official distribution, but
30 * derived work must not be called official GROMACS. Details are found
31 * in the README & COPYING files - if they are missing, get the
32 * official version at http://www.gromacs.org.
34 * To help us fund GROMACS development, we humbly ask that you cite
35 * the research papers on the package. Check out http://www.gromacs.org.
37 /*! \internal \file
39 * \brief Implements the MD runner routine calling all integrators.
41 * \author David van der Spoel <david.vanderspoel@icm.uu.se>
42 * \ingroup module_mdlib
44 #include "gmxpre.h"
46 #include "runner.h"
48 #include "config.h"
50 #include <cassert>
51 #include <csignal>
52 #include <cstdlib>
53 #include <cstring>
55 #include <algorithm>
57 #include "gromacs/commandline/filenm.h"
58 #include "gromacs/domdec/domdec.h"
59 #include "gromacs/domdec/domdec_struct.h"
60 #include "gromacs/ewald/ewald-utils.h"
61 #include "gromacs/ewald/pme.h"
62 #include "gromacs/fileio/checkpoint.h"
63 #include "gromacs/fileio/oenv.h"
64 #include "gromacs/fileio/tpxio.h"
65 #include "gromacs/gmxlib/network.h"
66 #include "gromacs/gpu_utils/gpu_utils.h"
67 #include "gromacs/hardware/cpuinfo.h"
68 #include "gromacs/hardware/detecthardware.h"
69 #include "gromacs/hardware/printhardware.h"
70 #include "gromacs/listed-forces/disre.h"
71 #include "gromacs/listed-forces/orires.h"
72 #include "gromacs/math/functions.h"
73 #include "gromacs/math/utilities.h"
74 #include "gromacs/math/vec.h"
75 #include "gromacs/mdlib/calc_verletbuf.h"
76 #include "gromacs/mdlib/constr.h"
77 #include "gromacs/mdlib/force.h"
78 #include "gromacs/mdlib/forcerec.h"
79 #include "gromacs/mdlib/gmx_omp_nthreads.h"
80 #include "gromacs/mdlib/integrator.h"
81 #include "gromacs/mdlib/main.h"
82 #include "gromacs/mdlib/md_support.h"
83 #include "gromacs/mdlib/mdatoms.h"
84 #include "gromacs/mdlib/mdrun.h"
85 #include "gromacs/mdlib/minimize.h"
86 #include "gromacs/mdlib/nb_verlet.h"
87 #include "gromacs/mdlib/nbnxn_search.h"
88 #include "gromacs/mdlib/nbnxn_tuning.h"
89 #include "gromacs/mdlib/qmmm.h"
90 #include "gromacs/mdlib/sighandler.h"
91 #include "gromacs/mdlib/sim_util.h"
92 #include "gromacs/mdlib/tpi.h"
93 #include "gromacs/mdrunutility/mdmodules.h"
94 #include "gromacs/mdrunutility/threadaffinity.h"
95 #include "gromacs/mdtypes/commrec.h"
96 #include "gromacs/mdtypes/edsamhistory.h"
97 #include "gromacs/mdtypes/energyhistory.h"
98 #include "gromacs/mdtypes/inputrec.h"
99 #include "gromacs/mdtypes/md_enums.h"
100 #include "gromacs/mdtypes/observableshistory.h"
101 #include "gromacs/mdtypes/state.h"
102 #include "gromacs/mdtypes/swaphistory.h"
103 #include "gromacs/pbcutil/pbc.h"
104 #include "gromacs/pulling/pull.h"
105 #include "gromacs/pulling/pull_rotation.h"
106 #include "gromacs/taskassignment/hardwareassign.h"
107 #include "gromacs/taskassignment/resourcedivision.h"
108 #include "gromacs/timing/wallcycle.h"
109 #include "gromacs/topology/mtop_util.h"
110 #include "gromacs/trajectory/trajectoryframe.h"
111 #include "gromacs/utility/cstringutil.h"
112 #include "gromacs/utility/exceptions.h"
113 #include "gromacs/utility/fatalerror.h"
114 #include "gromacs/utility/filestream.h"
115 #include "gromacs/utility/gmxassert.h"
116 #include "gromacs/utility/gmxmpi.h"
117 #include "gromacs/utility/logger.h"
118 #include "gromacs/utility/loggerbuilder.h"
119 #include "gromacs/utility/pleasecite.h"
120 #include "gromacs/utility/programcontext.h"
121 #include "gromacs/utility/smalloc.h"
123 #include "deform.h"
124 #include "md.h"
125 #include "membed.h"
126 #include "repl_ex.h"
128 #ifdef GMX_FAHCORE
129 #include "corewrap.h"
130 #endif
132 //! First step used in pressure scaling
133 gmx_int64_t deform_init_init_step_tpx;
134 //! Initial box for pressure scaling
135 matrix deform_init_box_tpx;
136 //! MPI variable for use in pressure scaling
137 tMPI_Thread_mutex_t deform_init_box_mutex = TMPI_THREAD_MUTEX_INITIALIZER;
139 #if GMX_THREAD_MPI
140 /* The minimum number of atoms per tMPI thread. With fewer atoms than this,
141 * the number of threads will get lowered.
143 #define MIN_ATOMS_PER_MPI_THREAD 90
144 #define MIN_ATOMS_PER_GPU 900
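/* Likewise, a lower bound on the number of atoms per GPU; used, like
 * MIN_ATOMS_PER_MPI_THREAD above, when deciding how many thread-MPI
 * ranks to start (an assumption based on the surrounding thread-MPI setup). */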
146 namespace gmx
149 void Mdrunner::reinitializeOnSpawnedThread()
151 // TODO This duplication is formally necessary if any thread might
152 // modify any memory in fnm or the pointers it contains. If the
153 // contents are ever provably const, then we can remove this
154 // allocation (and memory leak).
155 // TODO This should probably become part of a copy constructor for
156 // Mdrunner.
157 fnm = dup_tfn(nfile, fnm);
159 cr = reinitialize_commrec_for_this_thread(cr);
161 if (!MASTER(cr))
163 // Only the master rank writes to the log files
164 fplog = nullptr;
168 /*! \brief The callback used for running on spawned threads.
170 * Obtains the pointer to the master mdrunner object from the one
171 * argument permitted to the thread-launch API call, copies it to make
172 * a new runner for this thread, reinitializes necessary data, and
173 * proceeds to the simulation. */
174 static void mdrunner_start_fn(void *arg)
178 auto masterMdrunner = reinterpret_cast<const gmx::Mdrunner *>(arg);
179 /* copy the arg list to make sure that it's thread-local. This
180 doesn't copy pointed-to items, of course, but those are all
181 const. */
182 gmx::Mdrunner mdrunner = *masterMdrunner;
183 mdrunner.reinitializeOnSpawnedThread();
184 mdrunner.mdrunner();
186 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
190 /*! \brief Start thread-MPI threads.
192 * Called by mdrunner() to start a specific number of threads
193 * (including the main thread) for thread-parallel runs. This in turn
194 * calls mdrunner() for each thread. All options are the same as for
195 * mdrunner(). */
196 t_commrec *Mdrunner::spawnThreads(int numThreadsToLaunch)
199 /* first check whether we even need to start tMPI */
200 if (numThreadsToLaunch < 2)
202 return cr;
205 gmx::Mdrunner spawnedMdrunner = *this;
206 // TODO This duplication is formally necessary if any thread might
207 // modify any memory in fnm or the pointers it contains. If the
208 // contents are ever provably const, then we can remove this
209 // allocation (and memory leak).
210 // TODO This should probably become part of a copy constructor for
211 // Mdrunner.
212 spawnedMdrunner.fnm = dup_tfn(this->nfile, fnm);
214 /* now spawn new threads that start mdrunner_start_fn(), while
215 the main thread returns; we set thread affinity later */
216 if (tMPI_Init_fn(TRUE, numThreadsToLaunch, TMPI_AFFINITY_NONE,
217 mdrunner_start_fn, static_cast<void*>(&spawnedMdrunner)) != TMPI_SUCCESS)
219 GMX_THROW(gmx::InternalError("Failed to spawn thread-MPI threads"));
222 return reinitialize_commrec_for_this_thread(cr);
225 } // namespace
227 #endif /* GMX_THREAD_MPI */
229 /*! \brief Initialize variables for Verlet scheme simulation */
230 static void prepare_verlet_scheme(FILE *fplog,
231 t_commrec *cr,
232 t_inputrec *ir,
233 int nstlist_cmdline,
234 const gmx_mtop_t *mtop,
235 const matrix box,
236 bool makeGpuPairList,
237 const gmx::CpuInfo &cpuinfo)
239 /* For NVE simulations, we will retain the initial list buffer */
240 if (EI_DYNAMICS(ir->eI) &&
241 ir->verletbuf_tol > 0 &&
242 !(EI_MD(ir->eI) && ir->etc == etcNO))
244 /* Update the Verlet buffer size for the current run setup */
245 verletbuf_list_setup_t ls;
246 real rlist_new;
248 /* Here we assume SIMD-enabled kernels are being used. But as currently
249 * calc_verlet_buffer_size gives the same results for 4x8 and 4x4
250 * and 4x2 gives a larger buffer than 4x4, this is ok.
252 verletbuf_get_list_setup(true, makeGpuPairList, &ls);
254 calc_verlet_buffer_size(mtop, det(box), ir, ir->nstlist, ir->nstlist - 1, -1, &ls, nullptr, &rlist_new);
256 if (rlist_new != ir->rlist)
258 if (fplog != nullptr)
260 fprintf(fplog, "\nChanging rlist from %g to %g for non-bonded %dx%d atom kernels\n\n",
261 ir->rlist, rlist_new,
262 ls.cluster_size_i, ls.cluster_size_j);
264 ir->rlist = rlist_new;
268 if (nstlist_cmdline > 0 && (!EI_DYNAMICS(ir->eI) || ir->verletbuf_tol <= 0))
270 gmx_fatal(FARGS, "Can not set nstlist without %s",
271 !EI_DYNAMICS(ir->eI) ? "dynamics" : "verlet-buffer-tolerance");
274 if (EI_DYNAMICS(ir->eI))
276 /* Set or try nstlist values */
277 increaseNstlist(fplog, cr, ir, nstlist_cmdline, mtop, box, makeGpuPairList, cpuinfo);
281 /*! \brief Override the nsteps value in inputrec
283 * with value passed on the command line (if any)
285 static void override_nsteps_cmdline(const gmx::MDLogger &mdlog,
286 gmx_int64_t nsteps_cmdline,
287 t_inputrec *ir)
289 assert(ir);
291 /* override with anything other than the default -2 */
292 if (nsteps_cmdline > -2)
294 char sbuf_steps[STEPSTRSIZE];
295 char sbuf_msg[STRLEN];
297 ir->nsteps = nsteps_cmdline;
298 if (EI_DYNAMICS(ir->eI) && nsteps_cmdline != -1)
300 sprintf(sbuf_msg, "Overriding nsteps with value passed on the command line: %s steps, %.3g ps",
301 gmx_step_str(nsteps_cmdline, sbuf_steps),
302 fabs(nsteps_cmdline*ir->delta_t));
304 else
306 sprintf(sbuf_msg, "Overriding nsteps with value passed on the command line: %s steps",
307 gmx_step_str(nsteps_cmdline, sbuf_steps));
310 GMX_LOG(mdlog.warning).asParagraph().appendText(sbuf_msg);
312 else if (nsteps_cmdline < -2)
314 gmx_fatal(FARGS, "Invalid nsteps value passed on the command line: %" GMX_PRId64,
315 nsteps_cmdline);
317 /* Do nothing if nsteps_cmdline == -2 */
320 namespace gmx
323 //! Halt the run if there are inconsistencies between the user's choices to run with GPUs and the detected hardware.
324 static void exitIfCannotForceGpuRun(bool requirePhysicalGpu,
325 EmulateGpuNonbonded emulateGpuNonbonded,
326 bool useVerletScheme,
327 bool compatibleGpusFound)
329 /* Was GPU acceleration either explicitly (-nb gpu) or implicitly
330 * (gpu ID passed) requested? */
331 if (!requirePhysicalGpu)
333 return;
336 if (GMX_GPU == GMX_GPU_NONE)
338 gmx_fatal(FARGS, "GPU acceleration requested, but %s was compiled without GPU support!",
339 gmx::getProgramContext().displayName());
342 if (emulateGpuNonbonded == EmulateGpuNonbonded::Yes)
344 gmx_fatal(FARGS, "GPU emulation cannot be requested together with GPU acceleration!");
347 if (!useVerletScheme)
349 gmx_fatal(FARGS, "GPU acceleration requested, but can't be used without cutoff-scheme=Verlet");
352 if (!compatibleGpusFound)
354 gmx_fatal(FARGS, "GPU acceleration requested, but no compatible GPUs were detected.");
358 /*! \brief Return whether GPU acceleration is useful with the given settings.
360 * If not, logs a message about falling back to CPU code. */
361 static bool gpuAccelerationIsUseful(const MDLogger &mdlog,
362 const t_inputrec *ir,
363 bool doRerun)
365 if (doRerun && ir->opts.ngener > 1)
367 /* Rerun execution time is dominated by I/O and pair search,
368 * so GPUs are not very useful, plus they do not support more
369 * than one energy group. If the user requested GPUs
370 * explicitly, a fatal error is given later. With non-reruns,
371 * we fall back to a single whole-of-system energy group
372 * (which runs much faster than a multiple-energy-groups
373 * implementation would), and issue a note in the .log
374 * file. Users can re-run if they want the information. */
375 GMX_LOG(mdlog.warning).asParagraph().appendText("Multiple energy groups are not implemented for GPUs, so a GPU is not useful for this rerun; falling back to the CPU");
376 return false;
379 return true;
382 //! \brief Return the correct integrator function.
383 static integrator_t *my_integrator(unsigned int ei)
385 switch (ei)
387 case eiMD:
388 case eiBD:
389 case eiSD1:
390 case eiVV:
391 case eiVVAK:
392 if (!EI_DYNAMICS(ei))
394 GMX_THROW(APIError("do_md integrator would be called for a non-dynamical integrator"));
396 return do_md;
397 case eiSteep:
398 return do_steep;
399 case eiCG:
400 return do_cg;
401 case eiNM:
402 return do_nm;
403 case eiLBFGS:
404 return do_lbfgs;
405 case eiTPI:
406 case eiTPIC:
407 if (!EI_TPI(ei))
409 GMX_THROW(APIError("do_tpi integrator would be called for a non-TPI integrator"));
411 return do_tpi;
412 case eiSD2_REMOVED:
413 GMX_THROW(NotImplementedError("SD2 integrator has been removed"));
414 default:
415 GMX_THROW(APIError("Non-existent integrator selected"));
419 //! Initializes the logger for mdrun.
420 static gmx::LoggerOwner buildLogger(FILE *fplog, const t_commrec *cr)
422 gmx::LoggerBuilder builder;
423 if (fplog != nullptr)
425 builder.addTargetFile(gmx::MDLogger::LogLevel::Info, fplog);
427 if (cr == nullptr || SIMMASTER(cr))
429 builder.addTargetStream(gmx::MDLogger::LogLevel::Warning,
430 &gmx::TextOutputFile::standardError());
432 return builder.build();
435 int Mdrunner::mdrunner()
437 matrix box;
438 gmx_ddbox_t ddbox = {0};
439 int npme_major, npme_minor;
440 t_nrnb *nrnb;
441 gmx_mtop_t *mtop = nullptr;
442 t_mdatoms *mdatoms = nullptr;
443 t_forcerec *fr = nullptr;
444 t_fcdata *fcd = nullptr;
445 real ewaldcoeff_q = 0;
446 real ewaldcoeff_lj = 0;
447 struct gmx_pme_t **pmedata = nullptr;
448 gmx_vsite_t *vsite = nullptr;
449 gmx_constr_t constr;
450 int nChargePerturbed = -1, nTypePerturbed = 0, status;
451 gmx_wallcycle_t wcycle;
452 gmx_walltime_accounting_t walltime_accounting = nullptr;
453 int rc;
454 gmx_int64_t reset_counters;
455 int nthreads_pme = 1;
456 gmx_membed_t * membed = nullptr;
457 gmx_hw_info_t *hwinfo = nullptr;
459 /* CAUTION: threads may be started later on in this function, so
460 cr doesn't reflect the final parallel state right now */
461 gmx::MDModules mdModules;
462 t_inputrec inputrecInstance;
463 t_inputrec *inputrec = &inputrecInstance;
464 snew(mtop, 1);
466 if (mdrunOptions.continuationOptions.appendFiles)
468 fplog = nullptr;
471 bool doMembed = opt2bSet("-membed", nfile, fnm);
472 bool doRerun = mdrunOptions.rerun;
474 /* Handle GPU-related user options. Later, we check consistency
475 * with things like whether support is compiled, or tMPI thread
476 * count. */
477 EmulateGpuNonbonded emulateGpuNonbonded = (getenv("GMX_EMULATE_GPU") != nullptr ?
478 EmulateGpuNonbonded::Yes : EmulateGpuNonbonded::No);
479 bool forceUseCpu = (strncmp(nbpu_opt, "cpu", 3) == 0);
480 if (!hw_opt.gpuIdTaskAssignment.empty() && forceUseCpu)
482 gmx_fatal(FARGS, "GPU IDs were specified, and short-ranged interactions were assigned to the CPU. Make no more than one of these choices.");
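/* "force" means the user explicitly asked for a GPU (-nb gpu or an explicit GPU ID
 * list); "try" means automatic selection remains possible (-nb auto, no IDs, no
 * GPU emulation requested). */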
484 bool forceUsePhysicalGpu = (strncmp(nbpu_opt, "gpu", 3) == 0) || !hw_opt.gpuIdTaskAssignment.empty();
485 bool tryUsePhysicalGpu = (strncmp(nbpu_opt, "auto", 4) == 0) && hw_opt.gpuIdTaskAssignment.empty() && (emulateGpuNonbonded == EmulateGpuNonbonded::No);
486 GMX_RELEASE_ASSERT(!(forceUsePhysicalGpu && tryUsePhysicalGpu), "Must either force use of "
487 "GPUs for short-ranged interactions, or try to use them, not both.");
489 // Here we assume that SIMMASTER(cr) does not change even after the
490 // threads are started.
491 gmx::LoggerOwner logOwner(buildLogger(fplog, cr));
492 gmx::MDLogger mdlog(logOwner.logger());
494 hwinfo = gmx_detect_hardware(mdlog, cr);
496 gmx_print_detected_hardware(fplog, cr, mdlog, hwinfo);
498 if (fplog != nullptr)
500 /* Print references after all software/hardware printing */
501 please_cite(fplog, "Abraham2015");
502 please_cite(fplog, "Pall2015");
503 please_cite(fplog, "Pronk2013");
504 please_cite(fplog, "Hess2008b");
505 please_cite(fplog, "Spoel2005a");
506 please_cite(fplog, "Lindahl2001a");
507 please_cite(fplog, "Berendsen95a");
510 std::unique_ptr<t_state> globalState;
512 if (SIMMASTER(cr))
514 /* Only the master rank has the global state */
515 globalState = std::unique_ptr<t_state>(new t_state);
517 /* Read (nearly) all data required for the simulation */
518 read_tpx_state(ftp2fn(efTPR, nfile, fnm), inputrec, globalState.get(), mtop);
520 exitIfCannotForceGpuRun(forceUsePhysicalGpu,
521 emulateGpuNonbonded,
522 inputrec->cutoff_scheme == ecutsVERLET,
523 compatibleGpusFound(hwinfo->gpu_info));
525 if (inputrec->cutoff_scheme == ecutsVERLET)
527 /* TODO This logic could run later, e.g. before -npme -1
528 is handled. If inputrec has already been communicated,
529 then the resulting tryUsePhysicalGpu does not need to
530 be communicated. */
531 if ((tryUsePhysicalGpu || forceUsePhysicalGpu) &&
532 !gpuAccelerationIsUseful(mdlog, inputrec, doRerun))
534 /* Fallback message printed by nbnxn_acceleration_supported */
535 if (forceUsePhysicalGpu)
537 gmx_fatal(FARGS, "GPU acceleration requested, but not supported with the given input settings");
539 tryUsePhysicalGpu = false;
542 else
544 if (nstlist_cmdline > 0)
546 gmx_fatal(FARGS, "Can not set nstlist with the group cut-off scheme");
549 if (compatibleGpusFound(hwinfo->gpu_info))
551 GMX_LOG(mdlog.warning).asParagraph().appendText(
552 "NOTE: GPU(s) found, but the current simulation can not use GPUs\n"
553 " To use a GPU, set the mdp option: cutoff-scheme = Verlet");
555 tryUsePhysicalGpu = false;
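/* Final decision on offloading short-ranged nonbonded work: a GPU was requested
 * or allowed above, and at least one compatible device was detected. */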
558 bool nonbondedOnGpu = (tryUsePhysicalGpu || forceUsePhysicalGpu) && compatibleGpusFound(hwinfo->gpu_info);
560 /* Check and update the hardware options for internal consistency */
561 check_and_update_hw_opt_1(&hw_opt, cr, domdecOptions.numPmeRanks);
563 /* Early check for externally set process affinity. */
564 gmx_check_thread_affinity_set(mdlog, cr,
565 &hw_opt, hwinfo->nthreads_hw_avail, FALSE);
567 #if GMX_THREAD_MPI
568 if (SIMMASTER(cr))
570 if (domdecOptions.numPmeRanks > 0 && hw_opt.nthreads_tmpi <= 0)
572 gmx_fatal(FARGS, "You need to explicitly specify the number of MPI threads (-ntmpi) when using separate PME ranks");
575 /* Since the master knows the cut-off scheme, update hw_opt for this.
576 * This is done later for normal MPI and also once more with tMPI
577 * for all tMPI ranks.
579 check_and_update_hw_opt_2(&hw_opt, inputrec->cutoff_scheme);
581 /* Determine how many thread-MPI ranks to start.
583 * TODO Over-writing the user-supplied value here does
584 * prevent any possible subsequent checks from working
585 * correctly. */
586 hw_opt.nthreads_tmpi = get_nthreads_mpi(hwinfo,
587 &hw_opt,
588 domdecOptions.numPmeRanks,
589 nonbondedOnGpu,
590 inputrec, mtop,
591 mdlog,
592 doMembed);
594 // Now start the threads for thread MPI.
595 cr = spawnThreads(hw_opt.nthreads_tmpi);
596 /* The main thread continues here with a new cr. We don't deallocate
597 the old cr because other threads may still be reading it. */
598 // TODO Both master and spawned threads call dup_tfn and
599 // reinitialize_commrec_for_this_thread. Find a way to express
600 // this better.
602 #endif
603 /* END OF CAUTION: cr is now reliable */
605 if (PAR(cr))
607 /* now broadcast everything to the non-master nodes/threads: */
608 init_parallel(cr, inputrec, mtop);
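/* The GPU decision was made on the master rank; broadcast it so that every rank
 * in the simulation uses the same setting. */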
610 gmx_bcast_sim(sizeof(nonbondedOnGpu), &nonbondedOnGpu, cr);
612 // TODO: Error handling
613 mdModules.assignOptionsToModules(*inputrec->params, nullptr);
615 if (fplog != nullptr)
617 pr_inputrec(fplog, 0, "Input Parameters", inputrec, FALSE);
618 fprintf(fplog, "\n");
621 if (SIMMASTER(cr))
623 /* now make sure the state is initialized and propagated */
624 set_state_entries(globalState.get(), inputrec);
627 /* NM and TPI parallelize over force/energy calculations, not atoms,
628 * so we need to initialize and broadcast the global state.
630 if (inputrec->eI == eiNM || inputrec->eI == eiTPI)
632 if (!MASTER(cr))
634 globalState = std::unique_ptr<t_state>(new t_state);
636 broadcastStateWithoutDynamics(cr, globalState.get());
639 /* A parallel command line option consistency check that we can
640 only do after any threads have started. */
641 if (!PAR(cr) && (domdecOptions.numCells[XX] > 1 ||
642 domdecOptions.numCells[YY] > 1 ||
643 domdecOptions.numCells[ZZ] > 1 ||
644 domdecOptions.numPmeRanks > 0))
646 gmx_fatal(FARGS,
647 "The -dd or -npme option request a parallel simulation, "
648 #if !GMX_MPI
649 "but %s was compiled without threads or MPI enabled"
650 #else
651 #if GMX_THREAD_MPI
652 "but the number of MPI-threads (option -ntmpi) is not set or is 1"
653 #else
654 "but %s was not started through mpirun/mpiexec or only one rank was requested through mpirun/mpiexec"
655 #endif
656 #endif
657 , output_env_get_program_display_name(oenv)
661 if (doRerun &&
662 (EI_ENERGY_MINIMIZATION(inputrec->eI) || eiNM == inputrec->eI))
664 gmx_fatal(FARGS, "The .mdp file specified an energy minimization or normal mode algorithm, and these are not compatible with mdrun -rerun");
667 if (can_use_allvsall(inputrec, TRUE, cr, fplog) && DOMAINDECOMP(cr))
669 gmx_fatal(FARGS, "All-vs-all loops do not work with domain decomposition, use a single MPI rank");
672 if (!(EEL_PME(inputrec->coulombtype) || EVDW_PME(inputrec->vdwtype)))
674 if (domdecOptions.numPmeRanks > 0)
676 gmx_fatal_collective(FARGS, cr->mpi_comm_mysim, MASTER(cr),
677 "PME-only ranks are requested, but the system does not use PME for electrostatics or LJ");
680 domdecOptions.numPmeRanks = 0;
683 if (nonbondedOnGpu && domdecOptions.numPmeRanks < 0)
685 /* With GPUs we don't automatically use PME-only ranks. PME ranks can
686 * improve performance with many threads per GPU, since our OpenMP
687 * scaling is bad, but it's difficult to automate the setup.
689 domdecOptions.numPmeRanks = 0;
692 #ifdef GMX_FAHCORE
693 if (MASTER(cr))
695 fcRegisterSteps(inputrec->nsteps, inputrec->init_step);
697 #endif
699 /* NMR restraints must be initialized before load_checkpoint,
700 * since with time averaging the history is added to t_state.
701 * For a proper consistency check we therefore need to extend
702 * t_state here.
703 * So the PME-only nodes (if present) will also initialize
704 * the distance restraints.
706 snew(fcd, 1);
708 /* This needs to be called before read_checkpoint to extend the state */
709 init_disres(fplog, mtop, inputrec, cr, fcd, globalState.get(), replExParams.exchangeInterval > 0);
711 init_orires(fplog, mtop, inputrec, cr, globalState.get(), &(fcd->orires));
713 if (inputrecDeform(inputrec))
715 /* Store the deform reference box before reading the checkpoint */
716 if (SIMMASTER(cr))
718 copy_mat(globalState->box, box);
720 if (PAR(cr))
722 gmx_bcast(sizeof(box), box, cr);
724 /* Because we do not have the update struct available yet
725 * in which the reference values should be stored,
726 * we store them temporarily in static variables.
727 * This should be thread safe, since they are only written once
728 * and with identical values.
730 tMPI_Thread_mutex_lock(&deform_init_box_mutex);
731 deform_init_init_step_tpx = inputrec->init_step;
732 copy_mat(box, deform_init_box_tpx);
733 tMPI_Thread_mutex_unlock(&deform_init_box_mutex);
736 ObservablesHistory observablesHistory = {};
738 ContinuationOptions &continuationOptions = mdrunOptions.continuationOptions;
740 if (continuationOptions.startedFromCheckpoint)
742 /* Check if checkpoint file exists before doing continuation.
743 * This way we can use identical input options for the first and subsequent runs...
745 gmx_bool bReadEkin;
747 load_checkpoint(opt2fn_master("-cpi", nfile, fnm, cr), &fplog,
748 cr, domdecOptions.numCells,
749 inputrec, globalState.get(),
750 &bReadEkin, &observablesHistory,
751 continuationOptions.appendFiles,
752 continuationOptions.appendFilesOptionSet,
753 mdrunOptions.reproducible);
755 if (bReadEkin)
757 continuationOptions.haveReadEkin = true;
761 if (SIMMASTER(cr) && continuationOptions.appendFiles)
763 gmx_log_open(ftp2fn(efLOG, nfile, fnm), cr,
764 continuationOptions.appendFiles, &fplog);
765 logOwner = buildLogger(fplog, nullptr);
766 mdlog = logOwner.logger();
769 /* override nsteps with the value set on the command line */
770 override_nsteps_cmdline(mdlog, mdrunOptions.numStepsCommandline, inputrec);
772 if (SIMMASTER(cr))
774 copy_mat(globalState->box, box);
777 if (PAR(cr))
779 gmx_bcast(sizeof(box), box, cr);
782 /* Update rlist and nstlist. */
783 if (inputrec->cutoff_scheme == ecutsVERLET)
785 prepare_verlet_scheme(fplog, cr, inputrec, nstlist_cmdline, mtop, box,
786 nonbondedOnGpu || (emulateGpuNonbonded == EmulateGpuNonbonded::Yes), *hwinfo->cpuInfo);
789 if (PAR(cr) && !(EI_TPI(inputrec->eI) ||
790 inputrec->eI == eiNM))
792 const rvec *xOnMaster = (SIMMASTER(cr) ? as_rvec_array(globalState->x.data()) : nullptr);
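/* Besides setting up the domain decomposition grid, this call also returns the
 * PME grid decomposition in npme_major/npme_minor. */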
794 cr->dd = init_domain_decomposition(fplog, cr, domdecOptions, mdrunOptions,
795 mtop, inputrec,
796 box, xOnMaster,
797 &ddbox, &npme_major, &npme_minor);
798 // Note that local state still does not exist yet.
800 else
802 /* PME, if used, is done on all nodes with 1D decomposition */
803 cr->npmenodes = 0;
804 cr->duty = (DUTY_PP | DUTY_PME);
805 npme_major = 1;
806 npme_minor = 1;
808 if (inputrec->ePBC == epbcSCREW)
810 gmx_fatal(FARGS,
811 "pbc=%s is only implemented with domain decomposition",
812 epbc_names[inputrec->ePBC]);
816 if (PAR(cr))
818 /* After possible communicator splitting in make_dd_communicators,
819 * we can set up the intra/inter node communication.
821 gmx_setup_nodecomm(fplog, cr);
824 /* Initialize per-physical-node MPI process/thread ID and counters. */
825 gmx_init_intranode_counters(cr);
826 #if GMX_MPI
827 if (MULTISIM(cr))
829 GMX_LOG(mdlog.warning).asParagraph().appendTextFormatted(
830 "This is simulation %d out of %d running as a composite GROMACS\n"
831 "multi-simulation job. Setup for this simulation:\n",
832 cr->ms->sim, cr->ms->nsim);
834 GMX_LOG(mdlog.warning).appendTextFormatted(
835 "Using %d MPI %s\n",
836 cr->nnodes,
837 #if GMX_THREAD_MPI
838 cr->nnodes == 1 ? "thread" : "threads"
839 #else
840 cr->nnodes == 1 ? "process" : "processes"
841 #endif
843 fflush(stderr);
844 #endif
846 /* Check and update hw_opt for the cut-off scheme */
847 check_and_update_hw_opt_2(&hw_opt, inputrec->cutoff_scheme);
849 /* Check and update hw_opt for the number of MPI ranks */
850 check_and_update_hw_opt_3(&hw_opt);
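/* Decide how many OpenMP threads each module (nonbonded/PP, PME, default) will
 * use on this rank, given the detected hardware and the user's -ntomp/-ntomp_pme
 * choices. */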
852 gmx_omp_nthreads_init(mdlog, cr,
853 hwinfo->nthreads_hw_avail,
854 hw_opt.nthreads_omp,
855 hw_opt.nthreads_omp_pme,
856 (cr->duty & DUTY_PP) == 0,
857 inputrec->cutoff_scheme == ecutsVERLET);
859 #ifndef NDEBUG
860 if (EI_TPI(inputrec->eI) &&
861 inputrec->cutoff_scheme == ecutsVERLET)
863 gmx_feenableexcept();
865 #endif
867 // Contains the ID of the GPU used by each PP rank on this node,
868 // indexed by that rank. Empty if no GPUs are selected for use on
869 // this node.
870 std::vector<int> gpuTaskAssignment;
871 if (nonbondedOnGpu)
873 /* Currently the DD code assigns duty to ranks that can
874 * include PP work that currently can be executed on a single
875 * GPU, if present and compatible. This has to be coordinated
876 * across PP ranks on a node, with possible multiple devices
877 * or sharing devices on a node, either from the user
878 * selection, or automatically. */
879 bool rankCanUseGpu = cr->duty & DUTY_PP;
880 gpuTaskAssignment = mapPpRanksToGpus(rankCanUseGpu, cr, hwinfo->gpu_info, hw_opt);
883 reportGpuUsage(mdlog, hwinfo->gpu_info, !hw_opt.gpuIdTaskAssignment.empty(),
884 gpuTaskAssignment, cr->nrank_pp_intranode, cr->nnodes > 1);
886 if (!gpuTaskAssignment.empty())
888 GMX_RELEASE_ASSERT(cr->nrank_pp_intranode == static_cast<int>(gpuTaskAssignment.size()),
889 "The number of PP ranks on each node must equal the number of GPU tasks used on each node");
892 /* Prevent other ranks from continuing after an issue was found
893 * and reported as a fatal error.
895 * TODO This function implements a barrier so that MPI runtimes
896 * can organize an orderly shutdown if one of the ranks has had to
897 * issue a fatal error in various code already run. When we have
898 * MPI-aware error handling and reporting, this should be
899 * improved. */
900 #if GMX_MPI
901 if (PAR(cr))
903 MPI_Barrier(cr->mpi_comm_mysim);
905 #endif
907 /* Now that we know the setup is consistent, check for efficiency */
908 check_resource_division_efficiency(hwinfo, hw_opt.nthreads_tot, !gpuTaskAssignment.empty(), mdrunOptions.ntompOptionIsSet,
909 cr, mdlog);
911 gmx_device_info_t *shortRangedDeviceInfo = nullptr;
912 int shortRangedDeviceId = -1;
913 if (cr->duty & DUTY_PP)
915 if (!gpuTaskAssignment.empty())
917 shortRangedDeviceId = gpuTaskAssignment[cr->rank_pp_intranode];
918 shortRangedDeviceInfo = getDeviceInfo(hwinfo->gpu_info, shortRangedDeviceId);
922 if (DOMAINDECOMP(cr))
924 /* When we share GPUs over ranks, we need to know this for the DLB */
925 dd_setup_dlb_resource_sharing(cr, shortRangedDeviceId);
928 /* Get the number of PP/PME threads.
929 The PME env variable should be read only on one node to make sure it is
930 identical everywhere.
932 nthreads_pme = gmx_omp_nthreads_get(emntPME);
934 wcycle = wallcycle_init(fplog, mdrunOptions.timingOptions.resetStep, cr);
936 if (PAR(cr))
938 /* Master synchronizes its value of reset_counters with all nodes
939 * including PME only nodes */
940 reset_counters = wcycle_get_reset_counters(wcycle);
941 gmx_bcast_sim(sizeof(reset_counters), &reset_counters, cr);
942 wcycle_set_reset_counters(wcycle, reset_counters);
945 // Membrane embedding must be initialized before we call init_forcerec()
946 if (doMembed)
948 if (MASTER(cr))
950 fprintf(stderr, "Initializing membed");
952 /* Note that membed cannot work in parallel because mtop is
953 * changed here. Fix this if we ever want to make it run with
954 * multiple ranks. */
955 membed = init_membed(fplog, nfile, fnm, mtop, inputrec, globalState.get(), cr, &mdrunOptions.checkpointOptions.period);
958 snew(nrnb, 1);
959 if (cr->duty & DUTY_PP)
961 /* Initiate forcerecord */
962 fr = mk_forcerec();
963 fr->forceProviders = mdModules.initForceProviders();
964 init_forcerec(fplog, mdlog, fr, fcd,
965 inputrec, mtop, cr, box,
966 opt2fn("-table", nfile, fnm),
967 opt2fn("-tablep", nfile, fnm),
968 getFilenm("-tableb", nfile, fnm),
969 shortRangedDeviceInfo,
970 FALSE,
971 pforce);
973 /* Initialize QM-MM */
974 if (fr->bQMMM)
976 init_QMMMrec(cr, mtop, inputrec, fr);
979 /* Initialize the mdatoms structure.
980 * mdatoms is not filled with atom data,
981 * as this can not be done now with domain decomposition.
983 mdatoms = init_mdatoms(fplog, *mtop, *inputrec);
985 /* Initialize the virtual site communication */
986 vsite = init_vsite(mtop, cr, FALSE);
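/* Precompute the periodic-image shift vectors for the current box. */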
988 calc_shifts(box, fr->shift_vec);
990 /* With periodic molecules the charge groups should be whole at start up
991 * and the virtual sites should not be far from their proper positions.
993 if (!inputrec->bContinuation && MASTER(cr) &&
994 !(inputrec->ePBC != epbcNONE && inputrec->bPeriodicMols))
996 rvec *xGlobal = as_rvec_array(globalState->x.data());
998 /* Make molecules whole at start of run */
999 if (fr->ePBC != epbcNONE)
1001 do_pbc_first_mtop(fplog, inputrec->ePBC, box, mtop, xGlobal);
1003 if (vsite)
1005 /* Correct initial vsite positions are required
1006 * for the initial distribution in the domain decomposition
1007 * and for the initial shell prediction.
1009 construct_vsites_mtop(vsite, mtop, xGlobal);
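/* On ranks that do PP work, the Ewald splitting coefficients have already been
 * derived by init_forcerec and are taken from the interaction constants here. */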
1013 if (EEL_PME(fr->ic->eeltype) || EVDW_PME(fr->ic->vdwtype))
1015 ewaldcoeff_q = fr->ic->ewaldcoeff_q;
1016 ewaldcoeff_lj = fr->ic->ewaldcoeff_lj;
1017 pmedata = &fr->pmedata;
1019 else
1021 pmedata = nullptr;
1024 else
1026 /* This is a PME only node */
1028 GMX_ASSERT(globalState == nullptr, "We don't need the state on a PME-only rank and expect it to be uninitialized");
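/* A PME-only rank has no forcerec, so the Ewald splitting coefficients are
 * computed here directly from the input record; they should agree with the
 * values on the PP ranks, since both derive from the same rcoulomb/rvdw and
 * Ewald tolerances. */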
1030 ewaldcoeff_q = calc_ewaldcoeff_q(inputrec->rcoulomb, inputrec->ewald_rtol);
1031 ewaldcoeff_lj = calc_ewaldcoeff_lj(inputrec->rvdw, inputrec->ewald_rtol_lj);
1032 snew(pmedata, 1);
1035 if (hw_opt.thread_affinity != threadaffOFF)
1037 /* Before setting affinity, check whether the affinity has changed
1038 * - which probably indicates that the OpenMP library has changed it
1039 * since we first checked.
1041 gmx_check_thread_affinity_set(mdlog, cr,
1042 &hw_opt, hwinfo->nthreads_hw_avail, TRUE);
1044 int nthread_local;
1045 /* threads on this MPI process or TMPI thread */
1046 if (cr->duty & DUTY_PP)
1048 nthread_local = gmx_omp_nthreads_get(emntNonbonded);
1050 else
1052 nthread_local = gmx_omp_nthreads_get(emntPME);
1055 /* Set the CPU affinity */
1056 gmx_set_thread_affinity(mdlog, cr, &hw_opt, *hwinfo->hardwareTopology,
1057 nthread_local, nullptr);
1060 /* Initiate PME if necessary,
1061 * either on all nodes or on dedicated PME nodes only. */
1062 if (EEL_PME(inputrec->coulombtype) || EVDW_PME(inputrec->vdwtype))
1064 if (mdatoms)
1066 nChargePerturbed = mdatoms->nChargePerturbed;
1067 if (EVDW_PME(inputrec->vdwtype))
1069 nTypePerturbed = mdatoms->nTypePerturbed;
1072 if (cr->npmenodes > 0)
1074 /* The PME-only nodes need to know nChargePerturbed (FEP on Q) and nTypePerturbed (FEP on LJ) */
1075 gmx_bcast_sim(sizeof(nChargePerturbed), &nChargePerturbed, cr);
1076 gmx_bcast_sim(sizeof(nTypePerturbed), &nTypePerturbed, cr);
1079 if (cr->duty & DUTY_PME)
1083 gmx_device_info_t *pmeGpuInfo = nullptr;
1084 auto runMode = PmeRunMode::CPU;
1085 status = gmx_pme_init(pmedata, cr, npme_major, npme_minor, inputrec,
1086 mtop ? mtop->natoms : 0, nChargePerturbed, nTypePerturbed,
1087 mdrunOptions.reproducible,
1088 ewaldcoeff_q, ewaldcoeff_lj,
1089 nthreads_pme,
1090 runMode, nullptr, pmeGpuInfo, mdlog);
1092 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
1093 if (status != 0)
1095 gmx_fatal(FARGS, "Error %d initializing PME", status);
1101 if (EI_DYNAMICS(inputrec->eI))
1103 /* Turn on signal handling on all nodes.
1105 * A user signal from the PME nodes (if any)
1106 * is communicated to the PP nodes.
1108 signal_handler_install();
1111 if (cr->duty & DUTY_PP)
1113 /* Assumes uniform use of the number of OpenMP threads */
1114 walltime_accounting = walltime_accounting_init(gmx_omp_nthreads_get(emntDefault));
1116 if (inputrec->bPull)
1118 /* Initialize pull code */
1119 inputrec->pull_work =
1120 init_pull(fplog, inputrec->pull, inputrec, nfile, fnm,
1121 mtop, cr, oenv, inputrec->fepvals->init_lambda,
1122 EI_DYNAMICS(inputrec->eI) && MASTER(cr),
1123 continuationOptions);
1126 if (inputrec->bRot)
1128 /* Initialize enforced rotation code */
1129 init_rot(fplog, inputrec, nfile, fnm, cr, globalState.get(), mtop, oenv, mdrunOptions);
1132 /* Let init_constraints know whether we have essential dynamics constraints.
1133 * TODO: inputrec should tell us whether we use an algorithm, not a file option or the checkpoint
1135 bool doEdsam = (opt2fn_null("-ei", nfile, fnm) != nullptr || observablesHistory.edsamHistory);
1137 constr = init_constraints(fplog, mtop, inputrec, doEdsam, cr);
1139 if (DOMAINDECOMP(cr))
1141 GMX_RELEASE_ASSERT(fr, "fr was NULL while cr->duty was DUTY_PP");
1142 /* This call is not included in init_domain_decomposition mainly
1143 * because fr->cginfo_mb is set later.
1145 dd_init_bondeds(fplog, cr->dd, mtop, vsite, inputrec,
1146 domdecOptions.checkBondedInteractions,
1147 fr->cginfo_mb);
1150 /* Now do whatever the user wants us to do (how flexible...) */
1151 my_integrator(inputrec->eI) (fplog, cr, mdlog, nfile, fnm,
1152 oenv,
1153 mdrunOptions,
1154 vsite, constr,
1155 mdModules.outputProvider(),
1156 inputrec, mtop,
1157 fcd,
1158 globalState.get(),
1159 &observablesHistory,
1160 mdatoms, nrnb, wcycle, fr,
1161 replExParams,
1162 membed,
1163 walltime_accounting);
1165 if (inputrec->bRot)
1167 finish_rot(inputrec->rot);
1170 if (inputrec->bPull)
1172 finish_pull(inputrec->pull_work);
1176 else
1178 GMX_RELEASE_ASSERT(pmedata, "pmedata was NULL while cr->duty was not DUTY_PP");
1179 /* do PME only */
1180 walltime_accounting = walltime_accounting_init(gmx_omp_nthreads_get(emntPME));
1181 gmx_pmeonly(*pmedata, cr, nrnb, wcycle, walltime_accounting, inputrec);
1184 wallcycle_stop(wcycle, ewcRUN);
1186 /* Finish up, write some stuff
1187 * if rerunMD, don't write last frame again
1189 finish_run(fplog, mdlog, cr,
1190 inputrec, nrnb, wcycle, walltime_accounting,
1191 fr ? fr->nbv : nullptr,
1192 fr ? fr->pmedata : nullptr,
1193 EI_DYNAMICS(inputrec->eI) && !MULTISIM(cr));
1195 // Free PME data
1196 if (pmedata)
1198 gmx_pme_destroy(*pmedata); // TODO: pmedata is always a single element list, refactor
1199 pmedata = nullptr;
1202 /* Free GPU memory and context */
1203 free_gpu_resources(fr, cr, shortRangedDeviceInfo);
1205 if (doMembed)
1207 free_membed(membed);
1210 gmx_hardware_info_free(hwinfo);
1212 /* Does what it says */
1213 print_date_and_time(fplog, cr->nodeid, "Finished mdrun", gmx_gettime());
1214 walltime_accounting_destroy(walltime_accounting);
1216 /* Close logfile already here if we were appending to it */
1217 if (MASTER(cr) && continuationOptions.appendFiles)
1219 gmx_log_close(fplog);
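/* The process exit code reflects the stop condition that ended the run
 * (0 when no stop signal was received). */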
1222 rc = (int)gmx_get_stop_condition();
1224 #if GMX_THREAD_MPI
1225 /* we need to join all threads. The sub-threads join when they
1226 exit this function, but the master thread needs to be told to
1227 wait for that. */
1228 if (PAR(cr) && MASTER(cr))
1230 tMPI_Finalize();
1232 #endif
1234 return rc;
1237 } // namespace gmx