Enable TPI with the Verlet cut-off scheme
[gromacs.git] / src / gromacs / mdrun / runner.cpp
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2011,2012,2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief Implements the MD runner routine calling all integrators.
 *
 * \author David van der Spoel <david.vanderspoel@icm.uu.se>
 * \ingroup module_mdrun
 */
#include "gmxpre.h"

#include "runner.h"

#include "config.h"

#include <cassert>
#include <cinttypes>
#include <csignal>
#include <cstdlib>
#include <cstring>

#include <algorithm>
#include <memory>

#include "gromacs/commandline/filenm.h"
#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/domdec/localatomsetmanager.h"
#include "gromacs/domdec/partition.h"
#include "gromacs/ewald/ewald_utils.h"
#include "gromacs/ewald/pme.h"
#include "gromacs/ewald/pme_gpu_program.h"
#include "gromacs/fileio/checkpoint.h"
#include "gromacs/fileio/gmxfio.h"
#include "gromacs/fileio/oenv.h"
#include "gromacs/fileio/tpxio.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/gpu_utils/gpu_utils.h"
#include "gromacs/hardware/cpuinfo.h"
#include "gromacs/hardware/detecthardware.h"
#include "gromacs/hardware/printhardware.h"
#include "gromacs/imd/imd.h"
#include "gromacs/listed_forces/disre.h"
#include "gromacs/listed_forces/gpubonded.h"
#include "gromacs/listed_forces/orires.h"
#include "gromacs/math/functions.h"
#include "gromacs/math/utilities.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdlib/boxdeformation.h"
#include "gromacs/mdlib/broadcaststructs.h"
#include "gromacs/mdlib/calc_verletbuf.h"
#include "gromacs/mdlib/dispersioncorrection.h"
#include "gromacs/mdlib/enerdata_utils.h"
#include "gromacs/mdlib/force.h"
#include "gromacs/mdlib/forcerec.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdlib/makeconstraints.h"
#include "gromacs/mdlib/md_support.h"
#include "gromacs/mdlib/mdatoms.h"
#include "gromacs/mdlib/membed.h"
#include "gromacs/mdlib/ppforceworkload.h"
#include "gromacs/mdlib/qmmm.h"
#include "gromacs/mdlib/sighandler.h"
#include "gromacs/mdlib/stophandler.h"
#include "gromacs/mdrun/mdmodules.h"
#include "gromacs/mdrun/simulationcontext.h"
#include "gromacs/mdrunutility/handlerestart.h"
#include "gromacs/mdrunutility/logging.h"
#include "gromacs/mdrunutility/multisim.h"
#include "gromacs/mdrunutility/printtime.h"
#include "gromacs/mdrunutility/threadaffinity.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/enerdata.h"
#include "gromacs/mdtypes/fcdata.h"
#include "gromacs/mdtypes/group.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/mdtypes/mdrunoptions.h"
#include "gromacs/mdtypes/observableshistory.h"
#include "gromacs/mdtypes/state.h"
#include "gromacs/nbnxm/gpu_data_mgmt.h"
#include "gromacs/nbnxm/nbnxm.h"
#include "gromacs/nbnxm/pairlist_tuning.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/pulling/output.h"
#include "gromacs/pulling/pull.h"
#include "gromacs/pulling/pull_rotation.h"
#include "gromacs/restraint/manager.h"
#include "gromacs/restraint/restraintmdmodule.h"
#include "gromacs/restraint/restraintpotential.h"
#include "gromacs/swap/swapcoords.h"
#include "gromacs/taskassignment/decidegpuusage.h"
#include "gromacs/taskassignment/resourcedivision.h"
#include "gromacs/taskassignment/taskassignment.h"
#include "gromacs/taskassignment/usergpuids.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/timing/wallcyclereporting.h"
#include "gromacs/topology/mtop_util.h"
#include "gromacs/trajectory/trajectoryframe.h"
#include "gromacs/utility/basenetwork.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/filestream.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/keyvaluetree.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/loggerbuilder.h"
#include "gromacs/utility/mdmodulenotification.h"
#include "gromacs/utility/physicalnodecommunicator.h"
#include "gromacs/utility/pleasecite.h"
#include "gromacs/utility/programcontext.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/utility/stringutil.h"

#include "isimulator.h"
#include "replicaexchange.h"
#include "simulatorbuilder.h"

#if GMX_FAHCORE
#include "corewrap.h"
#endif

namespace gmx
{

/*! \brief Log if development feature flags are encountered
 *
 * The use of dev features indicated by environment variables is logged
 * in order to ensure that runs with such features enabled can be identified
 * from their log and standard output.
 *
 * \param[in]  mdlog        Logger object.
 */
static void reportDevelopmentFeatures(const gmx::MDLogger &mdlog)
{
    const bool enableGpuBufOps       = (getenv("GMX_USE_GPU_BUFFER_OPS") != nullptr);
    const bool useGpuUpdateConstrain = (getenv("GMX_UPDATE_CONSTRAIN_GPU") != nullptr);

    if (enableGpuBufOps)
    {
        GMX_LOG(mdlog.warning).asParagraph().appendTextFormatted(
                "NOTE: This run uses the 'GPU buffer ops' feature, enabled by the GMX_USE_GPU_BUFFER_OPS environment variable.");
    }

    if (useGpuUpdateConstrain)
    {
        GMX_LOG(mdlog.warning).asParagraph().appendTextFormatted(
                "NOTE: This run uses the 'GPU update/constraints' feature, enabled by the GMX_UPDATE_CONSTRAIN_GPU environment variable.");
    }
}

/*! \brief Barrier for safe simultaneous thread access to mdrunner data
 *
 * Used to ensure that the master thread does not modify mdrunner during copy
 * on the spawned threads. */
static void threadMpiMdrunnerAccessBarrier()
{
#if GMX_THREAD_MPI
    MPI_Barrier(MPI_COMM_WORLD);
#endif
}

Mdrunner Mdrunner::cloneOnSpawnedThread() const
{
    auto newRunner = Mdrunner(std::make_unique<MDModules>());

    // All runners in the same process share a restraint manager resource because it is
    // part of the interface to the client code, which is associated only with the
    // original thread. Handles to the same resources can be obtained by copy.
    newRunner.restraintManager_ = std::make_unique<RestraintManager>(*restraintManager_);

    // Copy original cr pointer before master thread can pass the thread barrier
    newRunner.cr = reinitialize_commrec_for_this_thread(cr);

    // Copy members of master runner.
    // \todo Replace with builder when Simulation context and/or runner phases are better defined.
    // Ref https://redmine.gromacs.org/issues/2587 and https://redmine.gromacs.org/issues/2375
    newRunner.hw_opt    = hw_opt;
    newRunner.filenames = filenames;

    newRunner.oenv                = oenv;
    newRunner.mdrunOptions        = mdrunOptions;
    newRunner.domdecOptions       = domdecOptions;
    newRunner.nbpu_opt            = nbpu_opt;
    newRunner.pme_opt             = pme_opt;
    newRunner.pme_fft_opt         = pme_fft_opt;
    newRunner.bonded_opt          = bonded_opt;
    newRunner.nstlist_cmdline     = nstlist_cmdline;
    newRunner.replExParams        = replExParams;
    newRunner.pforce              = pforce;
    newRunner.ms                  = ms;
    newRunner.startingBehavior    = startingBehavior;
    newRunner.stopHandlerBuilder_ = std::make_unique<StopHandlerBuilder>(*stopHandlerBuilder_);

    threadMpiMdrunnerAccessBarrier();

    GMX_RELEASE_ASSERT(!MASTER(newRunner.cr), "cloneOnSpawnedThread should only be called on spawned threads");

    return newRunner;
}

/*! \brief The callback used for running on spawned threads.
 *
 * Obtains the pointer to the master mdrunner object from the one
 * argument permitted to the thread-launch API call, copies it to make
 * a new runner for this thread, reinitializes necessary data, and
 * proceeds to the simulation. */
static void mdrunner_start_fn(const void *arg)
{
    try
    {
        auto masterMdrunner = reinterpret_cast<const gmx::Mdrunner *>(arg);
        /* copy the arg list to make sure that it's thread-local. This
           doesn't copy pointed-to items, of course; fnm, cr and fplog
           are reset in the call below, all others should be const. */
        gmx::Mdrunner mdrunner = masterMdrunner->cloneOnSpawnedThread();
        mdrunner.mdrunner();
    }
    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
}

/*! \brief Start thread-MPI threads.
 *
 * Called by mdrunner() to start a specific number of threads
 * (including the main thread) for thread-parallel runs. This in turn
 * calls mdrunner() for each thread. All options are the same as for
 * mdrunner(). */
t_commrec *Mdrunner::spawnThreads(int numThreadsToLaunch) const
{
    /* first check whether we even need to start tMPI */
    if (numThreadsToLaunch < 2)
    {
        return cr;
    }

#if GMX_THREAD_MPI
    /* now spawn new threads that start mdrunner_start_fn(), while
       the main thread returns. Thread affinity is handled later. */
    if (tMPI_Init_fn(TRUE, numThreadsToLaunch, TMPI_AFFINITY_NONE,
                     mdrunner_start_fn, static_cast<const void*>(this)) != TMPI_SUCCESS)
    {
        GMX_THROW(gmx::InternalError("Failed to spawn thread-MPI threads"));
    }

    threadMpiMdrunnerAccessBarrier();
#else
    GMX_UNUSED_VALUE(mdrunner_start_fn);
#endif

    return reinitialize_commrec_for_this_thread(cr);
}

}      // namespace gmx
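
/* The file-scope helpers below tune the Verlet pair-list setup (rlist and
 * nstlist) for the current run and apply any -nsteps override given on the
 * command line. */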

/*! \brief Initialize variables for Verlet scheme simulation */
static void prepare_verlet_scheme(FILE               *fplog,
                                  t_commrec          *cr,
                                  t_inputrec         *ir,
                                  int                 nstlist_cmdline,
                                  const gmx_mtop_t   *mtop,
                                  const matrix        box,
                                  bool                makeGpuPairList,
                                  const gmx::CpuInfo &cpuinfo)
{
    /* For NVE simulations, we will retain the initial list buffer */
    if (EI_DYNAMICS(ir->eI) &&
        ir->verletbuf_tol > 0 &&
        !(EI_MD(ir->eI) && ir->etc == etcNO))
    {
        /* Update the Verlet buffer size for the current run setup */

        /* Here we assume SIMD-enabled kernels are being used. But as currently
         * calc_verlet_buffer_size gives the same results for 4x8 and 4x4
         * and 4x2 gives a larger buffer than 4x4, this is ok.
         */
        ListSetupType      listType  = (makeGpuPairList ? ListSetupType::Gpu : ListSetupType::CpuSimdWhenSupported);
        VerletbufListSetup listSetup = verletbufGetSafeListSetup(listType);

        const real rlist_new =
            calcVerletBufferSize(*mtop, det(box), *ir, ir->nstlist, ir->nstlist - 1, -1, listSetup);

        if (rlist_new != ir->rlist)
        {
            if (fplog != nullptr)
            {
                fprintf(fplog, "\nChanging rlist from %g to %g for non-bonded %dx%d atom kernels\n\n",
                        ir->rlist, rlist_new,
                        listSetup.cluster_size_i, listSetup.cluster_size_j);
            }
            ir->rlist = rlist_new;
        }
    }

    if (nstlist_cmdline > 0 && (!EI_DYNAMICS(ir->eI) || ir->verletbuf_tol <= 0))
    {
        gmx_fatal(FARGS, "Can not set nstlist without %s",
                  !EI_DYNAMICS(ir->eI) ? "dynamics" : "verlet-buffer-tolerance");
    }

    if (EI_DYNAMICS(ir->eI))
    {
        /* Set or try nstlist values */
        increaseNstlist(fplog, cr, ir, nstlist_cmdline, mtop, box, makeGpuPairList, cpuinfo);
    }
}

/*! \brief Override the nsteps value in inputrec
 *
 * with value passed on the command line (if any)
 */
static void override_nsteps_cmdline(const gmx::MDLogger &mdlog,
                                    int64_t              nsteps_cmdline,
                                    t_inputrec          *ir)
{
    assert(ir);

    /* override with anything else than the default -2 */
    if (nsteps_cmdline > -2)
    {
        char sbuf_steps[STEPSTRSIZE];
        char sbuf_msg[STRLEN];

        ir->nsteps = nsteps_cmdline;
        if (EI_DYNAMICS(ir->eI) && nsteps_cmdline != -1)
        {
            sprintf(sbuf_msg, "Overriding nsteps with value passed on the command line: %s steps, %.3g ps",
                    gmx_step_str(nsteps_cmdline, sbuf_steps),
                    fabs(nsteps_cmdline*ir->delta_t));
        }
        else
        {
            sprintf(sbuf_msg, "Overriding nsteps with value passed on the command line: %s steps",
                    gmx_step_str(nsteps_cmdline, sbuf_steps));
        }

        GMX_LOG(mdlog.warning).asParagraph().appendText(sbuf_msg);
    }
    else if (nsteps_cmdline < -2)
    {
        gmx_fatal(FARGS, "Invalid nsteps value passed on the command line: %" PRId64,
                  nsteps_cmdline);
    }
    /* Do nothing if nsteps_cmdline == -2 */
}

namespace gmx
{

/*! \brief Return whether GPU acceleration of nonbondeds is supported with the given settings.
 *
 * If not, and if a warning may be issued, logs a warning about
 * falling back to CPU code. With thread-MPI, only the first
 * call to this function should have \c issueWarning true. */
static bool gpuAccelerationOfNonbondedIsUseful(const MDLogger   &mdlog,
                                               const t_inputrec &ir,
                                               bool              issueWarning)
{
    bool        gpuIsUseful = true;
    std::string warning;

    if (ir.opts.ngener - ir.nwall > 1)
    {
        /* The GPU code does not support more than one energy group.
         * If the user requested GPUs explicitly, a fatal error is given later.
         */
        gpuIsUseful = false;
        warning     =
            "Multiple energy groups are not implemented for GPUs, falling back to the CPU. "
            "For better performance, run on the GPU without energy groups and then do "
            "gmx mdrun -rerun option on the trajectory with an energy group .tpr file.";
    }

    if (EI_TPI(ir.eI))
    {
        gpuIsUseful = false;
        warning     = "TPI is not implemented for GPUs.";
    }

    if (!gpuIsUseful && issueWarning)
    {
        GMX_LOG(mdlog.warning).asParagraph().appendText(warning);
    }

    return gpuIsUseful;
}

//! Initializes the logger for mdrun.
static gmx::LoggerOwner buildLogger(FILE *fplog, const t_commrec *cr)
{
    gmx::LoggerBuilder builder;
    if (fplog != nullptr)
    {
        builder.addTargetFile(gmx::MDLogger::LogLevel::Info, fplog);
    }
    if (cr == nullptr || SIMMASTER(cr))
    {
        builder.addTargetStream(gmx::MDLogger::LogLevel::Warning,
                                &gmx::TextOutputFile::standardError());
    }
    return builder.build();
}

//! Make a TaskTarget from an mdrun argument string.
static TaskTarget findTaskTarget(const char *optionString)
{
    TaskTarget returnValue = TaskTarget::Auto;

    // Only the first three characters are compared, which is enough to
    // distinguish the valid values "auto", "cpu" and "gpu".
    if (strncmp(optionString, "auto", 3) == 0)
    {
        returnValue = TaskTarget::Auto;
    }
    else if (strncmp(optionString, "cpu", 3) == 0)
    {
        returnValue = TaskTarget::Cpu;
    }
    else if (strncmp(optionString, "gpu", 3) == 0)
    {
        returnValue = TaskTarget::Gpu;
    }
    else
    {
        GMX_ASSERT(false, "Option string should have been checked for sanity already");
    }

    return returnValue;
}

//! Finish run, aggregate data to print performance info.
static void finish_run(FILE *fplog,
                       const gmx::MDLogger &mdlog,
                       const t_commrec *cr,
                       const t_inputrec *inputrec,
                       t_nrnb nrnb[], gmx_wallcycle_t wcycle,
                       gmx_walltime_accounting_t walltime_accounting,
                       nonbonded_verlet_t *nbv,
                       const gmx_pme_t *pme,
                       gmx_bool bWriteStat)
{
    double delta_t = 0;
    double nbfs    = 0, mflop = 0;
    double elapsed_time,
           elapsed_time_over_all_ranks,
           elapsed_time_over_all_threads,
           elapsed_time_over_all_threads_over_all_ranks;
    /* Control whether it is valid to print a report. Only the
       simulation master may print, but it should not do so if the run
       terminated e.g. before a scheduled reset step. This is
       complicated by the fact that PME ranks are unaware of the
       reason why they were sent a pmerecvqxFINISH. To avoid
       communication deadlocks, we always do the communication for the
       report, even if we've decided not to write the report, because
       how long it takes to finish the run is not important when we've
       decided not to report on the simulation performance.

       Further, we only report performance for dynamical integrators,
       because those are the only ones for which we plan to
       consider doing any optimizations. */
    bool printReport = EI_DYNAMICS(inputrec->eI) && SIMMASTER(cr);

    if (printReport && !walltime_accounting_get_valid_finish(walltime_accounting))
    {
        GMX_LOG(mdlog.warning).asParagraph().appendText("Simulation ended prematurely, no performance report will be written.");
        printReport = false;
    }

    t_nrnb                  *nrnb_tot;
    std::unique_ptr<t_nrnb>  nrnbTotalStorage;
    if (cr->nnodes > 1)
    {
        nrnbTotalStorage = std::make_unique<t_nrnb>();
        nrnb_tot         = nrnbTotalStorage.get();
#if GMX_MPI
        MPI_Allreduce(nrnb->n, nrnb_tot->n, eNRNB, MPI_DOUBLE, MPI_SUM,
                      cr->mpi_comm_mysim);
#endif
    }
    else
    {
        nrnb_tot = nrnb;
    }

    elapsed_time                  = walltime_accounting_get_time_since_reset(walltime_accounting);
    elapsed_time_over_all_threads = walltime_accounting_get_time_since_reset_over_all_threads(walltime_accounting);
    if (cr->nnodes > 1)
    {
#if GMX_MPI
        /* reduce elapsed_time over all MPI ranks in the current simulation */
        MPI_Allreduce(&elapsed_time,
                      &elapsed_time_over_all_ranks,
                      1, MPI_DOUBLE, MPI_SUM,
                      cr->mpi_comm_mysim);
        elapsed_time_over_all_ranks /= cr->nnodes;
        /* Reduce elapsed_time_over_all_threads over all MPI ranks in the
         * current simulation. */
        MPI_Allreduce(&elapsed_time_over_all_threads,
                      &elapsed_time_over_all_threads_over_all_ranks,
                      1, MPI_DOUBLE, MPI_SUM,
                      cr->mpi_comm_mysim);
#endif
    }
    else
    {
        elapsed_time_over_all_ranks                  = elapsed_time;
        elapsed_time_over_all_threads_over_all_ranks = elapsed_time_over_all_threads;
    }

    if (printReport)
    {
        print_flop(fplog, nrnb_tot, &nbfs, &mflop);
    }

    if (thisRankHasDuty(cr, DUTY_PP) && DOMAINDECOMP(cr))
    {
        print_dd_statistics(cr, inputrec, fplog);
    }

    /* TODO Move the responsibility for any scaling by thread counts
     * to the code that handled the thread region, so that there's a
     * mechanism to keep cycle counting working during the transition
     * to task parallelism. */
    int nthreads_pp  = gmx_omp_nthreads_get(emntNonbonded);
    int nthreads_pme = gmx_omp_nthreads_get(emntPME);
    wallcycle_scale_by_num_threads(wcycle, thisRankHasDuty(cr, DUTY_PME) && !thisRankHasDuty(cr, DUTY_PP), nthreads_pp, nthreads_pme);
    auto cycle_sum(wallcycle_sum(cr, wcycle));

    if (printReport)
    {
        auto                    nbnxn_gpu_timings = (nbv != nullptr && nbv->useGpu()) ? Nbnxm::gpu_get_timings(nbv->gpu_nbv) : nullptr;
        gmx_wallclock_gpu_pme_t pme_gpu_timings   = {};

        if (pme_gpu_task_enabled(pme))
        {
            pme_gpu_get_timings(pme, &pme_gpu_timings);
        }
        wallcycle_print(fplog, mdlog, cr->nnodes, cr->npmenodes, nthreads_pp, nthreads_pme,
                        elapsed_time_over_all_ranks,
                        wcycle, cycle_sum,
                        nbnxn_gpu_timings,
                        &pme_gpu_timings);

        if (EI_DYNAMICS(inputrec->eI))
        {
            delta_t = inputrec->delta_t;
        }

        if (fplog)
        {
            print_perf(fplog, elapsed_time_over_all_threads_over_all_ranks,
                       elapsed_time_over_all_ranks,
                       walltime_accounting_get_nsteps_done_since_reset(walltime_accounting),
                       delta_t, nbfs, mflop);
        }
        if (bWriteStat)
        {
            print_perf(stderr, elapsed_time_over_all_threads_over_all_ranks,
                       elapsed_time_over_all_ranks,
                       walltime_accounting_get_nsteps_done_since_reset(walltime_accounting),
                       delta_t, nbfs, mflop);
        }
    }
}
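
// Overview of Mdrunner::mdrunner() below: detect hardware and set up logging,
// read the tpr input, decide GPU task assignment, spawn thread-MPI ranks if
// requested, set up domain decomposition and the force record, build and run
// the simulator, and finally free PME, GPU and timing resources.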

int Mdrunner::mdrunner()
{
    matrix                    box;
    t_forcerec               *fr               = nullptr;
    t_fcdata                 *fcd              = nullptr;
    real                      ewaldcoeff_q     = 0;
    real                      ewaldcoeff_lj    = 0;
    int                       nChargePerturbed = -1, nTypePerturbed = 0;
    gmx_wallcycle_t           wcycle;
    gmx_walltime_accounting_t walltime_accounting = nullptr;
    gmx_membed_t             *membed              = nullptr;
    gmx_hw_info_t            *hwinfo              = nullptr;

    /* CAUTION: threads may be started later on in this function, so
       cr doesn't reflect the final parallel state right now */
    gmx_mtop_t mtop;

    bool doMembed = opt2bSet("-membed", filenames.size(), filenames.data());
    bool doRerun  = mdrunOptions.rerun;

    // Handle task-assignment related user options.
    EmulateGpuNonbonded emulateGpuNonbonded = (getenv("GMX_EMULATE_GPU") != nullptr ?
                                               EmulateGpuNonbonded::Yes : EmulateGpuNonbonded::No);

    std::vector<int> userGpuTaskAssignment;
    try
    {
        userGpuTaskAssignment = parseUserTaskAssignmentString(hw_opt.userGpuTaskAssignment);
    }
    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
    auto       nonbondedTarget = findTaskTarget(nbpu_opt);
    auto       pmeTarget       = findTaskTarget(pme_opt);
    auto       pmeFftTarget    = findTaskTarget(pme_fft_opt);
    auto       bondedTarget    = findTaskTarget(bonded_opt);
    PmeRunMode pmeRunMode      = PmeRunMode::None;

    // Here we assume that SIMMASTER(cr) does not change even after the
    // threads are started.

    FILE *fplog = nullptr;
    // If we are appending, we don't write log output because we need
    // to check that the old log file matches what the checkpoint file
    // expects. Otherwise, we should start to write log output now if
    // there is a file ready for it.
    if (logFileHandle != nullptr && startingBehavior != StartingBehavior::RestartWithAppending)
    {
        fplog = gmx_fio_getfp(logFileHandle);
    }
    gmx::LoggerOwner logOwner(buildLogger(fplog, cr));
    gmx::MDLogger    mdlog(logOwner.logger());

    // report any development features that may be enabled by environment variables
    reportDevelopmentFeatures(mdlog);

    // With thread-MPI, the communicator changes after threads are
    // launched, so this is rebuilt for the master rank at that
    // time. The non-master ranks are fine to keep the one made here.
    PhysicalNodeCommunicator physicalNodeComm(MPI_COMM_WORLD, gmx_physicalnode_id_hash());
    hwinfo = gmx_detect_hardware(mdlog, physicalNodeComm);

    gmx_print_detected_hardware(fplog, isMasterSimMasterRank(ms, MASTER(cr)), mdlog, hwinfo);

    std::vector<int> gpuIdsToUse = makeGpuIdsToUse(hwinfo->gpu_info, hw_opt.gpuIdsAvailable);

    // Print citation requests after all software/hardware printing
    pleaseCiteGromacs(fplog);

    // TODO Replace this by unique_ptr once t_inputrec is C++
    t_inputrec               inputrecInstance;
    t_inputrec              *inputrec = nullptr;
    std::unique_ptr<t_state> globalState;

    if (SIMMASTER(cr))
    {
        /* Only the master rank has the global state */
        globalState = std::make_unique<t_state>();

        /* Read (nearly) all data required for the simulation */
        read_tpx_state(ftp2fn(efTPR, filenames.size(), filenames.data()),
                       &inputrecInstance, globalState.get(), &mtop);
        inputrec = &inputrecInstance;
    }

    /* Check and update the hardware options for internal consistency */
    checkAndUpdateHardwareOptions(mdlog, &hw_opt, SIMMASTER(cr), domdecOptions.numPmeRanks, inputrec);

    if (GMX_THREAD_MPI && SIMMASTER(cr))
    {
        bool useGpuForNonbonded = false;
        bool useGpuForPme       = false;
        try
        {
            GMX_RELEASE_ASSERT(inputrec != nullptr, "Keep the compiler happy");

            // If the user specified the number of ranks, then we must
            // respect that, but in default mode, we need to allow for
            // the number of GPUs to choose the number of ranks.
            auto canUseGpuForNonbonded = buildSupportsNonbondedOnGpu(nullptr);
            useGpuForNonbonded = decideWhetherToUseGpusForNonbondedWithThreadMpi
                    (nonbondedTarget, gpuIdsToUse, userGpuTaskAssignment, emulateGpuNonbonded,
                    canUseGpuForNonbonded,
                    gpuAccelerationOfNonbondedIsUseful(mdlog, *inputrec, GMX_THREAD_MPI),
                    hw_opt.nthreads_tmpi);
            useGpuForPme = decideWhetherToUseGpusForPmeWithThreadMpi
                    (useGpuForNonbonded, pmeTarget, gpuIdsToUse, userGpuTaskAssignment,
                    *hwinfo, *inputrec, mtop, hw_opt.nthreads_tmpi, domdecOptions.numPmeRanks);
        }
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;

        /* Determine how many thread-MPI ranks to start.
         *
         * TODO Over-writing the user-supplied value here does
         * prevent any possible subsequent checks from working
         * correctly. */
        hw_opt.nthreads_tmpi = get_nthreads_mpi(hwinfo,
                                                &hw_opt,
                                                gpuIdsToUse,
                                                useGpuForNonbonded,
                                                useGpuForPme,
                                                inputrec, &mtop,
                                                mdlog,
                                                doMembed);

        // Now start the threads for thread MPI.
        cr = spawnThreads(hw_opt.nthreads_tmpi);
        /* The main thread continues here with a new cr. We don't deallocate
           the old cr because other threads may still be reading it. */
        // TODO Both master and spawned threads call dup_tfn and
        // reinitialize_commrec_for_this_thread. Find a way to express
        // this better.
        physicalNodeComm = PhysicalNodeCommunicator(MPI_COMM_WORLD, gmx_physicalnode_id_hash());
    }
    // END OF CAUTION: cr and physicalNodeComm are now reliable

    if (PAR(cr))
    {
        /* now broadcast everything to the non-master nodes/threads: */
        if (!SIMMASTER(cr))
        {
            inputrec = &inputrecInstance;
        }
        init_parallel(cr, inputrec, &mtop);
    }
    GMX_RELEASE_ASSERT(inputrec != nullptr, "All ranks should have a valid inputrec now");

    // Now each rank knows the inputrec that SIMMASTER read and used,
    // and (if applicable) cr->nnodes has been assigned the number of
    // thread-MPI ranks that have been chosen. The ranks can now all
    // run the task-deciding functions and will agree on the result
    // without needing to communicate.
    //
    // TODO Should we do the communication in debug mode to support
    // having an assertion?
    //
    // Note that these variables describe only their own node.
    //
    // Note that when bonded interactions run on a GPU they always run
    // alongside a nonbonded task, so do not influence task assignment
    // even though they affect the force calculation workload.
    bool useGpuForNonbonded = false;
    bool useGpuForPme       = false;
    bool useGpuForBonded    = false;
    try
    {
        // It's possible that there are different numbers of GPUs on
        // different nodes, which is the user's responsibility to
        // handle. If unsuitable, we will notice that during task
        // assignment.
        bool gpusWereDetected      = hwinfo->ngpu_compatible_tot > 0;
        auto canUseGpuForNonbonded = buildSupportsNonbondedOnGpu(nullptr);
        useGpuForNonbonded = decideWhetherToUseGpusForNonbonded(nonbondedTarget, userGpuTaskAssignment,
                                                                emulateGpuNonbonded,
                                                                canUseGpuForNonbonded,
                                                                gpuAccelerationOfNonbondedIsUseful(mdlog, *inputrec, !GMX_THREAD_MPI),
                                                                gpusWereDetected);
        useGpuForPme = decideWhetherToUseGpusForPme(useGpuForNonbonded, pmeTarget, userGpuTaskAssignment,
                                                    *hwinfo, *inputrec, mtop,
                                                    cr->nnodes, domdecOptions.numPmeRanks,
                                                    gpusWereDetected);
        auto canUseGpuForBonded = buildSupportsGpuBondeds(nullptr) && inputSupportsGpuBondeds(*inputrec, mtop, nullptr);
        useGpuForBonded =
            decideWhetherToUseGpusForBonded(useGpuForNonbonded, useGpuForPme,
                                            bondedTarget, canUseGpuForBonded,
                                            EVDW_PME(inputrec->vdwtype),
                                            EEL_PME_EWALD(inputrec->coulombtype),
                                            domdecOptions.numPmeRanks, gpusWereDetected);

        pmeRunMode = (useGpuForPme ? PmeRunMode::GPU : PmeRunMode::CPU);
        if (pmeRunMode == PmeRunMode::GPU)
        {
            if (pmeFftTarget == TaskTarget::Cpu)
            {
                pmeRunMode = PmeRunMode::Mixed;
            }
        }
        else if (pmeFftTarget == TaskTarget::Gpu)
        {
            gmx_fatal(FARGS, "Assigning FFTs to GPU requires PME to be assigned to GPU as well. With PME on CPU you should not be using -pmefft.");
        }
    }
    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;

    // Build restraints.
    // TODO: hide restraint implementation details from Mdrunner.
    // There is nothing unique about restraints at this point as far as the
    // Mdrunner is concerned. The Mdrunner should just be getting a sequence of
    // factory functions from the SimulationContext on which to call mdModules_->add().
    // TODO: capture all restraints into a single RestraintModule, passed to the runner builder.
    for (auto && restraint : restraintManager_->getRestraints())
    {
        auto module = RestraintMDModule::create(restraint,
                                                restraint->sites());
        mdModules_->add(std::move(module));
    }

    // TODO: Error handling
    mdModules_->assignOptionsToModules(*inputrec->params, nullptr);
    const auto &mdModulesNotifier = mdModules_->notifier().notifier_;

    if (inputrec->internalParameters != nullptr)
    {
        mdModulesNotifier.notify(*inputrec->internalParameters);
    }

    if (fplog != nullptr)
    {
        pr_inputrec(fplog, 0, "Input Parameters", inputrec, FALSE);
        fprintf(fplog, "\n");
    }

    if (SIMMASTER(cr))
    {
        /* In rerun, set velocities to zero if present */
        if (doRerun && ((globalState->flags & (1 << estV)) != 0))
        {
            // rerun does not use velocities
            GMX_LOG(mdlog.info).asParagraph().appendText(
                    "Rerun trajectory contains velocities. Rerun only evaluates "
                    "potential energy and forces. The velocities will be ignored.");
            for (int i = 0; i < globalState->natoms; i++)
            {
                clear_rvec(globalState->v[i]);
            }
            globalState->flags &= ~(1 << estV);
        }

        /* now make sure the state is initialized and propagated */
        set_state_entries(globalState.get(), inputrec);
    }

    /* NM and TPI parallelize over force/energy calculations, not atoms,
     * so we need to initialize and broadcast the global state.
     */
    if (inputrec->eI == eiNM || inputrec->eI == eiTPI)
    {
        if (!MASTER(cr))
        {
            globalState = std::make_unique<t_state>();
        }
        broadcastStateWithoutDynamics(cr, globalState.get());
    }

    /* A parallel command line option consistency check that we can
       only do after any threads have started. */
    if (!PAR(cr) && (domdecOptions.numCells[XX] > 1 ||
                     domdecOptions.numCells[YY] > 1 ||
                     domdecOptions.numCells[ZZ] > 1 ||
                     domdecOptions.numPmeRanks > 0))
    {
        gmx_fatal(FARGS,
                  "The -dd or -npme option requests a parallel simulation, "
#if !GMX_MPI
                  "but %s was compiled without threads or MPI enabled", output_env_get_program_display_name(oenv));
#else
#if GMX_THREAD_MPI
                  "but the number of MPI-threads (option -ntmpi) is not set or is 1");
#else
                  "but %s was not started through mpirun/mpiexec or only one rank was requested through mpirun/mpiexec", output_env_get_program_display_name(oenv));
#endif
#endif
    }

    if (doRerun &&
        (EI_ENERGY_MINIMIZATION(inputrec->eI) || eiNM == inputrec->eI))
    {
        gmx_fatal(FARGS, "The .mdp file specified an energy minimization or normal mode algorithm, and these are not compatible with mdrun -rerun");
    }

    if (!(EEL_PME(inputrec->coulombtype) || EVDW_PME(inputrec->vdwtype)))
    {
        if (domdecOptions.numPmeRanks > 0)
        {
            gmx_fatal_collective(FARGS, cr->mpi_comm_mysim, MASTER(cr),
                                 "PME-only ranks are requested, but the system does not use PME for electrostatics or LJ");
        }

        domdecOptions.numPmeRanks = 0;
    }

    if (useGpuForNonbonded && domdecOptions.numPmeRanks < 0)
    {
        /* With NB GPUs we don't automatically use PME-only CPU ranks. PME ranks can
         * improve performance with many threads per GPU, since our OpenMP
         * scaling is bad, but it's difficult to automate the setup.
         */
        domdecOptions.numPmeRanks = 0;
    }
    if (useGpuForPme)
    {
        if (domdecOptions.numPmeRanks < 0)
        {
            domdecOptions.numPmeRanks = 0;
            // TODO possibly print a note that one can opt-in for a separate PME GPU rank?
        }
        else
        {
            GMX_RELEASE_ASSERT(domdecOptions.numPmeRanks <= 1, "PME GPU decomposition is not supported");
        }
    }

#if GMX_FAHCORE
    if (MASTER(cr))
    {
        fcRegisterSteps(inputrec->nsteps, inputrec->init_step);
    }
#endif

    /* NMR restraints must be initialized before load_checkpoint,
     * since with time averaging the history is added to t_state.
     * For proper consistency check we therefore need to extend
     * t_state here.
     * So the PME-only nodes (if present) will also initialize
     * the distance restraints.
     */
    snew(fcd, 1);

    /* This needs to be called before read_checkpoint to extend the state */
    init_disres(fplog, &mtop, inputrec, cr, ms, fcd, globalState.get(), replExParams.exchangeInterval > 0);

    init_orires(fplog, &mtop, inputrec, cr, ms, globalState.get(), &(fcd->orires));

    auto deform = prepareBoxDeformation(globalState->box, cr, *inputrec);

    ObservablesHistory observablesHistory = {};

    if (startingBehavior != StartingBehavior::NewSimulation)
    {
        /* Check if checkpoint file exists before doing continuation.
         * This way we can use identical input options for the first and subsequent runs...
         */
        if (mdrunOptions.numStepsCommandline > -2)
        {
            /* Temporarily set the number of steps to unlimited to avoid
             * triggering the nsteps check in load_checkpoint().
             * This hack will go away soon when the -nsteps option is removed.
             */
            inputrec->nsteps = -1;
        }

        load_checkpoint(opt2fn_master("-cpi", filenames.size(), filenames.data(), cr),
                        logFileHandle,
                        cr, domdecOptions.numCells,
                        inputrec, globalState.get(),
                        &observablesHistory,
                        mdrunOptions.reproducible, mdModules_->notifier());

        if (startingBehavior == StartingBehavior::RestartWithAppending && logFileHandle)
        {
            // Now we can start normal logging to the truncated log file.
            fplog    = gmx_fio_getfp(logFileHandle);
            prepareLogAppending(fplog);
            logOwner = buildLogger(fplog, cr);
            mdlog    = logOwner.logger();
        }
    }

    if (mdrunOptions.numStepsCommandline > -2)
    {
        GMX_LOG(mdlog.info).asParagraph().
            appendText("The -nsteps functionality is deprecated, and may be removed in a future version. "
                       "Consider using gmx convert-tpr -nsteps or changing the appropriate .mdp file field.");
    }
    /* override nsteps with value set on the command line */
    override_nsteps_cmdline(mdlog, mdrunOptions.numStepsCommandline, inputrec);

    if (SIMMASTER(cr))
    {
        copy_mat(globalState->box, box);
    }

    if (PAR(cr))
    {
        gmx_bcast(sizeof(box), box, cr);
    }

    if (inputrec->cutoff_scheme != ecutsVERLET)
    {
        gmx_fatal(FARGS, "This group-scheme .tpr file can no longer be run by mdrun. Please update to the Verlet scheme, or use an earlier version of GROMACS if necessary.");
    }

    /* Update rlist and nstlist. */
    prepare_verlet_scheme(fplog, cr, inputrec, nstlist_cmdline, &mtop, box,
                          useGpuForNonbonded || (emulateGpuNonbonded == EmulateGpuNonbonded::Yes), *hwinfo->cpuInfo);

    LocalAtomSetManager atomSets;

    if (PAR(cr) && !(EI_TPI(inputrec->eI) ||
                     inputrec->eI == eiNM))
    {
        cr->dd = init_domain_decomposition(mdlog, cr, domdecOptions, mdrunOptions,
                                           &mtop, inputrec,
                                           box, positionsFromStatePointer(globalState.get()),
                                           &atomSets);
        // Note that local state still does not exist yet.
    }
    else
    {
        /* PME, if used, is done on all nodes with 1D decomposition */
        cr->npmenodes = 0;
        cr->duty      = (DUTY_PP | DUTY_PME);

        if (inputrec->ePBC == epbcSCREW)
        {
            gmx_fatal(FARGS,
                      "pbc=screw is only implemented with domain decomposition");
        }
    }

    if (PAR(cr))
    {
        /* After possible communicator splitting in make_dd_communicators,
         * we can set up the intra/inter node communication.
         */
        gmx_setup_nodecomm(fplog, cr);
    }

#if GMX_MPI
    if (isMultiSim(ms))
    {
        GMX_LOG(mdlog.warning).asParagraph().appendTextFormatted(
                "This is simulation %d out of %d running as a composite GROMACS\n"
                "multi-simulation job. Setup for this simulation:\n",
                ms->sim, ms->nsim);
    }
    GMX_LOG(mdlog.warning).appendTextFormatted(
            "Using %d MPI %s\n",
            cr->nnodes,
#if GMX_THREAD_MPI
            cr->nnodes == 1 ? "thread" : "threads"
#else
            cr->nnodes == 1 ? "process" : "processes"
#endif
            );
    fflush(stderr);
#endif

    // mdrun -pin auto honors any affinity setting that already exists. If one
    // does exist, it is nice to provide feedback about whether that existing
    // affinity setting came from OpenMP or something else, so we run this code
    // both before and after we initialize the OpenMP support.
    gmx_check_thread_affinity_set(mdlog,
                                  &hw_opt, hwinfo->nthreads_hw_avail, FALSE);
    /* Check and update the number of OpenMP threads requested */
    checkAndUpdateRequestedNumOpenmpThreads(&hw_opt, *hwinfo, cr, ms, physicalNodeComm.size_,
                                            pmeRunMode, mtop, *inputrec);

    gmx_omp_nthreads_init(mdlog, cr,
                          hwinfo->nthreads_hw_avail,
                          physicalNodeComm.size_,
                          hw_opt.nthreads_omp,
                          hw_opt.nthreads_omp_pme,
                          !thisRankHasDuty(cr, DUTY_PP));

    // Enable FP exception detection, but not in
    // Release mode and not for compilers with known buggy FP
    // exception support (clang with any optimization) or suspected
    // buggy FP exception support (gcc 7.* with optimization).
#if !defined NDEBUG && \
    !((defined __clang__ || (defined(__GNUC__) && !defined(__ICC) && __GNUC__ == 7)) \
    && defined __OPTIMIZE__)
    const bool bEnableFPE = true;
#else
    const bool bEnableFPE = false;
#endif
    // FIXME - reconcile with gmx_feenableexcept() call from CommandLineModuleManager::run()
    if (bEnableFPE)
    {
        gmx_feenableexcept();
    }

    // Build a data structure that expresses which kinds of non-bonded
    // task are handled by this rank.
    //
    // TODO Later, this might become a loop over all registered modules
    // relevant to the mdp inputs, to find those that have such tasks.
    //
    // TODO This could move before init_domain_decomposition() as part
    // of refactoring that separates the responsibility for duty
    // assignment from setup for communication between tasks, and
    // setup for tasks handled with a domain (ie including short-ranged
    // tasks, bonded tasks, etc.).
    //
    // Note that in general useGpuForNonbonded, etc. can have a value
    // that is inconsistent with the presence of actual GPUs on any
    // rank, and that is not known to be a problem until the
    // duty of the ranks on a node becomes known.
    //
    // TODO Later we might need the concept of computeTasksOnThisRank,
    // from which we construct gpuTasksOnThisRank.
    //
    // Currently the DD code assigns duty to ranks that can
    // include PP work that currently can be executed on a single
    // GPU, if present and compatible. This has to be coordinated
    // across PP ranks on a node, with possible multiple devices
    // or sharing devices on a node, either from the user
    // selection, or automatically.
    auto                 haveGpus = !gpuIdsToUse.empty();
    std::vector<GpuTask> gpuTasksOnThisRank;
    if (thisRankHasDuty(cr, DUTY_PP))
    {
        if (useGpuForNonbonded)
        {
            // Note that any bonded tasks on a GPU always accompany a
            // non-bonded task.
            if (haveGpus)
            {
                gpuTasksOnThisRank.push_back(GpuTask::Nonbonded);
            }
            else if (nonbondedTarget == TaskTarget::Gpu)
            {
                gmx_fatal(FARGS, "Cannot run short-ranged nonbonded interactions on a GPU because no GPU is detected.");
            }
            else if (bondedTarget == TaskTarget::Gpu)
            {
                gmx_fatal(FARGS, "Cannot run bonded interactions on a GPU because no GPU is detected.");
            }
        }
    }
    // TODO cr->duty & DUTY_PME should imply that a PME algorithm is active, but currently does not.
    if (EEL_PME(inputrec->coulombtype) && (thisRankHasDuty(cr, DUTY_PME)))
    {
        if (useGpuForPme)
        {
            if (haveGpus)
            {
                gpuTasksOnThisRank.push_back(GpuTask::Pme);
            }
            else if (pmeTarget == TaskTarget::Gpu)
            {
                gmx_fatal(FARGS, "Cannot run PME on a GPU because no GPU is detected.");
            }
        }
    }

    GpuTaskAssignment gpuTaskAssignment;
    try
    {
        // Produce the task assignment for this rank.
        gpuTaskAssignment = runTaskAssignment(gpuIdsToUse, userGpuTaskAssignment, *hwinfo,
                                              mdlog, cr, ms, physicalNodeComm, gpuTasksOnThisRank,
                                              useGpuForBonded, pmeRunMode);
    }
    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;

    /* Prevent other ranks from continuing after an issue was found
     * and reported as a fatal error.
     *
     * TODO This function implements a barrier so that MPI runtimes
     * can organize an orderly shutdown if one of the ranks has had to
     * issue a fatal error in various code already run. When we have
     * MPI-aware error handling and reporting, this should be
     * improved. */
#if GMX_MPI
    if (PAR(cr))
    {
        MPI_Barrier(cr->mpi_comm_mysim);
    }
    if (isMultiSim(ms))
    {
        if (SIMMASTER(cr))
        {
            MPI_Barrier(ms->mpi_comm_masters);
        }
        /* We need another barrier to prevent non-master ranks from continuing
         * when an error occurred in a different simulation.
         */
        MPI_Barrier(cr->mpi_comm_mysim);
    }
#endif

    /* Now that we know the setup is consistent, check for efficiency */
    check_resource_division_efficiency(hwinfo, !gpuTaskAssignment.empty(), mdrunOptions.ntompOptionIsSet,
                                       cr, mdlog);

    gmx_device_info_t *nonbondedDeviceInfo = nullptr;

    if (thisRankHasDuty(cr, DUTY_PP))
    {
        // This works because only one task of each type is currently permitted.
        auto nbGpuTaskMapping = std::find_if(gpuTaskAssignment.begin(), gpuTaskAssignment.end(),
                                             hasTaskType<GpuTask::Nonbonded>);
        if (nbGpuTaskMapping != gpuTaskAssignment.end())
        {
            int nonbondedDeviceId = nbGpuTaskMapping->deviceId_;
            nonbondedDeviceInfo   = getDeviceInfo(hwinfo->gpu_info, nonbondedDeviceId);
            init_gpu(nonbondedDeviceInfo);

            if (DOMAINDECOMP(cr))
            {
                /* When we share GPUs over ranks, we need to know this for the DLB */
                dd_setup_dlb_resource_sharing(cr, nonbondedDeviceId);
            }
        }
    }

    gmx_device_info_t *pmeDeviceInfo = nullptr;
    // Later, this program could contain kernels that might be later
    // re-used as auto-tuning progresses, or subsequent simulations
    // are invoked.
    PmeGpuProgramStorage pmeGpuProgram;
    // This works because only one task of each type is currently permitted.
    auto       pmeGpuTaskMapping     = std::find_if(gpuTaskAssignment.begin(), gpuTaskAssignment.end(), hasTaskType<GpuTask::Pme>);
    const bool thisRankHasPmeGpuTask = (pmeGpuTaskMapping != gpuTaskAssignment.end());
    if (thisRankHasPmeGpuTask)
    {
        pmeDeviceInfo = getDeviceInfo(hwinfo->gpu_info, pmeGpuTaskMapping->deviceId_);
        init_gpu(pmeDeviceInfo);
        pmeGpuProgram = buildPmeGpuProgram(pmeDeviceInfo);
    }

    /* getting number of PP/PME threads on this MPI / tMPI rank.
       PME: env variable should be read only on one node to make sure it is
       identical everywhere;
     */
    const int numThreadsOnThisRank =
        thisRankHasDuty(cr, DUTY_PP) ? gmx_omp_nthreads_get(emntNonbonded) : gmx_omp_nthreads_get(emntPME);
    checkHardwareOversubscription(numThreadsOnThisRank, cr->nodeid,
                                  *hwinfo->hardwareTopology,
                                  physicalNodeComm, mdlog);

    if (hw_opt.threadAffinity != ThreadAffinity::Off)
    {
        /* Before setting affinity, check whether the affinity has changed,
         * which would indicate that probably the OpenMP library has changed it
         * since we first checked.
         */
        gmx_check_thread_affinity_set(mdlog,
                                      &hw_opt, hwinfo->nthreads_hw_avail, TRUE);

        int numThreadsOnThisNode, intraNodeThreadOffset;
        analyzeThreadsOnThisNode(physicalNodeComm, numThreadsOnThisRank, &numThreadsOnThisNode,
                                 &intraNodeThreadOffset);

        /* Set the CPU affinity */
        gmx_set_thread_affinity(mdlog, cr, &hw_opt, *hwinfo->hardwareTopology,
                                numThreadsOnThisRank, numThreadsOnThisNode,
                                intraNodeThreadOffset, nullptr);
    }

    if (mdrunOptions.timingOptions.resetStep > -1)
    {
        GMX_LOG(mdlog.info).asParagraph().
            appendText("The -resetstep functionality is deprecated, and may be removed in a future version.");
    }
    wcycle = wallcycle_init(fplog, mdrunOptions.timingOptions.resetStep, cr);

    if (PAR(cr))
    {
        /* Master synchronizes its value of reset_counters with all nodes
         * including PME only nodes */
        int64_t reset_counters = wcycle_get_reset_counters(wcycle);
        gmx_bcast_sim(sizeof(reset_counters), &reset_counters, cr);
        wcycle_set_reset_counters(wcycle, reset_counters);
    }

    // Membrane embedding must be initialized before we call init_forcerec()
    if (doMembed)
    {
        if (MASTER(cr))
        {
            fprintf(stderr, "Initializing membed");
        }
        /* Note that membed cannot work in parallel because mtop is
         * changed here. Fix this if we ever want to make it run with
         * multiple ranks. */
        membed = init_membed(fplog, filenames.size(), filenames.data(), &mtop, inputrec, globalState.get(), cr,
                             &mdrunOptions
                                 .checkpointOptions.period);
    }

    std::unique_ptr<MDAtoms>     mdAtoms;
    std::unique_ptr<gmx_vsite_t> vsite;

    t_nrnb nrnb;
    if (thisRankHasDuty(cr, DUTY_PP))
    {
        mdModulesNotifier.notify(*cr);
        mdModulesNotifier.notify(&atomSets);
        mdModulesNotifier.notify(PeriodicBoundaryConditionType {inputrec->ePBC});
        /* Initiate forcerecord */
        fr                 = new t_forcerec;
        fr->forceProviders = mdModules_->initForceProviders();
        init_forcerec(fplog, mdlog, fr, fcd,
                      inputrec, &mtop, cr, box,
                      opt2fn("-table", filenames.size(), filenames.data()),
                      opt2fn("-tablep", filenames.size(), filenames.data()),
                      opt2fns("-tableb", filenames.size(), filenames.data()),
                      *hwinfo, nonbondedDeviceInfo,
                      useGpuForBonded,
                      FALSE,
                      pforce,
                      wcycle);

        /* Initialize the mdAtoms structure.
         * mdAtoms is not filled with atom data,
         * as this can not be done now with domain decomposition.
         */
        mdAtoms = makeMDAtoms(fplog, mtop, *inputrec, thisRankHasPmeGpuTask);
        if (globalState && thisRankHasPmeGpuTask)
        {
            // The pinning of coordinates in the global state object works, because we only use
            // PME on GPU without DD or on a separate PME rank, and because the local state pointer
            // points to the global state object without DD.
            // FIXME: MD and EM separately set up the local state - this should happen in the same function,
            // which should also perform the pinning.
            changePinningPolicy(&globalState->x, pme_get_pinning_policy());
        }

        /* Initialize the virtual site communication */
        vsite = initVsite(mtop, cr);

        calc_shifts(box, fr->shift_vec);

        /* With periodic molecules the charge groups should be whole at start up
         * and the virtual sites should not be far from their proper positions.
         */
        if (!inputrec->bContinuation && MASTER(cr) &&
            !(inputrec->ePBC != epbcNONE && inputrec->bPeriodicMols))
        {
            /* Make molecules whole at start of run */
            if (fr->ePBC != epbcNONE)
            {
                do_pbc_first_mtop(fplog, inputrec->ePBC, box, &mtop, globalState->x.rvec_array());
            }
            if (vsite)
            {
                /* Correct initial vsite positions are required
                 * for the initial distribution in the domain decomposition
                 * and for the initial shell prediction.
                 */
                constructVsitesGlobal(mtop, globalState->x);
            }
        }

        if (EEL_PME(fr->ic->eeltype) || EVDW_PME(fr->ic->vdwtype))
        {
            ewaldcoeff_q  = fr->ic->ewaldcoeff_q;
            ewaldcoeff_lj = fr->ic->ewaldcoeff_lj;
        }
    }
    else
    {
        /* This is a PME only node */

        GMX_ASSERT(globalState == nullptr, "We don't need the state on a PME only rank and expect it to be uninitialized");

        ewaldcoeff_q  = calc_ewaldcoeff_q(inputrec->rcoulomb, inputrec->ewald_rtol);
        ewaldcoeff_lj = calc_ewaldcoeff_lj(inputrec->rvdw, inputrec->ewald_rtol_lj);
    }

    gmx_pme_t *sepPmeData = nullptr;
    // This reference hides the fact that PME data is owned by runner on PME-only ranks and by forcerec on other ranks
    GMX_ASSERT(thisRankHasDuty(cr, DUTY_PP) == (fr != nullptr), "Double-checking that only PME-only ranks have no forcerec");
    gmx_pme_t * &pmedata = fr ? fr->pmedata : sepPmeData;

    /* Initiate PME if necessary,
     * either on all nodes or on dedicated PME nodes only. */
    if (EEL_PME(inputrec->coulombtype) || EVDW_PME(inputrec->vdwtype))
    {
        if (mdAtoms && mdAtoms->mdatoms())
        {
            nChargePerturbed = mdAtoms->mdatoms()->nChargePerturbed;
            if (EVDW_PME(inputrec->vdwtype))
            {
                nTypePerturbed = mdAtoms->mdatoms()->nTypePerturbed;
            }
        }
        if (cr->npmenodes > 0)
        {
            /* The PME only nodes need to know nChargePerturbed(FEP on Q) and nTypePerturbed(FEP on LJ)*/
            gmx_bcast_sim(sizeof(nChargePerturbed), &nChargePerturbed, cr);
            gmx_bcast_sim(sizeof(nTypePerturbed), &nTypePerturbed, cr);
        }

        if (thisRankHasDuty(cr, DUTY_PME))
        {
            try
            {
                pmedata = gmx_pme_init(cr,
                                       getNumPmeDomains(cr->dd),
                                       inputrec,
                                       nChargePerturbed != 0, nTypePerturbed != 0,
                                       mdrunOptions.reproducible,
                                       ewaldcoeff_q, ewaldcoeff_lj,
                                       gmx_omp_nthreads_get(emntPME),
                                       pmeRunMode, nullptr,
                                       pmeDeviceInfo, pmeGpuProgram.get(), mdlog);
            }
            GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
        }
    }

    if (EI_DYNAMICS(inputrec->eI))
    {
        /* Turn on signal handling on all nodes */
        /*
         * (A user signal from the PME nodes (if any)
         * is communicated to the PP nodes.
         */
        signal_handler_install();
    }

    pull_t *pull_work = nullptr;
    if (thisRankHasDuty(cr, DUTY_PP))
    {
        /* Assumes uniform use of the number of OpenMP threads */
        walltime_accounting = walltime_accounting_init(gmx_omp_nthreads_get(emntDefault));

        if (inputrec->bPull)
        {
            /* Initialize pull code */
            pull_work =
                init_pull(fplog, inputrec->pull, inputrec,
                          &mtop, cr, &atomSets, inputrec->fepvals->init_lambda);
            if (inputrec->pull->bXOutAverage || inputrec->pull->bFOutAverage)
            {
                initPullHistory(pull_work, &observablesHistory);
            }
            if (EI_DYNAMICS(inputrec->eI) && MASTER(cr))
            {
                init_pull_output_files(pull_work,
                                       filenames.size(), filenames.data(), oenv,
                                       startingBehavior);
            }
        }

        std::unique_ptr<EnforcedRotation> enforcedRotation;
        if (inputrec->bRot)
        {
            /* Initialize enforced rotation code */
            enforcedRotation = init_rot(fplog,
                                        inputrec,
                                        filenames.size(),
                                        filenames.data(),
                                        cr,
                                        &atomSets,
                                        globalState.get(),
                                        &mtop,
                                        oenv,
                                        mdrunOptions,
                                        startingBehavior);
        }

        t_swap *swap = nullptr;
        if (inputrec->eSwapCoords != eswapNO)
        {
            /* Initialize ion swapping code */
            swap = init_swapcoords(fplog, inputrec,
                                   opt2fn_master("-swap", filenames.size(), filenames.data(), cr),
                                   &mtop, globalState.get(), &observablesHistory,
                                   cr, &atomSets, oenv, mdrunOptions,
                                   startingBehavior);
        }

        /* Let makeConstraints know whether we have essential dynamics constraints.
         * TODO: inputrec should tell us whether we use an algorithm, not a file option or the checkpoint
         */
        bool doEssentialDynamics = (opt2fn_null("-ei", filenames.size(), filenames.data()) != nullptr
                                    || observablesHistory.edsamHistory);
        auto constr              = makeConstraints(mtop, *inputrec, pull_work, doEssentialDynamics,
                                                   fplog, *mdAtoms->mdatoms(),
                                                   cr, ms, &nrnb, wcycle, fr->bMolPBC);

        /* Energy terms and groups */
        gmx_enerdata_t enerd(mtop.groups.groups[SimulationAtomGroupType::EnergyOutput].size(), inputrec->fepvals->n_lambda);

        /* Kinetic energy data */
        gmx_ekindata_t ekind;
        init_ekindata(fplog, &mtop, &(inputrec->opts), &ekind, inputrec->cos_accel);

        /* Set up interactive MD (IMD) */
        auto imdSession = makeImdSession(inputrec, cr, wcycle, &enerd, ms, &mtop, mdlog,
                                         MASTER(cr) ? globalState->x.rvec_array() : nullptr,
                                         filenames.size(), filenames.data(), oenv, mdrunOptions.imdOptions,
                                         startingBehavior);

        if (DOMAINDECOMP(cr))
        {
            GMX_RELEASE_ASSERT(fr, "fr was NULL while cr->duty was DUTY_PP");
            /* This call is not included in init_domain_decomposition mainly
             * because fr->cginfo_mb is set later.
             */
            dd_init_bondeds(fplog, cr->dd, &mtop, vsite.get(), inputrec,
                            domdecOptions.checkBondedInteractions,
                            fr->cginfo_mb);
        }

        // TODO This is not the right place to manage the lifetime of
        // this data structure, but currently it's the easiest way to
        // make it work.
        MdScheduleWorkload mdScheduleWork;

        GMX_ASSERT(stopHandlerBuilder_, "Runner must provide StopHandlerBuilder to simulator.");
        SimulatorBuilder simulatorBuilder;

        // build and run simulator object based on user-input
        auto simulator = simulatorBuilder.build(
                    fplog, cr, ms, mdlog, static_cast<int>(filenames.size()), filenames.data(),
                    oenv,
                    mdrunOptions,
                    startingBehavior,
                    vsite.get(), constr.get(),
                    enforcedRotation ? enforcedRotation->getLegacyEnfrot() : nullptr,
                    deform.get(),
                    mdModules_->outputProvider(),
                    mdModules_->notifier(),
                    inputrec, imdSession.get(), pull_work, swap, &mtop,
                    fcd,
                    globalState.get(),
                    &observablesHistory,
                    mdAtoms.get(), &nrnb, wcycle, fr,
                    &enerd,
                    &ekind,
                    &mdScheduleWork,
                    replExParams,
                    membed,
                    walltime_accounting,
                    std::move(stopHandlerBuilder_),
                    doRerun);
        simulator->run();

        if (inputrec->bPull)
        {
            finish_pull(pull_work);
        }
        finish_swapcoords(swap);
    }
    else
    {
        GMX_RELEASE_ASSERT(pmedata, "pmedata was NULL while cr->duty was not DUTY_PP");
        /* do PME only */
        walltime_accounting = walltime_accounting_init(gmx_omp_nthreads_get(emntPME));
        gmx_pmeonly(pmedata, cr, &nrnb, wcycle, walltime_accounting, inputrec, pmeRunMode);
    }

    wallcycle_stop(wcycle, ewcRUN);

    /* Finish up, write some stuff
     * if rerunMD, don't write last frame again
     */
    finish_run(fplog, mdlog, cr,
               inputrec, &nrnb, wcycle, walltime_accounting,
               fr ? fr->nbv.get() : nullptr,
               pmedata,
               EI_DYNAMICS(inputrec->eI) && !isMultiSim(ms));

    // clean up cycle counter
    wallcycle_destroy(wcycle);

    // Free PME data
    if (pmedata)
    {
        gmx_pme_destroy(pmedata);
        pmedata = nullptr;
    }

    // FIXME: this is only here to manually unpin mdAtoms->chargeA_ and state->x,
    // before we destroy the GPU context(s) in free_gpu_resources().
    // Pinned buffers are associated with contexts in CUDA.
    // As soon as we destroy GPU contexts after mdrunner() exits, these lines should go.
    mdAtoms.reset(nullptr);
    globalState.reset(nullptr);
    mdModules_.reset(nullptr);   // destruct force providers here as they might also use the GPU

    /* Free GPU memory and set a physical node tMPI barrier (which should eventually go away) */
    free_gpu_resources(fr, physicalNodeComm, hwinfo->gpu_info);
    free_gpu(nonbondedDeviceInfo);
    free_gpu(pmeDeviceInfo);
    done_forcerec(fr, mtop.molblock.size());
    sfree(fcd);

    if (doMembed)
    {
        free_membed(membed);
    }

    /* Does what it says */
    print_date_and_time(fplog, cr->nodeid, "Finished mdrun", gmx_gettime());
    walltime_accounting_destroy(walltime_accounting);

    // Ensure log file content is written
    if (logFileHandle)
    {
        gmx_fio_flush(logFileHandle);
    }

    /* Reset FPEs (important for unit tests) by disabling them. Assumes no
     * exceptions were enabled before function was called. */
    if (bEnableFPE)
    {
        gmx_fedisableexcept();
    }

    auto rc = static_cast<int>(gmx_get_stop_condition());

#if GMX_THREAD_MPI
    /* we need to join all threads. The sub-threads join when they
       exit this function, but the master thread needs to be told to
       wait for that. */
    if (PAR(cr) && MASTER(cr))
    {
        tMPI_Finalize();
    }
#endif
    return rc;
}

Mdrunner::~Mdrunner()
{
    // Clean up of the Manager.
    // This will end up getting called on every thread-MPI rank, which is unnecessary,
    // but okay as long as threads synchronize some time before adding or accessing
    // a new set of restraints.
    if (restraintManager_)
    {
        restraintManager_->clear();
        GMX_ASSERT(restraintManager_->countRestraints() == 0,
                   "restraints added during runner life time should be cleared at runner destruction.");
    }
}

void Mdrunner::addPotential(std::shared_ptr<gmx::IRestraintPotential> puller,
                            const std::string                        &name)
{
    GMX_ASSERT(restraintManager_, "Mdrunner must have a restraint manager.");
    // Not sure if this should be logged through the md logger or something else,
    // but it is helpful to have some sort of INFO level message sent somewhere.
    //    std::cout << "Registering restraint named " << name << std::endl;

    // When multiple restraints are used, it may be wasteful to register them separately.
    // Maybe instead register an entire Restraint Manager as a force provider.
    restraintManager_->addToSpec(std::move(puller),
                                 name);
}

Mdrunner::Mdrunner(std::unique_ptr<MDModules> mdModules)
    : mdModules_(std::move(mdModules))
{
}

Mdrunner::Mdrunner(Mdrunner &&) noexcept = default;

//NOLINTNEXTLINE(performance-noexcept-move-constructor) working around GCC bug 58265
Mdrunner &Mdrunner::operator=(Mdrunner && /*handle*/) noexcept(BUGFREE_NOEXCEPT_STRING) = default;
1672 class Mdrunner::BuilderImplementation
1673 {
1674 public:
1675 BuilderImplementation() = delete;
1676 BuilderImplementation(std::unique_ptr<MDModules> mdModules);
1677 ~BuilderImplementation();
1679 BuilderImplementation &setExtraMdrunOptions(const MdrunOptions &options,
1680 real forceWarningThreshold,
1681 StartingBehavior startingBehavior);
1683 void addDomdec(const DomdecOptions &options);
1685 void addVerletList(int nstlist);
1687 void addReplicaExchange(const ReplicaExchangeParameters &params);
1689 void addMultiSim(gmx_multisim_t* multisim);
1691 void addCommunicationRecord(t_commrec *cr);
1693 void addNonBonded(const char* nbpu_opt);
1695 void addPME(const char* pme_opt_, const char* pme_fft_opt_);
1697 void addBondedTaskAssignment(const char* bonded_opt);
1699 void addHardwareOptions(const gmx_hw_opt_t &hardwareOptions);
1701 void addFilenames(ArrayRef <const t_filenm> filenames);
1703 void addOutputEnvironment(gmx_output_env_t* outputEnvironment);
1705 void addLogFile(t_fileio *logFileHandle);
1707 void addStopHandlerBuilder(std::unique_ptr<StopHandlerBuilder> builder);
1709 Mdrunner build();
1711 private:
1713 // Default parameters copied from runner.h
1714 // \todo Clarify source(s) of default parameters.
1716 const char* nbpu_opt_ = nullptr;
1717 const char* pme_opt_ = nullptr;
1718 const char* pme_fft_opt_ = nullptr;
1719 const char *bonded_opt_ = nullptr;
1721 MdrunOptions mdrunOptions_;
1723 DomdecOptions domdecOptions_;
1725 ReplicaExchangeParameters replicaExchangeParameters_;
1727 //! Command-line override for the duration of a neighbor list with the Verlet scheme.
1728 int nstlist_ = 0;
1730 //! Multisim communicator handle; stored indirectly so that build() can distinguish a handle that was never provided (an error) from one explicitly provided as nullptr (a valid, non-multisim value).
1731 std::unique_ptr<gmx_multisim_t*> multisim_ = nullptr;
1733 //! Non-owning communication record (only used when building command-line mdrun)
1734 t_commrec *communicationRecord_ = nullptr;
1736 //! Print a warning if any force is larger than this (in kJ/mol nm).
1737 real forceWarningThreshold_ = -1;
1739 //! Whether the simulation will start afresh, or restart with/without appending.
1740 StartingBehavior startingBehavior_ = StartingBehavior::NewSimulation;
1742 //! The modules that comprise the functionality of mdrun.
1743 std::unique_ptr<MDModules> mdModules_;
1745 //! \brief Parallelism information.
1746 gmx_hw_opt_t hardwareOptions_;
1748 //! filename options for simulation.
1749 ArrayRef<const t_filenm> filenames_;
1751 /*! \brief Handle to output environment.
1753 * \todo gmx_output_env_t needs lifetime management.
1754 */
1755 gmx_output_env_t* outputEnvironment_ = nullptr;
1757 /*! \brief Non-owning handle to MD log file.
1759 * \todo Context should own output facilities for client.
1760 * \todo Improve log file handle management.
1761 * \internal
1762 * Code managing the FILE* relies on the ability to set it to
1763 * nullptr to check whether the filehandle is valid.
1764 */
1765 t_fileio* logFileHandle_ = nullptr;
1767 /*!
1768 * \brief Builder for simulation stop signal handler.
1769 */
1770 std::unique_ptr<StopHandlerBuilder> stopHandlerBuilder_ = nullptr;
1771 };
1773 Mdrunner::BuilderImplementation::BuilderImplementation(std::unique_ptr<MDModules> mdModules) :
1774 mdModules_(std::move(mdModules))
1775 {
1776 }
1778 Mdrunner::BuilderImplementation::~BuilderImplementation() = default;
1780 Mdrunner::BuilderImplementation &
1781 Mdrunner::BuilderImplementation::setExtraMdrunOptions(const MdrunOptions &options,
1782 const real forceWarningThreshold,
1783 const StartingBehavior startingBehavior)
1784 {
1785 mdrunOptions_ = options;
1786 forceWarningThreshold_ = forceWarningThreshold;
1787 startingBehavior_ = startingBehavior;
1788 return *this;
1789 }
1791 void Mdrunner::BuilderImplementation::addDomdec(const DomdecOptions &options)
1792 {
1793 domdecOptions_ = options;
1794 }
1796 void Mdrunner::BuilderImplementation::addVerletList(int nstlist)
1797 {
1798 nstlist_ = nstlist;
1799 }
1801 void Mdrunner::BuilderImplementation::addReplicaExchange(const ReplicaExchangeParameters &params)
1802 {
1803 replicaExchangeParameters_ = params;
1804 }
1806 void Mdrunner::BuilderImplementation::addMultiSim(gmx_multisim_t* multisim)
1807 {
1808 multisim_ = std::make_unique<gmx_multisim_t*>(multisim);
1809 }
1811 void Mdrunner::BuilderImplementation::addCommunicationRecord(t_commrec *cr)
1812 {
1813 communicationRecord_ = cr;
1814 }
1816 Mdrunner Mdrunner::BuilderImplementation::build()
1817 {
1818 auto newRunner = Mdrunner(std::move(mdModules_));
1820 newRunner.mdrunOptions = mdrunOptions_;
1821 newRunner.pforce = forceWarningThreshold_;
1822 newRunner.startingBehavior = startingBehavior_;
1823 newRunner.domdecOptions = domdecOptions_;
1825 // \todo determine an invariant to check or confirm that all gmx_hw_opt_t objects are valid
1826 newRunner.hw_opt = hardwareOptions_;
1828 // No invariant to check. This parameter exists to optionally override other behavior.
1829 newRunner.nstlist_cmdline = nstlist_;
1831 newRunner.replExParams = replicaExchangeParameters_;
1833 newRunner.filenames = filenames_;
1835 GMX_ASSERT(communicationRecord_, "Bug found. It should not be possible to call build() without a valid communicationRecord_.");
1836 newRunner.cr = communicationRecord_;
1838 if (multisim_)
1839 {
1840 // nullptr is a valid value for the multisim handle, so we don't check the pointed-to pointer.
1841 newRunner.ms = *multisim_;
1842 }
1843 else
1844 {
1845 GMX_THROW(gmx::APIError("MdrunnerBuilder::addMultiSim() is required before build()"));
1846 }
1848 // \todo Clarify ownership and lifetime management for gmx_output_env_t
1849 // \todo Update sanity checking when output environment has clearly specified invariants.
1850 // Initialization and default values for oenv are not well specified in the current version.
1851 if (outputEnvironment_)
1852 {
1853 newRunner.oenv = outputEnvironment_;
1854 }
1855 else
1856 {
1857 GMX_THROW(gmx::APIError("MdrunnerBuilder::addOutputEnvironment() is required before build()"));
1858 }
1860 newRunner.logFileHandle = logFileHandle_;
1862 if (nbpu_opt_)
1863 {
1864 newRunner.nbpu_opt = nbpu_opt_;
1865 }
1866 else
1867 {
1868 GMX_THROW(gmx::APIError("MdrunnerBuilder::addNonBonded() is required before build()"));
1869 }
1871 if (pme_opt_ && pme_fft_opt_)
1872 {
1873 newRunner.pme_opt = pme_opt_;
1874 newRunner.pme_fft_opt = pme_fft_opt_;
1875 }
1876 else
1877 {
1878 GMX_THROW(gmx::APIError("MdrunnerBuilder::addElectrostatics() is required before build()"));
1879 }
1881 if (bonded_opt_)
1882 {
1883 newRunner.bonded_opt = bonded_opt_;
1884 }
1885 else
1886 {
1887 GMX_THROW(gmx::APIError("MdrunnerBuilder::addBondedTaskAssignment() is required before build()"));
1888 }
1890 newRunner.restraintManager_ = std::make_unique<gmx::RestraintManager>();
1892 if (stopHandlerBuilder_)
1893 {
1894 newRunner.stopHandlerBuilder_ = std::move(stopHandlerBuilder_);
1895 }
1896 else
1897 {
1898 newRunner.stopHandlerBuilder_ = std::make_unique<StopHandlerBuilder>();
1899 }
1901 return newRunner;
1902 }
1904 void Mdrunner::BuilderImplementation::addNonBonded(const char* nbpu_opt)
1905 {
1906 nbpu_opt_ = nbpu_opt;
1907 }
1909 void Mdrunner::BuilderImplementation::addPME(const char* pme_opt,
1910 const char* pme_fft_opt)
1911 {
1912 pme_opt_ = pme_opt;
1913 pme_fft_opt_ = pme_fft_opt;
1914 }
1916 void Mdrunner::BuilderImplementation::addBondedTaskAssignment(const char* bonded_opt)
1917 {
1918 bonded_opt_ = bonded_opt;
1919 }
1921 void Mdrunner::BuilderImplementation::addHardwareOptions(const gmx_hw_opt_t &hardwareOptions)
1922 {
1923 hardwareOptions_ = hardwareOptions;
1924 }
1926 void Mdrunner::BuilderImplementation::addFilenames(ArrayRef<const t_filenm> filenames)
1927 {
1928 filenames_ = filenames;
1929 }
1931 void Mdrunner::BuilderImplementation::addOutputEnvironment(gmx_output_env_t* outputEnvironment)
1932 {
1933 outputEnvironment_ = outputEnvironment;
1934 }
1936 void Mdrunner::BuilderImplementation::addLogFile(t_fileio *logFileHandle)
1937 {
1938 logFileHandle_ = logFileHandle;
1939 }
1941 void Mdrunner::BuilderImplementation::addStopHandlerBuilder(std::unique_ptr<StopHandlerBuilder> builder)
1942 {
1943 stopHandlerBuilder_ = std::move(builder);
1944 }
1946 MdrunnerBuilder::MdrunnerBuilder(std::unique_ptr<MDModules> mdModules) :
1947 impl_ {std::make_unique<Mdrunner::BuilderImplementation>(std::move(mdModules))}
1948 {
1949 }
1951 MdrunnerBuilder::~MdrunnerBuilder() = default;
1953 MdrunnerBuilder &MdrunnerBuilder::addSimulationMethod(const MdrunOptions &options,
1954 real forceWarningThreshold,
1955 const StartingBehavior startingBehavior)
1956 {
1957 impl_->setExtraMdrunOptions(options, forceWarningThreshold, startingBehavior);
1958 return *this;
1959 }
1961 MdrunnerBuilder &MdrunnerBuilder::addDomainDecomposition(const DomdecOptions &options)
1962 {
1963 impl_->addDomdec(options);
1964 return *this;
1965 }
1967 MdrunnerBuilder &MdrunnerBuilder::addNeighborList(int nstlist)
1968 {
1969 impl_->addVerletList(nstlist);
1970 return *this;
1971 }
1973 MdrunnerBuilder &MdrunnerBuilder::addReplicaExchange(const ReplicaExchangeParameters &params)
1974 {
1975 impl_->addReplicaExchange(params);
1976 return *this;
1977 }
1979 MdrunnerBuilder &MdrunnerBuilder::addMultiSim(gmx_multisim_t* multisim)
1980 {
1981 impl_->addMultiSim(multisim);
1982 return *this;
1983 }
1985 MdrunnerBuilder &MdrunnerBuilder::addCommunicationRecord(t_commrec *cr)
1986 {
1987 impl_->addCommunicationRecord(cr);
1988 return *this;
1989 }
1991 MdrunnerBuilder &MdrunnerBuilder::addNonBonded(const char* nbpu_opt)
1992 {
1993 impl_->addNonBonded(nbpu_opt);
1994 return *this;
1995 }
1997 MdrunnerBuilder &MdrunnerBuilder::addElectrostatics(const char* pme_opt,
1998 const char* pme_fft_opt)
1999 {
2000 // The builder method may become more general in the future, but in this version,
2001 // parameters for PME electrostatics are both required and the only parameters
2002 // available.
2003 if (pme_opt && pme_fft_opt)
2004 {
2005 impl_->addPME(pme_opt, pme_fft_opt);
2006 }
2007 else
2008 {
2009 GMX_THROW(gmx::InvalidInputError("addElectrostatics() arguments must be non-null pointers."));
2010 }
2011 return *this;
2012 }
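// For illustration: with command-line mdrun these strings typically originate from the
// "-pme" and "-pmefft" user options, so a plausible call is, e.g.,
//     builder.addElectrostatics("auto", "auto");
// The accepted values themselves are validated elsewhere by the task-assignment machinery;
// this method only rejects null pointers.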
2014 MdrunnerBuilder &MdrunnerBuilder::addBondedTaskAssignment(const char* bonded_opt)
2015 {
2016 impl_->addBondedTaskAssignment(bonded_opt);
2017 return *this;
2018 }
2020 Mdrunner MdrunnerBuilder::build()
2021 {
2022 return impl_->build();
2023 }
2025 MdrunnerBuilder &MdrunnerBuilder::addHardwareOptions(const gmx_hw_opt_t &hardwareOptions)
2026 {
2027 impl_->addHardwareOptions(hardwareOptions);
2028 return *this;
2029 }
2031 MdrunnerBuilder &MdrunnerBuilder::addFilenames(ArrayRef<const t_filenm> filenames)
2032 {
2033 impl_->addFilenames(filenames);
2034 return *this;
2035 }
2037 MdrunnerBuilder &MdrunnerBuilder::addOutputEnvironment(gmx_output_env_t* outputEnvironment)
2038 {
2039 impl_->addOutputEnvironment(outputEnvironment);
2040 return *this;
2041 }
2043 MdrunnerBuilder &MdrunnerBuilder::addLogFile(t_fileio *logFileHandle)
2044 {
2045 impl_->addLogFile(logFileHandle);
2046 return *this;
2047 }
2049 MdrunnerBuilder &MdrunnerBuilder::addStopHandlerBuilder(std::unique_ptr<StopHandlerBuilder> builder)
2050 {
2051 impl_->addStopHandlerBuilder(std::move(builder));
2052 return *this;
2053 }
2055 MdrunnerBuilder::MdrunnerBuilder(MdrunnerBuilder &&) noexcept = default;
2057 MdrunnerBuilder &MdrunnerBuilder::operator=(MdrunnerBuilder &&) noexcept = default;
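// Illustrative sketch of how a client is expected to drive MdrunnerBuilder, based on the
// requirements enforced in BuilderImplementation::build() above. All lower-case identifiers
// (mdrunOptions, forceWarningThreshold, startingBehavior, hwOpt, cr, oenv, filenames,
// logFileHandle) are caller-prepared placeholders, and the "auto" strings are merely one
// possible task-assignment choice:
//
//     MdrunnerBuilder builder(std::make_unique<MDModules>());
//     builder.addSimulationMethod(mdrunOptions, forceWarningThreshold, startingBehavior);
//     builder.addCommunicationRecord(cr);        // asserted in build()
//     builder.addMultiSim(nullptr);              // required; nullptr is valid for non-multisim runs
//     builder.addOutputEnvironment(oenv);        // required
//     builder.addNonBonded("auto");              // required
//     builder.addElectrostatics("auto", "auto"); // required
//     builder.addBondedTaskAssignment("auto");   // required
//     builder.addHardwareOptions(hwOpt);
//     builder.addFilenames(filenames);
//     builder.addLogFile(logFileHandle);
//     Mdrunner runner = builder.build();
//     // runner.mdrunner() then runs the simulation (defined earlier in this file).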
2059 } // namespace gmx