Move computeSlowForces into stepWork
src/gromacs/mdlib/sim_util.cpp
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013-2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "gmxpre.h"

#include "config.h"

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

#include <array>
#include <optional>

#include "gromacs/applied_forces/awh/awh.h"
#include "gromacs/domdec/dlbtiming.h"
#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/domdec/gpuhaloexchange.h"
#include "gromacs/domdec/partition.h"
#include "gromacs/essentialdynamics/edsam.h"
#include "gromacs/ewald/pme.h"
#include "gromacs/ewald/pme_pp.h"
#include "gromacs/ewald/pme_pp_comm_gpu.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nonbonded/nb_free_energy.h"
#include "gromacs/gmxlib/nonbonded/nb_kernel.h"
#include "gromacs/gmxlib/nonbonded/nonbonded.h"
#include "gromacs/gpu_utils/gpu_utils.h"
#include "gromacs/imd/imd.h"
#include "gromacs/listed_forces/disre.h"
#include "gromacs/listed_forces/gpubonded.h"
#include "gromacs/listed_forces/listed_forces.h"
#include "gromacs/listed_forces/orires.h"
#include "gromacs/math/arrayrefwithpadding.h"
#include "gromacs/math/functions.h"
#include "gromacs/math/units.h"
#include "gromacs/math/vec.h"
#include "gromacs/math/vecdump.h"
#include "gromacs/mdlib/calcmu.h"
#include "gromacs/mdlib/calcvir.h"
#include "gromacs/mdlib/constr.h"
#include "gromacs/mdlib/dispersioncorrection.h"
#include "gromacs/mdlib/enerdata_utils.h"
#include "gromacs/mdlib/force.h"
#include "gromacs/mdlib/force_flags.h"
#include "gromacs/mdlib/forcerec.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdlib/update.h"
#include "gromacs/mdlib/vsite.h"
#include "gromacs/mdlib/wall.h"
#include "gromacs/mdlib/wholemoleculetransform.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/enerdata.h"
#include "gromacs/mdtypes/forcebuffers.h"
#include "gromacs/mdtypes/forceoutput.h"
#include "gromacs/mdtypes/forcerec.h"
#include "gromacs/mdtypes/iforceprovider.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/mdtypes/mdatom.h"
#include "gromacs/mdtypes/multipletimestepping.h"
#include "gromacs/mdtypes/simulation_workload.h"
#include "gromacs/mdtypes/state.h"
#include "gromacs/mdtypes/state_propagator_data_gpu.h"
#include "gromacs/nbnxm/gpu_data_mgmt.h"
#include "gromacs/nbnxm/nbnxm.h"
#include "gromacs/nbnxm/nbnxm_gpu.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/pulling/pull.h"
#include "gromacs/pulling/pull_rotation.h"
#include "gromacs/timing/cyclecounter.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/timing/wallcyclereporting.h"
#include "gromacs/timing/walltime_accounting.h"
#include "gromacs/topology/topology.h"
#include "gromacs/utility/arrayref.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/fixedcapacityvector.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/utility/strconvert.h"
#include "gromacs/utility/sysinfo.h"

#include "gpuforcereduction.h"
using gmx::ArrayRef;
using gmx::AtomLocality;
using gmx::DomainLifetimeWorkload;
using gmx::ForceOutputs;
using gmx::ForceWithShiftForces;
using gmx::InteractionLocality;
using gmx::RVec;
using gmx::SimulationWorkload;
using gmx::StepWorkload;
// TODO: this environment variable allows us to verify before release
// that on less common architectures the total cost of polling is not larger than
// a blocking wait (so polling does not introduce overhead when the static
// PME-first ordering would suffice).
static const bool c_disableAlternatingWait = (getenv("GMX_DISABLE_ALTERNATING_GPU_WAIT") != nullptr);
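// Note that only the presence of the variable is checked, not its value, so
// e.g. running with GMX_DISABLE_ALTERNATING_GPU_WAIT=1 in the environment
// selects the static wait order.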
static void sum_forces(ArrayRef<RVec> f, ArrayRef<const RVec> forceToAdd)
{
    GMX_ASSERT(f.size() >= forceToAdd.size(), "Accumulation buffer should be sufficiently large");
    const int end = forceToAdd.size();

    int gmx_unused nt = gmx_omp_nthreads_get(emntDefault);
#pragma omp parallel for num_threads(nt) schedule(static)
    for (int i = 0; i < end; i++)
    {
        rvec_inc(f[i], forceToAdd[i]);
    }
}
static void calc_virial(int                              start,
                        int                              homenr,
                        const rvec                       x[],
                        const gmx::ForceWithShiftForces& forceWithShiftForces,
                        tensor                           vir_part,
                        const matrix                     box,
                        t_nrnb*                          nrnb,
                        const t_forcerec*                fr,
                        PbcType                          pbcType)
{
    /* The short-range virial from surrounding boxes */
    const rvec* fshift = as_rvec_array(forceWithShiftForces.shiftForces().data());
    calc_vir(SHIFTS, fr->shift_vec, fshift, vir_part, pbcType == PbcType::Screw, box);
    inc_nrnb(nrnb, eNR_VIRIAL, SHIFTS);

    /* Calculate partial virial, for local atoms only, based on short range.
     * Total virial is computed in global_stat, called from do_md
     */
    const rvec* f = as_rvec_array(forceWithShiftForces.force().data());
    f_calc_vir(start, start + homenr, x, f, vir_part, box);
    inc_nrnb(nrnb, eNR_VIRIAL, homenr);

    if (debug)
    {
        pr_rvecs(debug, 0, "vir_part", vir_part, DIM);
    }
}
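/* For reference: both contributions above follow the GROMACS virial
 * convention Xi = -(1/2) sum_i r_i (x) F_i, with the shift-force term
 * accounting for contributions from interactions with periodic images.
 */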
static void pull_potential_wrapper(const t_commrec*               cr,
                                   const t_inputrec*              ir,
                                   const matrix                   box,
                                   gmx::ArrayRef<const gmx::RVec> x,
                                   gmx::ForceWithVirial*          force,
                                   const t_mdatoms*               mdatoms,
                                   gmx_enerdata_t*                enerd,
                                   pull_t*                        pull_work,
                                   const real*                    lambda,
                                   double                         t,
                                   gmx_wallcycle_t                wcycle)
{
    t_pbc pbc;
    real  dvdl;

    /* Calculate the center of mass forces, this requires communication,
     * which is why pull_potential is called close to other communication.
     */
    wallcycle_start(wcycle, ewcPULLPOT);
    set_pbc(&pbc, ir->pbcType, box);
    dvdl = 0;
    enerd->term[F_COM_PULL] +=
            pull_potential(pull_work, mdatoms->massT, &pbc, cr, t, lambda[efptRESTRAINT],
                           as_rvec_array(x.data()), force, &dvdl);
    enerd->dvdl_lin[efptRESTRAINT] += dvdl;
    wallcycle_stop(wcycle, ewcPULLPOT);
}
static void pme_receive_force_ener(t_forcerec*           fr,
                                   const t_commrec*      cr,
                                   gmx::ForceWithVirial* forceWithVirial,
                                   gmx_enerdata_t*       enerd,
                                   bool                  useGpuPmePpComms,
                                   bool                  receivePmeForceToGpu,
                                   gmx_wallcycle_t       wcycle)
{
    real  e_q, e_lj, dvdl_q, dvdl_lj;
    float cycles_ppdpme, cycles_seppme;

    cycles_ppdpme = wallcycle_stop(wcycle, ewcPPDURINGPME);
    dd_cycles_add(cr->dd, cycles_ppdpme, ddCyclPPduringPME);

    /* In case of node-splitting, the PP nodes receive the long-range
     * forces, virial and energy from the PME nodes here.
     */
    wallcycle_start(wcycle, ewcPP_PMEWAITRECVF);
    dvdl_q  = 0;
    dvdl_lj = 0;
    gmx_pme_receive_f(fr->pmePpCommGpu.get(), cr, forceWithVirial, &e_q, &e_lj, &dvdl_q, &dvdl_lj,
                      useGpuPmePpComms, receivePmeForceToGpu, &cycles_seppme);
    enerd->term[F_COUL_RECIP] += e_q;
    enerd->term[F_LJ_RECIP] += e_lj;
    enerd->dvdl_lin[efptCOUL] += dvdl_q;
    enerd->dvdl_lin[efptVDW] += dvdl_lj;

    if (wcycle)
    {
        dd_cycles_add(cr->dd, cycles_seppme, ddCyclPME);
    }
    wallcycle_stop(wcycle, ewcPP_PMEWAITRECVF);
}
static void print_large_forces(FILE*                fp,
                               const t_mdatoms*     md,
                               const t_commrec*     cr,
                               int64_t              step,
                               real                 forceTolerance,
                               ArrayRef<const RVec> x,
                               ArrayRef<const RVec> f)
{
    real       force2Tolerance = gmx::square(forceTolerance);
    gmx::index numNonFinite    = 0;
    for (int i = 0; i < md->homenr; i++)
    {
        real force2    = norm2(f[i]);
        bool nonFinite = !std::isfinite(force2);
        if (force2 >= force2Tolerance || nonFinite)
        {
            fprintf(fp, "step %" PRId64 " atom %6d  x %8.3f %8.3f %8.3f  force %12.5e\n", step,
                    ddglatnr(cr->dd, i), x[i][XX], x[i][YY], x[i][ZZ], std::sqrt(force2));
        }
        if (nonFinite)
        {
            numNonFinite++;
        }
    }
    if (numNonFinite > 0)
    {
        /* Note that with MPI this fatal call on one rank might interrupt
         * the printing on other ranks. But we can only avoid that with
         * an expensive MPI barrier that we would need at each step.
         */
        gmx_fatal(FARGS, "At step %" PRId64 " detected non-finite forces on %td atoms", step, numNonFinite);
    }
}
//! When necessary, spreads forces on vsites and computes the virial for \p forceOutputs->forceWithShiftForces()
static void postProcessForceWithShiftForces(t_nrnb*                   nrnb,
                                            gmx_wallcycle_t           wcycle,
                                            const matrix              box,
                                            ArrayRef<const RVec>      x,
                                            ForceOutputs*             forceOutputs,
                                            tensor                    vir_force,
                                            const t_mdatoms&          mdatoms,
                                            const t_forcerec&         fr,
                                            gmx::VirtualSitesHandler* vsite,
                                            const StepWorkload&       stepWork)
{
    ForceWithShiftForces& forceWithShiftForces = forceOutputs->forceWithShiftForces();

    /* If we have NoVirSum forces, but we do not calculate the virial,
     * we later sum the forceWithShiftForces buffer together with
     * the noVirSum buffer and spread the combined vsite forces at once.
     */
    if (vsite && (!forceOutputs->haveForceWithVirial() || stepWork.computeVirial))
    {
        using VirialHandling = gmx::VirtualSitesHandler::VirialHandling;

        auto                 f      = forceWithShiftForces.force();
        auto                 fshift = forceWithShiftForces.shiftForces();
        const VirialHandling virialHandling =
                (stepWork.computeVirial ? VirialHandling::Pbc : VirialHandling::None);
        vsite->spreadForces(x, f, virialHandling, fshift, nullptr, nrnb, box, wcycle);
        forceWithShiftForces.haveSpreadVsiteForces() = true;
    }

    if (stepWork.computeVirial)
    {
        /* Calculation of the virial must be done after vsites! */
        calc_virial(0, mdatoms.homenr, as_rvec_array(x.data()), forceWithShiftForces, vir_force,
                    box, nrnb, &fr, fr.pbcType);
    }
}
//! Spread, compute virial for and sum forces, when necessary
static void postProcessForces(const t_commrec*          cr,
                              int64_t                   step,
                              t_nrnb*                   nrnb,
                              gmx_wallcycle_t           wcycle,
                              const matrix              box,
                              ArrayRef<const RVec>      x,
                              ForceOutputs*             forceOutputs,
                              tensor                    vir_force,
                              const t_mdatoms*          mdatoms,
                              const t_forcerec*         fr,
                              gmx::VirtualSitesHandler* vsite,
                              const StepWorkload&       stepWork)
{
    // Extract the final output force buffer, which is also the buffer for forces with shift forces
    ArrayRef<RVec> f = forceOutputs->forceWithShiftForces().force();

    if (forceOutputs->haveForceWithVirial())
    {
        auto& forceWithVirial = forceOutputs->forceWithVirial();

        if (vsite)
        {
            /* Spread the mesh force on virtual sites to the other particles...
             * This is parallelized. MPI communication is performed
             * if the constructing atoms aren't local.
             */
            GMX_ASSERT(!stepWork.computeVirial || f.data() != forceWithVirial.force_.data(),
                       "We need separate force buffers for shift and virial forces when "
                       "computing the virial");
            GMX_ASSERT(!stepWork.computeVirial
                               || forceOutputs->forceWithShiftForces().haveSpreadVsiteForces(),
                       "We should spread the force with shift forces separately when computing "
                       "the virial");
            const gmx::VirtualSitesHandler::VirialHandling virialHandling =
                    (stepWork.computeVirial ? gmx::VirtualSitesHandler::VirialHandling::NonLinear
                                            : gmx::VirtualSitesHandler::VirialHandling::None);
            matrix virial = { { 0 } };
            vsite->spreadForces(x, forceWithVirial.force_, virialHandling, {}, virial, nrnb, box, wcycle);
            forceWithVirial.addVirialContribution(virial);
        }

        if (stepWork.computeVirial)
        {
            /* Now add the forces, this is local */
            sum_forces(f, forceWithVirial.force_);

            /* Add the direct virial contributions */
            GMX_ASSERT(
                    forceWithVirial.computeVirial_,
                    "forceWithVirial should request virial computation when we request the virial");
            m_add(vir_force, forceWithVirial.getVirial(), vir_force);

            if (debug)
            {
                pr_rvecs(debug, 0, "vir_force", vir_force, DIM);
            }
        }
    }
    else
    {
        GMX_ASSERT(vsite == nullptr || forceOutputs->forceWithShiftForces().haveSpreadVsiteForces(),
                   "We should have spread the vsite forces (earlier)");
    }

    if (fr->print_force >= 0)
    {
        print_large_forces(stderr, mdatoms, cr, step, fr->print_force, x, f);
    }
}
static void do_nb_verlet(t_forcerec*                fr,
                         const interaction_const_t* ic,
                         gmx_enerdata_t*            enerd,
                         const StepWorkload&        stepWork,
                         const InteractionLocality  ilocality,
                         const int                  clearF,
                         const int64_t              step,
                         t_nrnb*                    nrnb,
                         gmx_wallcycle_t            wcycle)
{
    if (!stepWork.computeNonbondedForces)
    {
        /* skip non-bonded calculation */
        return;
    }

    nonbonded_verlet_t* nbv = fr->nbv.get();

    /* GPU kernel launch overhead is already timed separately */
    if (!nbv->useGpu())
    {
        /* When dynamic pair-list pruning is requested, we need to prune
         * at nstlistPrune steps.
         */
        if (nbv->isDynamicPruningStepCpu(step))
        {
            /* Prune the pair-list beyond fr->ic->rlistPrune using
             * the current coordinates of the atoms.
             */
            wallcycle_sub_start(wcycle, ewcsNONBONDED_PRUNING);
            nbv->dispatchPruneKernelCpu(ilocality, fr->shift_vec);
            wallcycle_sub_stop(wcycle, ewcsNONBONDED_PRUNING);
        }
    }

    nbv->dispatchNonbondedKernel(ilocality, *ic, stepWork, clearF, *fr, enerd, nrnb);
}
static inline void clearRVecs(ArrayRef<RVec> v, const bool useOpenmpThreading)
{
    int nth = gmx_omp_nthreads_get_simple_rvec_task(emntDefault, v.ssize());

    /* Note that we would like to avoid this conditional by putting it
     * into the omp pragma instead, but then we still take the full
     * omp parallel for overhead (at least with gcc5).
     */
    if (!useOpenmpThreading || nth == 1)
    {
        for (RVec& elem : v)
        {
            clear_rvec(elem);
        }
    }
    else
    {
#pragma omp parallel for num_threads(nth) schedule(static)
        for (gmx::index i = 0; i < v.ssize(); i++)
        {
            clear_rvec(v[i]);
        }
    }
}
/*! \brief Return an estimate of the average kinetic energy or 0 when unreliable
 *
 * \param groupOptions  Group options, containing T-coupling options
 */
static real averageKineticEnergyEstimate(const t_grpopts& groupOptions)
{
    real nrdfCoupled   = 0;
    real nrdfUncoupled = 0;
    real kineticEnergy = 0;
    for (int g = 0; g < groupOptions.ngtc; g++)
    {
        if (groupOptions.tau_t[g] >= 0)
        {
            nrdfCoupled += groupOptions.nrdf[g];
            kineticEnergy += groupOptions.nrdf[g] * 0.5 * groupOptions.ref_t[g] * BOLTZ;
        }
        else
        {
            nrdfUncoupled += groupOptions.nrdf[g];
        }
    }

    /* This conditional with > also catches nrdf=0 */
    if (nrdfCoupled > nrdfUncoupled)
    {
        return kineticEnergy * (nrdfCoupled + nrdfUncoupled) / nrdfCoupled;
    }
    else
    {
        return 0;
    }
}
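/* For reference: by equipartition each coupled degree of freedom carries
 * kB*T_ref/2, so the loop above accumulates sum_g nrdf_g * 0.5 * kB * T_ref,g;
 * the (nrdfCoupled + nrdfUncoupled)/nrdfCoupled scaling extrapolates this
 * estimate to the uncoupled degrees of freedom as well.
 */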
/*! \brief This routine checks that the potential energy is finite.
 *
 * Always checks that the potential energy is finite. If step equals
 * inputrec.init_step also checks that the magnitude of the potential energy
 * is reasonable. Terminates with a fatal error when a check fails.
 * Note that passing this check does not guarantee finite forces,
 * since those use slightly different arithmetics. But in most cases
 * there is just a narrow coordinate range where forces are not finite
 * and energies are finite.
 *
 * \param[in] step      The step number, used for checking and printing
 * \param[in] enerd     The energy data; the non-bonded group energies need to be added to
 *                      enerd.term[F_EPOT] before calling this routine
 * \param[in] inputrec  The input record
 */
static void checkPotentialEnergyValidity(int64_t step, const gmx_enerdata_t& enerd, const t_inputrec& inputrec)
{
    /* Threshold valid for comparing absolute potential energy against
     * the kinetic energy. Normally one should not consider absolute
     * potential energy values, but with a factor of one million
     * we should never get false positives.
     */
    constexpr real c_thresholdFactor = 1e6;

    bool energyIsNotFinite    = !std::isfinite(enerd.term[F_EPOT]);
    real averageKineticEnergy = 0;
    /* We only check for large potential energy at the initial step,
     * because that is by far the most likely step for this to occur
     * and because computing the average kinetic energy is not free.
     * Note: nstcalcenergy >> 1 often does not allow to catch large energies
     * before they become NaN.
     */
    if (step == inputrec.init_step && EI_DYNAMICS(inputrec.eI))
    {
        averageKineticEnergy = averageKineticEnergyEstimate(inputrec.opts);
    }

    if (energyIsNotFinite
        || (averageKineticEnergy > 0 && enerd.term[F_EPOT] > c_thresholdFactor * averageKineticEnergy))
    {
        gmx_fatal(
                FARGS,
                "Step %" PRId64
                ": The total potential energy is %g, which is %s. The LJ and electrostatic "
                "contributions to the energy are %g and %g, respectively. A %s potential energy "
                "can be caused by overlapping interactions in bonded interactions or very large%s "
                "coordinate values. Usually this is caused by a badly- or non-equilibrated initial "
                "configuration, incorrect interactions or parameters in the topology.",
                step, enerd.term[F_EPOT], energyIsNotFinite ? "not finite" : "extremely high",
                enerd.term[F_LJ], enerd.term[F_COUL_SR],
                energyIsNotFinite ? "non-finite" : "very high", energyIsNotFinite ? " or NaN" : "");
    }
}
/*! \brief Return true if there are special forces computed this step.
 *
 * The conditionals exactly correspond to those in computeSpecialForces().
 */
static bool haveSpecialForces(const t_inputrec&          inputrec,
                              const gmx::ForceProviders& forceProviders,
                              const pull_t*              pull_work,
                              const bool                 computeForces,
                              const gmx_edsam*           ed)
{

    return ((computeForces && forceProviders.hasForceProvider()) || // forceProviders
            (inputrec.bPull && pull_have_potential(pull_work)) ||   // pull
            inputrec.bRot ||                                        // enforced rotation
            (ed != nullptr) ||                                      // flooding
            (inputrec.bIMD && computeForces));                      // IMD
}
/*! \brief Compute forces and/or energies for special algorithms
 *
 * The intention is to collect all calls to algorithms that compute
 * forces on local atoms only and that do not contribute to the local
 * virial sum (but add their virial contribution separately).
 * Eventually these should likely all become ForceProviders.
 * Within this function the intention is to have algorithms that do
 * global communication at the end, so global barriers within the MD loop
 * are as close together as possible.
 *
 * \param[in]     fplog            The log file
 * \param[in]     cr               The communication record
 * \param[in]     inputrec         The input record
 * \param[in]     awh              The Awh module (nullptr if none in use).
 * \param[in]     enforcedRotation Enforced rotation module.
 * \param[in]     imdSession       The IMD session
 * \param[in]     pull_work        The pull work structure.
 * \param[in]     step             The current MD step
 * \param[in]     t                The current time
 * \param[in,out] wcycle           Wallcycle accounting struct
 * \param[in,out] forceProviders   Pointer to a list of force providers
 * \param[in]     box              The unit cell
 * \param[in]     x                The coordinates
 * \param[in]     mdatoms          Per atom properties
 * \param[in]     lambda           Array of free-energy lambda values
 * \param[in]     stepWork         Step schedule flags
 * \param[in,out] forceWithVirial  Force and virial buffers
 * \param[in,out] enerd            Energy buffer
 * \param[in,out] ed               Essential dynamics pointer
 * \param[in]     didNeighborSearch Tells if we did neighbor searching this step, used for ED sampling
 *
 * \todo Remove didNeighborSearch, which is used incorrectly.
 * \todo Convert all other algorithms called here to ForceProviders.
 */
static void computeSpecialForces(FILE*                          fplog,
                                 const t_commrec*               cr,
                                 const t_inputrec*              inputrec,
                                 gmx::Awh*                      awh,
                                 gmx_enfrot*                    enforcedRotation,
                                 gmx::ImdSession*               imdSession,
                                 pull_t*                        pull_work,
                                 int64_t                        step,
                                 double                         t,
                                 gmx_wallcycle_t                wcycle,
                                 gmx::ForceProviders*           forceProviders,
                                 const matrix                   box,
                                 gmx::ArrayRef<const gmx::RVec> x,
                                 const t_mdatoms*               mdatoms,
                                 gmx::ArrayRef<const real>      lambda,
                                 const StepWorkload&            stepWork,
                                 gmx::ForceWithVirial*          forceWithVirial,
                                 gmx_enerdata_t*                enerd,
                                 gmx_edsam*                     ed,
                                 bool                           didNeighborSearch)
{
    /* NOTE: Currently all ForceProviders only provide forces.
     *       When they also provide energies, remove this conditional.
     */
    if (stepWork.computeForces)
    {
        gmx::ForceProviderInput  forceProviderInput(x, *mdatoms, t, box, *cr);
        gmx::ForceProviderOutput forceProviderOutput(forceWithVirial, enerd);

        /* Collect forces from modules */
        forceProviders->calculateForces(forceProviderInput, &forceProviderOutput);
    }

    if (inputrec->bPull && pull_have_potential(pull_work))
    {
        pull_potential_wrapper(cr, inputrec, box, x, forceWithVirial, mdatoms, enerd, pull_work,
                               lambda.data(), t, wcycle);
    }
    if (awh)
    {
        const bool          needForeignEnergyDifferences = awh->needForeignEnergyDifferences(step);
        std::vector<double> foreignLambdaDeltaH, foreignLambdaDhDl;
        if (needForeignEnergyDifferences)
        {
            enerd->foreignLambdaTerms.finalizePotentialContributions(enerd->dvdl_lin, lambda,
                                                                     *inputrec->fepvals);
            std::tie(foreignLambdaDeltaH, foreignLambdaDhDl) = enerd->foreignLambdaTerms.getTerms(cr);
        }

        enerd->term[F_COM_PULL] += awh->applyBiasForcesAndUpdateBias(
                inputrec->pbcType, mdatoms->massT, foreignLambdaDeltaH, foreignLambdaDhDl, box,
                forceWithVirial, t, step, wcycle, fplog);
    }

    rvec* f = as_rvec_array(forceWithVirial->force_.data());

    /* Add the forces from enforced rotation potentials (if any) */
    if (inputrec->bRot)
    {
        wallcycle_start(wcycle, ewcROTadd);
        enerd->term[F_COM_PULL] += add_rot_forces(enforcedRotation, f, cr, step, t);
        wallcycle_stop(wcycle, ewcROTadd);
    }

    if (ed)
    {
        /* Note that since init_edsam() is called after the initialization
         * of forcerec, edsam doesn't request the noVirSum force buffer.
         * Thus if no other algorithm (e.g. PME) requires it, the forces
         * here will contribute to the virial.
         */
        do_flood(cr, inputrec, as_rvec_array(x.data()), f, ed, box, step, didNeighborSearch);
    }

    /* Add forces from interactive molecular dynamics (IMD), if any */
    if (inputrec->bIMD && stepWork.computeForces)
    {
        imdSession->applyForces(f);
    }
}
/*! \brief Launch the prepare_step and spread stages of PME GPU.
 *
 * \param[in]  pmedata        The PME structure
 * \param[in]  box            The box matrix
 * \param[in]  stepWork       Step schedule flags
 * \param[in]  xReadyOnDevice Event synchronizer indicating that the coordinates are ready in the device memory.
 * \param[in]  lambdaQ        The Coulomb lambda of the current state.
 * \param[in]  wcycle         The wallcycle structure
 */
static inline void launchPmeGpuSpread(gmx_pme_t*            pmedata,
                                      const matrix          box,
                                      const StepWorkload&   stepWork,
                                      GpuEventSynchronizer* xReadyOnDevice,
                                      const real            lambdaQ,
                                      gmx_wallcycle_t       wcycle)
{
    pme_gpu_prepare_computation(pmedata, box, wcycle, stepWork);
    pme_gpu_launch_spread(pmedata, xReadyOnDevice, wcycle, lambdaQ);
}
/*! \brief Launch the FFT and gather stages of PME GPU
 *
 * This function only implements setting the output forces (no accumulation).
 *
 * \param[in]  pmedata        The PME structure
 * \param[in]  lambdaQ        The Coulomb lambda of the current system state.
 * \param[in]  wcycle         The wallcycle structure
 * \param[in]  stepWork       Step schedule flags
 */
static void launchPmeGpuFftAndGather(gmx_pme_t*               pmedata,
                                     const real               lambdaQ,
                                     gmx_wallcycle_t          wcycle,
                                     const gmx::StepWorkload& stepWork)
{
    pme_gpu_launch_complex_transforms(pmedata, wcycle, stepWork);
    pme_gpu_launch_gather(pmedata, wcycle, lambdaQ);
}
/*! \brief
 *  Polling wait for either of the PME or nonbonded GPU tasks.
 *
 * Instead of a static order in waiting for GPU tasks, this function
 * polls checking which of the two tasks completes first, and does the
 * associated force buffer reduction overlapped with the other task.
 * By doing that, unlike static scheduling order, it can always overlap
 * one of the reductions, regardless of the GPU task completion order.
 *
 * \param[in]     nbv              Nonbonded verlet structure
 * \param[in,out] pmedata          PME module data
 * \param[in,out] forceOutputsNonbonded Force outputs for the non-bonded forces and shift forces
 * \param[in,out] forceOutputsPme  Force outputs for the PME forces and virial
 * \param[in,out] enerd            Energy data structure results are reduced into
 * \param[in]     lambdaQ          The Coulomb lambda of the current system state.
 * \param[in]     stepWork         Step schedule flags
 * \param[in]     wcycle           The wallcycle structure
 */
static void alternatePmeNbGpuWaitReduce(nonbonded_verlet_t* nbv,
                                        gmx_pme_t*          pmedata,
                                        gmx::ForceOutputs*  forceOutputsNonbonded,
                                        gmx::ForceOutputs*  forceOutputsPme,
                                        gmx_enerdata_t*     enerd,
                                        const real          lambdaQ,
                                        const StepWorkload& stepWork,
                                        gmx_wallcycle_t     wcycle)
{
    bool isPmeGpuDone = false;
    bool isNbGpuDone  = false;

    gmx::ArrayRef<const gmx::RVec> pmeGpuForces;

    while (!isPmeGpuDone || !isNbGpuDone)
    {
        if (!isPmeGpuDone)
        {
            GpuTaskCompletion completionType =
                    (isNbGpuDone) ? GpuTaskCompletion::Wait : GpuTaskCompletion::Check;
            isPmeGpuDone = pme_gpu_try_finish_task(pmedata, stepWork, wcycle,
                                                   &forceOutputsPme->forceWithVirial(), enerd,
                                                   lambdaQ, completionType);
        }

        if (!isNbGpuDone)
        {
            auto&             forceBuffersNonbonded = forceOutputsNonbonded->forceWithShiftForces();
            GpuTaskCompletion completionType =
                    (isPmeGpuDone) ? GpuTaskCompletion::Wait : GpuTaskCompletion::Check;
            isNbGpuDone = Nbnxm::gpu_try_finish_task(
                    nbv->gpu_nbv, stepWork, AtomLocality::Local, enerd->grpp.ener[egLJSR].data(),
                    enerd->grpp.ener[egCOULSR].data(), forceBuffersNonbonded.shiftForces(),
                    completionType, wcycle);

            if (isNbGpuDone)
            {
                nbv->atomdata_add_nbat_f_to_f(AtomLocality::Local, forceBuffersNonbonded.force());
            }
        }
    }
}
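/* Outline of the pattern above: while either task is pending, each is polled
 * non-blockingly (Check); once one of them has finished, the other is waited
 * on blockingly (Wait). Thus only the last outstanding task can stall the
 * CPU, and the reduction of whichever task finishes first overlaps with the
 * other. Setting GMX_DISABLE_ALTERNATING_GPU_WAIT (c_disableAlternatingWait
 * above) makes do_force fall back to the static wait order instead.
 */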
/*! \brief Set up the different force buffers; also does clearing.
 *
 * \param[in] forceHelperBuffers  Helper force buffers
 * \param[in] force     force array
 * \param[in] stepWork  Step schedule flags
 * \param[out] wcycle   wallcycle recording structure
 *
 * \returns             Cleared force output structure
 */
static ForceOutputs setupForceOutputs(ForceHelperBuffers*                 forceHelperBuffers,
                                      gmx::ArrayRefWithPadding<gmx::RVec> force,
                                      const StepWorkload&                 stepWork,
                                      gmx_wallcycle_t                     wcycle)
{
    wallcycle_sub_start(wcycle, ewcsCLEAR_FORCE_BUFFER);

    /* NOTE: We assume fr->shiftForces is all zeros here */
    gmx::ForceWithShiftForces forceWithShiftForces(force, stepWork.computeVirial,
                                                   forceHelperBuffers->shiftForces());

    if (stepWork.computeForces)
    {
        /* Clear the short- and long-range forces */
        clearRVecs(forceWithShiftForces.force(), true);

        /* Clear the shift forces */
        clearRVecs(forceWithShiftForces.shiftForces(), false);
    }

    /* If we need to compute the virial, we might need a separate
     * force buffer for algorithms for which the virial is calculated
     * directly, such as PME. Otherwise, forceWithVirial uses the
     * same force (f in legacy calls) buffer as other algorithms.
     */
    const bool useSeparateForceWithVirialBuffer =
            (stepWork.computeForces
             && (stepWork.computeVirial && forceHelperBuffers->haveDirectVirialContributions()));
    /* forceWithVirial uses the local atom range only */
    gmx::ForceWithVirial forceWithVirial(
            useSeparateForceWithVirialBuffer ? forceHelperBuffers->forceBufferForDirectVirialContributions()
                                             : force.unpaddedArrayRef(),
            stepWork.computeVirial);

    if (useSeparateForceWithVirialBuffer)
    {
        /* TODO: update comment
         * We only compute forces on local atoms. Note that vsites can
         * spread to non-local atoms, but that part of the buffer is
         * cleared separately in the vsite spreading code.
         */
        clearRVecs(forceWithVirial.force_, true);
    }

    wallcycle_sub_stop(wcycle, ewcsCLEAR_FORCE_BUFFER);

    return ForceOutputs(forceWithShiftForces, forceHelperBuffers->haveDirectVirialContributions(),
                        forceWithVirial);
}
/*! \brief Set up flags that have the lifetime of the domain indicating what type of work is there to compute.
 */
static DomainLifetimeWorkload setupDomainLifetimeWorkload(const t_inputrec&         inputrec,
                                                          const t_forcerec&         fr,
                                                          const pull_t*             pull_work,
                                                          const gmx_edsam*          ed,
                                                          const t_mdatoms&          mdatoms,
                                                          const SimulationWorkload& simulationWork,
                                                          const StepWorkload&       stepWork)
{
    DomainLifetimeWorkload domainWork;
    // Note that haveSpecialForces is constant over the whole run
    domainWork.haveSpecialForces =
            haveSpecialForces(inputrec, *fr.forceProviders, pull_work, stepWork.computeForces, ed);
    domainWork.haveCpuListedForceWork = false;
    domainWork.haveCpuBondedWork      = false;
    for (const auto& listedForces : fr.listedForces)
    {
        if (listedForces.haveCpuListedForces(*fr.fcdata))
        {
            domainWork.haveCpuListedForceWork = true;
        }
        if (listedForces.haveCpuBondeds())
        {
            domainWork.haveCpuBondedWork = true;
        }
    }
    domainWork.haveGpuBondedWork = ((fr.gpuBonded != nullptr) && fr.gpuBonded->haveInteractions());
    // Note that haveFreeEnergyWork is constant over the whole run
    domainWork.haveFreeEnergyWork = (fr.efep != efepNO && mdatoms.nPerturbed != 0);
    // We assume we have local force work if there are CPU
    // force tasks including PME or nonbondeds.
    domainWork.haveCpuLocalForceWork =
            domainWork.haveSpecialForces || domainWork.haveCpuListedForceWork
            || domainWork.haveFreeEnergyWork || simulationWork.useCpuNonbonded || simulationWork.useCpuPme
            || simulationWork.haveEwaldSurfaceContribution || inputrec.nwall > 0;

    return domainWork;
}
/*! \brief Set up force flag struct from the force bitmask.
 *
 * \param[in]      legacyFlags          Force bitmask flags used to construct the new flags
 * \param[in]      mtsLevels            The multiple time-stepping levels, either empty or 2 levels
 * \param[in]      step                 The current MD step
 * \param[in]      simulationWork       Simulation workload description.
 * \param[in]      rankHasPmeDuty       If this rank computes PME.
 *
 * \returns New StepWorkload description.
 */
static StepWorkload setupStepWorkload(const int                     legacyFlags,
                                      ArrayRef<const gmx::MtsLevel> mtsLevels,
                                      const int64_t                 step,
                                      const SimulationWorkload&     simulationWork,
                                      const bool                    rankHasPmeDuty)
{
    GMX_ASSERT(mtsLevels.empty() || mtsLevels.size() == 2, "Expect 0 or 2 MTS levels");
    const bool computeSlowForces = (mtsLevels.empty() || step % mtsLevels[1].stepFactor == 0);

    StepWorkload flags;
    flags.stateChanged        = ((legacyFlags & GMX_FORCE_STATECHANGED) != 0);
    flags.haveDynamicBox      = ((legacyFlags & GMX_FORCE_DYNAMICBOX) != 0);
    flags.doNeighborSearch    = ((legacyFlags & GMX_FORCE_NS) != 0);
    flags.computeSlowForces   = computeSlowForces;
    flags.computeVirial       = ((legacyFlags & GMX_FORCE_VIRIAL) != 0);
    flags.computeEnergy       = ((legacyFlags & GMX_FORCE_ENERGY) != 0);
    flags.computeForces       = ((legacyFlags & GMX_FORCE_FORCES) != 0);
    flags.computeListedForces = ((legacyFlags & GMX_FORCE_LISTED) != 0);
    flags.computeNonbondedForces =
            ((legacyFlags & GMX_FORCE_NONBONDED) != 0) && simulationWork.computeNonbonded
            && !(simulationWork.computeNonbondedAtMtsLevel1 && !computeSlowForces);
    flags.computeDhdl = ((legacyFlags & GMX_FORCE_DHDL) != 0);

    if (simulationWork.useGpuBufferOps)
    {
        GMX_ASSERT(simulationWork.useGpuNonbonded,
                   "Can only offload buffer ops if nonbonded computation is also offloaded");
    }
    flags.useGpuXBufferOps = simulationWork.useGpuBufferOps;
    // on virial steps the CPU reduction path is taken
    flags.useGpuFBufferOps = simulationWork.useGpuBufferOps && !flags.computeVirial;
    flags.useGpuPmeFReduction = flags.computeSlowForces && flags.useGpuFBufferOps && simulationWork.useGpuPme
                                && (rankHasPmeDuty || simulationWork.useGpuPmePpCommunication);

    return flags;
}
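/* Illustration of the MTS gating above: with mtsLevels[1].stepFactor == 4,
 * computeSlowForces is true on steps 0, 4, 8, ... only. When the nonbondeds
 * are assigned to MTS level 1 (computeNonbondedAtMtsLevel1), the nonbonded
 * flag is additionally cleared on the in-between steps, since those forces
 * are then only needed together with the other slow forces.
 */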
/*! \brief Launch end-of-step GPU tasks: buffer clearing and rolling pruning.
 *
 * TODO: eliminate \p useGpuPmeOnThisRank when this is
 * incorporated in DomainLifetimeWorkload.
 */
static void launchGpuEndOfStepTasks(nonbonded_verlet_t*               nbv,
                                    gmx::GpuBonded*                   gpuBonded,
                                    gmx_pme_t*                        pmedata,
                                    gmx_enerdata_t*                   enerd,
                                    const gmx::MdrunScheduleWorkload& runScheduleWork,
                                    bool                              useGpuPmeOnThisRank,
                                    int64_t                           step,
                                    gmx_wallcycle_t                   wcycle)
{
    if (runScheduleWork.simulationWork.useGpuNonbonded && runScheduleWork.stepWork.computeNonbondedForces)
    {
        /* Launch pruning before buffer clearing because the API overhead of the
         * clear kernel launches can leave the GPU idle while it could be running
         * the prune kernel.
         */
        if (nbv->isDynamicPruningStepGpu(step))
        {
            nbv->dispatchPruneKernelGpu(step);
        }

        /* now clear the GPU outputs while we finish the step on the CPU */
        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
        wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);
        Nbnxm::gpu_clear_outputs(nbv->gpu_nbv, runScheduleWork.stepWork.computeVirial);
        wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU);
    }

    if (useGpuPmeOnThisRank)
    {
        pme_gpu_reinit_computation(pmedata, wcycle);
    }

    if (runScheduleWork.domainWork.haveGpuBondedWork && runScheduleWork.stepWork.computeEnergy)
    {
        // in principle this should be included in the DD balancing region,
        // but generally it is infrequent so we'll omit it for the sake of
        // simpler code
        gpuBonded->waitAccumulateEnergyTerms(enerd);

        gpuBonded->clearEnergies();
    }
}
//! \brief Data structure to hold dipole-related data and staging arrays
struct DipoleData
{
    //! Dipole staging for fast summing over MPI
    gmx::DVec muStaging[2] = { { 0.0, 0.0, 0.0 } };
    //! Dipole staging for states A and B (index 0 and 1 resp.)
    gmx::RVec muStateAB[2] = { { 0.0_real, 0.0_real, 0.0_real } };
};
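/* Note on the staging layout: muStaging holds the local state-A and state-B
 * dipoles contiguously, so reduceAndUpdateMuTot() below can reduce both
 * across ranks with a single gmx_sumd(2 * DIM, ...) call, in double
 * precision independent of the precision GROMACS was built with.
 */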
static void reduceAndUpdateMuTot(DipoleData*                   dipoleData,
                                 const t_commrec*              cr,
                                 const bool                    haveFreeEnergy,
                                 gmx::ArrayRef<const real>     lambda,
                                 rvec                          muTotal,
                                 const DDBalanceRegionHandler& ddBalanceRegionHandler)
{
    if (PAR(cr))
    {
        gmx_sumd(2 * DIM, dipoleData->muStaging[0], cr);
        ddBalanceRegionHandler.reopenRegionCpu();
    }
    for (int i = 0; i < 2; i++)
    {
        for (int j = 0; j < DIM; j++)
        {
            dipoleData->muStateAB[i][j] = dipoleData->muStaging[i][j];
        }
    }

    if (!haveFreeEnergy)
    {
        copy_rvec(dipoleData->muStateAB[0], muTotal);
    }
    else
    {
        for (int j = 0; j < DIM; j++)
        {
            muTotal[j] = (1.0 - lambda[efptCOUL]) * dipoleData->muStateAB[0][j]
                         + lambda[efptCOUL] * dipoleData->muStateAB[1][j];
        }
    }
}
/*! \brief Combines MTS level0 and level1 force buffers into a full and MTS-combined force buffer.
 *
 * \param[in]     numAtoms        The number of atoms to combine forces for
 * \param[in,out] forceMtsLevel0  Input: F_level0, output: F_level0 + F_level1
 * \param[in,out] forceMts        Input: F_level1, output: F_level0 + mtsFactor * F_level1
 * \param[in]     mtsFactor       The factor between the level0 and level1 time step
 */
static void combineMtsForces(const int      numAtoms,
                             ArrayRef<RVec> forceMtsLevel0,
                             ArrayRef<RVec> forceMts,
                             const real     mtsFactor)
{
    const int gmx_unused numThreads = gmx_omp_nthreads_get(emntDefault);
#pragma omp parallel for num_threads(numThreads) schedule(static)
    for (int i = 0; i < numAtoms; i++)
    {
        const RVec forceMtsLevel0Tmp = forceMtsLevel0[i];
        forceMtsLevel0[i] += forceMts[i];
        forceMts[i] = forceMtsLevel0Tmp + mtsFactor * forceMts[i];
    }
}
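/* Worked example: with mtsFactor == 2, an atom with fast force F0 and slow
 * force F1 ends up with forceMtsLevel0 = F0 + F1 (the plain total) and
 * forceMts = F0 + 2*F1 (the impulse-scaled total applied on slow steps), so
 * that averaged over two steps the applied slow-force impulse matches F1
 * per step.
 */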
/*! \brief Setup for the local and non-local GPU force reductions:
 * reinitialization plus the registration of forces and dependencies.
 *
 * \param [in] runScheduleWork               Schedule workload flag structure
 * \param [in] cr                            Communication record object
 * \param [in] fr                            Force record object
 * \param [in] ddUsesGpuDirectCommunication  Whether GPU direct communication is in use
 */
static void setupGpuForceReductions(gmx::MdrunScheduleWorkload* runScheduleWork,
                                    const t_commrec*            cr,
                                    t_forcerec*                 fr,
                                    bool                        ddUsesGpuDirectCommunication)
{

    nonbonded_verlet_t*          nbv      = fr->nbv.get();
    gmx::StatePropagatorDataGpu* stateGpu = fr->stateGpu;

    // (re-)initialize local GPU force reduction
    const bool accumulate =
            runScheduleWork->domainWork.haveCpuLocalForceWork || havePPDomainDecomposition(cr);
    const int atomStart = 0;
    fr->gpuForceReduction[gmx::AtomLocality::Local]->reinit(
            stateGpu->getForces(), nbv->getNumAtoms(AtomLocality::Local), nbv->getGridIndices(),
            atomStart, accumulate, stateGpu->fReducedOnDevice());

    // register forces and add dependencies
    fr->gpuForceReduction[gmx::AtomLocality::Local]->registerNbnxmForce(nbv->getGpuForces());

    if (runScheduleWork->simulationWork.useGpuPme
        && (thisRankHasDuty(cr, DUTY_PME) || runScheduleWork->simulationWork.useGpuPmePpCommunication))
    {
        void* forcePtr = thisRankHasDuty(cr, DUTY_PME) ? pme_gpu_get_device_f(fr->pmedata)
                                                       : // PME force buffer on same GPU
                                 fr->pmePpCommGpu->getGpuForceStagingPtr(); // buffer received from other GPU
        fr->gpuForceReduction[gmx::AtomLocality::Local]->registerRvecForce(forcePtr);

        GpuEventSynchronizer* const pmeSynchronizer =
                (thisRankHasDuty(cr, DUTY_PME) ? pme_gpu_get_f_ready_synchronizer(fr->pmedata)
                                               : // PME force buffer on same GPU
                         fr->pmePpCommGpu->getForcesReadySynchronizer()); // buffer received from other GPU
        fr->gpuForceReduction[gmx::AtomLocality::Local]->addDependency(pmeSynchronizer);
    }

    if ((runScheduleWork->domainWork.haveCpuLocalForceWork || havePPDomainDecomposition(cr))
        && !ddUsesGpuDirectCommunication)
    {
        fr->gpuForceReduction[gmx::AtomLocality::Local]->addDependency(
                stateGpu->getForcesReadyOnDeviceEvent(AtomLocality::Local, true));
    }

    if (ddUsesGpuDirectCommunication)
    {
        fr->gpuForceReduction[gmx::AtomLocality::Local]->addDependency(
                cr->dd->gpuHaloExchange[0][0]->getForcesReadyOnDeviceEvent());
    }

    if (havePPDomainDecomposition(cr))
    {
        // (re-)initialize non-local GPU force reduction
        const bool accumulate = runScheduleWork->domainWork.haveCpuBondedWork
                                || runScheduleWork->domainWork.haveFreeEnergyWork;
        const int atomStart = dd_numHomeAtoms(*cr->dd);
        fr->gpuForceReduction[gmx::AtomLocality::NonLocal]->reinit(
                stateGpu->getForces(), nbv->getNumAtoms(AtomLocality::NonLocal),
                nbv->getGridIndices(), atomStart, accumulate);

        // register forces and add dependencies
        fr->gpuForceReduction[gmx::AtomLocality::NonLocal]->registerNbnxmForce(nbv->getGpuForces());
        if (runScheduleWork->domainWork.haveCpuBondedWork || runScheduleWork->domainWork.haveFreeEnergyWork)
        {
            fr->gpuForceReduction[gmx::AtomLocality::NonLocal]->addDependency(
                    stateGpu->getForcesReadyOnDeviceEvent(AtomLocality::NonLocal, true));
        }
    }
}
void do_force(FILE*                               fplog,
              const t_commrec*                    cr,
              const gmx_multisim_t*               ms,
              const t_inputrec*                   inputrec,
              gmx::Awh*                           awh,
              gmx_enfrot*                         enforcedRotation,
              gmx::ImdSession*                    imdSession,
              pull_t*                             pull_work,
              int64_t                             step,
              t_nrnb*                             nrnb,
              gmx_wallcycle_t                     wcycle,
              const gmx_localtop_t*               top,
              const matrix                        box,
              gmx::ArrayRefWithPadding<gmx::RVec> x,
              history_t*                          hist,
              gmx::ForceBuffersView*              forceView,
              tensor                              vir_force,
              const t_mdatoms*                    mdatoms,
              gmx_enerdata_t*                     enerd,
              gmx::ArrayRef<const real>           lambda,
              t_forcerec*                         fr,
              gmx::MdrunScheduleWorkload*         runScheduleWork,
              gmx::VirtualSitesHandler*           vsite,
              rvec                                muTotal,
              double                              t,
              gmx_edsam*                          ed,
              int                                 legacyFlags,
              const DDBalanceRegionHandler&       ddBalanceRegionHandler)
{
    auto force = forceView->forceWithPadding();
    GMX_ASSERT(force.unpaddedArrayRef().ssize() >= fr->natoms_force_constr,
               "The size of the force buffer should be at least the number of atoms to compute "
               "forces for");

    nonbonded_verlet_t*  nbv = fr->nbv.get();
    interaction_const_t* ic  = fr->ic;

    gmx::StatePropagatorDataGpu* stateGpu = fr->stateGpu;

    const SimulationWorkload& simulationWork = runScheduleWork->simulationWork;

    runScheduleWork->stepWork = setupStepWorkload(legacyFlags, inputrec->mtsLevels, step,
                                                  simulationWork, thisRankHasDuty(cr, DUTY_PME));
    const StepWorkload& stepWork = runScheduleWork->stepWork;

    const bool useGpuPmeOnThisRank =
            simulationWork.useGpuPme && thisRankHasDuty(cr, DUTY_PME) && stepWork.computeSlowForces;

    /* At a search step we need to start the first balancing region
     * somewhere early inside the step after communication during domain
     * decomposition (and not during the previous step as usual).
     */
    if (stepWork.doNeighborSearch)
    {
        ddBalanceRegionHandler.openBeforeForceComputationCpu(DdAllowBalanceRegionReopen::yes);
    }

    clear_mat(vir_force);

    if (fr->pbcType != PbcType::No)
    {
        /* Compute shift vectors every step,
         * because of pressure coupling or box deformation!
         */
        if (stepWork.haveDynamicBox && stepWork.stateChanged)
        {
            calc_shifts(box, fr->shift_vec);
        }

        const bool fillGrid = (stepWork.doNeighborSearch && stepWork.stateChanged);
        const bool calcCGCM = (fillGrid && !DOMAINDECOMP(cr));
        if (calcCGCM)
        {
            put_atoms_in_box_omp(fr->pbcType, box, x.unpaddedArrayRef().subArray(0, mdatoms->homenr),
                                 gmx_omp_nthreads_get(emntDefault));
            inc_nrnb(nrnb, eNR_SHIFTX, mdatoms->homenr);
        }
    }
    nbnxn_atomdata_copy_shiftvec(stepWork.haveDynamicBox, fr->shift_vec, nbv->nbat.get());

    const bool pmeSendCoordinatesFromGpu =
            GMX_MPI && simulationWork.useGpuPmePpCommunication && !(stepWork.doNeighborSearch);
    const bool reinitGpuPmePpComms =
            GMX_MPI && simulationWork.useGpuPmePpCommunication && (stepWork.doNeighborSearch);

    const auto localXReadyOnDevice = (useGpuPmeOnThisRank || simulationWork.useGpuBufferOps)
                                             ? stateGpu->getCoordinatesReadyOnDeviceEvent(
                                                     AtomLocality::Local, simulationWork, stepWork)
                                             : nullptr;

    // If coordinates are to be sent to PME task from CPU memory, perform that send here.
    // Otherwise the send will occur after H2D coordinate transfer.
    if (GMX_MPI && !thisRankHasDuty(cr, DUTY_PME) && !pmeSendCoordinatesFromGpu && stepWork.computeSlowForces)
    {
        /* Send particle coordinates to the pme nodes */
        if (!stepWork.doNeighborSearch && simulationWork.useGpuUpdate)
        {
            GMX_RELEASE_ASSERT(false,
                               "GPU update and separate PME ranks are only supported with GPU "
                               "direct communication!");
            // TODO: when this code-path becomes supported add:
            // stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
        }

        gmx_pme_send_coordinates(fr, cr, box, as_rvec_array(x.unpaddedArrayRef().data()), lambda[efptCOUL],
                                 lambda[efptVDW], (stepWork.computeVirial || stepWork.computeEnergy),
                                 step, simulationWork.useGpuPmePpCommunication, reinitGpuPmePpComms,
                                 pmeSendCoordinatesFromGpu, localXReadyOnDevice, wcycle);
    }
    // Coordinates on the device are needed if PME or BufferOps are offloaded.
    // The local coordinates can be copied right away.
    // NOTE: Consider moving this copy to right after they are updated and constrained,
    //       if the latter is not offloaded.
    if (useGpuPmeOnThisRank || stepWork.useGpuXBufferOps)
    {
        if (stepWork.doNeighborSearch)
        {
            // TODO refactor this to do_md, after partitioning.
            stateGpu->reinit(mdatoms->homenr,
                             cr->dd != nullptr ? dd_numAtomsZones(*cr->dd) : mdatoms->homenr);
            if (useGpuPmeOnThisRank)
            {
                // TODO: This should be moved into PME setup function ( pme_gpu_prepare_computation(...) )
                pme_gpu_set_device_x(fr->pmedata, stateGpu->getCoordinates());
            }
        }
        // We need to copy coordinates when:
        // 1. Update is not offloaded
        // 2. The buffers were reinitialized on search step
        if (!simulationWork.useGpuUpdate || stepWork.doNeighborSearch)
        {
            GMX_ASSERT(stateGpu != nullptr, "stateGpu should not be null");
            stateGpu->copyCoordinatesToGpu(x.unpaddedArrayRef(), AtomLocality::Local);
        }
    }
    // TODO Update this comment when introducing SimulationWorkload
    //
    // The conditions for gpuHaloExchange e.g. using GPU buffer
    // operations were checked before construction, so here we can
    // just use it and assert upon any conditions.
    const bool ddUsesGpuDirectCommunication =
            ((cr->dd != nullptr) && (!cr->dd->gpuHaloExchange[0].empty()));
    GMX_ASSERT(!ddUsesGpuDirectCommunication || stepWork.useGpuXBufferOps,
               "Must use coordinate buffer ops with GPU halo exchange");
    const bool useGpuForcesHaloExchange = ddUsesGpuDirectCommunication && stepWork.useGpuFBufferOps;

    // Copy coordinate from the GPU if update is on the GPU and there
    // are forces to be computed on the CPU, or for the computation of
    // virial, or if host-side data will be transferred from this task
    // to a remote task for halo exchange or PME-PP communication. At
    // search steps the current coordinates are already on the host,
    // hence copy is not needed.
    const bool haveHostPmePpComms =
            !thisRankHasDuty(cr, DUTY_PME) && !simulationWork.useGpuPmePpCommunication;
    const bool haveHostHaloExchangeComms = havePPDomainDecomposition(cr) && !ddUsesGpuDirectCommunication;

    bool gmx_used_in_debug haveCopiedXFromGpu = false;
    if (simulationWork.useGpuUpdate && !stepWork.doNeighborSearch
        && (runScheduleWork->domainWork.haveCpuLocalForceWork || stepWork.computeVirial
            || haveHostPmePpComms || haveHostHaloExchangeComms))
    {
        GMX_ASSERT(stateGpu != nullptr, "stateGpu should not be null");
        stateGpu->copyCoordinatesFromGpu(x.unpaddedArrayRef(), AtomLocality::Local);
        haveCopiedXFromGpu = true;
    }
    // If coordinates are to be sent to PME task from GPU memory, perform that send here.
    // Otherwise the send will occur before the H2D coordinate transfer.
    if (!thisRankHasDuty(cr, DUTY_PME) && pmeSendCoordinatesFromGpu)
    {
        /* Send particle coordinates to the pme nodes */
        gmx_pme_send_coordinates(fr, cr, box, as_rvec_array(x.unpaddedArrayRef().data()), lambda[efptCOUL],
                                 lambda[efptVDW], (stepWork.computeVirial || stepWork.computeEnergy),
                                 step, simulationWork.useGpuPmePpCommunication, reinitGpuPmePpComms,
                                 pmeSendCoordinatesFromGpu, localXReadyOnDevice, wcycle);
    }

    if (useGpuPmeOnThisRank)
    {
        launchPmeGpuSpread(fr->pmedata, box, stepWork, localXReadyOnDevice, lambda[efptCOUL], wcycle);
    }
    const gmx::DomainLifetimeWorkload& domainWork = runScheduleWork->domainWork;

    /* do gridding for pair search */
    if (stepWork.doNeighborSearch)
    {
        if (fr->wholeMoleculeTransform && stepWork.stateChanged)
        {
            fr->wholeMoleculeTransform->updateForAtomPbcJumps(x.unpaddedArrayRef(), box);
        }

        // TODO
        // - vzero is constant, do we need to pass it?
        // - box_diag should be passed directly to nbnxn_put_on_grid
        //
        rvec vzero;
        clear_rvec(vzero);

        rvec box_diag;
        box_diag[XX] = box[XX][XX];
        box_diag[YY] = box[YY][YY];
        box_diag[ZZ] = box[ZZ][ZZ];

        wallcycle_start(wcycle, ewcNS);
        if (!DOMAINDECOMP(cr))
        {
            wallcycle_sub_start(wcycle, ewcsNBS_GRID_LOCAL);
            nbnxn_put_on_grid(nbv, box, 0, vzero, box_diag, nullptr, { 0, mdatoms->homenr }, -1,
                              fr->cginfo, x.unpaddedArrayRef(), 0, nullptr);
            wallcycle_sub_stop(wcycle, ewcsNBS_GRID_LOCAL);
        }
        else
        {
            wallcycle_sub_start(wcycle, ewcsNBS_GRID_NONLOCAL);
            nbnxn_put_on_grid_nonlocal(nbv, domdec_zones(cr->dd), fr->cginfo, x.unpaddedArrayRef());
            wallcycle_sub_stop(wcycle, ewcsNBS_GRID_NONLOCAL);
        }

        nbv->setAtomProperties(gmx::constArrayRefFromArray(mdatoms->typeA, mdatoms->nr),
                               gmx::constArrayRefFromArray(mdatoms->chargeA, mdatoms->nr), fr->cginfo);

        wallcycle_stop(wcycle, ewcNS);

        /* initialize the GPU nbnxm atom data and bonded data structures */
        if (simulationWork.useGpuNonbonded)
        {
            wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);

            wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);
            Nbnxm::gpu_init_atomdata(nbv->gpu_nbv, nbv->nbat.get());
            wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);

            if (fr->gpuBonded)
            {
                /* Now we put all atoms on the grid, we can assign bonded
                 * interactions to the GPU, where the grid order is
                 * needed. Also the xq, f and fshift device buffers have
                 * been reallocated if needed, so the bonded code can
                 * learn about them. */
                // TODO the xq, f, and fshift buffers are now shared
                // resources, so they should be maintained by a
                // higher-level object than the nb module.
                fr->gpuBonded->updateInteractionListsAndDeviceBuffers(
                        nbv->getGridIndices(), top->idef, Nbnxm::gpu_get_xq(nbv->gpu_nbv),
                        Nbnxm::gpu_get_f(nbv->gpu_nbv), Nbnxm::gpu_get_fshift(nbv->gpu_nbv));
            }
            wallcycle_stop(wcycle, ewcLAUNCH_GPU);
        }

        // Need to run after the GPU-offload bonded interaction lists
        // are set up to be able to determine whether there is bonded work.
        runScheduleWork->domainWork = setupDomainLifetimeWorkload(
                *inputrec, *fr, pull_work, ed, *mdatoms, simulationWork, stepWork);

        wallcycle_start_nocount(wcycle, ewcNS);
        wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_LOCAL);
        /* Note that with a GPU the launch overhead of the list transfer is not timed separately */
        nbv->constructPairlist(InteractionLocality::Local, top->excls, step, nrnb);

        nbv->setupGpuShortRangeWork(fr->gpuBonded, InteractionLocality::Local);

        wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_LOCAL);
        wallcycle_stop(wcycle, ewcNS);

        if (stepWork.useGpuXBufferOps)
        {
            nbv->atomdata_init_copy_x_to_nbat_x_gpu();
        }

        if (simulationWork.useGpuBufferOps)
        {
            setupGpuForceReductions(runScheduleWork, cr, fr, ddUsesGpuDirectCommunication);
        }
    }
    else if (!EI_TPI(inputrec->eI) && stepWork.computeNonbondedForces)
    {
        if (stepWork.useGpuXBufferOps)
        {
            GMX_ASSERT(stateGpu, "stateGpu should be valid when buffer ops are offloaded");
            nbv->convertCoordinatesGpu(AtomLocality::Local, false, stateGpu->getCoordinates(),
                                       localXReadyOnDevice);
        }
        else
        {
            if (simulationWork.useGpuUpdate)
            {
                GMX_ASSERT(stateGpu, "need a valid stateGpu object");
                GMX_ASSERT(haveCopiedXFromGpu,
                           "a wait should only be triggered if copy has been scheduled");
                stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
            }
            nbv->convertCoordinates(AtomLocality::Local, false, x.unpaddedArrayRef());
        }
    }
    if (simulationWork.useGpuNonbonded && (stepWork.computeNonbondedForces || domainWork.haveGpuBondedWork))
    {
        ddBalanceRegionHandler.openBeforeForceComputationGpu();

        wallcycle_start(wcycle, ewcLAUNCH_GPU);

        wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_NONBONDED);
        Nbnxm::gpu_upload_shiftvec(nbv->gpu_nbv, nbv->nbat.get());
        if (stepWork.doNeighborSearch || !stepWork.useGpuXBufferOps)
        {
            Nbnxm::gpu_copy_xq_to_gpu(nbv->gpu_nbv, nbv->nbat.get(), AtomLocality::Local);
        }
        wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
        // with X buffer ops offloaded to the GPU on all but the search steps

        // bonded work not split into separate local and non-local, so with DD
        // we can only launch the kernel after non-local coordinates have been received.
        if (domainWork.haveGpuBondedWork && !havePPDomainDecomposition(cr))
        {
            wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_BONDED);
            fr->gpuBonded->setPbcAndlaunchKernel(fr->pbcType, box, fr->bMolPBC, stepWork);
            wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_BONDED);
        }

        /* launch local nonbonded work on GPU */
        wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);
        do_nb_verlet(fr, ic, enerd, stepWork, InteractionLocality::Local, enbvClearFNo, step, nrnb, wcycle);
        wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU);
    }
    if (useGpuPmeOnThisRank)
    {
        // In PME GPU and mixed mode we launch FFT / gather after the
        // X copy/transform to allow overlap as well as after the GPU NB
        // launch to avoid FFT launch overhead hijacking the CPU and delaying
        // the nonbonded kernel.
        launchPmeGpuFftAndGather(fr->pmedata, lambda[efptCOUL], wcycle, stepWork);
    }
    /* Communicate coordinates and sum dipole if necessary +
       do non-local pair search */
    if (havePPDomainDecomposition(cr))
    {
        if (stepWork.doNeighborSearch)
        {
            // TODO: fuse this branch with the above large stepWork.doNeighborSearch block
            wallcycle_start_nocount(wcycle, ewcNS);
            wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_NONLOCAL);
            /* Note that with a GPU the launch overhead of the list transfer is not timed separately */
            nbv->constructPairlist(InteractionLocality::NonLocal, top->excls, step, nrnb);

            nbv->setupGpuShortRangeWork(fr->gpuBonded, InteractionLocality::NonLocal);
            wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_NONLOCAL);
            wallcycle_stop(wcycle, ewcNS);
            // TODO refactor this GPU halo exchange re-initialisation
            // to location in do_md where GPU halo exchange is
            // constructed at partitioning, after above stateGpu
            // re-initialization has similarly been refactored
            if (ddUsesGpuDirectCommunication)
            {
                reinitGpuHaloExchange(*cr, stateGpu->getCoordinates(), stateGpu->getForces());
            }
        }
        else
        {
            if (ddUsesGpuDirectCommunication)
            {
                // The following must be called after local setCoordinates (which records an event
                // when the coordinate data has been copied to the device).
                communicateGpuHaloCoordinates(*cr, box, localXReadyOnDevice);

                if (domainWork.haveCpuBondedWork || domainWork.haveFreeEnergyWork)
                {
                    // non-local part of coordinate buffer must be copied back to host for CPU work
                    stateGpu->copyCoordinatesFromGpu(x.unpaddedArrayRef(), AtomLocality::NonLocal);
                }
            }
            else
            {
                // Note: GPU update + DD without direct communication is not supported,
                // a waitCoordinatesReadyOnHost() should be issued if it will be.
                GMX_ASSERT(!simulationWork.useGpuUpdate,
                           "GPU update is not supported with CPU halo exchange");
                dd_move_x(cr->dd, box, x.unpaddedArrayRef(), wcycle);
            }

            if (stepWork.useGpuXBufferOps)
            {
                if (!useGpuPmeOnThisRank && !ddUsesGpuDirectCommunication)
                {
                    stateGpu->copyCoordinatesToGpu(x.unpaddedArrayRef(), AtomLocality::NonLocal);
                }
                nbv->convertCoordinatesGpu(AtomLocality::NonLocal, false, stateGpu->getCoordinates(),
                                           stateGpu->getCoordinatesReadyOnDeviceEvent(
                                                   AtomLocality::NonLocal, simulationWork, stepWork));
            }
            else
            {
                nbv->convertCoordinates(AtomLocality::NonLocal, false, x.unpaddedArrayRef());
            }
        }
        if (simulationWork.useGpuNonbonded)
        {
            wallcycle_start(wcycle, ewcLAUNCH_GPU);

            if (stepWork.doNeighborSearch || !stepWork.useGpuXBufferOps)
            {
                wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_NONBONDED);
                Nbnxm::gpu_copy_xq_to_gpu(nbv->gpu_nbv, nbv->nbat.get(), AtomLocality::NonLocal);
                wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
            }

            if (domainWork.haveGpuBondedWork)
            {
                wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_BONDED);
                fr->gpuBonded->setPbcAndlaunchKernel(fr->pbcType, box, fr->bMolPBC, stepWork);
                wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_BONDED);
            }

            /* launch non-local nonbonded tasks on GPU */
            wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_NONBONDED);
            do_nb_verlet(fr, ic, enerd, stepWork, InteractionLocality::NonLocal, enbvClearFNo, step,
                         nrnb, wcycle);
            wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);

            wallcycle_stop(wcycle, ewcLAUNCH_GPU);
        }
    }
1541 if (simulationWork.useGpuNonbonded && stepWork.computeNonbondedForces)
1543 /* launch D2H copy-back F */
1544 wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
1545 wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1547 if (havePPDomainDecomposition(cr))
1549 Nbnxm::gpu_launch_cpyback(nbv->gpu_nbv, nbv->nbat.get(), stepWork, AtomLocality::NonLocal);
1551 Nbnxm::gpu_launch_cpyback(nbv->gpu_nbv, nbv->nbat.get(), stepWork, AtomLocality::Local);
1552 wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1554 if (domainWork.haveGpuBondedWork && stepWork.computeEnergy)
1556 fr->gpuBonded->launchEnergyTransfer();
1558 wallcycle_stop(wcycle, ewcLAUNCH_GPU);
1561 gmx::ArrayRef<const gmx::RVec> xWholeMolecules;
1562 if (fr->wholeMoleculeTransform)
1564 xWholeMolecules = fr->wholeMoleculeTransform->wholeMoleculeCoordinates(x.unpaddedArrayRef(), box);
1567 DipoleData dipoleData;
1569 if (simulationWork.computeMuTot)
1571 const int start = 0;
1573 /* Calculate the total (local) dipole moment in a temporary common array.
1574 * This makes it possible to sum the contributions over ranks faster.
1576 gmx::ArrayRef<const gmx::RVec> xRef =
1577 (xWholeMolecules.empty() ? x.unpaddedArrayRef() : xWholeMolecules);
1578 calc_mu(start, mdatoms->homenr, xRef, mdatoms->chargeA, mdatoms->chargeB,
1579 mdatoms->nChargePerturbed, dipoleData.muStaging[0], dipoleData.muStaging[1]);
1581 reduceAndUpdateMuTot(&dipoleData, cr, (fr->efep != efepNO), lambda, muTotal, ddBalanceRegionHandler);
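// With perturbed charges, the reduced dipole is presumably interpolated between
// the A and B states, roughly: muTot = (1 - lambda_coul)*mu_A + lambda_coul*mu_B.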
1584 /* Reset energies */
1585 reset_enerdata(enerd);
1587 if (DOMAINDECOMP(cr) && !thisRankHasDuty(cr, DUTY_PME))
1589 wallcycle_start(wcycle, ewcPPDURINGPME);
1590 dd_force_flop_start(cr->dd, nrnb);
1593 // For the remaining CPU tasks that depend on coordinates produced by the GPU update,
1594 // this wait ensures that the D2H transfer is complete.
1595 if ((simulationWork.useGpuUpdate)
1596 && (runScheduleWork->domainWork.haveCpuLocalForceWork || stepWork.computeVirial))
1598 stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
1601 if (inputrec->bRot)
1603 wallcycle_start(wcycle, ewcROT);
1604 do_rotation(cr, enforcedRotation, box, as_rvec_array(x.unpaddedArrayRef().data()), t, step,
1605 stepWork.doNeighborSearch);
1606 wallcycle_stop(wcycle, ewcROT);
1609 /* Start the force cycle counter.
1610 * Note that a different counter is used for dynamic load balancing.
1612 wallcycle_start(wcycle, ewcFORCE);
1614 /* Set up and clear force outputs:
1615 * forceOutMtsLevel0: everything except what is in the other two outputs
1616 * forceOutMtsLevel1: PME-mesh and listed-forces group 1
1617 * forceOutNonbonded: non-bonded forces
1618 * Without multiple time stepping all three point to the same object.
1619 * With multiple time stepping the usage differs between fast steps (level0 only) and slow steps.
1621 ForceOutputs forceOutMtsLevel0 =
1622 setupForceOutputs(&fr->forceHelperBuffers[0], force, stepWork, wcycle);
1624 // Force output for MTS combined forces, only set at level1 MTS steps
1625 std::optional<ForceOutputs> forceOutMts =
1626 (fr->useMts && stepWork.computeSlowForces)
1627 ? std::optional(setupForceOutputs(&fr->forceHelperBuffers[1],
1628 forceView->forceMtsCombinedWithPadding(),
1629 stepWork, wcycle))
1630 : std::nullopt;
1632 ForceOutputs* forceOutMtsLevel1 =
1633 fr->useMts ? (stepWork.computeSlowForces ? &forceOutMts.value() : nullptr) : &forceOutMtsLevel0;
1635 const bool nonbondedAtMtsLevel1 = runScheduleWork->simulationWork.computeNonbondedAtMtsLevel1;
1637 ForceOutputs* forceOutNonbonded = nonbondedAtMtsLevel1 ? forceOutMtsLevel1 : &forceOutMtsLevel0;
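// Rough MTS routing implied by the setup above: fast steps only touch
// forceOutMtsLevel0; at slow steps the slow forces accumulate in
// forceOutMtsLevel1 and are later combined as f ~= f_fast + stepFactor * f_slow.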
1639 if (inputrec->bPull && pull_have_constraint(pull_work))
1641 clear_pull_forces(pull_work);
1644 /* When the non-bonded forces are computed on the CPU, we calculate them here.
1645 * We do this before calling do_force_lowlevel, because in that
1646 * function the listed forces are calculated before PME, which
1647 * does communication. With this order, imbalance between the
1648 * non-bonded and listed force calculations can be balanced out
1649 * by the domain decomposition load balancing.
1652 const bool useOrEmulateGpuNb = simulationWork.useGpuNonbonded || fr->nbv->emulateGpu();
1654 if (!useOrEmulateGpuNb)
1656 do_nb_verlet(fr, ic, enerd, stepWork, InteractionLocality::Local, enbvClearFYes, step, nrnb, wcycle);
1659 if (fr->efep != efepNO && stepWork.computeNonbondedForces)
1661 /* Calculate the local and non-local free energy interactions here.
1662 * These always run on the CPU, whether or not a GPU is in use.
1664 nbv->dispatchFreeEnergyKernel(InteractionLocality::Local, fr,
1665 as_rvec_array(x.unpaddedArrayRef().data()),
1666 &forceOutNonbonded->forceWithShiftForces(), *mdatoms,
1667 inputrec->fepvals, lambda, enerd, stepWork, nrnb);
1669 if (havePPDomainDecomposition(cr))
1671 nbv->dispatchFreeEnergyKernel(InteractionLocality::NonLocal, fr,
1672 as_rvec_array(x.unpaddedArrayRef().data()),
1673 &forceOutNonbonded->forceWithShiftForces(), *mdatoms,
1674 inputrec->fepvals, lambda, enerd, stepWork, nrnb);
1678 if (stepWork.computeNonbondedForces && !useOrEmulateGpuNb)
1680 if (havePPDomainDecomposition(cr))
1682 do_nb_verlet(fr, ic, enerd, stepWork, InteractionLocality::NonLocal, enbvClearFNo, step,
1683 nrnb, wcycle);
1686 if (stepWork.computeForces)
1688 /* Add all the non-bonded forces to the normal force array.
1689 * This can be split into a local and a non-local part when communication
1690 * is overlapped with computation under domain decomposition.
1692 wallcycle_stop(wcycle, ewcFORCE);
1693 nbv->atomdata_add_nbat_f_to_f(AtomLocality::All,
1694 forceOutNonbonded->forceWithShiftForces().force());
1695 wallcycle_start_nocount(wcycle, ewcFORCE);
1698 /* If there are multiple fshift output buffers we need to reduce them */
1699 if (stepWork.computeVirial)
1701 /* This is not in a subcounter because it takes a
1702 negligible and constant-sized amount of time */
1703 nbnxn_atomdata_add_nbat_fshift_to_fshift(
1704 *nbv->nbat, forceOutNonbonded->forceWithShiftForces().shiftForces());
1708 // TODO Force flags should include haveFreeEnergyWork for this domain
1709 if (ddUsesGpuDirectCommunication && (domainWork.haveCpuBondedWork || domainWork.haveFreeEnergyWork))
1711 /* Wait for non-local coordinate data to be copied from device */
1712 stateGpu->waitCoordinatesReadyOnHost(AtomLocality::NonLocal);
1715 // Compute wall interactions, when present.
1716 // Note: should be moved to special forces.
1717 if (inputrec->nwall && stepWork.computeNonbondedForces)
1719 /* foreign lambda component for walls */
1720 real dvdl_walls = do_walls(*inputrec, *fr, box, *mdatoms, x.unpaddedConstArrayRef(),
1721 &forceOutMtsLevel0.forceWithVirial(), lambda[efptVDW],
1722 enerd->grpp.ener[egLJSR].data(), nrnb);
1723 enerd->dvdl_lin[efptVDW] += dvdl_walls;
1726 if (stepWork.computeListedForces)
1728 /* Check whether we need to take into account PBC in listed interactions */
1729 bool needMolPbc = false;
1730 for (const auto& listedForces : fr->listedForces)
1732 if (listedForces.haveCpuListedForces(*fr->fcdata))
1734 needMolPbc = fr->bMolPBC;
1738 t_pbc pbc;
1740 if (needMolPbc)
1742 /* Since all atoms are in the rectangular or triclinic unit-cell,
1743 * only single box vector shifts (2 in x) are required.
1745 set_pbc_dd(&pbc, fr->pbcType, DOMAINDECOMP(cr) ? cr->dd->numCells : nullptr, TRUE, box);
1748 for (int mtsIndex = 0; mtsIndex < (fr->useMts && stepWork.computeSlowForces ? 2 : 1); mtsIndex++)
1750 ListedForces& listedForces = fr->listedForces[mtsIndex];
1751 ForceOutputs& forceOut = (mtsIndex == 0 ? forceOutMtsLevel0 : *forceOutMtsLevel1);
1752 listedForces.calculate(
1753 wcycle, box, inputrec->fepvals, cr, ms, x, xWholeMolecules, fr->fcdata.get(),
1754 hist, &forceOut, fr, &pbc, enerd, nrnb, lambda.data(), mdatoms,
1755 DOMAINDECOMP(cr) ? cr->dd->globalAtomIndices.data() : nullptr, stepWork);
1759 if (stepWork.computeSlowForces)
1761 calculateLongRangeNonbondeds(fr, inputrec, cr, nrnb, wcycle, mdatoms,
1762 x.unpaddedConstArrayRef(), &forceOutMtsLevel1->forceWithVirial(),
1763 enerd, box, lambda.data(), as_rvec_array(dipoleData.muStateAB),
1764 stepWork, ddBalanceRegionHandler);
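// This computes the CPU-side long-range part (PME mesh when not offloaded,
// plus Ewald corrections); under MTS these contributions count as slow forces.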
1767 wallcycle_stop(wcycle, ewcFORCE);
1769 // VdW dispersion correction, only computed on master rank to avoid double counting
1770 if ((stepWork.computeEnergy || stepWork.computeVirial) && fr->dispersionCorrection && MASTER(cr))
1772 // Calculate long range corrections to pressure and energy
1773 const DispersionCorrection::Correction correction =
1774 fr->dispersionCorrection->calculate(box, lambda[efptVDW]);
1776 if (stepWork.computeEnergy)
1778 enerd->term[F_DISPCORR] = correction.energy;
1779 enerd->term[F_DVDL_VDW] += correction.dvdl;
1780 enerd->dvdl_lin[efptVDW] += correction.dvdl;
1782 if (stepWork.computeVirial)
1784 correction.correctVirial(vir_force);
1785 enerd->term[F_PDISPCORR] = correction.pressure;
1789 computeSpecialForces(fplog, cr, inputrec, awh, enforcedRotation, imdSession, pull_work, step, t,
1790 wcycle, fr->forceProviders, box, x.unpaddedArrayRef(), mdatoms, lambda, stepWork,
1791 &forceOutMtsLevel0.forceWithVirial(), enerd, ed, stepWork.doNeighborSearch);
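// "Special" forces here cover, among others, COM pulling, AWH biasing, enforced
// rotation, IMD and the registered force providers (e.g. electric fields).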
1793 GMX_ASSERT(!(nonbondedAtMtsLevel1 && stepWork.useGpuFBufferOps),
1794 "The schedule below does not allow for nonbonded MTS with GPU buffer ops");
1795 GMX_ASSERT(!(nonbondedAtMtsLevel1 && useGpuForcesHaloExchange),
1796 "The schedule below does not allow for nonbonded MTS with GPU halo exchange");
1797 // Will store the number of cycles spent waiting for the GPU,
1798 // which is later used in the DLB accounting.
1799 float cycles_wait_gpu = 0;
1800 if (useOrEmulateGpuNb && stepWork.computeNonbondedForces)
1802 auto& forceWithShiftForces = forceOutNonbonded->forceWithShiftForces();
1804 /* wait for non-local forces (or calculate in emulation mode) */
1805 if (havePPDomainDecomposition(cr))
1807 if (simulationWork.useGpuNonbonded)
1809 cycles_wait_gpu += Nbnxm::gpu_wait_finish_task(
1810 nbv->gpu_nbv, stepWork, AtomLocality::NonLocal, enerd->grpp.ener[egLJSR].data(),
1811 enerd->grpp.ener[egCOULSR].data(), forceWithShiftForces.shiftForces(), wcycle);
1813 else
1815 wallcycle_start_nocount(wcycle, ewcFORCE);
1816 do_nb_verlet(fr, ic, enerd, stepWork, InteractionLocality::NonLocal, enbvClearFYes,
1817 step, nrnb, wcycle);
1818 wallcycle_stop(wcycle, ewcFORCE);
1821 if (stepWork.useGpuFBufferOps)
1823 // TODO: move this into DomainLifetimeWorkload, including the second part of the
1824 // condition. The bonded and free energy CPU tasks can have non-local force
1825 // contributions which are a dependency for the GPU force reduction.
1826 bool haveNonLocalForceContribInCpuBuffer =
1827 domainWork.haveCpuBondedWork || domainWork.haveFreeEnergyWork;
1829 if (haveNonLocalForceContribInCpuBuffer)
1831 stateGpu->copyForcesToGpu(forceOutMtsLevel0.forceWithShiftForces().force(),
1832 AtomLocality::NonLocal);
1835 fr->gpuForceReduction[gmx::AtomLocality::NonLocal]->execute();
1837 if (!useGpuForcesHaloExchange)
1839 // copy from GPU input for dd_move_f()
1840 stateGpu->copyForcesFromGpu(forceOutMtsLevel0.forceWithShiftForces().force(),
1841 AtomLocality::NonLocal);
1844 else
1846 nbv->atomdata_add_nbat_f_to_f(AtomLocality::NonLocal, forceWithShiftForces.force());
1849 if (fr->nbv->emulateGpu() && stepWork.computeVirial)
1851 nbnxn_atomdata_add_nbat_fshift_to_fshift(*nbv->nbat, forceWithShiftForces.shiftForces());
1856 /* Combining the forces for multiple time stepping before the halo exchange, when possible,
1857 * avoids an extra halo exchange (when DD is used) and post-processing step.
1859 const bool combineMtsForcesBeforeHaloExchange =
1860 (stepWork.computeForces && fr->useMts && stepWork.computeSlowForces
1861 && (legacyFlags & GMX_FORCE_DO_NOT_NEED_NORMAL_FORCE) != 0
1862 && !(stepWork.computeVirial || simulationWork.useGpuNonbonded || useGpuPmeOnThisRank));
1863 if (combineMtsForcesBeforeHaloExchange)
1865 const int numAtoms = havePPDomainDecomposition(cr) ? dd_numAtomsZones(*cr->dd) : mdatoms->homenr;
1866 combineMtsForces(numAtoms, force.unpaddedArrayRef(), forceView->forceMtsCombined(),
1867 inputrec->mtsLevels[1].stepFactor);
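// A sketch of the combination presumably performed here, assuming a plain
// scaled accumulation over the numAtoms local atoms:
//     force[i] += inputrec->mtsLevels[1].stepFactor * forceMtsCombined[i];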
1870 if (havePPDomainDecomposition(cr))
1872 /* We are done with the CPU compute.
1873 * We will now communicate the non-local forces.
1874 * If we use a GPU this will overlap with GPU work, so in that case
1875 * we do not close the DD force balancing region here.
1877 ddBalanceRegionHandler.closeAfterForceComputationCpu();
1879 if (stepWork.computeForces)
1881 if (useGpuForcesHaloExchange)
1883 if (domainWork.haveCpuLocalForceWork)
1885 stateGpu->copyForcesToGpu(forceOutMtsLevel0.forceWithShiftForces().force(),
1886 AtomLocality::Local);
1888 communicateGpuHaloForces(*cr, domainWork.haveCpuLocalForceWork);
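// The boolean presumably tells the exchange whether the received halo forces
// must be accumulated on top of the CPU-produced local forces copied just above.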
1890 else
1892 if (stepWork.useGpuFBufferOps)
1894 stateGpu->waitForcesReadyOnHost(AtomLocality::NonLocal);
1897 // Without MTS, or with MTS at slow steps when the forces have not been
1898 // combined, we need to communicate the fast forces
1899 if (!fr->useMts || !combineMtsForcesBeforeHaloExchange)
1901 dd_move_f(cr->dd, &forceOutMtsLevel0.forceWithShiftForces(), wcycle);
1903 // With MTS we need to communicate the slow or combined (in forceOutMtsLevel1) forces
1904 if (fr->useMts && stepWork.computeSlowForces)
1906 dd_move_f(cr->dd, &forceOutMtsLevel1->forceWithShiftForces(), wcycle);
1912 // With both nonbonded and PME offloaded to a GPU on the same rank, we use
1913 // an alternating wait/reduction scheme.
1914 bool alternateGpuWait = (!c_disableAlternatingWait && useGpuPmeOnThisRank && simulationWork.useGpuNonbonded
1915 && !DOMAINDECOMP(cr) && !stepWork.useGpuFBufferOps);
1916 if (alternateGpuWait)
1918 alternatePmeNbGpuWaitReduce(fr->nbv.get(), fr->pmedata, forceOutNonbonded,
1919 forceOutMtsLevel1, enerd, lambda[efptCOUL], stepWork, wcycle);
1922 if (!alternateGpuWait && useGpuPmeOnThisRank)
1924 pme_gpu_wait_and_reduce(fr->pmedata, stepWork, wcycle,
1925 &forceOutMtsLevel1->forceWithVirial(), enerd, lambda[efptCOUL]);
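// Blocks until the PME GPU tasks complete, then reduces the PME forces (and,
// when requested, energies/virial) into the level-1 force output.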
1928 /* Wait for local GPU NB outputs on the non-alternating wait path */
1929 if (!alternateGpuWait && stepWork.computeNonbondedForces && simulationWork.useGpuNonbonded)
1931 /* Measured overhead on CUDA and OpenCL with(out) GPU sharing
1932 * is between 0.5 and 1.5 Mcycles. So 2 Mcycles is an overestimate,
1933 * but even with a step of 0.1 ms the difference is less than 1%
1934 * of the step time.
1936 const float gpuWaitApiOverheadMargin = 2e6F; /* cycles */
1937 const float waitCycles = Nbnxm::gpu_wait_finish_task(
1938 nbv->gpu_nbv, stepWork, AtomLocality::Local, enerd->grpp.ener[egLJSR].data(),
1939 enerd->grpp.ener[egCOULSR].data(),
1940 forceOutNonbonded->forceWithShiftForces().shiftForces(), wcycle);
1942 if (ddBalanceRegionHandler.useBalancingRegion())
1944 DdBalanceRegionWaitedForGpu waitedForGpu = DdBalanceRegionWaitedForGpu::yes;
1945 if (stepWork.computeForces && waitCycles <= gpuWaitApiOverheadMargin)
1947 /* We measured only a few cycles, so the kernel and transfer
1948 * likely finished earlier and there was no actual
1949 * wait time, only API call overhead.
1950 * The actual wait time could then be anywhere between 0 and
1951 * waitCycles, so we do not count this as having waited for the GPU.
1953 waitedForGpu = DdBalanceRegionWaitedForGpu::no;
1955 ddBalanceRegionHandler.closeAfterForceComputationGpu(cycles_wait_gpu, waitedForGpu);
1959 if (fr->nbv->emulateGpu())
1961 // NOTE: emulation kernel is not included in the balancing region,
1962 // but emulation mode does not target performance anyway
1963 wallcycle_start_nocount(wcycle, ewcFORCE);
1964 do_nb_verlet(fr, ic, enerd, stepWork, InteractionLocality::Local,
1965 DOMAINDECOMP(cr) ? enbvClearFNo : enbvClearFYes, step, nrnb, wcycle);
1966 wallcycle_stop(wcycle, ewcFORCE);
1969 // On the GPU PME-PP communication or GPU update path, receive forces from PME before GPU buffer ops
1970 // TODO refactor this and unify with below default-path call to the same function
1971 if (PAR(cr) && !thisRankHasDuty(cr, DUTY_PME) && stepWork.computeSlowForces
1972 && (simulationWork.useGpuPmePpCommunication || simulationWork.useGpuUpdate))
1974 /* In case of node-splitting, the PP nodes receive the long-range
1975 * forces, virial and energy from the PME nodes here.
1977 pme_receive_force_ener(fr, cr, &forceOutMtsLevel1->forceWithVirial(), enerd,
1978 simulationWork.useGpuPmePpCommunication,
1979 stepWork.useGpuPmeFReduction, wcycle);
1983 /* Do the nonbonded GPU (or emulation) force buffer reduction
1984 * on the non-alternating path. */
1985 GMX_ASSERT(!(nonbondedAtMtsLevel1 && stepWork.useGpuFBufferOps),
1986 "The schedule below does not allow for nonbonded MTS with GPU buffer ops");
1987 if (useOrEmulateGpuNb && !alternateGpuWait)
1989 if (stepWork.useGpuFBufferOps)
1991 ArrayRef<gmx::RVec> forceWithShift = forceOutNonbonded->forceWithShiftForces().force();
1993 // Flag to specify whether the CPU force buffer has contributions to
1994 // local atoms. This depends on whether there are CPU-based force tasks
1995 // or, when DD is active, whether the halo exchange has added contributions
1996 // from the non-local part.
1997 const bool haveLocalForceContribInCpuBuffer =
1998 (domainWork.haveCpuLocalForceWork || havePPDomainDecomposition(cr));
2000 // TODO: move these steps as early as possible:
2001 // - CPU f H2D should happen as soon as all CPU-side forces are done
2002 // - the wait for the force reduction does not need to block the host (at least not here; it is
2003 // sufficient to wait before the next CPU task that consumes the forces: vsite spread or update)
2004 // - the copy is not performed if the GPU force halo exchange is active, because it would overwrite
2005 // the result of the halo exchange. In that case the copy is instead performed above, before the exchange.
2006 // These should be unified.
2007 if (haveLocalForceContribInCpuBuffer && !useGpuForcesHaloExchange)
2009 // Note: AtomLocality::All is used for the non-DD case because
2010 // copyForcesToGpu() then uses a separate stream, which allows the
2011 // CPU force H2D to overlap with GPU force tasks on all streams, including
2012 // those in the local stream, which would otherwise be implicit dependencies
2013 // for the transfer and would prevent overlap.
2014 auto locality = havePPDomainDecomposition(cr) ? AtomLocality::Local : AtomLocality::All;
2016 stateGpu->copyForcesToGpu(forceWithShift, locality);
2019 if (stepWork.computeNonbondedForces)
2021 fr->gpuForceReduction[gmx::AtomLocality::Local]->execute();
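// The device-side reduction sums the nbnxm-layout nonbonded forces, any staged
// CPU forces, and (when applicable) PME forces into the final force buffer.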
2024 // Copy forces to the host if they are needed for the update or if virtual sites are enabled.
2025 // If there are vsites, we need to copy forces every step to spread the vsite forces on the host.
2026 // TODO: Once the output flags are included in the step workload, this copy can be combined with the
2027 // copy call done in sim_utils(...) for the output.
2028 // NOTE: If there are virtual sites, the forces are modified on the host after this D2H copy. Hence,
2029 // they should not be copied in do_md(...) for the output.
2030 if (!simulationWork.useGpuUpdate || vsite)
2032 stateGpu->copyForcesFromGpu(forceWithShift, AtomLocality::Local);
2033 stateGpu->waitForcesReadyOnHost(AtomLocality::Local);
2036 else if (stepWork.computeNonbondedForces)
2038 ArrayRef<gmx::RVec> forceWithShift = forceOutNonbonded->forceWithShiftForces().force();
2039 nbv->atomdata_add_nbat_f_to_f(AtomLocality::Local, forceWithShift);
2043 launchGpuEndOfStepTasks(nbv, fr->gpuBonded, fr->pmedata, enerd, *runScheduleWork,
2044 useGpuPmeOnThisRank, step, wcycle);
2046 if (DOMAINDECOMP(cr))
2048 dd_force_flop_stop(cr->dd, nrnb);
2051 const bool haveCombinedMtsForces = (stepWork.computeForces && fr->useMts && stepWork.computeSlowForces
2052 && combineMtsForcesBeforeHaloExchange);
2053 if (stepWork.computeForces)
2055 postProcessForceWithShiftForces(nrnb, wcycle, box, x.unpaddedArrayRef(), &forceOutMtsLevel0,
2056 vir_force, *mdatoms, *fr, vsite, stepWork);
2058 if (fr->useMts && stepWork.computeSlowForces && !haveCombinedMtsForces)
2060 postProcessForceWithShiftForces(nrnb, wcycle, box, x.unpaddedArrayRef(), forceOutMtsLevel1,
2061 vir_force, *mdatoms, *fr, vsite, stepWork);
2065 // TODO refactor this and unify with above GPU PME-PP / GPU update path call to the same function
2066 if (PAR(cr) && !thisRankHasDuty(cr, DUTY_PME) && !simulationWork.useGpuPmePpCommunication
2067 && !simulationWork.useGpuUpdate && stepWork.computeSlowForces)
2069 /* In case of node-splitting, the PP nodes receive the long-range
2070 * forces, virial and energy from the PME nodes here.
2072 pme_receive_force_ener(fr, cr, &forceOutMtsLevel1->forceWithVirial(), enerd,
2073 simulationWork.useGpuPmePpCommunication, false, wcycle);
2076 if (stepWork.computeForces)
2078 /* If we don't use MTS, or if we already combined the MTS forces before, we only
2079 * need to post-process one ForceOutputs object here, called forceOutCombined;
2080 * otherwise we have to post-process two outputs and then combine them.
2082 ForceOutputs& forceOutCombined = (haveCombinedMtsForces ? forceOutMts.value() : forceOutMtsLevel0);
2083 postProcessForces(cr, step, nrnb, wcycle, box, x.unpaddedArrayRef(), &forceOutCombined,
2084 vir_force, mdatoms, fr, vsite, stepWork);
2086 if (fr->useMts && stepWork.computeSlowForces && !haveCombinedMtsForces)
2088 postProcessForces(cr, step, nrnb, wcycle, box, x.unpaddedArrayRef(), forceOutMtsLevel1,
2089 vir_force, mdatoms, fr, vsite, stepWork);
2091 combineMtsForces(mdatoms->homenr, force.unpaddedArrayRef(),
2092 forceView->forceMtsCombined(), inputrec->mtsLevels[1].stepFactor);
2096 if (stepWork.computeEnergy)
2098 /* Compute the final potential energy terms */
2099 accumulatePotentialEnergies(enerd, lambda, inputrec->fepvals);
2101 if (!EI_TPI(inputrec->eI))
2103 checkPotentialEnergyValidity(step, *enerd, *inputrec);
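// This guards against non-finite potential energies (typically a sign of an
// exploding system), so the run aborts with a clear error instead of corrupt output.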
2107 /* In case we don't have constraints and are using GPUs, the next balancing
2108 * region starts here.
2109 * Some "special" work at the end of do_force, such as vsite spread,
2110 * virial calculation and COM pulling, is thus not included in
2111 * the balance timing, which is OK as most of those tasks do communication.
2113 ddBalanceRegionHandler.openBeforeForceComputationCpu(DdAllowBalanceRegionReopen::no);