/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "gromacs/awh/awh.h"
#include "gromacs/domdec/dlbtiming.h"
#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/domdec/gpuhaloexchange.h"
#include "gromacs/domdec/partition.h"
#include "gromacs/essentialdynamics/edsam.h"
#include "gromacs/ewald/pme.h"
#include "gromacs/ewald/pme_pp_comm_gpu.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nonbonded/nb_free_energy.h"
#include "gromacs/gmxlib/nonbonded/nb_kernel.h"
#include "gromacs/gmxlib/nonbonded/nonbonded.h"
#include "gromacs/gpu_utils/gpu_utils.h"
#include "gromacs/imd/imd.h"
#include "gromacs/listed_forces/disre.h"
#include "gromacs/listed_forces/gpubonded.h"
#include "gromacs/listed_forces/listed_forces.h"
#include "gromacs/listed_forces/manage_threading.h"
#include "gromacs/listed_forces/orires.h"
#include "gromacs/math/arrayrefwithpadding.h"
#include "gromacs/math/functions.h"
#include "gromacs/math/units.h"
#include "gromacs/math/vec.h"
#include "gromacs/math/vecdump.h"
#include "gromacs/mdlib/calcmu.h"
#include "gromacs/mdlib/calcvir.h"
#include "gromacs/mdlib/constr.h"
#include "gromacs/mdlib/enerdata_utils.h"
#include "gromacs/mdlib/force.h"
#include "gromacs/mdlib/forcerec.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdlib/qmmm.h"
#include "gromacs/mdlib/update.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/enerdata.h"
#include "gromacs/mdtypes/forceoutput.h"
#include "gromacs/mdtypes/iforceprovider.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/mdtypes/simulation_workload.h"
#include "gromacs/mdtypes/state.h"
#include "gromacs/mdtypes/state_propagator_data_gpu.h"
#include "gromacs/nbnxm/gpu_data_mgmt.h"
#include "gromacs/nbnxm/nbnxm.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/pbcutil/mshift.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/pulling/pull.h"
#include "gromacs/pulling/pull_rotation.h"
#include "gromacs/timing/cyclecounter.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/timing/wallcyclereporting.h"
#include "gromacs/timing/walltime_accounting.h"
#include "gromacs/topology/topology.h"
#include "gromacs/utility/arrayref.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/fixedcapacityvector.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/utility/strconvert.h"
#include "gromacs/utility/sysinfo.h"
using gmx::ForceOutputs;
using gmx::StepWorkload;
using gmx::DomainLifetimeWorkload;
using gmx::SimulationWorkload;
using gmx::AtomLocality;
using gmx::InteractionLocality;
// TODO: this environment variable allows us to verify before release
// that on less common architectures the total cost of polling is not larger than
// a blocking wait (so polling does not introduce overhead when the static
// PME-first ordering would suffice).
static const bool c_disableAlternatingWait = (getenv("GMX_DISABLE_ALTERNATING_GPU_WAIT") != nullptr);
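
//! \brief Add the forces in \p forceToAdd to \p f, OpenMP-parallelized over the buffer.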
static void sum_forces(rvec f[], gmx::ArrayRef<const gmx::RVec> forceToAdd)
{
    const int end = forceToAdd.size();

    int gmx_unused nt = gmx_omp_nthreads_get(emntDefault);
#pragma omp parallel for num_threads(nt) schedule(static)
    for (int i = 0; i < end; i++)
    {
        rvec_inc(f[i], forceToAdd[i]);
    }
}
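
//! \brief Compute the short-range virial contribution from the shift forces and the forces on the local atoms.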
static void calc_virial(int start, int homenr, const rvec x[],
                        const gmx::ForceWithShiftForces &forceWithShiftForces,
                        tensor vir_part, const t_graph *graph, const matrix box,
                        t_nrnb *nrnb, const t_forcerec *fr, int ePBC)
{
    /* The short-range virial from surrounding boxes */
    const rvec *fshift = as_rvec_array(forceWithShiftForces.shiftForces().data());
    calc_vir(SHIFTS, fr->shift_vec, fshift, vir_part, ePBC == epbcSCREW, box);
    inc_nrnb(nrnb, eNR_VIRIAL, SHIFTS);

    /* Calculate partial virial, for local atoms only, based on short range.
     * Total virial is computed in global_stat, called from do_md
     */
    const rvec *f = as_rvec_array(forceWithShiftForces.force().data());
    f_calc_vir(start, start + homenr, x, f, vir_part, graph, box);
    inc_nrnb(nrnb, eNR_VIRIAL, homenr);

    if (debug)
    {
        pr_rvecs(debug, 0, "vir_part", vir_part, DIM);
    }
}
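
//! \brief Compute the COM pull potential and forces, accumulating the energy and dV/dlambda into \p enerd.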
static void pull_potential_wrapper(const t_commrec *cr,
                                   const t_inputrec *ir,
                                   const matrix box, gmx::ArrayRef<const gmx::RVec> x,
                                   gmx::ForceWithVirial *force,
                                   const t_mdatoms *mdatoms,
                                   gmx_enerdata_t *enerd,
                                   pull_t *pull_work,
                                   const real *lambda,
                                   double t,
                                   gmx_wallcycle_t wcycle)
{
    t_pbc pbc;
    real  dvdl;

    /* Calculate the center of mass forces, this requires communication,
     * which is why pull_potential is called close to other communication.
     */
    wallcycle_start(wcycle, ewcPULLPOT);
    set_pbc(&pbc, ir->ePBC, box);
    dvdl = 0;
    enerd->term[F_COM_PULL] +=
        pull_potential(pull_work, mdatoms, &pbc,
                       cr, t, lambda[efptRESTRAINT], as_rvec_array(x.data()), force, &dvdl);
    enerd->dvdl_lin[efptRESTRAINT] += dvdl;
    wallcycle_stop(wcycle, ewcPULLPOT);
}
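
//! \brief Receive the long-range forces, virial and energies from the PME ranks and accumulate them into \p forceWithVirial and \p enerd.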
static void pme_receive_force_ener(t_forcerec *fr,
                                   const t_commrec *cr,
                                   gmx::ForceWithVirial *forceWithVirial,
                                   gmx_enerdata_t *enerd,
                                   bool useGpuPmePpComms,
                                   bool receivePmeForceToGpu,
                                   gmx_wallcycle_t wcycle)
{
    real  e_q, e_lj, dvdl_q, dvdl_lj;
    float cycles_ppdpme, cycles_seppme;

    cycles_ppdpme = wallcycle_stop(wcycle, ewcPPDURINGPME);
    dd_cycles_add(cr->dd, cycles_ppdpme, ddCyclPPduringPME);

    /* In case of node-splitting, the PP nodes receive the long-range
     * forces, virial and energy from the PME nodes here.
     */
    wallcycle_start(wcycle, ewcPP_PMEWAITRECVF);
    dvdl_q  = 0;
    dvdl_lj = 0;
    gmx_pme_receive_f(fr->pmePpCommGpu.get(),
                      cr, forceWithVirial, &e_q, &e_lj, &dvdl_q, &dvdl_lj,
                      useGpuPmePpComms, receivePmeForceToGpu, &cycles_seppme);
    enerd->term[F_COUL_RECIP] += e_q;
    enerd->term[F_LJ_RECIP]   += e_lj;
    enerd->dvdl_lin[efptCOUL] += dvdl_q;
    enerd->dvdl_lin[efptVDW]  += dvdl_lj;

    dd_cycles_add(cr->dd, cycles_seppme, ddCyclPME);
    wallcycle_stop(wcycle, ewcPP_PMEWAITRECVF);
}
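
//! \brief Print atoms with a force larger than \p forceTolerance to \p fp, and issue a fatal error when non-finite forces are detected.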
static void print_large_forces(FILE *fp,
                               const t_mdatoms *md,
                               const t_commrec *cr,
                               int64_t step,
                               real forceTolerance,
                               const rvec *x,
                               const rvec *f)
{
    real       force2Tolerance = gmx::square(forceTolerance);
    gmx::index numNonFinite    = 0;
    for (int i = 0; i < md->homenr; i++)
    {
        real force2    = norm2(f[i]);
        bool nonFinite = !std::isfinite(force2);
        if (force2 >= force2Tolerance || nonFinite)
        {
            fprintf(fp, "step %" PRId64 " atom %6d  x %8.3f %8.3f %8.3f  force %12.5e\n",
                    step,
                    ddglatnr(cr->dd, i), x[i][XX], x[i][YY], x[i][ZZ], std::sqrt(force2));
        }
        if (nonFinite)
        {
            numNonFinite++;
        }
    }
    if (numNonFinite > 0)
    {
        /* Note that with MPI this fatal call on one rank might interrupt
         * the printing on other ranks. But we can only avoid that with
         * an expensive MPI barrier that we would need at each step.
         */
        gmx_fatal(FARGS, "At step %" PRId64 " detected non-finite forces on %td atoms", step, numNonFinite);
    }
}
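
//! \brief Post-process the forces: spread virtual-site contributions, reduce direct-virial force buffers into the main force and virial outputs, and optionally print large forces.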
static void post_process_forces(const t_commrec *cr,
                                int64_t step,
                                t_nrnb *nrnb,
                                gmx_wallcycle_t wcycle,
                                const gmx_localtop_t *top,
                                const matrix box,
                                const rvec x[],
                                ForceOutputs *forceOutputs,
                                tensor vir_force,
                                const t_mdatoms *mdatoms,
                                const t_graph *graph,
                                const t_forcerec *fr,
                                const gmx_vsite_t *vsite,
                                const StepWorkload &stepWork)
{
    rvec *f = as_rvec_array(forceOutputs->forceWithShiftForces().force().data());

    if (fr->haveDirectVirialContributions)
    {
        auto &forceWithVirial = forceOutputs->forceWithVirial();
        rvec *fDirectVir      = as_rvec_array(forceWithVirial.force_.data());

        if (vsite)
        {
            /* Spread the mesh force on virtual sites to the other particles...
             * This is parallelized. MPI communication is performed
             * if the constructing atoms aren't local.
             */
            matrix virial = { { 0 } };
            spread_vsite_f(vsite, x, fDirectVir, nullptr,
                           stepWork.computeVirial, virial,
                           nrnb,
                           &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr, wcycle);
            forceWithVirial.addVirialContribution(virial);
        }

        if (stepWork.computeVirial)
        {
            /* Now add the forces, this is local */
            sum_forces(f, forceWithVirial.force_);

            /* Add the direct virial contributions */
            GMX_ASSERT(forceWithVirial.computeVirial_, "forceWithVirial should request virial computation when we request the virial");
            m_add(vir_force, forceWithVirial.getVirial(), vir_force);

            if (debug)
            {
                pr_rvecs(debug, 0, "vir_force", vir_force, DIM);
            }
        }
    }

    if (fr->print_force >= 0)
    {
        print_large_forces(stderr, mdatoms, cr, step, fr->print_force, x, f);
    }
}
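
//! \brief Dispatch the non-bonded kernel for \p ilocality, handling dynamic CPU pair-list pruning when a pruning step is due.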
static void do_nb_verlet(t_forcerec *fr,
                         const interaction_const_t *ic,
                         gmx_enerdata_t *enerd,
                         const StepWorkload &stepWork,
                         const InteractionLocality ilocality,
                         const int clearF,
                         const int64_t step,
                         t_nrnb *nrnb,
                         gmx_wallcycle_t wcycle)
{
    if (!stepWork.computeNonbondedForces)
    {
        /* skip non-bonded calculation */
        return;
    }

    nonbonded_verlet_t *nbv = fr->nbv.get();

    /* GPU kernel launch overhead is already timed separately */
    if (fr->cutoff_scheme != ecutsVERLET)
    {
        gmx_incons("Invalid cut-off scheme passed!");
    }

    if (!nbv->useGpu())
    {
        /* When dynamic pair-list pruning is requested, we need to prune
         * at nstlistPrune steps.
         */
        if (nbv->isDynamicPruningStepCpu(step))
        {
            /* Prune the pair-list beyond fr->ic->rlistPrune using
             * the current coordinates of the atoms.
             */
            wallcycle_sub_start(wcycle, ewcsNONBONDED_PRUNING);
            nbv->dispatchPruneKernelCpu(ilocality, fr->shift_vec);
            wallcycle_sub_stop(wcycle, ewcsNONBONDED_PRUNING);
        }
    }

    nbv->dispatchNonbondedKernel(ilocality, *ic, stepWork, clearF, *fr, enerd, nrnb);
}
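
//! \brief Zero \p n rvecs in \p v, using OpenMP when the task is large enough to benefit.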
static inline void clear_rvecs_omp(int n, rvec v[])
{
    int nth = gmx_omp_nthreads_get_simple_rvec_task(emntDefault, n);

    /* Note that we would like to avoid this conditional by putting it
     * into the omp pragma instead, but then we still take the full
     * omp parallel for overhead (at least with gcc5).
     */
    if (nth == 1)
    {
        for (int i = 0; i < n; i++)
        {
            clear_rvec(v[i]);
        }
    }
    else
    {
#pragma omp parallel for num_threads(nth) schedule(static)
        for (int i = 0; i < n; i++)
        {
            clear_rvec(v[i]);
        }
    }
}
/*! \brief Return an estimate of the average kinetic energy or 0 when unreliable
 *
 * \param groupOptions  Group options, containing T-coupling options
 */
static real averageKineticEnergyEstimate(const t_grpopts &groupOptions)
{
    real nrdfCoupled   = 0;
    real nrdfUncoupled = 0;
    real kineticEnergy = 0;
    for (int g = 0; g < groupOptions.ngtc; g++)
    {
        if (groupOptions.tau_t[g] >= 0)
        {
            nrdfCoupled   += groupOptions.nrdf[g];
            kineticEnergy += groupOptions.nrdf[g]*0.5*groupOptions.ref_t[g]*BOLTZ;
        }
        else
        {
            nrdfUncoupled += groupOptions.nrdf[g];
        }
    }

    /* This conditional with > also catches nrdf=0 */
    if (nrdfCoupled > nrdfUncoupled)
    {
        return kineticEnergy*(nrdfCoupled + nrdfUncoupled)/nrdfCoupled;
    }
    else
    {
        return 0;
    }
}
/*! \brief This routine checks that the potential energy is finite.
 *
 * Always checks that the potential energy is finite. If step equals
 * inputrec.init_step also checks that the magnitude of the potential energy
 * is reasonable. Terminates with a fatal error when a check fails.
 * Note that passing this check does not guarantee finite forces,
 * since those use slightly different arithmetics. But in most cases
 * there is just a narrow coordinate range where forces are not finite
 * and energies are finite.
 *
 * \param[in] step      The step number, used for checking and printing
 * \param[in] enerd     The energy data; the non-bonded group energies need to be added to enerd.term[F_EPOT] before calling this routine
 * \param[in] inputrec  The input record
 */
static void checkPotentialEnergyValidity(int64_t step,
                                         const gmx_enerdata_t &enerd,
                                         const t_inputrec &inputrec)
{
    /* Threshold valid for comparing absolute potential energy against
     * the kinetic energy. Normally one should not consider absolute
     * potential energy values, but with a factor of one million
     * we should never get false positives.
     */
    constexpr real c_thresholdFactor = 1e6;

    bool energyIsNotFinite    = !std::isfinite(enerd.term[F_EPOT]);
    real averageKineticEnergy = 0;
    /* We only check for large potential energy at the initial step,
     * because that is by far the most likely step for this to occur
     * and because computing the average kinetic energy is not free.
     * Note: nstcalcenergy >> 1 often does not allow to catch large energies
     * before they become NaN.
     */
    if (step == inputrec.init_step && EI_DYNAMICS(inputrec.eI))
    {
        averageKineticEnergy = averageKineticEnergyEstimate(inputrec.opts);
    }

    if (energyIsNotFinite || (averageKineticEnergy > 0 &&
                              enerd.term[F_EPOT] > c_thresholdFactor*averageKineticEnergy))
    {
        gmx_fatal(FARGS, "Step %" PRId64 ": The total potential energy is %g, which is %s. The LJ and electrostatic contributions to the energy are %g and %g, respectively. A %s potential energy can be caused by overlapping interactions in bonded interactions or very large%s coordinate values. Usually this is caused by a badly- or non-equilibrated initial configuration, incorrect interactions or parameters in the topology.",
                  step,
                  enerd.term[F_EPOT],
                  energyIsNotFinite ? "not finite" : "extremely high",
                  enerd.term[F_LJ],
                  enerd.term[F_COUL_SR],
                  energyIsNotFinite ? "non-finite" : "very high",
                  energyIsNotFinite ? " or Nan" : "");
    }
}
/*! \brief Return true if there are special forces computed this step.
 *
 * The conditionals exactly correspond to those in computeSpecialForces().
 */
static bool
haveSpecialForces(const t_inputrec &inputrec,
                  const gmx::ForceProviders &forceProviders,
                  const pull_t *pull_work,
                  const bool computeForces,
                  const gmx_edsam *ed)
{
    return
        ((computeForces && forceProviders.hasForceProvider()) ||   // forceProviders
         (inputrec.bPull && pull_have_potential(pull_work)) ||     // pull
         inputrec.bRot ||                                          // enforced rotation
         (ed != nullptr) ||                                        // flooding
         (inputrec.bIMD && computeForces));                        // IMD
}
/*! \brief Compute forces and/or energies for special algorithms
 *
 * The intention is to collect all calls to algorithms that compute
 * forces on local atoms only and that do not contribute to the local
 * virial sum (but add their virial contribution separately).
 * Eventually these should likely all become ForceProviders.
 * Within this function the intention is to have algorithms that do
 * global communication at the end, so global barriers within the MD loop
 * are as close together as possible.
 *
 * \param[in]     fplog             The log file
 * \param[in]     cr                The communication record
 * \param[in]     inputrec          The input record
 * \param[in]     awh               The Awh module (nullptr if none in use).
 * \param[in]     enforcedRotation  Enforced rotation module.
 * \param[in]     imdSession        The IMD session
 * \param[in]     pull_work         The pull work structure.
 * \param[in]     step              The current MD step
 * \param[in]     t                 The current time
 * \param[in,out] wcycle            Wallcycle accounting struct
 * \param[in,out] forceProviders    Pointer to a list of force providers
 * \param[in]     box               The unit cell
 * \param[in]     x                 The coordinates
 * \param[in]     mdatoms           Per atom properties
 * \param[in]     lambda            Array of free-energy lambda values
 * \param[in]     stepWork          Step schedule flags
 * \param[in,out] forceWithVirial   Force and virial buffers
 * \param[in,out] enerd             Energy buffer
 * \param[in,out] ed                Essential dynamics pointer
 * \param[in]     didNeighborSearch Tells if we did neighbor searching this step, used for ED sampling
 *
 * \todo Remove didNeighborSearch, which is used incorrectly.
 * \todo Convert all other algorithms called here to ForceProviders.
 */
static void
computeSpecialForces(FILE *fplog,
                     const t_commrec *cr,
                     const t_inputrec *inputrec,
                     gmx::Awh *awh,
                     gmx_enfrot *enforcedRotation,
                     gmx::ImdSession *imdSession,
                     pull_t *pull_work,
                     int64_t step,
                     double t,
                     gmx_wallcycle_t wcycle,
                     gmx::ForceProviders *forceProviders,
                     const matrix box,
                     gmx::ArrayRef<const gmx::RVec> x,
                     const t_mdatoms *mdatoms,
                     const real *lambda,
                     const StepWorkload &stepWork,
                     gmx::ForceWithVirial *forceWithVirial,
                     gmx_enerdata_t *enerd,
                     gmx_edsam *ed,
                     bool didNeighborSearch)
{
    /* NOTE: Currently all ForceProviders only provide forces.
     *       When they also provide energies, remove this conditional.
     */
    if (stepWork.computeForces)
    {
        gmx::ForceProviderInput  forceProviderInput(x, *mdatoms, t, box, *cr);
        gmx::ForceProviderOutput forceProviderOutput(forceWithVirial, enerd);

        /* Collect forces from modules */
        forceProviders->calculateForces(forceProviderInput, &forceProviderOutput);
    }

    if (inputrec->bPull && pull_have_potential(pull_work))
    {
        pull_potential_wrapper(cr, inputrec, box, x,
                               forceWithVirial,
                               mdatoms, enerd, pull_work, lambda, t,
                               wcycle);

        if (awh)
        {
            enerd->term[F_COM_PULL] +=
                awh->applyBiasForcesAndUpdateBias(inputrec->ePBC, *mdatoms, box,
                                                  forceWithVirial,
                                                  t, step, wcycle, fplog);
        }
    }

    rvec *f = as_rvec_array(forceWithVirial->force_.data());

    /* Add the forces from enforced rotation potentials (if any) */
    if (inputrec->bRot)
    {
        wallcycle_start(wcycle, ewcROTadd);
        enerd->term[F_COM_PULL] += add_rot_forces(enforcedRotation, f, cr, step, t);
        wallcycle_stop(wcycle, ewcROTadd);
    }

    if (ed)
    {
        /* Note that since init_edsam() is called after the initialization
         * of forcerec, edsam doesn't request the noVirSum force buffer.
         * Thus if no other algorithm (e.g. PME) requires it, the forces
         * here will contribute to the virial.
         */
        do_flood(cr, inputrec, as_rvec_array(x.data()), f, ed, box, step, didNeighborSearch);
    }

    /* Add forces from interactive molecular dynamics (IMD), if any */
    if (inputrec->bIMD && stepWork.computeForces)
    {
        imdSession->applyForces(f);
    }
}
/*! \brief Makes PME flags from StepWorkload data.
 *
 * \param[in]  stepWork     Step schedule flags
 */
static int makePmeFlags(const StepWorkload &stepWork)
{
    return GMX_PME_SPREAD | GMX_PME_SOLVE |
           (stepWork.computeVirial ? GMX_PME_CALC_ENER_VIR : 0) |
           (stepWork.computeEnergy ? GMX_PME_CALC_ENER_VIR : 0) |
           (stepWork.computeForces ? GMX_PME_CALC_F : 0);
}
/*! \brief Launch the prepare_step and spread stages of PME GPU.
 *
 * \param[in]  pmedata        The PME structure
 * \param[in]  box            The box matrix
 * \param[in]  stepWork       Step schedule flags
 * \param[in]  pmeFlags       PME flags
 * \param[in]  xReadyOnDevice Event synchronizer indicating that the coordinates are ready in the device memory.
 * \param[in]  wcycle         The wallcycle structure
 */
static inline void launchPmeGpuSpread(gmx_pme_t *pmedata,
                                      const matrix box,
                                      const StepWorkload &stepWork,
                                      int pmeFlags,
                                      GpuEventSynchronizer *xReadyOnDevice,
                                      gmx_wallcycle_t wcycle)
{
    pme_gpu_prepare_computation(pmedata, stepWork.haveDynamicBox, box, wcycle, pmeFlags, stepWork.useGpuPmeFReduction);
    pme_gpu_launch_spread(pmedata, xReadyOnDevice, wcycle);
}
/*! \brief Launch the FFT and gather stages of PME GPU
 *
 * This function only implements setting the output forces (no accumulation).
 *
 * \param[in]  pmedata        The PME structure
 * \param[in]  wcycle         The wallcycle structure
 */
static void launchPmeGpuFftAndGather(gmx_pme_t *pmedata,
                                     gmx_wallcycle_t wcycle)
{
    pme_gpu_launch_complex_transforms(pmedata, wcycle);
    pme_gpu_launch_gather(pmedata, wcycle, PmeForceOutputHandling::Set);
}
/*! \brief
 *  Polling wait for either of the PME or nonbonded GPU tasks.
 *
 * Instead of a static order in waiting for GPU tasks, this function
 * polls checking which of the two tasks completes first, and does the
 * associated force buffer reduction overlapped with the other task.
 * By doing that, unlike static scheduling order, it can always overlap
 * one of the reductions, regardless of the GPU task completion order.
 *
 * \param[in]     nbv              Nonbonded verlet structure
 * \param[in,out] pmedata          PME module data
 * \param[in,out] forceOutputs     Output buffer for the forces and virial
 * \param[in,out] enerd            Energy data structure results are reduced into
 * \param[in]     stepWork         Step schedule flags
 * \param[in]     pmeFlags         PME flags
 * \param[in]     wcycle           The wallcycle structure
 */
static void alternatePmeNbGpuWaitReduce(nonbonded_verlet_t *nbv,
                                        gmx_pme_t *pmedata,
                                        gmx::ForceOutputs *forceOutputs,
                                        gmx_enerdata_t *enerd,
                                        const StepWorkload &stepWork,
                                        int pmeFlags,
                                        gmx_wallcycle_t wcycle)
{
    bool isPmeGpuDone = false;
    bool isNbGpuDone  = false;

    gmx::ForceWithShiftForces &forceWithShiftForces = forceOutputs->forceWithShiftForces();
    gmx::ForceWithVirial      &forceWithVirial      = forceOutputs->forceWithVirial();

    gmx::ArrayRef<const gmx::RVec> pmeGpuForces;

    while (!isPmeGpuDone || !isNbGpuDone)
    {
        if (!isPmeGpuDone)
        {
            GpuTaskCompletion completionType = (isNbGpuDone) ? GpuTaskCompletion::Wait : GpuTaskCompletion::Check;
            isPmeGpuDone = pme_gpu_try_finish_task(pmedata, pmeFlags, wcycle, &forceWithVirial, enerd, completionType);
        }

        if (!isNbGpuDone)
        {
            GpuTaskCompletion completionType = (isPmeGpuDone) ? GpuTaskCompletion::Wait : GpuTaskCompletion::Check;
            isNbGpuDone = Nbnxm::gpu_try_finish_task(nbv->gpu_nbv,
                                                     stepWork, AtomLocality::Local,
                                                     enerd->grpp.ener[egLJSR].data(),
                                                     enerd->grpp.ener[egCOULSR].data(),
                                                     forceWithShiftForces.shiftForces(), completionType, wcycle);

            if (isNbGpuDone)
            {
                nbv->atomdata_add_nbat_f_to_f(AtomLocality::Local,
                                              forceWithShiftForces.force());
            }
        }
    }
}
/*! \brief Set up the different force buffers; also does clearing.
 *
 * \param[in]  fr        force record pointer
 * \param[in]  pull_work The pull work object.
 * \param[in]  inputrec  input record
 * \param[in]  force     force array
 * \param[in]  stepWork  Step schedule flags
 * \param[out] wcycle    wallcycle recording structure
 *
 * \returns              Cleared force output structure
 */
static ForceOutputs
setupForceOutputs(t_forcerec *fr,
                  pull_t *pull_work,
                  const t_inputrec &inputrec,
                  gmx::ArrayRefWithPadding<gmx::RVec> force,
                  const StepWorkload &stepWork,
                  gmx_wallcycle_t wcycle)
{
    wallcycle_sub_start(wcycle, ewcsCLEAR_FORCE_BUFFER);

    /* NOTE: We assume fr->shiftForces is all zeros here */
    gmx::ForceWithShiftForces forceWithShiftForces(force, stepWork.computeVirial, fr->shiftForces);

    if (stepWork.computeForces)
    {
        /* Clear the short- and long-range forces */
        clear_rvecs_omp(fr->natoms_force_constr,
                        as_rvec_array(forceWithShiftForces.force().data()));
    }

    /* If we need to compute the virial, we might need a separate
     * force buffer for algorithms for which the virial is calculated
     * directly, such as PME. Otherwise, forceWithVirial uses the
     * the same force (f in legacy calls) buffer as other algorithms.
     */
    const bool useSeparateForceWithVirialBuffer = (stepWork.computeForces &&
                                                   (stepWork.computeVirial && fr->haveDirectVirialContributions));
    /* forceWithVirial uses the local atom range only */
    gmx::ForceWithVirial forceWithVirial(useSeparateForceWithVirialBuffer ?
                                         fr->forceBufferForDirectVirialContributions : force.unpaddedArrayRef(),
                                         stepWork.computeVirial);

    if (useSeparateForceWithVirialBuffer)
    {
        /* TODO: update comment
         * We only compute forces on local atoms. Note that vsites can
         * spread to non-local atoms, but that part of the buffer is
         * cleared separately in the vsite spreading code.
         */
        clear_rvecs_omp(forceWithVirial.force_.size(), as_rvec_array(forceWithVirial.force_.data()));
    }

    if (inputrec.bPull && pull_have_constraint(pull_work))
    {
        clear_pull_forces(pull_work);
    }

    wallcycle_sub_stop(wcycle, ewcsCLEAR_FORCE_BUFFER);

    return ForceOutputs(forceWithShiftForces, forceWithVirial);
}
/*! \brief Set up flags that have the lifetime of the domain indicating what type of work is there to compute.
 */
static DomainLifetimeWorkload
setupDomainLifetimeWorkload(const t_inputrec &inputrec,
                            const t_forcerec &fr,
                            const pull_t *pull_work,
                            const gmx_edsam *ed,
                            const t_idef &idef,
                            const t_fcdata &fcd,
                            const t_mdatoms &mdatoms,
                            const SimulationWorkload &simulationWork,
                            const StepWorkload &stepWork)
{
    DomainLifetimeWorkload domainWork;
    // Note that haveSpecialForces is constant over the whole run
    domainWork.haveSpecialForces      = haveSpecialForces(inputrec, *fr.forceProviders, pull_work, stepWork.computeForces, ed);
    domainWork.haveCpuBondedWork      = haveCpuBondeds(fr);
    domainWork.haveGpuBondedWork      = ((fr.gpuBonded != nullptr) && fr.gpuBonded->haveInteractions());
    domainWork.haveRestraintsWork     = haveRestraints(idef, fcd);
    domainWork.haveCpuListedForceWork = haveCpuListedForces(fr, idef, fcd);
    // Note that haveFreeEnergyWork is constant over the whole run
    domainWork.haveFreeEnergyWork     = (fr.efep != efepNO && mdatoms.nPerturbed != 0);
    // We assume we have local force work if there are CPU
    // force tasks including PME or nonbondeds.
    domainWork.haveCpuLocalForceWork  = domainWork.haveSpecialForces || domainWork.haveCpuListedForceWork || domainWork.haveFreeEnergyWork ||
                                        simulationWork.useCpuNonbonded || simulationWork.useCpuPme ||
                                        (EEL_PME_EWALD(inputrec.coulombtype) && (inputrec.ewald_geometry == eewg3DC ||
                                                                                 inputrec.epsilon_surface != 0));

    return domainWork;
}
/*! \brief Set up force flag struct from the force bitmask.
 *
 * \param[in]  legacyFlags     Force bitmask flags used to construct the new flags
 * \param[in]  isNonbondedOn   Global override, if false forces to turn off all nonbonded calculation.
 * \param[in]  simulationWork  Simulation workload description.
 * \param[in]  rankHasPmeDuty  If this rank computes PME.
 *
 * \returns New StepWorkload description.
 */
static StepWorkload
setupStepWorkload(const int legacyFlags,
                  const bool isNonbondedOn,
                  const SimulationWorkload &simulationWork,
                  const bool rankHasPmeDuty)
{
    StepWorkload flags;
    flags.stateChanged           = ((legacyFlags & GMX_FORCE_STATECHANGED) != 0);
    flags.haveDynamicBox         = ((legacyFlags & GMX_FORCE_DYNAMICBOX) != 0);
    flags.doNeighborSearch       = ((legacyFlags & GMX_FORCE_NS) != 0);
    flags.computeVirial          = ((legacyFlags & GMX_FORCE_VIRIAL) != 0);
    flags.computeEnergy          = ((legacyFlags & GMX_FORCE_ENERGY) != 0);
    flags.computeForces          = ((legacyFlags & GMX_FORCE_FORCES) != 0);
    flags.computeListedForces    = ((legacyFlags & GMX_FORCE_LISTED) != 0);
    flags.computeNonbondedForces = ((legacyFlags & GMX_FORCE_NONBONDED) != 0) && isNonbondedOn;
    flags.computeDhdl            = ((legacyFlags & GMX_FORCE_DHDL) != 0);

    if (simulationWork.useGpuBufferOps)
    {
        GMX_ASSERT(simulationWork.useGpuNonbonded, "Can only offload buffer ops if nonbonded computation is also offloaded");
    }
    flags.useGpuXBufferOps = simulationWork.useGpuBufferOps;
    // on virial steps the CPU reduction path is taken
    // TODO: remove flags.computeEnergy, ref #3128
    flags.useGpuFBufferOps    = simulationWork.useGpuBufferOps && !(flags.computeVirial || flags.computeEnergy);
    flags.useGpuPmeFReduction = flags.useGpuFBufferOps && (simulationWork.useGpuPme &&
                                                           (rankHasPmeDuty || simulationWork.useGpuPmePpCommunication));

    return flags;
}
/* \brief Launch end-of-step GPU tasks: buffer clearing and rolling pruning.
 *
 * TODO: eliminate the \p useGpuNonbonded and \p useGpuPme when these are
 * incorporated in DomainLifetimeWorkload.
 */
static void
launchGpuEndOfStepTasks(nonbonded_verlet_t *nbv,
                        gmx::GpuBonded *gpuBonded,
                        gmx_pme_t *pmedata,
                        gmx_enerdata_t *enerd,
                        const gmx::MdrunScheduleWorkload &runScheduleWork,
                        bool useGpuNonbonded,
                        bool useGpuPme,
                        int64_t step,
                        gmx_wallcycle_t wcycle)
{
    if (useGpuNonbonded)
    {
        /* Launch pruning before buffer clearing because the API overhead of the
         * clear kernel launches can leave the GPU idle while it could be running
         * the non-bonded task. */
        if (nbv->isDynamicPruningStepGpu(step))
        {
            nbv->dispatchPruneKernelGpu(step);
        }

        /* now clear the GPU outputs while we finish the step on the CPU */
        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
        wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);
        Nbnxm::gpu_clear_outputs(nbv->gpu_nbv, runScheduleWork.stepWork.computeVirial);
        wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU);
    }

    if (useGpuPme)
    {
        pme_gpu_reinit_computation(pmedata, wcycle);
    }

    if (runScheduleWork.domainWork.haveGpuBondedWork && runScheduleWork.stepWork.computeEnergy)
    {
        // in principle this should be included in the DD balancing region,
        // but generally it is infrequent so we'll omit it for the sake of
        // simpler code
        gpuBonded->waitAccumulateEnergyTerms(enerd);

        gpuBonded->clearEnergies();
    }
}
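
/*! \brief Compute forces (and, when requested, energies and the virial) for the current step.
 *
 * High-level force schedule for one MD step: sets up the step and domain
 * workloads, launches GPU and CPU non-bonded, PME and bonded tasks, performs
 * the required communication and reductions, and post-processes the forces.
 */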
void do_force(FILE *fplog,
              const t_commrec *cr,
              const gmx_multisim_t *ms,
              const t_inputrec *inputrec,
              gmx::Awh *awh,
              gmx_enfrot *enforcedRotation,
              gmx::ImdSession *imdSession,
              pull_t *pull_work,
              int64_t step,
              t_nrnb *nrnb,
              gmx_wallcycle_t wcycle,
              const gmx_localtop_t *top,
              const matrix box,
              gmx::ArrayRefWithPadding<gmx::RVec> x,
              history_t *hist,
              gmx::ArrayRefWithPadding<gmx::RVec> force,
              tensor vir_force,
              const t_mdatoms *mdatoms,
              gmx_enerdata_t *enerd,
              t_fcdata *fcd,
              gmx::ArrayRef<real> lambda,
              t_graph *graph,
              t_forcerec *fr,
              gmx::MdrunScheduleWorkload *runScheduleWork,
              const gmx_vsite_t *vsite,
              rvec mu_tot,
              double t,
              gmx_edsam *ed,
              int legacyFlags,
              const DDBalanceRegionHandler &ddBalanceRegionHandler)
{
    int    i, j;
    double mu[2*DIM];

    nonbonded_verlet_t *nbv             = fr->nbv.get();
    interaction_const_t *ic             = fr->ic;
    gmx::StatePropagatorDataGpu *stateGpu = fr->stateGpu;

    // TODO remove the code below when the legacy flags are not in use anymore
    /* modify force flag if not doing nonbonded */
    if (!fr->bNonbonded)
    {
        legacyFlags &= ~GMX_FORCE_NONBONDED;
    }

    const SimulationWorkload &simulationWork = runScheduleWork->simulationWork;

    runScheduleWork->stepWork = setupStepWorkload(legacyFlags, fr->bNonbonded,
                                                  simulationWork, thisRankHasDuty(cr, DUTY_PME));
    const StepWorkload &stepWork = runScheduleWork->stepWork;

    const bool useGpuPmeOnThisRank = simulationWork.useGpuPme && thisRankHasDuty(cr, DUTY_PME);
    const int  pmeFlags            = makePmeFlags(stepWork);

    // Switches on whether to use GPU for position and force buffer operations
    // TODO consider all possible combinations of triggers, and how to combine optimally in each case.
    const BufferOpsUseGpu useGpuXBufOps = stepWork.useGpuXBufferOps ? BufferOpsUseGpu::True : BufferOpsUseGpu::False;
    // GPU Force buffer ops are disabled on virial steps, because the virial calc is not yet ported to GPU
    const BufferOpsUseGpu useGpuFBufOps = stepWork.useGpuFBufferOps ? BufferOpsUseGpu::True : BufferOpsUseGpu::False;

    /* At a search step we need to start the first balancing region
     * somewhere early inside the step after communication during domain
     * decomposition (and not during the previous step as usual).
     */
    if (stepWork.doNeighborSearch)
    {
        ddBalanceRegionHandler.openBeforeForceComputationCpu(DdAllowBalanceRegionReopen::yes);
    }

    const int start  = 0;
    const int homenr = mdatoms->homenr;

    clear_mat(vir_force);

    if (stepWork.stateChanged)
    {
        if (inputrecNeedMutot(inputrec))
        {
            /* Calculate total (local) dipole moment in a temporary common array.
             * This makes it possible to sum them over nodes faster.
             */
            calc_mu(start, homenr,
                    x.unpaddedArrayRef(), mdatoms->chargeA, mdatoms->chargeB, mdatoms->nChargePerturbed,
                    mu, mu + DIM);
        }
    }

    if (fr->ePBC != epbcNONE)
    {
        /* Compute shift vectors every step,
         * because of pressure coupling or box deformation!
         */
        if (stepWork.haveDynamicBox && stepWork.stateChanged)
        {
            calc_shifts(box, fr->shift_vec);
        }

        const bool fillGrid = (stepWork.doNeighborSearch && stepWork.stateChanged);
        const bool calcCGCM = (fillGrid && !DOMAINDECOMP(cr));
        if (calcCGCM)
        {
            put_atoms_in_box_omp(fr->ePBC, box, x.unpaddedArrayRef().subArray(0, homenr), gmx_omp_nthreads_get(emntDefault));
            inc_nrnb(nrnb, eNR_SHIFTX, homenr);
        }
        else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph)
        {
            unshift_self(graph, box, as_rvec_array(x.unpaddedArrayRef().data()));
        }
    }

    nbnxn_atomdata_copy_shiftvec(stepWork.haveDynamicBox,
                                 fr->shift_vec, nbv->nbat.get());
    // Coordinates on the device are needed if PME or BufferOps are offloaded.
    // The local coordinates can be copied right away.
    // NOTE: Consider moving this copy to right after they are updated and constrained,
    //       if the later is not offloaded.
    if (useGpuPmeOnThisRank || useGpuXBufOps == BufferOpsUseGpu::True)
    {
        if (stepWork.doNeighborSearch)
        {
            stateGpu->reinit(mdatoms->homenr, cr->dd != nullptr ? dd_numAtomsZones(*cr->dd) : mdatoms->homenr);
            if (useGpuPmeOnThisRank)
            {
                // TODO: This should be moved into PME setup function ( pme_gpu_prepare_computation(...) )
                pme_gpu_set_device_x(fr->pmedata, stateGpu->getCoordinates());
            }
        }
        // We need to copy coordinates when:
        // 1. Update is not offloaded
        // 2. The buffers were reinitialized on search step
        if (!simulationWork.useGpuUpdate || stepWork.doNeighborSearch)
        {
            stateGpu->copyCoordinatesToGpu(x.unpaddedArrayRef(), AtomLocality::Local);
        }
    }

    // Copy coordinate from the GPU if update is on the GPU and there are forces to be computed on the CPU. At search steps the
    // current coordinates are already on the host, hence copy is not needed.
    if (simulationWork.useGpuUpdate && !stepWork.doNeighborSearch &&
        runScheduleWork->domainWork.haveCpuLocalForceWork)
    {
        stateGpu->copyCoordinatesFromGpu(x.unpaddedArrayRef(), AtomLocality::Local);
        stateGpu->waitCoordinatesReadyOnHost(AtomLocality::Local);
    }

    const auto localXReadyOnDevice = (stateGpu != nullptr) ? stateGpu->getCoordinatesReadyOnDeviceEvent(AtomLocality::Local,
                                                                                                        simulationWork, stepWork) : nullptr;

#if GMX_MPI
    if (!thisRankHasDuty(cr, DUTY_PME))
    {
        /* Send particle coordinates to the pme nodes.
         * Since this is only implemented for domain decomposition
         * and domain decomposition does not use the graph,
         * we do not need to worry about shifting.
         */
        bool reinitGpuPmePpComms    = simulationWork.useGpuPmePpCommunication && (stepWork.doNeighborSearch);
        bool sendCoordinatesFromGpu = simulationWork.useGpuPmePpCommunication && !(stepWork.doNeighborSearch);
        gmx_pme_send_coordinates(fr, cr, box, as_rvec_array(x.unpaddedArrayRef().data()),
                                 lambda[efptCOUL], lambda[efptVDW],
                                 (stepWork.computeVirial || stepWork.computeEnergy),
                                 step, simulationWork.useGpuPmePpCommunication, reinitGpuPmePpComms,
                                 sendCoordinatesFromGpu, localXReadyOnDevice, wcycle);
    }
#endif /* GMX_MPI */

    if (useGpuPmeOnThisRank)
    {
        launchPmeGpuSpread(fr->pmedata, box, stepWork, pmeFlags,
                           localXReadyOnDevice, wcycle);
    }
    /* do gridding for pair search */
    if (stepWork.doNeighborSearch)
    {
        if (graph && stepWork.stateChanged)
        {
            /* Calculate intramolecular shift vectors to make molecules whole */
            mk_mshift(fplog, graph, fr->ePBC, box, as_rvec_array(x.unpaddedArrayRef().data()));
        }

        // TODO
        // - vzero is constant, do we need to pass it?
        // - box_diag should be passed directly to nbnxn_put_on_grid
        //
        rvec vzero;
        clear_rvec(vzero);

        rvec box_diag;
        box_diag[XX] = box[XX][XX];
        box_diag[YY] = box[YY][YY];
        box_diag[ZZ] = box[ZZ][ZZ];

        wallcycle_start(wcycle, ewcNS);
        if (!DOMAINDECOMP(cr))
        {
            wallcycle_sub_start(wcycle, ewcsNBS_GRID_LOCAL);
            nbnxn_put_on_grid(nbv, box,
                              0, vzero, box_diag,
                              nullptr, { 0, mdatoms->homenr }, -1,
                              fr->cginfo, x.unpaddedArrayRef(),
                              0, nullptr);
            wallcycle_sub_stop(wcycle, ewcsNBS_GRID_LOCAL);
        }
        else
        {
            wallcycle_sub_start(wcycle, ewcsNBS_GRID_NONLOCAL);
            nbnxn_put_on_grid_nonlocal(nbv, domdec_zones(cr->dd),
                                       fr->cginfo, x.unpaddedArrayRef());
            wallcycle_sub_stop(wcycle, ewcsNBS_GRID_NONLOCAL);
        }

        nbv->setAtomProperties(*mdatoms, fr->cginfo);

        wallcycle_stop(wcycle, ewcNS);

        /* initialize the GPU nbnxm atom data and bonded data structures */
        if (simulationWork.useGpuNonbonded)
        {
            wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);

            wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);
            Nbnxm::gpu_init_atomdata(nbv->gpu_nbv, nbv->nbat.get());
            wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);

            if (fr->gpuBonded)
            {
                /* Now we put all atoms on the grid, we can assign bonded
                 * interactions to the GPU, where the grid order is
                 * needed. Also the xq, f and fshift device buffers have
                 * been reallocated if needed, so the bonded code can
                 * learn about them. */
                // TODO the xq, f, and fshift buffers are now shared
                // resources, so they should be maintained by a
                // higher-level object than the nb module.
                fr->gpuBonded->updateInteractionListsAndDeviceBuffers(nbv->getGridIndices(),
                                                                      top->idef,
                                                                      Nbnxm::gpu_get_xq(nbv->gpu_nbv),
                                                                      Nbnxm::gpu_get_f(nbv->gpu_nbv),
                                                                      Nbnxm::gpu_get_fshift(nbv->gpu_nbv));
            }
            wallcycle_stop(wcycle, ewcLAUNCH_GPU);
        }
    }

    if (stepWork.doNeighborSearch)
    {
        // Need to run after the GPU-offload bonded interaction lists
        // are set up to be able to determine whether there is bonded work.
        runScheduleWork->domainWork =
            setupDomainLifetimeWorkload(*inputrec,
                                        *fr,
                                        pull_work,
                                        ed,
                                        top->idef,
                                        *fcd,
                                        *mdatoms,
                                        simulationWork,
                                        stepWork);

        wallcycle_start_nocount(wcycle, ewcNS);
        wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_LOCAL);
        /* Note that with a GPU the launch overhead of the list transfer is not timed separately */
        nbv->constructPairlist(InteractionLocality::Local,
                               &top->excls, step, nrnb);

        nbv->setupGpuShortRangeWork(fr->gpuBonded, InteractionLocality::Local);

        wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_LOCAL);
        wallcycle_stop(wcycle, ewcNS);

        if (useGpuXBufOps == BufferOpsUseGpu::True)
        {
            nbv->atomdata_init_copy_x_to_nbat_x_gpu();
        }
        // For force buffer ops, we use the below condition rather than
        // useGpuFBufOps to ensure that init is performed even if this
        // NS step is also a virial step (on which f buf ops are deactivated).
        if (simulationWork.useGpuBufferOps && simulationWork.useGpuNonbonded && (GMX_GPU == GMX_GPU_CUDA))
        {
            GMX_ASSERT(stateGpu, "stateGpu should be valid when buffer ops are offloaded");
            nbv->atomdata_init_add_nbat_f_to_f_gpu(stateGpu->fReducedOnDevice());
        }
    }
    else if (!EI_TPI(inputrec->eI))
    {
        if (useGpuXBufOps == BufferOpsUseGpu::True)
        {
            GMX_ASSERT(stateGpu, "stateGpu should be valid when buffer ops are offloaded");
            nbv->convertCoordinatesGpu(AtomLocality::Local, false,
                                       stateGpu->getCoordinates(),
                                       localXReadyOnDevice);
        }
        else
        {
            nbv->convertCoordinates(AtomLocality::Local, false,
                                    x.unpaddedArrayRef());
        }
    }

    const gmx::DomainLifetimeWorkload &domainWork = runScheduleWork->domainWork;

    if (simulationWork.useGpuNonbonded)
    {
        ddBalanceRegionHandler.openBeforeForceComputationGpu();

        wallcycle_start(wcycle, ewcLAUNCH_GPU);

        wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_NONBONDED);
        Nbnxm::gpu_upload_shiftvec(nbv->gpu_nbv, nbv->nbat.get());
        if (stepWork.doNeighborSearch || (useGpuXBufOps == BufferOpsUseGpu::False))
        {
            Nbnxm::gpu_copy_xq_to_gpu(nbv->gpu_nbv, nbv->nbat.get(),
                                      AtomLocality::Local);
        }
        wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
        // with X buffer ops offloaded to the GPU on all but the search steps

        // bonded work not split into separate local and non-local, so with DD
        // we can only launch the kernel after non-local coordinates have been received.
        if (domainWork.haveGpuBondedWork && !havePPDomainDecomposition(cr))
        {
            wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_BONDED);
            fr->gpuBonded->launchKernel(fr, stepWork, box);
            wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_BONDED);
        }

        /* launch local nonbonded work on GPU */
        wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);
        do_nb_verlet(fr, ic, enerd, stepWork, InteractionLocality::Local, enbvClearFNo,
                     step, nrnb, wcycle);
        wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU);
    }

    if (useGpuPmeOnThisRank)
    {
        // In PME GPU and mixed mode we launch FFT / gather after the
        // X copy/transform to allow overlap as well as after the GPU NB
        // launch to avoid FFT launch overhead hijacking the CPU and delaying
        // the nonbonded kernel.
        launchPmeGpuFftAndGather(fr->pmedata, wcycle);
    }
    // TODO Update this comment when introducing SimulationWorkload
    //
    // The conditions for gpuHaloExchange e.g. using GPU buffer
    // operations were checked before construction, so here we can
    // just use it and assert upon any conditions.
    gmx::GpuHaloExchange *gpuHaloExchange              = (havePPDomainDecomposition(cr) ? cr->dd->gpuHaloExchange.get() : nullptr);
    const bool            ddUsesGpuDirectCommunication = (gpuHaloExchange != nullptr);
    GMX_ASSERT(!ddUsesGpuDirectCommunication || (useGpuXBufOps == BufferOpsUseGpu::True),
               "Must use coordinate buffer ops with GPU halo exchange");
    const bool useGpuForcesHaloExchange = ddUsesGpuDirectCommunication && (useGpuFBufOps == BufferOpsUseGpu::True);

    /* Communicate coordinates and sum dipole if necessary +
       do non-local pair search */
    if (havePPDomainDecomposition(cr))
    {
        if (stepWork.doNeighborSearch)
        {
            // TODO: fuse this branch with the above large stepWork.doNeighborSearch block
            wallcycle_start_nocount(wcycle, ewcNS);
            wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_NONLOCAL);
            /* Note that with a GPU the launch overhead of the list transfer is not timed separately */
            nbv->constructPairlist(InteractionLocality::NonLocal,
                                   &top->excls, step, nrnb);

            nbv->setupGpuShortRangeWork(fr->gpuBonded, InteractionLocality::NonLocal);
            wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_NONLOCAL);
            wallcycle_stop(wcycle, ewcNS);
            if (ddUsesGpuDirectCommunication)
            {
                gpuHaloExchange->reinitHalo(stateGpu->getCoordinates(), stateGpu->getForces());
            }
        }
        else
        {
            if (ddUsesGpuDirectCommunication)
            {
                // The following must be called after local setCoordinates (which records an event
                // when the coordinate data has been copied to the device).
                gpuHaloExchange->communicateHaloCoordinates(box, localXReadyOnDevice);

                if (domainWork.haveCpuBondedWork || domainWork.haveFreeEnergyWork)
                {
                    //non-local part of coordinate buffer must be copied back to host for CPU work
                    stateGpu->copyCoordinatesFromGpu(x.unpaddedArrayRef(), AtomLocality::NonLocal);
                }
            }
            else
            {
                dd_move_x(cr->dd, box, x.unpaddedArrayRef(), wcycle);
            }

            if (useGpuXBufOps == BufferOpsUseGpu::True)
            {
                // The condition here was (pme != nullptr && pme_gpu_get_device_x(fr->pmedata) != nullptr)
                if (!useGpuPmeOnThisRank && !ddUsesGpuDirectCommunication)
                {
                    stateGpu->copyCoordinatesToGpu(x.unpaddedArrayRef(), AtomLocality::NonLocal);
                }
                nbv->convertCoordinatesGpu(AtomLocality::NonLocal, false,
                                           stateGpu->getCoordinates(),
                                           stateGpu->getCoordinatesReadyOnDeviceEvent(AtomLocality::NonLocal,
                                                                                      simulationWork, stepWork));
            }
            else
            {
                nbv->convertCoordinates(AtomLocality::NonLocal, false,
                                        x.unpaddedArrayRef());
            }
        }

        if (simulationWork.useGpuNonbonded)
        {
            wallcycle_start(wcycle, ewcLAUNCH_GPU);

            if (stepWork.doNeighborSearch || (useGpuXBufOps == BufferOpsUseGpu::False))
            {
                wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_NONBONDED);
                Nbnxm::gpu_copy_xq_to_gpu(nbv->gpu_nbv, nbv->nbat.get(),
                                          AtomLocality::NonLocal);
                wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
            }

            if (domainWork.haveGpuBondedWork)
            {
                wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_BONDED);
                fr->gpuBonded->launchKernel(fr, stepWork, box);
                wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_BONDED);
            }

            /* launch non-local nonbonded tasks on GPU */
            wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_NONBONDED);
            do_nb_verlet(fr, ic, enerd, stepWork, InteractionLocality::NonLocal, enbvClearFNo,
                         step, nrnb, wcycle);
            wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);

            wallcycle_stop(wcycle, ewcLAUNCH_GPU);
        }
    }

    if (simulationWork.useGpuNonbonded)
    {
        /* launch D2H copy-back F */
        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
        wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);

        if (havePPDomainDecomposition(cr))
        {
            Nbnxm::gpu_launch_cpyback(nbv->gpu_nbv, nbv->nbat.get(),
                                      stepWork, AtomLocality::NonLocal);
        }
        Nbnxm::gpu_launch_cpyback(nbv->gpu_nbv, nbv->nbat.get(),
                                  stepWork, AtomLocality::Local);
        wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);

        if (domainWork.haveGpuBondedWork && stepWork.computeEnergy)
        {
            fr->gpuBonded->launchEnergyTransfer();
        }
        wallcycle_stop(wcycle, ewcLAUNCH_GPU);
    }

    if (stepWork.stateChanged && inputrecNeedMutot(inputrec))
    {
        if (PAR(cr))
        {
            gmx_sumd(2*DIM, mu, cr);

            ddBalanceRegionHandler.reopenRegionCpu();
        }

        for (i = 0; i < 2; i++)
        {
            for (j = 0; j < DIM; j++)
            {
                fr->mu_tot[i][j] = mu[i*DIM + j];
            }
        }
    }
    if (mdatoms->nChargePerturbed == 0)
    {
        copy_rvec(fr->mu_tot[0], mu_tot);
    }
    else
    {
        for (j = 0; j < DIM; j++)
        {
            mu_tot[j] =
                (1.0 - lambda[efptCOUL])*fr->mu_tot[0][j] +
                lambda[efptCOUL]*fr->mu_tot[1][j];
        }
    }

    /* Reset energies */
    reset_enerdata(enerd);
    /* Clear the shift forces */
    // TODO: This should be linked to the shift force buffer in use, or cleared before use instead
    for (gmx::RVec &elem : fr->shiftForces)
    {
        elem = { 0.0_real, 0.0_real, 0.0_real };
    }

    if (DOMAINDECOMP(cr) && !thisRankHasDuty(cr, DUTY_PME))
    {
        wallcycle_start(wcycle, ewcPPDURINGPME);
        dd_force_flop_start(cr->dd, nrnb);
    }

    if (inputrec->bRot)
    {
        wallcycle_start(wcycle, ewcROT);
        do_rotation(cr, enforcedRotation, box, as_rvec_array(x.unpaddedArrayRef().data()), t, step, stepWork.doNeighborSearch);
        wallcycle_stop(wcycle, ewcROT);
    }
    /* Start the force cycle counter.
     * Note that a different counter is used for dynamic load balancing.
     */
    wallcycle_start(wcycle, ewcFORCE);

    // Set up and clear force outputs.
    // We use std::move to keep the compiler happy, it has no effect.
    ForceOutputs forceOut = setupForceOutputs(fr, pull_work, *inputrec, std::move(force), stepWork, wcycle);

    /* We calculate the non-bonded forces, when done on the CPU, here.
     * We do this before calling do_force_lowlevel, because in that
     * function, the listed forces are calculated before PME, which
     * does communication. With this order, non-bonded and listed
     * force calculation imbalance can be balanced out by the domain
     * decomposition load balancing.
     */

    const bool useOrEmulateGpuNb = simulationWork.useGpuNonbonded || fr->nbv->emulateGpu();

    if (!useOrEmulateGpuNb)
    {
        do_nb_verlet(fr, ic, enerd, stepWork, InteractionLocality::Local, enbvClearFYes,
                     step, nrnb, wcycle);
    }

    if (fr->efep != efepNO)
    {
        /* Calculate the local and non-local free energy interactions here.
         * Happens here on the CPU both with and without GPU.
         */
        nbv->dispatchFreeEnergyKernel(InteractionLocality::Local,
                                      fr, as_rvec_array(x.unpaddedArrayRef().data()), &forceOut.forceWithShiftForces(), *mdatoms,
                                      inputrec->fepvals, lambda.data(),
                                      enerd, stepWork, nrnb);

        if (havePPDomainDecomposition(cr))
        {
            nbv->dispatchFreeEnergyKernel(InteractionLocality::NonLocal,
                                          fr, as_rvec_array(x.unpaddedArrayRef().data()), &forceOut.forceWithShiftForces(), *mdatoms,
                                          inputrec->fepvals, lambda.data(),
                                          enerd, stepWork, nrnb);
        }
    }

    if (!useOrEmulateGpuNb)
    {
        if (havePPDomainDecomposition(cr))
        {
            do_nb_verlet(fr, ic, enerd, stepWork, InteractionLocality::NonLocal, enbvClearFNo,
                         step, nrnb, wcycle);
        }

        if (stepWork.computeForces)
        {
            /* Add all the non-bonded force to the normal force array.
             * This can be split into a local and a non-local part when overlapping
             * communication with calculation with domain decomposition.
             */
            wallcycle_stop(wcycle, ewcFORCE);
            nbv->atomdata_add_nbat_f_to_f(AtomLocality::All, forceOut.forceWithShiftForces().force());
            wallcycle_start_nocount(wcycle, ewcFORCE);
        }

        /* If there are multiple fshift output buffers we need to reduce them */
        if (stepWork.computeVirial)
        {
            /* This is not in a subcounter because it takes a
               negligible and constant-sized amount of time */
            nbnxn_atomdata_add_nbat_fshift_to_fshift(*nbv->nbat,
                                                     forceOut.forceWithShiftForces().shiftForces());
        }
    }

    /* update QMMMrec, if necessary */
    if (fr->bQMMM)
    {
        update_QMMMrec(cr, fr, as_rvec_array(x.unpaddedArrayRef().data()), mdatoms, box);
    }

    // TODO Force flags should include haveFreeEnergyWork for this domain
    if (ddUsesGpuDirectCommunication &&
        (domainWork.haveCpuBondedWork || domainWork.haveFreeEnergyWork))
    {
        /* Wait for non-local coordinate data to be copied from device */
        nbv->wait_nonlocal_x_copy_D2H_done();
    }
    /* Compute the bonded and non-bonded energies and optionally forces */
    do_force_lowlevel(fr, inputrec, &(top->idef),
                      cr, ms, nrnb, wcycle, mdatoms,
                      x, hist, &forceOut, enerd, fcd,
                      box, lambda.data(), graph, fr->mu_tot,
                      stepWork,
                      ddBalanceRegionHandler);

    wallcycle_stop(wcycle, ewcFORCE);

    computeSpecialForces(fplog, cr, inputrec, awh, enforcedRotation,
                         imdSession, pull_work, step, t, wcycle,
                         fr->forceProviders, box, x.unpaddedArrayRef(), mdatoms, lambda.data(),
                         stepWork, &forceOut.forceWithVirial(), enerd,
                         ed, stepWork.doNeighborSearch);
    // Will store the amount of cycles spent waiting for the GPU that
    // will be later used in the DLB accounting.
    float cycles_wait_gpu = 0;
    if (useOrEmulateGpuNb)
    {
        auto &forceWithShiftForces = forceOut.forceWithShiftForces();

        /* wait for non-local forces (or calculate in emulation mode) */
        if (havePPDomainDecomposition(cr))
        {
            if (simulationWork.useGpuNonbonded)
            {
                cycles_wait_gpu += Nbnxm::gpu_wait_finish_task(nbv->gpu_nbv,
                                                               stepWork, AtomLocality::NonLocal,
                                                               enerd->grpp.ener[egLJSR].data(),
                                                               enerd->grpp.ener[egCOULSR].data(),
                                                               forceWithShiftForces.shiftForces(),
                                                               wcycle);
            }
            else
            {
                wallcycle_start_nocount(wcycle, ewcFORCE);
                do_nb_verlet(fr, ic, enerd, stepWork, InteractionLocality::NonLocal, enbvClearFYes,
                             step, nrnb, wcycle);
                wallcycle_stop(wcycle, ewcFORCE);
            }

            if (useGpuFBufOps == BufferOpsUseGpu::True)
            {
                gmx::FixedCapacityVector<GpuEventSynchronizer*, 1> dependencyList;

                // TODO: move this into DomainLifetimeWorkload, including the second part of the condition
                // The bonded and free energy CPU tasks can have non-local force contributions
                // which are a dependency for the GPU force reduction.
                bool haveNonLocalForceContribInCpuBuffer = domainWork.haveCpuBondedWork || domainWork.haveFreeEnergyWork;

                if (haveNonLocalForceContribInCpuBuffer)
                {
                    stateGpu->copyForcesToGpu(forceOut.forceWithShiftForces().force(), AtomLocality::NonLocal);
                    dependencyList.push_back(stateGpu->getForcesReadyOnDeviceEvent(AtomLocality::NonLocal,
                                                                                   useGpuFBufOps == BufferOpsUseGpu::True));
                }

                nbv->atomdata_add_nbat_f_to_f_gpu(AtomLocality::NonLocal,
                                                  stateGpu->getForces(),
                                                  pme_gpu_get_device_f(fr->pmedata),
                                                  dependencyList,
                                                  false, haveNonLocalForceContribInCpuBuffer);
                if (!useGpuForcesHaloExchange)
                {
                    // copy from GPU input for dd_move_f()
                    stateGpu->copyForcesFromGpu(forceOut.forceWithShiftForces().force(), AtomLocality::NonLocal);
                }
            }
            else
            {
                nbv->atomdata_add_nbat_f_to_f(AtomLocality::NonLocal,
                                              forceWithShiftForces.force());
            }

            if (fr->nbv->emulateGpu() && stepWork.computeVirial)
            {
                nbnxn_atomdata_add_nbat_fshift_to_fshift(*nbv->nbat,
                                                         forceWithShiftForces.shiftForces());
            }
        }
    }

    if (havePPDomainDecomposition(cr))
    {
        /* We are done with the CPU compute.
         * We will now communicate the non-local forces.
         * If we use a GPU this will overlap with GPU work, so in that case
         * we do not close the DD force balancing region here.
         */
        ddBalanceRegionHandler.closeAfterForceComputationCpu();

        if (stepWork.computeForces)
        {
            if (useGpuForcesHaloExchange)
            {
                if (domainWork.haveCpuLocalForceWork)
                {
                    stateGpu->copyForcesToGpu(forceOut.forceWithShiftForces().force(), AtomLocality::Local);
                }
                gpuHaloExchange->communicateHaloForces(domainWork.haveCpuLocalForceWork);
            }
            else
            {
                if (useGpuFBufOps == BufferOpsUseGpu::True)
                {
                    stateGpu->waitForcesReadyOnHost(AtomLocality::NonLocal);
                }
                dd_move_f(cr->dd, &forceOut.forceWithShiftForces(), wcycle);
            }
        }
    }
    // With both nonbonded and PME offloaded a GPU on the same rank, we use
    // an alternating wait/reduction scheme.
    bool alternateGpuWait = (!c_disableAlternatingWait && useGpuPmeOnThisRank && simulationWork.useGpuNonbonded && !DOMAINDECOMP(cr) &&
                             (useGpuFBufOps == BufferOpsUseGpu::False));
    if (alternateGpuWait)
    {
        alternatePmeNbGpuWaitReduce(fr->nbv.get(), fr->pmedata, &forceOut, enerd,
                                    stepWork, pmeFlags, wcycle);
    }

    if (!alternateGpuWait && useGpuPmeOnThisRank)
    {
        pme_gpu_wait_and_reduce(fr->pmedata, pmeFlags, wcycle, &forceOut.forceWithVirial(), enerd);
    }

    /* Wait for local GPU NB outputs on the non-alternating wait path */
    if (!alternateGpuWait && simulationWork.useGpuNonbonded)
    {
        /* Measured overhead on CUDA and OpenCL with(out) GPU sharing
         * is between 0.5 and 1.5 Mcycles. So 2 MCycles is an overestimate,
         * but even with a step of 0.1 ms the difference is less than 1%
         * of the runtime.
         */
        const float gpuWaitApiOverheadMargin = 2e6F; /* cycles */
        const float waitCycles =
            Nbnxm::gpu_wait_finish_task(nbv->gpu_nbv,
                                        stepWork, AtomLocality::Local,
                                        enerd->grpp.ener[egLJSR].data(),
                                        enerd->grpp.ener[egCOULSR].data(),
                                        forceOut.forceWithShiftForces().shiftForces(),
                                        wcycle);

        if (ddBalanceRegionHandler.useBalancingRegion())
        {
            DdBalanceRegionWaitedForGpu waitedForGpu = DdBalanceRegionWaitedForGpu::yes;
            if (stepWork.computeForces && waitCycles <= gpuWaitApiOverheadMargin)
            {
                /* We measured few cycles, it could be that the kernel
                 * and transfer finished earlier and there was no actual
                 * wait time, only API call overhead.
                 * Then the actual time could be anywhere between 0 and
                 * cycles_wait_est. We will use half of cycles_wait_est.
                 */
                waitedForGpu = DdBalanceRegionWaitedForGpu::no;
            }
            ddBalanceRegionHandler.closeAfterForceComputationGpu(cycles_wait_gpu, waitedForGpu);
        }
    }

    if (fr->nbv->emulateGpu())
    {
        // NOTE: emulation kernel is not included in the balancing region,
        // but emulation mode does not target performance anyway
        wallcycle_start_nocount(wcycle, ewcFORCE);
        do_nb_verlet(fr, ic, enerd, stepWork, InteractionLocality::Local,
                     DOMAINDECOMP(cr) ? enbvClearFNo : enbvClearFYes,
                     step, nrnb, wcycle);
        wallcycle_stop(wcycle, ewcFORCE);
    }

    // If on GPU PME-PP comms path, receive forces from PME before GPU buffer ops
    // TODO refactor this and unify with below default-path call to the same function
    if (PAR(cr) && !thisRankHasDuty(cr, DUTY_PME) && simulationWork.useGpuPmePpCommunication)
    {
        /* In case of node-splitting, the PP nodes receive the long-range
         * forces, virial and energy from the PME nodes here.
         */
        pme_receive_force_ener(fr, cr, &forceOut.forceWithVirial(), enerd, simulationWork.useGpuPmePpCommunication, stepWork.useGpuPmeFReduction, wcycle);
    }

    /* Do the nonbonded GPU (or emulation) force buffer reduction
     * on the non-alternating path. */
    if (useOrEmulateGpuNb && !alternateGpuWait)
    {
        //TODO simplify the below conditionals. Pass buffer and sync pointers at init stage rather than here. Unify getter fns for sameGPU/otherGPU cases.
        void* pmeForcePtr = stepWork.useGpuPmeFReduction ?
            (thisRankHasDuty(cr, DUTY_PME) ?
             pme_gpu_get_device_f(fr->pmedata) :         // PME force buffer on same GPU
             fr->pmePpCommGpu->getGpuForceStagingPtr())  // buffer received from other GPU
            : nullptr;                                   // PME reduction not active on GPU

        GpuEventSynchronizer* const pmeSynchronizer = stepWork.useGpuPmeFReduction ?
            (thisRankHasDuty(cr, DUTY_PME) ?
             pme_gpu_get_f_ready_synchronizer(fr->pmedata) :   // PME force buffer on same GPU
             static_cast<GpuEventSynchronizer*>
             (fr->pmePpCommGpu->getForcesReadySynchronizer())) // buffer received from other GPU
            : nullptr;                                         // PME reduction not active on GPU

        gmx::FixedCapacityVector<GpuEventSynchronizer*, 2> dependencyList;

        if (stepWork.useGpuPmeFReduction)
        {
            dependencyList.push_back(pmeSynchronizer);
        }

        gmx::ArrayRef<gmx::RVec> forceWithShift = forceOut.forceWithShiftForces().force();

        if (useGpuFBufOps == BufferOpsUseGpu::True)
        {
            // Flag to specify whether the CPU force buffer has contributions to
            // local atoms. This depends on whether there are CPU-based force tasks
            // or when DD is active the halo exchange has resulted in contributions
            // from the non-local part.
            const bool haveLocalForceContribInCpuBuffer = (domainWork.haveCpuLocalForceWork || havePPDomainDecomposition(cr));

            // TODO: move these steps as early as possible:
            // - CPU f H2D should be as soon as all CPU-side forces are done
            // - wait for force reduction does not need to block host (at least not here, it's sufficient to wait
            //   before the next CPU task that consumes the forces: vsite spread or update)
            // - copy is not performed if GPU force halo exchange is active, because it would overwrite the result
            //   of the halo exchange. In that case the copy is instead performed above, before the exchange.
            //   These should be unified.
            if (haveLocalForceContribInCpuBuffer && !useGpuForcesHaloExchange)
            {
                stateGpu->copyForcesToGpu(forceWithShift, AtomLocality::Local);
                dependencyList.push_back(stateGpu->getForcesReadyOnDeviceEvent(AtomLocality::Local,
                                                                               useGpuFBufOps == BufferOpsUseGpu::True));
            }
            if (useGpuForcesHaloExchange)
            {
                // Add a stream synchronization to satisfy a dependency
                // for the local buffer ops on the result of GPU halo
                // exchange, which operates in the non-local stream and
                // writes to the local part of the force buffer.
                //
                // TODO improve this through use of an event - see Redmine #3093
                //      push the event into the dependencyList
                nbv->stream_local_wait_for_nonlocal();
            }
            nbv->atomdata_add_nbat_f_to_f_gpu(AtomLocality::Local,
                                              stateGpu->getForces(),
                                              pmeForcePtr,
                                              dependencyList,
                                              stepWork.useGpuPmeFReduction, haveLocalForceContribInCpuBuffer);
            // Copy forces to host if they are needed for update or if virtual sites are enabled.
            // If there are vsites, we need to copy forces every step to spread vsite forces on host.
            // TODO: When the output flags will be included in step workload, this copy can be combined with the
            //       copy call done in sim_utils(...) for the output.
            // NOTE: If there are virtual sites, the forces are modified on host after this D2H copy. Hence,
            //       they should not be copied in do_md(...) for the output.
            if (!simulationWork.useGpuUpdate || vsite)
            {
                stateGpu->copyForcesFromGpu(forceWithShift, AtomLocality::Local);
                stateGpu->waitForcesReadyOnHost(AtomLocality::Local);
            }
        }
        else
        {
            nbv->atomdata_add_nbat_f_to_f(AtomLocality::Local, forceWithShift);
        }
    }
    launchGpuEndOfStepTasks(nbv, fr->gpuBonded, fr->pmedata, enerd,
                            *runScheduleWork,
                            simulationWork.useGpuNonbonded, useGpuPmeOnThisRank,
                            step,
                            wcycle);

    if (DOMAINDECOMP(cr))
    {
        dd_force_flop_stop(cr->dd, nrnb);
    }

    if (stepWork.computeForces)
    {
        rvec *f = as_rvec_array(forceOut.forceWithShiftForces().force().data());

        /* If we have NoVirSum forces, but we do not calculate the virial,
         * we sum fr->f_novirsum=forceOut.f later.
         */
        if (vsite && !(fr->haveDirectVirialContributions && !stepWork.computeVirial))
        {
            rvec *fshift = as_rvec_array(forceOut.forceWithShiftForces().shiftForces().data());
            spread_vsite_f(vsite, as_rvec_array(x.unpaddedArrayRef().data()), f, fshift, FALSE, nullptr, nrnb,
                           &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr, wcycle);
        }

        if (stepWork.computeVirial)
        {
            /* Calculation of the virial must be done after vsites! */
            calc_virial(0, mdatoms->homenr, as_rvec_array(x.unpaddedArrayRef().data()),
                        forceOut.forceWithShiftForces(),
                        vir_force, graph, box, nrnb, fr, inputrec->ePBC);
        }
    }

    // TODO refactor this and unify with above PME-PP GPU communication path call to the same function
    if (PAR(cr) && !thisRankHasDuty(cr, DUTY_PME) && !simulationWork.useGpuPmePpCommunication)
    {
        /* In case of node-splitting, the PP nodes receive the long-range
         * forces, virial and energy from the PME nodes here.
         */
        pme_receive_force_ener(fr, cr, &forceOut.forceWithVirial(), enerd,
                               simulationWork.useGpuPmePpCommunication, false, wcycle);
    }

    if (stepWork.computeForces)
    {
        post_process_forces(cr, step, nrnb, wcycle,
                            top, box, as_rvec_array(x.unpaddedArrayRef().data()), &forceOut,
                            vir_force, mdatoms, graph, fr, vsite,
                            stepWork);
    }

    if (stepWork.computeEnergy)
    {
        /* Sum the potential energy terms from group contributions */
        sum_epot(&(enerd->grpp), enerd->term);

        if (!EI_TPI(inputrec->eI))
        {
            checkPotentialEnergyValidity(step, *enerd, *inputrec);
        }
    }

    /* In case we don't have constraints and are using GPUs, the next balancing
     * region starts here.
     * Some "special" work at the end of do_force_cuts?, such as vsite spread,
     * virial calculation and COM pulling, is thus not included in
     * the balance timing, which is ok as most tasks do communication.
     */
    ddBalanceRegionHandler.openBeforeForceComputationCpu(DdAllowBalanceRegionReopen::no);
}