/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2017,2018,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 * \brief Common functions for the different NBNXN GPU implementations.
 *
 * \author Szilard Pall <pall.szilard@gmail.com>
 *
 * \ingroup module_nbnxm
 */
#ifndef GMX_NBNXM_GPU_COMMON_H
#define GMX_NBNXM_GPU_COMMON_H

#include "config.h"

#include <cassert>

#include <string>

#if GMX_GPU_CUDA
#    include "cuda/nbnxm_cuda_types.h"
#endif

#if GMX_GPU_OPENCL
#    include "opencl/nbnxm_ocl_types.h"
#endif

#include "gromacs/gpu_utils/gpu_utils.h"
#include "gromacs/listed_forces/gpubonded.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdtypes/simulation_workload.h"
#include "gromacs/nbnxm/nbnxm.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/stringutil.h"

#include "gpu_common_utils.h"
#include "nbnxm_gpu.h"

namespace gmx
{
class GpuBonded;
}

namespace Nbnxm
{

/*! \brief Check that atom locality values are valid for the GPU module.
 *
 *  In the GPU module atom locality "all" is not supported; the local and
 *  non-local ranges are treated separately.
 *
 *  \param[in] atomLocality atom locality specifier
 */
static inline void validateGpuAtomLocality(const AtomLocality atomLocality)
{
    std::string str = gmx::formatString(
            "Invalid atom locality passed (%d); valid here is only "
            "local (%d) or nonlocal (%d)",
            static_cast<int>(atomLocality), static_cast<int>(AtomLocality::Local),
            static_cast<int>(AtomLocality::NonLocal));

    GMX_ASSERT(atomLocality == AtomLocality::Local || atomLocality == AtomLocality::NonLocal, str.c_str());
}

/*! \brief Convert atom locality to interaction locality.
 *
 *  In the current implementation this is a straightforward conversion:
 *  local to local, non-local to non-local.
 *
 *  \param[in] atomLocality Atom locality specifier
 *  \returns   Interaction locality corresponding to the atom locality passed.
 */
static inline InteractionLocality gpuAtomToInteractionLocality(const AtomLocality atomLocality)
{
    validateGpuAtomLocality(atomLocality);

    /* determine interaction locality from atom locality */
    if (atomLocality == AtomLocality::Local)
    {
        return InteractionLocality::Local;
    }
    else if (atomLocality == AtomLocality::NonLocal)
    {
        return InteractionLocality::NonLocal;
    }
    else
    {
        gmx_incons("Wrong locality");
    }
}

//NOLINTNEXTLINE(misc-definitions-in-headers)
void setupGpuShortRangeWork(NbnxmGpu* nb, const gmx::GpuBonded* gpuBonded, const gmx::InteractionLocality iLocality)
{
    GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");

    // There is short-range work if the pair list for the provided
    // interaction locality contains entries or if there is any
    // bonded work (as this is not split into local/nonlocal).
    nb->haveWork[iLocality] = ((nb->plist[iLocality]->nsci != 0)
                               || (gpuBonded != nullptr && gpuBonded->haveInteractions()));
}

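/* Illustrative usage sketch (not a call site in this file; the real callers
 * live in the MD scheduling code). Assuming a valid NbnxmGpu* nb and a
 * possibly null gpuBonded, the work flags would be refreshed for both
 * localities after each pair search:
 *
 *     setupGpuShortRangeWork(nb, gpuBonded, gmx::InteractionLocality::Local);
 *     setupGpuShortRangeWork(nb, gpuBonded, gmx::InteractionLocality::NonLocal);
 */
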
/*! \brief Returns true if there is GPU short-range work for the given interaction locality.
 *
 * Note that, unlike nonbonded tasks, bonded tasks are not split into local/non-local;
 * therefore, if there are GPU-offloaded bonded interactions, this function will return
 * true for all interaction localities.
 *
 * \param[in] nb        The nonbonded GPU data structure
 * \param[in] iLocality Interaction locality identifier
 */
static bool haveGpuShortRangeWork(const NbnxmGpu& nb, const gmx::InteractionLocality iLocality)
{
    return nb.haveWork[iLocality];
}

//NOLINTNEXTLINE(misc-definitions-in-headers)
bool haveGpuShortRangeWork(const NbnxmGpu* nb, const gmx::AtomLocality aLocality)
{
    GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");

    return haveGpuShortRangeWork(*nb, gpuAtomToInteractionLocality(aLocality));
}

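/* Illustrative sketch (hypothetical caller): this query lets the schedule
 * skip non-local kernel launches and waits on empty domains, e.g.:
 *
 *     if (haveGpuShortRangeWork(nb, gmx::AtomLocality::NonLocal))
 *     {
 *         // launch non-local kernels / wait on the non-local stream
 *     }
 */
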
/*! \brief Calculate atom range and return start index and length.
 *
 * \param[in]  atomData       Atom descriptor data structure
 * \param[in]  atomLocality   Atom locality specifier
 * \param[out] atomRangeBegin Starting index of the atom range in the atom data array.
 * \param[out] atomRangeLen   Atom range length in the atom data array.
 */
template<typename AtomDataT>
static inline void getGpuAtomRange(const AtomDataT*   atomData,
                                   const AtomLocality atomLocality,
                                   int*               atomRangeBegin,
                                   int*               atomRangeLen)
{
    assert(atomData);
    validateGpuAtomLocality(atomLocality);

    /* calculate the atom data index range based on locality */
    if (atomLocality == AtomLocality::Local)
    {
        *atomRangeBegin = 0;
        *atomRangeLen   = atomData->natoms_local;
    }
    else
    {
        *atomRangeBegin = atomData->natoms_local;
        *atomRangeLen   = atomData->natoms - atomData->natoms_local;
    }
}

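/* Illustrative sketch: AtomDataT only needs natoms and natoms_local members
 * (as in the CUDA/OpenCL atomdata types). For a hypothetical system with
 * natoms_local = 6000 and natoms = 8000:
 *
 *     int begin, len;
 *     getGpuAtomRange(atomData, AtomLocality::Local, &begin, &len);    // begin = 0,    len = 6000
 *     getGpuAtomRange(atomData, AtomLocality::NonLocal, &begin, &len); // begin = 6000, len = 2000
 */
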
/*! \brief Count pruning kernel time if either kernel has been triggered
 *
 *  We do the accounting for either of the two pruning kernel flavors:
 *   - 1st pass prune: ran during the current step (prior to the force kernel);
 *   - rolling prune:  ran at the end of the previous step (prior to the current step H2D xq);
 *
 *  Note that the resetting of cu_timers_t::didPrune and cu_timers_t::didRollingPrune should happen
 *  after calling this function.
 *
 * \param[in]    timers  structs with GPU timer objects
 * \param[inout] timings GPU task timing data
 * \param[in]    iloc    interaction locality
 */
template<typename GpuTimers>
static void countPruneKernelTime(GpuTimers*                 timers,
                                 gmx_wallclock_gpu_nbnxn_t* timings,
                                 const InteractionLocality  iloc)
{
    gpu_timers_t::Interaction& iTimers = timers->interaction[iloc];

    // We might have not done any pruning (e.g. if we skipped with empty domains).
    if (!iTimers.didPrune && !iTimers.didRollingPrune)
    {
        return;
    }

    if (iTimers.didPrune)
    {
        timings->pruneTime.c++;
        timings->pruneTime.t += iTimers.prune_k.getLastRangeTime();
    }

    if (iTimers.didRollingPrune)
    {
        timings->dynamicPruneTime.c++;
        timings->dynamicPruneTime.t += iTimers.rollingPrune_k.getLastRangeTime();
    }
}

/*! \brief Reduce data staged internally in the nbnxn module.
 *
 * Shift forces and electrostatic/LJ energies copied from the GPU into
 * a module-internal staging area are immediately reduced (CPU-side buffers passed)
 * after having waited for the transfers' completion.
 *
 * Note that this function should always be called after the transfers into the
 * staging buffers have completed.
 *
 * \tparam     StagingData    Type of staging data
 * \param[in]  nbst           Nonbonded staging data
 * \param[in]  iLocality      Interaction locality specifier
 * \param[in]  reduceEnergies True if energy reduction should be done
 * \param[in]  reduceFshift   True if shift force reduction should be done
 * \param[out] e_lj           Variable to accumulate LJ energy into
 * \param[out] e_el           Variable to accumulate electrostatic energy into
 * \param[out] fshift         Pointer to the array of shift forces to accumulate into
 */
template<typename StagingData>
static inline void gpu_reduce_staged_outputs(const StagingData&        nbst,
                                             const InteractionLocality iLocality,
                                             const bool                reduceEnergies,
                                             const bool                reduceFshift,
                                             real*                     e_lj,
                                             real*                     e_el,
                                             rvec*                     fshift)
{
    /* add up energies and shift forces (only once at local F wait) */
    if (iLocality == InteractionLocality::Local)
    {
        if (reduceEnergies)
        {
            *e_lj += *nbst.e_lj;
            *e_el += *nbst.e_el;
        }

        if (reduceFshift)
        {
            for (int i = 0; i < SHIFTS; i++)
            {
                rvec_inc(fshift[i], nbst.fshift[i]);
            }
        }
    }
}

/*! \brief Do the per-step timing accounting of the nonbonded tasks.
 *
 *  Does timing accumulation and call-count increments for the nonbonded kernels.
 *  Note that this function should be called after the current step's nonbonded
 *  tasks have completed, with the exception of the rolling pruning kernels
 *  that are accounted for during the following step.
 *
 * NOTE: if timing with multiple GPUs (streams) becomes possible, the
 *       counters could end up being inconsistent due to not being incremented
 *       on some of the nodes when this is skipped on empty local domains!
 *
 * \tparam     GpuTimers    GPU timers type
 * \tparam     GpuPairlist  Pair list type
 * \param[out] timings      Pointer to the NB GPU timings data
 * \param[in]  timers       Pointer to GPU timers data
 * \param[in]  plist        Pointer to the pair list data
 * \param[in]  atomLocality Atom locality specifier
 * \param[in]  stepWork     Force schedule flags
 * \param[in]  doTiming     True if timing is enabled.
 */
template<typename GpuTimers, typename GpuPairlist>
static inline void gpu_accumulate_timings(gmx_wallclock_gpu_nbnxn_t* timings,
                                          GpuTimers*                 timers,
                                          const GpuPairlist*         plist,
                                          AtomLocality               atomLocality,
                                          const gmx::StepWorkload&   stepWork,
                                          bool                       doTiming)
{
    /* timing data accumulation */
    if (!doTiming)
    {
        return;
    }

    /* determine interaction locality from atom locality */
    const InteractionLocality iLocality = gpuAtomToInteractionLocality(atomLocality);
    const bool                didEnergyKernels = stepWork.computeEnergy;

    /* only increase counter once (at local F wait) */
    if (iLocality == InteractionLocality::Local)
    {
        timings->nb_c++;
        timings->ktime[plist->haveFreshList ? 1 : 0][didEnergyKernels ? 1 : 0].c += 1;
    }

    /* kernel timings */
    timings->ktime[plist->haveFreshList ? 1 : 0][didEnergyKernels ? 1 : 0].t +=
            timers->interaction[iLocality].nb_k.getLastRangeTime();

    /* X/q H2D and F D2H timings */
    timings->nb_h2d_t += timers->xf[atomLocality].nb_h2d.getLastRangeTime();
    timings->nb_d2h_t += timers->xf[atomLocality].nb_d2h.getLastRangeTime();

    /* Count the pruning kernel times for both cases: 1st pass (at search step)
       and rolling pruning (if called at the previous step).
       We do the accounting here as this is the only sync point where we
       know (without checking or additional sync-ing) that prune tasks in
       the current stream have completed (having just blocking-waited
       for the force D2H). */
    countPruneKernelTime(timers, timings, iLocality);

    /* only count atdat at pair-search steps (add only once, at local F wait) */
    if (stepWork.doNeighborSearch && atomLocality == AtomLocality::Local)
    {
        /* atdat transfer timing */
        timings->pl_h2d_c++;
        timings->pl_h2d_t += timers->atdat.getLastRangeTime();
    }

    /* only count pair-list H2D when actually performed */
    if (timers->interaction[iLocality].didPairlistH2D)
    {
        timings->pl_h2d_t += timers->interaction[iLocality].pl_h2d.getLastRangeTime();

        /* Clear the timing flag for the next step */
        timers->interaction[iLocality].didPairlistH2D = false;
    }
}

/*! \brief Attempts to complete nonbonded GPU task.
 *
 * See documentation in nbnxm_gpu.h for details.
 *
 * \todo Move into shared source file, perhaps including
 * cuda_runtime.h if needed for any remaining CUDA-specific
 * objects.
 */
//NOLINTNEXTLINE(misc-definitions-in-headers)
bool gpu_try_finish_task(NbnxmGpu*                nb,
                         const gmx::StepWorkload& stepWork,
                         const AtomLocality       aloc,
                         real*                    e_lj,
                         real*                    e_el,
                         gmx::ArrayRef<gmx::RVec> shiftForces,
                         GpuTaskCompletion        completionKind,
                         gmx_wallcycle*           wcycle)
{
    GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");

    /* determine interaction locality from atom locality */
    const InteractionLocality iLocality = gpuAtomToInteractionLocality(aloc);

    // Transfers are launched and therefore need to be waited on if:
    // - buffer ops are not offloaded
    // - energies or virials are needed (on the local stream)
    //
    // (Note that useGpuFBufferOps and computeVirial are mutually exclusive
    // in current code as virial steps do CPU reduction.)
    const bool haveResultToWaitFor =
            (!stepWork.useGpuFBufferOps
             || (aloc == AtomLocality::Local && (stepWork.computeEnergy || stepWork.computeVirial)));

    // We skip the wait if during the non-local phase there was actually no work to do.
    // This is consistent with nbnxn_gpu_launch_kernel, but it also considers possible
    // bonded GPU work.
    if ((iLocality == InteractionLocality::Local) || haveGpuShortRangeWork(*nb, iLocality))
    {
        // Query the state of the GPU stream and return early if we're not done
        if (completionKind == GpuTaskCompletion::Check)
        {
            // To get the wcycle call count right, when in GpuTaskCompletion::Check mode,
            // we start without counting and only when the task has finished do we issue a
            // start/stop to increment.
            // In GpuTaskCompletion::Wait mode the timing is expected to be done in the caller.
            wallcycle_start_nocount(wcycle, ewcWAIT_GPU_NB_L);

            if (!haveStreamTasksCompleted(*nb->deviceStreams[iLocality]))
            {
                wallcycle_stop(wcycle, ewcWAIT_GPU_NB_L);

                // Early return to skip the steps below that we have to do only
                // after the NB task completed
                return false;
            }

            wallcycle_increment_event_count(wcycle, ewcWAIT_GPU_NB_L);
        }
        else if (haveResultToWaitFor)
        {
            nb->deviceStreams[iLocality]->synchronize();
        }

        // TODO: this needs to be moved later because conditional wait could break timing
        // with a future OpenCL implementation, but with CUDA timing is anyway disabled
        // in all cases where we skip the wait.
        gpu_accumulate_timings(nb->timings, nb->timers, nb->plist[iLocality], aloc, stepWork, nb->bDoTime);

        if (stepWork.computeEnergy || stepWork.computeVirial)
        {
            gpu_reduce_staged_outputs(nb->nbst, iLocality, stepWork.computeEnergy,
                                      stepWork.computeVirial, e_lj, e_el,
                                      as_rvec_array(shiftForces.data()));
        }
    }

    /* Always reset both pruning flags (doesn't hurt doing it even when timing is off). */
    nb->timers->interaction[iLocality].didPrune =
            nb->timers->interaction[iLocality].didRollingPrune = false;

    /* Turn off initial list pruning (doesn't hurt if this is not pair-search step). */
    nb->plist[iLocality]->haveFreshList = false;

    return true;
}

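/* Illustrative polling sketch (hypothetical caller loop): in
 * GpuTaskCompletion::Check mode the function returns false while the stream
 * is still busy, so a caller can overlap CPU work with the GPU tasks:
 *
 *     while (!gpu_try_finish_task(nb, stepWork, aloc, &e_lj, &e_el, shiftForces,
 *                                 GpuTaskCompletion::Check, wcycle))
 *     {
 *         doCpuWorkChunk(); // hypothetical CPU-side work to overlap
 *     }
 */
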
/*! \brief
 * Wait for the asynchronously launched nonbonded tasks and data
 * transfers to finish.
 *
 * Also does timing accounting and reduction of the internal staging buffers.
 * As this is called at the end of the step, it also resets the pair list and
 * pruning flags.
 *
 * \param[in]  nb          The nonbonded data GPU structure
 * \param[in]  stepWork    Force schedule flags
 * \param[in]  aloc        Atom locality identifier
 * \param[out] e_lj        Pointer to the LJ energy output to accumulate into
 * \param[out] e_el        Pointer to the electrostatics energy output to accumulate into
 * \param[out] shiftForces Shift forces buffer to accumulate into
 * \param[out] wcycle      Pointer to wallcycle data structure
 * \return                 The number of cycles the GPU wait took
 */
//NOLINTNEXTLINE(misc-definitions-in-headers) TODO: move into source file
float gpu_wait_finish_task(NbnxmGpu*                nb,
                           const gmx::StepWorkload& stepWork,
                           AtomLocality             aloc,
                           real*                    e_lj,
                           real*                    e_el,
                           gmx::ArrayRef<gmx::RVec> shiftForces,
                           gmx_wallcycle*           wcycle)
{
    auto cycleCounter = (gpuAtomToInteractionLocality(aloc) == InteractionLocality::Local)
                                ? ewcWAIT_GPU_NB_L
                                : ewcWAIT_GPU_NB_NL;

    wallcycle_start(wcycle, cycleCounter);
    gpu_try_finish_task(nb, stepWork, aloc, e_lj, e_el, shiftForces, GpuTaskCompletion::Wait, wcycle);
    float waitTime = wallcycle_stop(wcycle, cycleCounter);

    return waitTime;
}

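/* Illustrative sketch: a blocking caller simply uses the wait variant and can,
 * for example, feed the returned wait time into load-balancing heuristics:
 *
 *     const float waitCycles = gpu_wait_finish_task(nb, stepWork, AtomLocality::Local,
 *                                                   &e_lj, &e_el, shiftForces, wcycle);
 */
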
} // namespace Nbnxm

#endif