/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2017,2018,2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

/*! \libinternal \file
 * \brief Declare interface for GPU execution for NBNXN module
 *
 * \author Szilard Pall <pall.szilard@gmail.com>
 * \author Mark Abraham <mark.j.abraham@gmail.com>
 * \ingroup module_nbnxm
 */

#ifndef GMX_NBNXM_NBNXM_GPU_H
#define GMX_NBNXM_NBNXM_GPU_H

#include "gromacs/gpu_utils/gpu_macros.h"
#include "gromacs/math/vectypes.h"
#include "gromacs/nbnxm/atomdata.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/real.h"

#include "gpu_types.h"
#include "locality.h"

struct nbnxn_atomdata_t;
enum class GpuTaskCompletion;
enum class GpuBufferOpsAccumulateForce;

namespace gmx
{
class GpuBonded;
}

namespace Nbnxm
{

class Grid;

/*! \brief
 * Launch asynchronously the xq buffer host to device copy.
 *
 * The nonlocal copy is skipped if there is no dependent work to do:
 * neither non-local nonbonded interactions nor bonded GPU work.
 *
 * \param [in]    nb        GPU nonbonded data.
 * \param [in]    nbdata    Host-side atom data structure.
 * \param [in]    aloc      Atom locality flag.
 */
GPU_FUNC_QUALIFIER
void gpu_copy_xq_to_gpu(gmx_nbnxn_gpu_t gmx_unused               *nb,
                        const struct nbnxn_atomdata_t gmx_unused *nbdata,
                        AtomLocality gmx_unused                   aloc) GPU_FUNC_TERM;

/*! \brief
 * Launch asynchronously the nonbonded force calculations.
 *
 * Also launches the initial pruning of a fresh list after search.
 *
 * The local and non-local interaction calculations are launched in two
 * separate streams. If there is no work (i.e. empty pair list), the
 * force kernel launch is omitted.
 */
GPU_FUNC_QUALIFIER
void gpu_launch_kernel(gmx_nbnxn_gpu_t gmx_unused    *nb,
                       int gmx_unused                 flags,
                       InteractionLocality gmx_unused iloc) GPU_FUNC_TERM;

/*! \brief
 * Launch asynchronously the nonbonded prune-only kernel.
 *
 * The local and non-local list pruning are launched in their separate streams.
 *
 * Notes for future scheduling tuning:
 * Currently we schedule the dynamic pruning between two MD steps *after* both local and
 * nonlocal force D2H transfers have completed. We could launch it already after the copy-back
 * is launched, but we want to avoid prune kernels (especially in the non-local
 * high-priority stream) competing with nonbonded work.
 *
 * However, this is not ideal as this schedule does not expose the available
 * concurrency. The dynamic pruning kernel:
 * - should be allowed to overlap with any task other than force compute, including
 *   transfers (F D2H and the next step's x H2D as well as force clearing);
 * - should preferably avoid competing with non-bonded force kernels belonging
 *   to the same rank, and ideally with those of other ranks too.
 *
 * In the most general case, the former would require scheduling pruning in a separate
 * stream and adding additional event sync points to ensure that force kernels read
 * consistent pair list data. This would lead to some overhead (due to extra
 * cudaStreamWaitEvent calls, 3-5 us/call) which we might be able to live with.
 * The gains from additional overlap might not be significant as long as
 * update+constraints anyway takes longer than pruning, but there will still
 * be use-cases where more overlap may help (e.g. multiple ranks per GPU,
 * no constraints or h-bond-only constraints).
 * The second point above is harder to address given that multiple ranks will often
 * share a GPU. Ranks that complete their nonbondeds sooner can schedule pruning earlier,
 * and without a third priority level it is difficult to avoid some interference of
 * prune kernels with force tasks (in particular preemption of the low-priority local force task).
 *
 * \param [inout] nb        GPU nonbonded data.
 * \param [in]    iloc      Interaction locality flag.
 * \param [in]    numParts  Number of parts the pair list is split into in the rolling kernel.
 */
GPU_FUNC_QUALIFIER
void gpu_launch_kernel_pruneonly(gmx_nbnxn_gpu_t gmx_unused    *nb,
                                 InteractionLocality gmx_unused iloc,
                                 int gmx_unused                 numParts) GPU_FUNC_TERM;
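
/*
 * Usage sketch (a hedged illustration, not code from this module): a caller
 * splitting dynamic pruning of the local list into parts and launching one
 * part per MD step between searches. The variables nb and numRollingParts
 * are hypothetical; the module internally advances through the parts on
 * successive calls.
 *
 *   const int numRollingParts = 2;
 *   gpu_launch_kernel_pruneonly(nb, InteractionLocality::Local, numRollingParts);
 */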

/*! \brief
 * Launch asynchronously the download of short-range forces from the GPU
 * (and energies/shift forces if required).
 */
GPU_FUNC_QUALIFIER
void gpu_launch_cpyback(gmx_nbnxn_gpu_t gmx_unused  *nb,
                        nbnxn_atomdata_t gmx_unused *nbatom,
                        int gmx_unused               flags,
                        AtomLocality gmx_unused      aloc,
                        const bool gmx_unused        copyBackNbForce) GPU_FUNC_TERM;
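
/*
 * Usage sketch of the asynchronous per-step launch sequence for the local atom
 * range (a hedged illustration only; the variables nb, nbat and forceFlags are
 * hypothetical, and real callers handle the non-local range and domain
 * decomposition as well):
 *
 *   gpu_copy_xq_to_gpu(nb, nbat, AtomLocality::Local);             // x/q H2D
 *   gpu_launch_kernel(nb, forceFlags, InteractionLocality::Local); // force kernel
 *   gpu_launch_cpyback(nb, nbat, forceFlags, AtomLocality::Local,
 *                      true);                                      // F (and E/fshift) D2H
 */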

/*! \brief Attempts to complete nonbonded GPU task.
 *
 * This function attempts to complete the nonbonded task (both GPU and CPU auxiliary work).
 * Success, i.e. that the tasks completed and results are ready to be consumed, is signaled
 * by the return value (always true if the blocking wait mode is requested).
 *
 * The \p completionKind parameter controls whether the behavior is non-blocking
 * (achieved by passing GpuTaskCompletion::Check) or blocking wait until the results
 * are ready (when GpuTaskCompletion::Wait is passed).
 * In "Check" mode the function returns immediately if the GPU stream
 * still contains tasks that have not completed; this allows more flexible overlapping
 * of work on the CPU with GPU execution.
 *
 * Note that it is only safe to use the results, and to continue to the next MD
 * step, when this function has returned true, which indicates successful completion of
 * - all nonbonded GPU tasks: both compute and device transfer(s);
 * - auxiliary tasks: updating the internal module state (timing accumulation, list pruning states); and
 * - internal staging reduction of (\p fshift, \p e_el, \p e_lj).
 *
 * TODO: improve the handling of outputs e.g. by ensuring that this function explicitly returns the
 * force buffer (instead of that being passed only to nbnxn_gpu_launch_cpyback()) and by returning
 * the energy and Fshift contributions for some external/centralized reduction.
 *
 * \param[in]  nb             The nonbonded data GPU structure
 * \param[in]  flags          Force flags
 * \param[in]  aloc           Atom locality identifier
 * \param[out] e_lj           Pointer to the LJ energy output to accumulate into
 * \param[out] e_el           Pointer to the electrostatics energy output to accumulate into
 * \param[out] fshift         Pointer to the shift force buffer to accumulate into
 * \param[in]  completionKind Indicates whether nonbonded task completion should only be checked rather than waited for
 * \returns                   True if the nonbonded tasks associated with \p aloc locality have completed
 */
GPU_FUNC_QUALIFIER
bool gpu_try_finish_task(gmx_nbnxn_gpu_t gmx_unused  *nb,
                         int gmx_unused               flags,
                         AtomLocality gmx_unused      aloc,
                         real gmx_unused             *e_lj,
                         real gmx_unused             *e_el,
                         rvec gmx_unused             *fshift,
                         GpuTaskCompletion gmx_unused completionKind) GPU_FUNC_TERM_WITH_RETURN(false);
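
/*
 * Usage sketch of non-blocking completion checking (a hedged illustration only;
 * doSomeCpuWork(), nb and forceFlags are hypothetical, and the shift-force
 * buffer size is assumed to be SHIFTS from the pbc module):
 *
 *   real e_lj = 0, e_el = 0;
 *   rvec fshift[SHIFTS];
 *   while (!gpu_try_finish_task(nb, forceFlags, AtomLocality::Local,
 *                               &e_lj, &e_el, fshift, GpuTaskCompletion::Check))
 *   {
 *       doSomeCpuWork(); // overlap CPU work with the outstanding GPU tasks
 *   }
 */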

/*! \brief Completes the nonbonded GPU task blocking until GPU tasks and data
 * transfers have finished.
 *
 * Also does timing accounting and reduction of the internal staging buffers.
 * As this is called at the end of the step, it also resets the pair list and
 * pruning flags.
 *
 * \param[in]  nb     The nonbonded data GPU structure
 * \param[in]  flags  Force flags
 * \param[in]  aloc   Atom locality identifier
 * \param[out] e_lj   Pointer to the LJ energy output to accumulate into
 * \param[out] e_el   Pointer to the electrostatics energy output to accumulate into
 * \param[out] fshift Pointer to the shift force buffer to accumulate into
 */
GPU_FUNC_QUALIFIER
void gpu_wait_finish_task(gmx_nbnxn_gpu_t gmx_unused *nb,
                          int gmx_unused              flags,
                          AtomLocality gmx_unused     aloc,
                          real gmx_unused            *e_lj,
                          real gmx_unused            *e_el,
                          rvec gmx_unused            *fshift) GPU_FUNC_TERM;

/*! \brief Selects the Ewald kernel type, analytical or tabulated, single or twin cut-off. */
GPU_FUNC_QUALIFIER
int gpu_pick_ewald_kernel_type(bool gmx_unused bTwinCut) GPU_FUNC_TERM_WITH_RETURN(-1);

/*! \brief Initialization for X buffer operations on GPU.
 * Called on the NS step and performs (re-)allocations and memory copies. */
CUDA_FUNC_QUALIFIER
void nbnxn_gpu_init_x_to_nbat_x(const Nbnxm::GridSet gmx_unused &gridSet,
                                gmx_nbnxn_gpu_t gmx_unused      *gpu_nbv) CUDA_FUNC_TERM;

/*! \brief X buffer operations on GPU: performs conversion from rvec to nb format.
 */
CUDA_FUNC_QUALIFIER
void nbnxn_gpu_x_to_nbat_x(const Nbnxm::Grid gmx_unused  &grid,
                           bool gmx_unused                setFillerCoords,
                           gmx_nbnxn_gpu_t gmx_unused    *gpu_nbv,
                           void gmx_unused               *xPmeDevicePtr,
                           Nbnxm::AtomLocality gmx_unused locality,
                           const rvec gmx_unused         *x,
                           int gmx_unused                 gridId,
                           int gmx_unused                 numColumnsMax) CUDA_FUNC_TERM;
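
/*
 * Usage sketch of the X buffer ops pair (a hedged illustration only; gridSet,
 * grid, gpu_nbv, x, gridId and numColumnsMax are hypothetical, and a null PME
 * device pointer is assumed to mean that coordinates come from the host):
 *
 *   // On a (neighbour-)search step: (re-)allocate and upload grid data.
 *   nbnxn_gpu_init_x_to_nbat_x(gridSet, gpu_nbv);
 *   // On every step: convert rvec coordinates to the nbat layout on the GPU.
 *   nbnxn_gpu_x_to_nbat_x(grid, false, gpu_nbv, nullptr,
 *                         Nbnxm::AtomLocality::Local, x, gridId, numColumnsMax);
 */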

/*! \brief Sync the nonlocal stream with dependent tasks in the local queue.
 *
 * \param[in] nb                  The nonbonded data GPU structure
 * \param[in] interactionLocality Local or NonLocal sync point
 */
CUDA_FUNC_QUALIFIER
void nbnxnInsertNonlocalGpuDependency(const gmx_nbnxn_gpu_t gmx_unused    *nb,
                                      const InteractionLocality gmx_unused interactionLocality) CUDA_FUNC_TERM;

/*! \brief Set up internal flags that indicate what type of short-range work there is.
 *
 * As nonbonded and bonded tasks share input/output buffers and GPU queues,
 * both are considered when checking for work in the current domain.
 *
 * This function is expected to be called every time the work distribution
 * can change (i.e. at search/domain decomposition steps).
 *
 * \param[inout] nb        Pointer to the nonbonded GPU data structure
 * \param[in]    gpuBonded Pointer to the GPU bonded data structure
 * \param[in]    iLocality Interaction locality identifier
 */
GPU_FUNC_QUALIFIER
void setupGpuShortRangeWork(gmx_nbnxn_gpu_t gmx_unused                 *nb,
                            const gmx::GpuBonded gmx_unused            *gpuBonded,
                            const Nbnxm::InteractionLocality gmx_unused iLocality) GPU_FUNC_TERM;

/*! \brief Returns true if there is GPU short-range work for the given atom locality.
 *
 * Note that, unlike nonbonded tasks, bonded tasks are not split into local/nonlocal;
 * therefore, if there are GPU-offloaded bonded interactions, this function will return
 * true for both local and nonlocal atom ranges.
 *
 * \param[inout] nb        Pointer to the nonbonded GPU data structure
 * \param[in]    aLocality Atom locality identifier
 */
GPU_FUNC_QUALIFIER
bool haveGpuShortRangeWork(const gmx_nbnxn_gpu_t gmx_unused    *nb,
                           const Nbnxm::AtomLocality gmx_unused aLocality) GPU_FUNC_TERM_WITH_RETURN(false);
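
/*
 * Usage sketch of the short-range work flags (a hedged illustration only; nb
 * and gpuBonded are hypothetical):
 *
 *   // At each search/domain-decomposition step, record what work exists.
 *   setupGpuShortRangeWork(nb, gpuBonded, Nbnxm::InteractionLocality::Local);
 *   // On later steps, skip launches and waits when there is nothing to do.
 *   if (haveGpuShortRangeWork(nb, Nbnxm::AtomLocality::Local))
 *   {
 *       // ... launch transfers and kernels for the local range ...
 *   }
 */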

/*! \brief Initialization for F buffer operations on GPU */
CUDA_FUNC_QUALIFIER
void nbnxn_gpu_init_add_nbat_f_to_f(const int gmx_unused       *cell,
                                    gmx_nbnxn_gpu_t gmx_unused *gpu_nbv,
                                    int gmx_unused              natoms_total) CUDA_FUNC_TERM;

/*! \brief F buffer operations on GPU: adds nb format force to rvec format. */
CUDA_FUNC_QUALIFIER
void nbnxn_gpu_add_nbat_f_to_f(const AtomLocality gmx_unused          atomLocality,
                               gmx_nbnxn_gpu_t gmx_unused            *gpu_nbv,
                               int gmx_unused                         atomStart,
                               int gmx_unused                         nAtoms,
                               GpuBufferOpsAccumulateForce gmx_unused accumulateForce) CUDA_FUNC_TERM;

/*! \brief Copy force buffer from CPU to GPU */
CUDA_FUNC_QUALIFIER
void nbnxn_launch_copy_f_to_gpu(const AtomLocality gmx_unused    atomLocality,
                                const Nbnxm::GridSet gmx_unused &gridSet,
                                gmx_nbnxn_gpu_t gmx_unused      *nb,
                                rvec gmx_unused                 *f) CUDA_FUNC_TERM;

/*! \brief Copy force buffer from GPU to CPU */
CUDA_FUNC_QUALIFIER
void nbnxn_launch_copy_f_from_gpu(const AtomLocality gmx_unused    atomLocality,
                                  const Nbnxm::GridSet gmx_unused &gridSet,
                                  gmx_nbnxn_gpu_t gmx_unused      *nb,
                                  rvec gmx_unused                 *f) CUDA_FUNC_TERM;

/*! \brief Wait for the GPU force reduction on the given atom locality to complete */
CUDA_FUNC_QUALIFIER
void nbnxn_wait_for_gpu_force_reduction(const AtomLocality gmx_unused atomLocality,
                                        gmx_nbnxn_gpu_t gmx_unused   *nb) CUDA_FUNC_TERM;
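
/*
 * Usage sketch of the F buffer ops sequence (a hedged illustration only; cell,
 * gridSet, gpu_nbv, f, atomStart, nAtoms, natoms_total and accumulateForce are
 * hypothetical):
 *
 *   // On a search step: set up the atom-to-cell mapping on the GPU.
 *   nbnxn_gpu_init_add_nbat_f_to_f(cell, gpu_nbv, natoms_total);
 *   // Per step: stage CPU forces on the GPU, reduce the nbat-format forces
 *   // into them, copy the result back, and wait for the reduction to finish.
 *   nbnxn_launch_copy_f_to_gpu(AtomLocality::Local, gridSet, gpu_nbv, f);
 *   nbnxn_gpu_add_nbat_f_to_f(AtomLocality::Local, gpu_nbv, atomStart, nAtoms,
 *                             accumulateForce);
 *   nbnxn_launch_copy_f_from_gpu(AtomLocality::Local, gridSet, gpu_nbv, f);
 *   nbnxn_wait_for_gpu_force_reduction(AtomLocality::Local, gpu_nbv);
 */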

} // namespace Nbnxm

#endif