 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 * \brief Define CUDA implementation of nbnxm_gpu.h
 *
 * \author Szilard Pall <pall.szilard@gmail.com>
#include "gromacs/nbnxm/nbnxm_gpu.h"

#include "nbnxm_cuda.h"

#include "gromacs/gpu_utils/cudautils.cuh"
#include "gromacs/gpu_utils/vectype_ops.cuh"
#include "gromacs/mdlib/force_flags.h"
#include "gromacs/nbnxm/atomdata.h"
#include "gromacs/nbnxm/gpu_common.h"
#include "gromacs/nbnxm/gpu_common_utils.h"
#include "gromacs/nbnxm/gpu_data_mgmt.h"
#include "gromacs/nbnxm/grid.h"
#include "gromacs/nbnxm/nbnxm.h"
#include "gromacs/nbnxm/pairlist.h"
#include "gromacs/nbnxm/cuda/nbnxm_buffer_ops_kernels.cuh"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/gmxassert.h"

#include "nbnxm_cuda_types.h"
/***** The kernel declarations/definitions come here *****/

/* Top-level kernel declaration generation: will generate through multiple
 * inclusion the following flavors for all kernel declarations:
 * - force-only output;
 * - force and energy output;
 * - force-only with pair list pruning;
 * - force and energy output with pair list pruning.
 */
#define FUNCTION_DECLARATION_ONLY
#include "gromacs/nbnxm/cuda/nbnxm_cuda_kernels.cuh"
/** Force & energy **/
#include "gromacs/nbnxm/cuda/nbnxm_cuda_kernels.cuh"

/*** Pair-list pruning kernels ***/
#include "gromacs/nbnxm/cuda/nbnxm_cuda_kernels.cuh"
/** Force & energy **/
#include "gromacs/nbnxm/cuda/nbnxm_cuda_kernels.cuh"

/* Prune-only kernels */
#include "gromacs/nbnxm/cuda/nbnxm_cuda_kernel_pruneonly.cuh"
#undef FUNCTION_DECLARATION_ONLY
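
/* Illustrative sketch (not compiled): the multiple-inclusion technique above works by
 * including one kernel template header several times, each time with a different
 * combination of flavor macros, so that a single template generates all of the
 * force/energy/prune variants. The macro and header names below are hypothetical
 * placeholders, not the ones defined by nbnxm_cuda_kernels.cuh.
 */
#if 0
#define EXAMPLE_CALC_ENERGIES                /* request the force+energy flavor      */
#include "example_nb_kernel_template.cuh"    /* generates the *_VF_* declarations    */
#undef EXAMPLE_CALC_ENERGIES
#include "example_nb_kernel_template.cuh"    /* generates the *_F_* declarations     */
#endif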
/* Now generate the function definitions if we are using a single compilation unit. */
#if GMX_CUDA_NB_SINGLE_COMPILATION_UNIT
#include "gromacs/nbnxm/cuda/nbnxm_cuda_kernel_F_noprune.cu"
#include "gromacs/nbnxm/cuda/nbnxm_cuda_kernel_F_prune.cu"
#include "gromacs/nbnxm/cuda/nbnxm_cuda_kernel_VF_noprune.cu"
#include "gromacs/nbnxm/cuda/nbnxm_cuda_kernel_VF_prune.cu"
#include "gromacs/nbnxm/cuda/nbnxm_cuda_kernel_pruneonly.cu"
#endif /* GMX_CUDA_NB_SINGLE_COMPILATION_UNIT */
//! Number of CUDA threads in a block
// TODO: Optimize this through experimentation
constexpr static int c_bufOpsThreadsPerBlock = 128;

/*! Nonbonded kernel function pointer type */
typedef void (*nbnxn_cu_kfunc_ptr_t)(const cu_atomdata_t,

/*********************************/
/*! Returns the number of blocks to be used for the nonbonded GPU kernel. */
static inline int calc_nb_kernel_nblock(int nwork_units, const gmx_device_info_t *dinfo)
    /* CUDA does not accept grid dimension of 0 (which can happen e.g. with an
       empty domain) and that case should be handled before this point. */
    assert(nwork_units > 0);

    max_grid_x_size = dinfo->prop.maxGridSize[0];

    /* do we exceed the grid x dimension limit? */
    if (nwork_units > max_grid_x_size)
        gmx_fatal(FARGS, "Watch out, the input system is too large to simulate!\n"
                  "The number of nonbonded work units (=number of super-clusters) exceeds the "
                  "maximum grid size in x dimension (%d > %d)!", nwork_units, max_grid_x_size);
/* Constant arrays listing all kernel function pointers and enabling selection
   of a kernel in an elegant manner. */

/*! Pointers to the non-bonded kernels organized in 2-dim arrays by:
 *  electrostatics and VDW type.
 *
 *  Note that the row- and column-order of function pointers has to match the
 *  order of corresponding enumerated electrostatics and vdw types, resp.,
 *  defined in nbnxm_cuda_types.h.
 */
/*! Force-only kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_noener_noprune_ptr[eelCuNR][evdwCuNR] =
    { nbnxn_kernel_ElecCut_VdwLJ_F_cuda, nbnxn_kernel_ElecCut_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecCut_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_F_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_F_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecRF_VdwLJ_F_cuda, nbnxn_kernel_ElecRF_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecRF_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_F_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_F_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecEwQSTab_VdwLJ_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecEw_VdwLJ_F_cuda, nbnxn_kernel_ElecEw_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecEw_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_cuda }
/*! Force + energy kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_ener_noprune_ptr[eelCuNR][evdwCuNR] =
    { nbnxn_kernel_ElecCut_VdwLJ_VF_cuda, nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecCut_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecRF_VdwLJ_VF_cuda, nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecRF_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecEw_VdwLJ_VF_cuda, nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecEw_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_cuda }
/*! Force + pruning kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_noener_prune_ptr[eelCuNR][evdwCuNR] =
    { nbnxn_kernel_ElecCut_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecRF_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecEwQSTab_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecEw_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_prune_cuda }
/*! Force + energy + pruning kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_ener_prune_ptr[eelCuNR][evdwCuNR] =
    { nbnxn_kernel_ElecCut_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecRF_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecEw_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_prune_cuda }
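
/* Usage sketch (illustrative values only): a kernel flavor is selected simply by
 * indexing the tables above with the runtime electrostatics and VdW type, which is
 * what select_nbnxn_kernel() below does.
 */
#if 0
int                  exampleEelType = 0;   // any value in [0, eelCuNR)
int                  exampleVdwType = 0;   // any value in [0, evdwCuNR)
nbnxn_cu_kfunc_ptr_t exampleKernel  = nb_kfunc_ener_prune_ptr[exampleEelType][exampleVdwType];
#endif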
/*! Return a pointer to the kernel version to be executed at the current step. */
static inline nbnxn_cu_kfunc_ptr_t select_nbnxn_kernel(int eeltype,
                                                       const gmx_device_info_t gmx_unused *devInfo)
    nbnxn_cu_kfunc_ptr_t res;

    GMX_ASSERT(eeltype < eelCuNR,
               "The electrostatics type requested is not implemented in the CUDA kernels.");
    GMX_ASSERT(evdwtype < evdwCuNR,
               "The VdW type requested is not implemented in the CUDA kernels.");

    /* assert assumptions made by the kernels */
    GMX_ASSERT(c_nbnxnGpuClusterSize*c_nbnxnGpuClusterSize/c_nbnxnGpuClusterpairSplit == devInfo->prop.warpSize,
               "The CUDA kernels require the cluster_size_i*cluster_size_j/nbnxn_gpu_clusterpair_split to match the warp size of the architecture targeted.");

    res = nb_kfunc_ener_prune_ptr[eeltype][evdwtype];
    res = nb_kfunc_ener_noprune_ptr[eeltype][evdwtype];
    res = nb_kfunc_noener_prune_ptr[eeltype][evdwtype];
    res = nb_kfunc_noener_noprune_ptr[eeltype][evdwtype];
/*! \brief Calculates the amount of shared memory required by the nonbonded kernel in use. */
static inline int calc_shmem_required_nonbonded(const int num_threads_z, const gmx_device_info_t gmx_unused *dinfo, const cu_nbparam_t *nbp)
    /* size of shmem (force-buffers/xq/atom type preloading) */
    /* NOTE: with the default kernel on sm3.0 we need shmem only for pre-loading */
    /* i-atom x+q in shared memory */
    shmem = c_numClPerSupercl * c_clSize * sizeof(float4);
    /* cj in shared memory, for each warp separately */
    shmem += num_threads_z * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize * sizeof(int);

    if (nbp->vdwtype == evdwCuCUTCOMBGEOM ||
        nbp->vdwtype == evdwCuCUTCOMBLB)
        /* i-atom LJ combination parameters in shared memory */
        shmem += c_numClPerSupercl * c_clSize * sizeof(float2);
        /* i-atom types in shared memory */
        shmem += c_numClPerSupercl * c_clSize * sizeof(int);
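
/* Worked example of the sizes computed above (illustrative; assumes the usual CUDA
 * kernel geometry of c_numClPerSupercl = 8, c_clSize = 8, c_nbnxnGpuClusterpairSplit = 2
 * and c_nbnxnGpuJgroupSize = 4):
 *   xq preload:          8*8*sizeof(float4) = 1024 bytes
 *   cj preload:          num_threads_z*2*4*sizeof(int) = 32*num_threads_z bytes
 *   LJ comb. parameters: 8*8*sizeof(float2) = 512 bytes (geometric/LB combination rules)
 *   atom types:          8*8*sizeof(int)    = 256 bytes (all other VdW types)
 * i.e. with num_threads_z = 1 roughly 1.3 to 1.5 kB of shared memory per block.
 */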
/*! \brief Sync the nonlocal stream with dependent tasks in the local queue.
 *
 *  As the point where the local stream tasks can be considered complete happens
 *  at the same call point where the nonlocal stream should be synced with the
 *  local, this function records the event if called with the local stream as
 *  argument and inserts in the GPU stream a wait on the event on the nonlocal.
 */
void nbnxnInsertNonlocalGpuDependency(const gmx_nbnxn_cuda_t   *nb,
                                      const InteractionLocality interactionLocality)
    cudaStream_t stream = nb->stream[interactionLocality];

    /* When we get here all misc operations issued in the local stream as well as
       the local xq H2D are done,
       so we record that in the local stream and wait for it in the nonlocal one.
       This wait needs to precede any PP tasks, bonded or nonbonded, that may
       compute on interactions between local and nonlocal atoms.
     */
    if (nb->bUseTwoStreams)
        if (interactionLocality == InteractionLocality::Local)
            cudaError_t stat = cudaEventRecord(nb->misc_ops_and_local_H2D_done, stream);
            CU_RET_ERR(stat, "cudaEventRecord on misc_ops_and_local_H2D_done failed");
            cudaError_t stat = cudaStreamWaitEvent(stream, nb->misc_ops_and_local_H2D_done, 0);
            CU_RET_ERR(stat, "cudaStreamWaitEvent on misc_ops_and_local_H2D_done failed");
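
/* Minimal standalone sketch of the record-and-wait pattern used above (illustrative
 * only; the stream and event names are placeholders and error checking is omitted).
 */
#if 0
cudaEvent_t  exampleLocalDone;
cudaStream_t exampleLocalStream, exampleNonLocalStream;
cudaEventCreateWithFlags(&exampleLocalDone, cudaEventDisableTiming);
/* ... enqueue the local H2D copies and misc ops into exampleLocalStream ... */
cudaEventRecord(exampleLocalDone, exampleLocalStream);           /* mark the completion point in the local stream */
cudaStreamWaitEvent(exampleNonLocalStream, exampleLocalDone, 0); /* nonlocal work enqueued afterwards waits for it */
#endif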
/*! \brief Launch asynchronously the xq buffer host to device copy. */
void gpu_copy_xq_to_gpu(gmx_nbnxn_cuda_t       *nb,
                        const nbnxn_atomdata_t *nbatom,
                        const AtomLocality      atomLocality)
    GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");

    GMX_ASSERT(atomLocality == AtomLocality::Local || atomLocality == AtomLocality::NonLocal,
               "Only local and non-local xq transfers are supported");

    const InteractionLocality iloc = gpuAtomToInteractionLocality(atomLocality);

    int adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */

    cu_atomdata_t *adat   = nb->atdat;
    cu_plist_t    *plist  = nb->plist[iloc];
    cu_timers_t   *t      = nb->timers;
    cudaStream_t   stream = nb->stream[iloc];

    bool           bDoTime = nb->bDoTime;

    /* Don't launch the non-local H2D copy if there is no dependent
       work to do: neither non-local nor other (e.g. bonded) work
       to do that has as input the nbnxn coordinates.
       Doing the same for the local kernel is more complicated, since the
       local part of the force array also depends on the non-local kernel.
       So to avoid complicating the code and to reduce the risk of bugs,
       we always call the local x+q copy (and the rest of the local
       work in nbnxn_gpu_launch_kernel()).
     */
    if ((iloc == InteractionLocality::NonLocal) && !haveGpuShortRangeWork(*nb, iloc))
        plist->haveFreshList = false;

    /* calculate the atom data index range based on locality */
    if (atomLocality == AtomLocality::Local)
        adat_len   = adat->natoms_local;
        adat_begin = adat->natoms_local;
        adat_len   = adat->natoms - adat->natoms_local;

    /* beginning of timed HtoD section */
        t->xf[atomLocality].nb_h2d.openTimingRegion(stream);

    cu_copy_H2D_async(adat->xq + adat_begin, static_cast<const void *>(nbatom->x().data() + adat_begin * 4),
                      adat_len * sizeof(*adat->xq), stream);

        t->xf[atomLocality].nb_h2d.closeTimingRegion(stream);

    /* When we get here all misc operations issued in the local stream as well as
       the local xq H2D are done,
       so we record that in the local stream and wait for it in the nonlocal one.
       This wait needs to precede any PP tasks, bonded or nonbonded, that may
       compute on interactions between local and nonlocal atoms.
     */
    nbnxnInsertNonlocalGpuDependency(nb, iloc);
/*! As we execute nonbonded workload in separate streams, before launching
   the kernel we need to make sure that the following operations have completed:
   - atomdata allocation and related H2D transfers (every nstlist step);
   - pair list H2D transfer (every nstlist step);
   - shift vector H2D transfer (every nstlist step);
   - force (+shift force and energy) output clearing (every step).

   These operations are issued in the local stream at the beginning of the step
   and therefore always complete before the local kernel launch. The non-local
   kernel is launched after the local on the same device/context, hence it is
   inherently scheduled after the operations in the local stream (including the
   above "misc_ops") on pre-GK110 devices with a single hardware queue, but on later
   devices with multiple hardware queues the dependency needs to be enforced.
   We use the misc_ops_and_local_H2D_done event to record the point where
   the local x+q H2D (and all preceding) tasks are complete and synchronize
   with this event in the non-local stream before launching the non-bonded kernel.
 */
void gpu_launch_kernel(gmx_nbnxn_cuda_t          *nb,
                       const InteractionLocality  iloc)
    cu_atomdata_t *adat   = nb->atdat;
    cu_nbparam_t  *nbp    = nb->nbparam;
    cu_plist_t    *plist  = nb->plist[iloc];
    cu_timers_t   *t      = nb->timers;
    cudaStream_t   stream = nb->stream[iloc];

    bool           bCalcEner   = flags & GMX_FORCE_ENERGY;
    bool           bCalcFshift = flags & GMX_FORCE_VIRIAL;
    bool           bDoTime     = nb->bDoTime;

    /* Don't launch the non-local kernel if there is no work to do.
       Doing the same for the local kernel is more complicated, since the
       local part of the force array also depends on the non-local kernel.
       So to avoid complicating the code and to reduce the risk of bugs,
       we always call the local kernel, and later (not in
       this function) the stream wait, local f copyback and the f buffer
       clearing. All these operations, except for the local interaction kernel,
       are needed for the non-local interactions. The skip of the local kernel
       call is taken care of later in this function. */
    if (canSkipNonbondedWork(*nb, iloc))
        plist->haveFreshList = false;

    if (nbp->useDynamicPruning && plist->haveFreshList)
        /* Prunes for rlistOuter and rlistInner, sets plist->haveFreshList=false
           (TODO: ATM that's the way the timing accounting can distinguish between
           separate prune kernel and combined force+prune, maybe we need a better way?).
         */
        gpu_launch_kernel_pruneonly(nb, iloc, 1);

    if (plist->nsci == 0)
        /* Don't launch an empty local kernel (not allowed with CUDA) */

    /* beginning of timed nonbonded calculation section */
        t->interaction[iloc].nb_k.openTimingRegion(stream);

    /* Kernel launch config:
     * - The thread block dimensions match the size of i-clusters, j-clusters,
     *   and j-cluster concurrency, in x, y, and z, respectively.
     * - The 1D block-grid contains as many blocks as super-clusters.
     */
    int num_threads_z = 1;
    if (nb->dev_info->prop.major == 3 && nb->dev_info->prop.minor == 7)
    int nblock = calc_nb_kernel_nblock(plist->nsci, nb->dev_info);

    KernelLaunchConfig config;
    config.blockSize[0]     = c_clSize;
    config.blockSize[1]     = c_clSize;
    config.blockSize[2]     = num_threads_z;
    config.gridSize[0]      = nblock;
    config.sharedMemorySize = calc_shmem_required_nonbonded(num_threads_z, nb->dev_info, nbp);
    config.stream           = stream;

        fprintf(debug, "Non-bonded GPU launch configuration:\n\tThread block: %zux%zux%zu\n\t"
                "\tGrid: %zux%zu\n\t#Super-clusters/clusters: %d/%d (%d)\n"
                config.blockSize[0], config.blockSize[1], config.blockSize[2],
                config.gridSize[0], config.gridSize[1], plist->nsci*c_numClPerSupercl,
                c_numClPerSupercl, plist->na_c,
                config.sharedMemorySize);

    auto       *timingEvent = bDoTime ? t->interaction[iloc].nb_k.fetchNextEvent() : nullptr;
    const auto  kernel      = select_nbnxn_kernel(nbp->eeltype,
                                                  (plist->haveFreshList && !nb->timers->interaction[iloc].didPrune),
    const auto  kernelArgs  = prepareGpuKernelArguments(kernel, config, adat, nbp, plist, &bCalcFshift);
    launchGpuKernel(kernel, config, timingEvent, "k_calc_nb", kernelArgs);

        t->interaction[iloc].nb_k.closeTimingRegion(stream);

    if (GMX_NATIVE_WINDOWS)
        /* Windows: force flushing WDDM queue */
        cudaStreamQuery(stream);
/*! Calculates the amount of shared memory required by the CUDA kernel in use. */
static inline int calc_shmem_required_prune(const int num_threads_z)
    /* i-atom x in shared memory */
    shmem = c_numClPerSupercl * c_clSize * sizeof(float4);
    /* cj in shared memory, for each warp separately */
    shmem += num_threads_z * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize * sizeof(int);
void gpu_launch_kernel_pruneonly(gmx_nbnxn_cuda_t          *nb,
                                 const InteractionLocality  iloc,
    cu_atomdata_t *adat   = nb->atdat;
    cu_nbparam_t  *nbp    = nb->nbparam;
    cu_plist_t    *plist  = nb->plist[iloc];
    cu_timers_t   *t      = nb->timers;
    cudaStream_t   stream = nb->stream[iloc];

    bool           bDoTime = nb->bDoTime;

    if (plist->haveFreshList)
        GMX_ASSERT(numParts == 1, "With first pruning we expect 1 part");

        /* Set rollingPruningNumParts to signal that it is not set */
        plist->rollingPruningNumParts = 0;
        plist->rollingPruningPart     = 0;

    if (plist->rollingPruningNumParts == 0)
        plist->rollingPruningNumParts = numParts;

        GMX_ASSERT(numParts == plist->rollingPruningNumParts, "It is not allowed to change numParts in between list generation steps");

    /* Use a local variable for part and update in plist, so we can return here
     * without duplicating the part increment code.
     */
    int part = plist->rollingPruningPart;

    plist->rollingPruningPart++;
    if (plist->rollingPruningPart >= plist->rollingPruningNumParts)
        plist->rollingPruningPart = 0;

    /* Compute the number of list entries to prune in this pass */
    int numSciInPart = (plist->nsci - part)/numParts;

    /* Don't launch the kernel if there is no work to do (not allowed with CUDA) */
    if (numSciInPart <= 0)
        plist->haveFreshList = false;
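
    /* Worked example of the rolling-prune partitioning: with plist->nsci = 1000
     * pair-list entries and numParts = 4, part cycles through 0, 1, 2, 3 on successive
     * calls and numSciInPart evaluates to roughly nsci/numParts = 250, so each
     * rolling-prune pass covers about a quarter of the list and the whole list is
     * revisited once every numParts passes.
     */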
    GpuRegionTimer *timer = nullptr;
        timer = &(plist->haveFreshList ? t->interaction[iloc].prune_k : t->interaction[iloc].rollingPrune_k);

    /* beginning of timed prune calculation section */
        timer->openTimingRegion(stream);

    /* Kernel launch config:
     * - The thread block dimensions match the size of i-clusters, j-clusters,
     *   and j-cluster concurrency, in x, y, and z, respectively.
     * - The 1D block-grid contains as many blocks as super-clusters.
     */
    int                num_threads_z = c_cudaPruneKernelJ4Concurrency;
    int                nblock        = calc_nb_kernel_nblock(numSciInPart, nb->dev_info);
    KernelLaunchConfig config;
    config.blockSize[0]     = c_clSize;
    config.blockSize[1]     = c_clSize;
    config.blockSize[2]     = num_threads_z;
    config.gridSize[0]      = nblock;
    config.sharedMemorySize = calc_shmem_required_prune(num_threads_z);
    config.stream           = stream;

        fprintf(debug, "Pruning GPU kernel launch configuration:\n\tThread block: %zux%zux%zu\n\t"
                "\tGrid: %zux%zu\n\t#Super-clusters/clusters: %d/%d (%d)\n"
                config.blockSize[0], config.blockSize[1], config.blockSize[2],
                config.gridSize[0], config.gridSize[1], numSciInPart*c_numClPerSupercl,
                c_numClPerSupercl, plist->na_c,
                config.sharedMemorySize);

    auto          *timingEvent  = bDoTime ? timer->fetchNextEvent() : nullptr;
    constexpr char kernelName[] = "k_pruneonly";
    const auto     kernel       = plist->haveFreshList ? nbnxn_kernel_prune_cuda<true> : nbnxn_kernel_prune_cuda<false>;
    const auto     kernelArgs   = prepareGpuKernelArguments(kernel, config, adat, nbp, plist, &numParts, &part);
    launchGpuKernel(kernel, config, timingEvent, kernelName, kernelArgs);

    /* TODO: consider a more elegant way to track which kernel has been called
       (combined or separate 1st pass prune, rolling prune). */
    if (plist->haveFreshList)
        plist->haveFreshList = false;
        /* Mark that pruning has been done */
        nb->timers->interaction[iloc].didPrune = true;
        /* Mark that rolling pruning has been done */
        nb->timers->interaction[iloc].didRollingPrune = true;

        timer->closeTimingRegion(stream);

    if (GMX_NATIVE_WINDOWS)
        /* Windows: force flushing WDDM queue */
        cudaStreamQuery(stream);
void gpu_launch_cpyback(gmx_nbnxn_cuda_t  *nb,
                        nbnxn_atomdata_t  *nbatom,
                        const AtomLocality atomLocality,
                        const bool         copyBackNbForce)
    GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");

    int adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */

    /* determine interaction locality from atom locality */
    const InteractionLocality iloc = gpuAtomToInteractionLocality(atomLocality);

    /* extract the data */
    cu_atomdata_t *adat    = nb->atdat;
    cu_timers_t   *t       = nb->timers;
    bool           bDoTime = nb->bDoTime;
    cudaStream_t   stream  = nb->stream[iloc];

    bool           bCalcEner   = flags & GMX_FORCE_ENERGY;
    bool           bCalcFshift = flags & GMX_FORCE_VIRIAL;

    /* don't launch non-local copy-back if there was no non-local work to do */
    if ((iloc == InteractionLocality::NonLocal) && !haveGpuShortRangeWork(*nb, iloc))

    getGpuAtomRange(adat, atomLocality, &adat_begin, &adat_len);

    /* beginning of timed D2H section */
        t->xf[atomLocality].nb_d2h.openTimingRegion(stream);

    /* With DD the local D2H transfer can only start after the non-local
       kernel has finished. */
    if (iloc == InteractionLocality::Local && nb->bUseTwoStreams)
        stat = cudaStreamWaitEvent(stream, nb->nonlocal_done, 0);
        CU_RET_ERR(stat, "cudaStreamWaitEvent on nonlocal_done failed");

        cu_copy_D2H_async(nbatom->out[0].f.data() + adat_begin * 3, adat->f + adat_begin,
                          (adat_len)*sizeof(*adat->f), stream);

    /* After the non-local D2H is launched the nonlocal_done event can be
       recorded which signals that the local D2H can proceed. This event is not
       placed after the non-local kernel because we want the non-local data
       back first. */
    if (iloc == InteractionLocality::NonLocal)
        stat = cudaEventRecord(nb->nonlocal_done, stream);
        CU_RET_ERR(stat, "cudaEventRecord on nonlocal_done failed");

    /* only transfer energies in the local stream */
    if (iloc == InteractionLocality::Local)
        cu_copy_D2H_async(nb->nbst.fshift, adat->fshift,
                          SHIFTS * sizeof(*nb->nbst.fshift), stream);

        cu_copy_D2H_async(nb->nbst.e_lj, adat->e_lj,
                          sizeof(*nb->nbst.e_lj), stream);
        cu_copy_D2H_async(nb->nbst.e_el, adat->e_el,
                          sizeof(*nb->nbst.e_el), stream);

        t->xf[atomLocality].nb_d2h.closeTimingRegion(stream);
void cuda_set_cacheconfig()
    for (int i = 0; i < eelCuNR; i++)
        for (int j = 0; j < evdwCuNR; j++)
            /* Default kernel 32/32 kB Shared/L1 */
            cudaFuncSetCacheConfig(nb_kfunc_ener_prune_ptr[i][j], cudaFuncCachePreferEqual);
            cudaFuncSetCacheConfig(nb_kfunc_ener_noprune_ptr[i][j], cudaFuncCachePreferEqual);
            cudaFuncSetCacheConfig(nb_kfunc_noener_prune_ptr[i][j], cudaFuncCachePreferEqual);
            stat = cudaFuncSetCacheConfig(nb_kfunc_noener_noprune_ptr[i][j], cudaFuncCachePreferEqual);
            CU_RET_ERR(stat, "cudaFuncSetCacheConfig failed");
/* X buffer operations on GPU: performs conversion from rvec to nb format. */
void nbnxn_gpu_x_to_nbat_x(const Nbnxm::Grid        &grid,
                           bool                      setFillerCoords,
                           const Nbnxm::AtomLocality locality,
    GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");
    GMX_ASSERT(x, "Need a valid x pointer");

    cu_atomdata_t             *adat    = nb->atdat;
    bool                       bDoTime = nb->bDoTime;

    const int                  numColumns      = grid.numColumns();
    const int                  cellOffset      = grid.cellOffset();
    const int                  numAtomsPerCell = grid.numAtomsPerCell();
    Nbnxm::InteractionLocality interactionLoc  = gpuAtomToInteractionLocality(locality);
    int                        nCopyAtoms      = grid.srcAtomEnd() - grid.srcAtomBegin();
    int                        copyAtomStart   = grid.srcAtomBegin();

    cudaStream_t               stream = nb->stream[interactionLoc];

    // FIXME: need to either let the local stream get to the
    // insertNonlocalGpuDependency call or call it separately here
    if (nCopyAtoms == 0) // empty domain
        if (interactionLoc == Nbnxm::InteractionLocality::Local)
            nbnxnInsertNonlocalGpuDependency(nb, interactionLoc);

    // A copy of the coordinates is required if a null pointer has been
    // passed to this function.
    // TODO: improve this mechanism
    bool copyCoord = (xPmeDevicePtr == nullptr);

    // copy X-coordinate data to device
        nb->timers->xf[locality].nb_h2d.openTimingRegion(stream);

    rvec       *devicePtrDest = reinterpret_cast<rvec *> (nb->xrvec[copyAtomStart]);
    const rvec *devicePtrSrc  = reinterpret_cast<const rvec *> (x[copyAtomStart]);
    copyToDeviceBuffer(&devicePtrDest, devicePtrSrc, 0, nCopyAtoms,
                       stream, GpuApiCallBehavior::Async, nullptr);

        nb->timers->xf[locality].nb_h2d.closeTimingRegion(stream);

    else // coordinates have already been copied by the PME stream
        d_x = (rvec*) xPmeDevicePtr;
    GMX_ASSERT(d_x, "Need a valid d_x pointer");

    /* launch kernel on GPU */

    KernelLaunchConfig config;
    config.blockSize[0]     = c_bufOpsThreadsPerBlock;
    config.blockSize[1]     = 1;
    config.blockSize[2]     = 1;
    config.gridSize[0]      = (grid.numCellsColumnMax()*numAtomsPerCell + c_bufOpsThreadsPerBlock - 1)/c_bufOpsThreadsPerBlock;
    config.gridSize[1]      = numColumns;
    config.gridSize[2]      = 1;
    GMX_ASSERT(config.gridSize[0] > 0, "Cannot have an empty grid; the early return above avoids this");
    config.sharedMemorySize = 0;
    config.stream           = stream;
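
    /* Worked example of the grid sizing above (illustrative numbers): if the tallest
     * grid column holds grid.numCellsColumnMax()*numAtomsPerCell = 1280 atom slots and
     * c_bufOpsThreadsPerBlock = 128, then gridSize[0] = (1280 + 127)/128 = 10 blocks,
     * while gridSize[1] = numColumns launches one such row of blocks per grid column.
     */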
    auto       kernelFn      = nbnxn_gpu_x_to_nbat_x_kernel;
    float     *xqPtr         = &(adat->xq->x);
    const int *d_atomIndices = nb->atomIndices;
    const int *d_cxy_na      = &nb->cxy_na[numColumnsMax*gridId];
    const int *d_cxy_ind     = &nb->cxy_ind[numColumnsMax*gridId];
    const auto kernelArgs    = prepareGpuKernelArguments(kernelFn, config,
    launchGpuKernel(kernelFn, config, nullptr, "XbufferOps", kernelArgs);

    nbnxnInsertNonlocalGpuDependency(nb, interactionLoc);
/* F buffer operations on GPU: performs force summations and conversion from nb to rvec format. */
void nbnxn_gpu_add_nbat_f_to_f(const AtomLocality          atomLocality,
                               GpuBufferOpsAccumulateForce accumulateForce)
    GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");

    const InteractionLocality iLocality = gpuAtomToInteractionLocality(atomLocality);
    cudaStream_t              stream    = nb->stream[iLocality];

    cu_atomdata_t            *adat      = nb->atdat;

    KernelLaunchConfig config;
    config.blockSize[0]     = c_bufOpsThreadsPerBlock;
    config.blockSize[1]     = 1;
    config.blockSize[2]     = 1;
    config.gridSize[0]      = ((nAtoms+1)+c_bufOpsThreadsPerBlock-1)/c_bufOpsThreadsPerBlock;
    config.gridSize[1]      = 1;
    config.gridSize[2]      = 1;
    config.sharedMemorySize = 0;
    config.stream           = stream;

    auto kernelFn = (accumulateForce == GpuBufferOpsAccumulateForce::True) ?
        nbnxn_gpu_add_nbat_f_to_f_kernel<true> : nbnxn_gpu_add_nbat_f_to_f_kernel<false>;
    const float3 *fPtr  = adat->f;
    rvec         *frvec = nb->frvec;
    const int    *cell  = nb->cell;

    const auto    kernelArgs = prepareGpuKernelArguments(kernelFn, config,
    launchGpuKernel(kernelFn, config, nullptr, "FbufferOps", kernelArgs);
void nbnxn_launch_copy_f_to_gpu(const AtomLocality    atomLocality,
                                const Nbnxm::GridSet &gridSet,
    GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");
    GMX_ASSERT(f, "Need a valid f pointer");

    const InteractionLocality iLocality = gpuAtomToInteractionLocality(atomLocality);
    cudaStream_t              stream    = nb->stream[iLocality];

    bool                      bDoTime = nb->bDoTime;
    cu_timers_t              *t       = nb->timers;

    int                       atomStart = 0, nAtoms = 0;

    nbnxn_get_atom_range(atomLocality, gridSet, &atomStart, &nAtoms);

        t->xf[atomLocality].nb_h2d.openTimingRegion(stream);

    rvec *ptrDest = reinterpret_cast<rvec *> (nb->frvec[atomStart]);
    rvec *ptrSrc  = reinterpret_cast<rvec *> (f[atomStart]);
    // copyToDeviceBuffer(&ptrDest, ptrSrc, 0, nAtoms,
    //                    stream, GpuApiCallBehavior::Async, nullptr);
    // TODO: use the above API call rather than a direct memcpy when force has been implemented in a hostvector
    cudaMemcpyAsync(ptrDest, ptrSrc, nAtoms*sizeof(rvec), cudaMemcpyHostToDevice,

        t->xf[atomLocality].nb_h2d.closeTimingRegion(stream);
void nbnxn_launch_copy_f_from_gpu(const AtomLocality    atomLocality,
                                  const Nbnxm::GridSet &gridSet,
    GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");
    GMX_ASSERT(f, "Need a valid f pointer");

    const InteractionLocality iLocality = gpuAtomToInteractionLocality(atomLocality);
    cudaStream_t              stream    = nb->stream[iLocality];

    bool                      bDoTime = nb->bDoTime;
    cu_timers_t              *t       = nb->timers;
    int                       atomStart, nAtoms;

    nbnxn_get_atom_range(atomLocality, gridSet, &atomStart, &nAtoms);

        t->xf[atomLocality].nb_d2h.openTimingRegion(stream);

    GMX_ASSERT(nb->frvec, "Need a valid nb->frvec pointer");
    rvec *ptrDest = reinterpret_cast<rvec *> (f[atomStart]);
    rvec *ptrSrc  = reinterpret_cast<rvec *> (nb->frvec[atomStart]);
    // copyFromDeviceBuffer(ptrDest, &ptrSrc, 0, nAtoms,
    //                      stream, GpuApiCallBehavior::Async, nullptr);
    // TODO: use the above API call rather than a direct memcpy when force has been implemented in a hostvector
    cudaMemcpyAsync(ptrDest, ptrSrc, nAtoms*sizeof(rvec), cudaMemcpyDeviceToHost,

        t->xf[atomLocality].nb_d2h.closeTimingRegion(stream);
void nbnxn_wait_for_gpu_force_reduction(const AtomLocality gmx_unused atomLocality,
    GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");

    const InteractionLocality iLocality = gpuAtomToInteractionLocality(atomLocality);

    cudaStream_t              stream    = nb->stream[iLocality];

    cudaStreamSynchronize(stream);