/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2018,2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

/*! \file
 *  \brief
 *  CUDA kernels for GPU versions of copy_rvec_to_nbat_real and add_nbat_f_to_f.
 *
 *  \author Alan Gray <alang@nvidia.com>
 *  \author Jon Vincent <jvincent@nvidia.com>
 */

#include "gromacs/gpu_utils/vectype_ops.cuh"
#include "gromacs/nbnxm/nbnxm.h"
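
/* Layout note (illustrative sketch, not part of the original file): the nbnxm
 * coordinate buffer xnb used below stores 4 floats per atom (x, y, z, charge),
 * i.e. the "xyzq" layout. The constant and helper in this sketch are
 * hypothetical names introduced only for clarity; the kernel itself uses the
 * literal stride 4 and STRIDE_XYZQ for the same quantity. */
static constexpr int c_xyzqStrideSketch = 4;

/* Hypothetical helper: offset of the x component of a given atom in an xyzq buffer. */
static __forceinline__ __host__ __device__ int xyzqOffsetSketch(int atomIndex)
{
    return atomIndex * c_xyzqStrideSketch;
}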

/*! \brief CUDA kernel for transforming position coordinates from rvec to nbnxm layout.
 *
 * TODO:
 *  - improve/simplify/document use of cxy_na and na_round
 *  - rename kernel so naming matches with the other NBNXM kernels;
 *  - enable separate compilation unit
 *
 * \param[in]  numColumns       extent of cell-level parallelism (number of grid columns)
 * \param[out] xnb              position buffer in nbnxm layout
 * \param[in]  setFillerCoords  tells whether to set the coordinates of the filler particles
 * \param[in]  x                position buffer
 * \param[in]  a                atom index mapping
 * \param[in]  cxy_na           array of extents (number of real atoms per column)
 * \param[in]  cxy_ind          array of cell indices (first cell of each column)
 * \param[in]  cellOffset       first cell
 * \param[in]  numAtomsPerCell  number of atoms per cell
 */
__global__ void nbnxn_gpu_x_to_nbat_x_kernel(int                        numColumns,
                                             float *      __restrict__ xnb,
                                             bool                       setFillerCoords,
                                             const rvec * __restrict__  x,
                                             const int *  __restrict__  a,
                                             const int *  __restrict__  cxy_na,
                                             const int *  __restrict__  cxy_ind,
                                             int                        cellOffset,
                                             int                        numAtomsPerCell);

__global__ void nbnxn_gpu_x_to_nbat_x_kernel(int                        numColumns,
                                             float *      __restrict__ xnb,
                                             bool                       setFillerCoords,
                                             const rvec * __restrict__  x,
                                             const int *  __restrict__  a,
                                             const int *  __restrict__  cxy_na,
                                             const int *  __restrict__  cxy_ind,
                                             int                        cellOffset,
                                             int                        numAtomsPerCell)
{
    const float farAway = -1000000.0f;

    /* map cell-level parallelism to y component of CUDA block index */
    int cxy = blockIdx.y;

    if (cxy < numColumns)
    {
        int na = cxy_na[cxy];
        int a0 = (cellOffset + cxy_ind[cxy]) * numAtomsPerCell;
        int na_round;
        if (setFillerCoords)
        {
            // TODO: This can be done more efficiently
            na_round = (cxy_ind[cxy + 1] - cxy_ind[cxy]) * numAtomsPerCell;
        }
        else
        {
            /* We fill only the real particle locations. We assume the filler
             * entries at the end have been set properly during pair-list
             * generation. */
            na_round = na;
        }

        /* map parallelism within a cell to x component of CUDA block index
         * linearized with threads within a block */
        int i  = blockIdx.x * blockDim.x + threadIdx.x;
        int j0 = a0 * STRIDE_XYZQ; /* 4 floats (x, y, z, q) per atom in the nbnxm layout */

        // destination address where x should be stored in nbnxm layout
        float3 *x_dest = (float3 *)&xnb[j0 + 4 * i];

        /* perform conversion of each element */
        if (i < na_round)
        {
            if (i < na)
            {
                *x_dest = *((float3 *)x[a[a0 + i]]);
            }
            else
            {
                /* filler particle: park it far away from the real atoms */
                *x_dest = make_float3(farAway);
            }
        }
    }
}
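
/* Illustrative sketch (not part of the original file): one possible host-side
 * launch of nbnxn_gpu_x_to_nbat_x_kernel defined above. The helper name, the
 * thread-block size, and the maxAtomsInColumn parameter (the largest rounded-up
 * number of atoms in any grid column) are assumptions made for this example;
 * all pointers are assumed to be device pointers, and the real launch
 * configuration is chosen elsewhere in the NBNXM module. */
static void launch_x_to_nbat_x_sketch(int          numColumns,
                                      int          maxAtomsInColumn,
                                      float       *xnb,
                                      bool         setFillerCoords,
                                      const rvec  *x,
                                      const int   *a,
                                      const int   *cxy_na,
                                      const int   *cxy_ind,
                                      int          cellOffset,
                                      int          numAtomsPerCell,
                                      cudaStream_t stream)
{
    const int threadsPerBlock = 128; /* assumed value, for illustration only */

    /* grid.x (blocks * threads) covers atoms within a column, grid.y covers columns;
     * this matches the blockIdx.x/blockIdx.y mapping used inside the kernel */
    dim3 numBlocks((maxAtomsInColumn + threadsPerBlock - 1) / threadsPerBlock, numColumns, 1);
    dim3 blockSize(threadsPerBlock, 1, 1);

    nbnxn_gpu_x_to_nbat_x_kernel<<<numBlocks, blockSize, 0, stream>>>(
            numColumns, xnb, setFillerCoords, x, a, cxy_na, cxy_ind, cellOffset, numAtomsPerCell);
}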

/*! \brief CUDA kernel to add part of the force array(s) from nbnxn_atomdata_t to f
 *
 * \param[in]     fnb        Force in nbat format
 * \param[in,out] f          Force buffer to be reduced into
 * \param[in]     cell       Cell index mapping
 * \param[in]     atomStart  Start atom index
 * \param[in]     nAtoms     Number of atoms to reduce
 */
template <bool accumulateForce>
__global__ void
nbnxn_gpu_add_nbat_f_to_f_kernel(const float3 *__restrict__ fnb,
                                 rvec                      *f,
                                 const int *__restrict__    cell,
                                 const int                  atomStart,
                                 const int                  nAtoms);

template <bool accumulateForce>
__global__ void
nbnxn_gpu_add_nbat_f_to_f_kernel(const float3 *__restrict__ fnb,
                                 rvec                      *f,
                                 const int *__restrict__    cell,
                                 const int                  atomStart,
                                 const int                  nAtoms)
{
    /* map particle-level parallelism to 1D CUDA thread and block index */
    int threadIndex = blockIdx.x * blockDim.x + threadIdx.x;

    /* perform the addition for each particle */
    if (threadIndex < nAtoms)
    {
        int     i      = cell[atomStart + threadIndex];
        float3 *f_dest = (float3 *)&f[atomStart + threadIndex][XX];

        if (accumulateForce)
        {
            *f_dest += fnb[i];
        }
        else
        {
            *f_dest = fnb[i];
        }
    }
}
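
/* Illustrative sketch (not part of the original file): one possible host-side
 * launch of nbnxn_gpu_add_nbat_f_to_f_kernel defined above. The helper name and
 * thread-block size are assumptions for this example; all pointers are assumed
 * to be device pointers. The accumulateForce template parameter selects at
 * compile time between accumulating into f and overwriting it. */
static void launch_add_nbat_f_to_f_sketch(const float3 *fnb,
                                          rvec         *f,
                                          const int    *cell,
                                          int           atomStart,
                                          int           nAtoms,
                                          bool          accumulateForce,
                                          cudaStream_t  stream)
{
    const int threadsPerBlock = 128; /* assumed value, for illustration only */

    dim3 numBlocks((nAtoms + threadsPerBlock - 1) / threadsPerBlock, 1, 1);
    dim3 blockSize(threadsPerBlock, 1, 1);

    if (accumulateForce)
    {
        nbnxn_gpu_add_nbat_f_to_f_kernel<true>
            <<<numBlocks, blockSize, 0, stream>>>(fnb, f, cell, atomStart, nAtoms);
    }
    else
    {
        nbnxn_gpu_add_nbat_f_to_f_kernel<false>
            <<<numBlocks, blockSize, 0, stream>>>(fnb, f, cell, atomStart, nAtoms);
    }
}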