/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013-2016,2017,2018,2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 * \brief Implements PME GPU spline calculation and charge spreading in CUDA.
 * TODO: consider always pre-sorting particles (as in DD case).
 *
 * \author Aleksei Iupinov <a.yupinov@gmail.com>
 */
#include "gmxpre.h"

#include <cassert>

#include "gromacs/gpu_utils/cuda_kernel_utils.cuh"

#include "pme.cuh"
#include "pme_gpu_utils.h"
/*
 * This define affects the spline calculation behaviour in the kernel.
 * 0: a single GPU thread handles a single dimension of a single particle (calculating and storing
 *    (order) spline values and derivatives).
 * 1: (order) threads do redundant work on this same task, each one storing only a single theta and
 *    a single dtheta into the global arrays.
 * The only efficiency difference is fewer global store operations, countered by more redundant
 * spline computation.
 *
 * TODO: estimate if this should be a boolean parameter (and add it to the unit test if so).
 */
#define PME_GPU_PARALLEL_SPLINE 0
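
/* Illustrative sketch (an explanatory comment, not part of the kernel): how the two modes map
 * threads to the spline work of one atom along one dimension, assuming order == 4.
 *
 *   PME_GPU_PARALLEL_SPLINE == 0: one thread computes and stores all components:
 *       thread T:   for (o = 0; o < 4; o++) { store theta[o]; store dtheta[o]; }
 *
 *   PME_GPU_PARALLEL_SPLINE == 1: four threads each recompute the full spline, but each one
 *   stores only its own component:
 *       thread T_o (o = 0..3):   store theta[o]; store dtheta[o];
 */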

/*! \brief
 * General purpose function for loading atom-related data from global to shared memory.
 *
 * \tparam[in] T                 Data type (float/int/...).
 * \tparam[in] atomsPerBlock     Number of atoms processed by a block - should be accounted for in the size of the shared memory array.
 * \tparam[in] dataCountPerAtom  Number of data elements per single atom (e.g. DIM for an rvec coordinates array).
 * \param[in]  kernelParams      Input PME CUDA data in constant memory.
 * \param[out] sm_destination    Shared memory array for output.
 * \param[in]  gm_source         Global memory array for input.
 */
template <typename T,
          const int atomsPerBlock,
          const int dataCountPerAtom>
__device__ __forceinline__
void pme_gpu_stage_atom_data(const PmeGpuCudaKernelParams       kernelParams,
                             T * __restrict__                   sm_destination,
                             const T * __restrict__             gm_source)
{
    static_assert(c_usePadding, "With padding disabled, index checking should be fixed to account for spline theta/dtheta per-warp alignment");
    const int blockIndex       = blockIdx.y * gridDim.x + blockIdx.x;
    const int threadLocalIndex = ((threadIdx.z * blockDim.y + threadIdx.y) * blockDim.x) + threadIdx.x;
    const int localIndex       = threadLocalIndex;
    const int globalIndexBase  = blockIndex * atomsPerBlock * dataCountPerAtom;
    const int globalIndex      = globalIndexBase + localIndex;
    const int globalCheck      = pme_gpu_check_atom_data_index(globalIndex, kernelParams.atoms.nAtoms * dataCountPerAtom);
    if ((localIndex < atomsPerBlock * dataCountPerAtom) & globalCheck)
    {
        assert(isfinite(float(gm_source[globalIndex])));
        sm_destination[localIndex] = gm_source[globalIndex];
    }
}
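
/* Example use (as done in pme_spline_and_spread_kernel() below): staging DIM floats per atom,
 * e.g. the coordinates, into a shared array of atomsPerBlock * DIM floats:
 *
 *     __shared__ float sm_coordinates[DIM * atomsPerBlock];
 *     pme_gpu_stage_atom_data<float, atomsPerBlock, DIM>(kernelParams, sm_coordinates,
 *                                                        kernelParams.atoms.d_coordinates);
 */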

/*! \brief
 * PME GPU spline parameter and gridline indices calculation.
 * This corresponds to the CPU functions calc_interpolation_idx() and make_bsplines().
 * First stage of the whole kernel.
 *
 * \tparam[in] order                PME interpolation order.
 * \tparam[in] atomsPerBlock        Number of atoms processed by a block - should be accounted for
 *                                  in the sizes of the shared memory arrays.
 * \param[in]  kernelParams         Input PME CUDA data in constant memory.
 * \param[in]  atomIndexOffset      Starting atom index for the execution block w.r.t. global memory.
 * \param[in]  sm_coordinates       Atom coordinates in the shared memory.
 * \param[in]  sm_coefficients      Atom charges/coefficients in the shared memory.
 * \param[out] sm_theta             Atom spline values in the shared memory.
 * \param[out] sm_gridlineIndices   Atom gridline indices in the shared memory.
 */
template <const int order,
          const int atomsPerBlock>
__device__ __forceinline__ void calculate_splines(const PmeGpuCudaKernelParams           kernelParams,
                                                  const int                              atomIndexOffset,
                                                  const float3 * __restrict__            sm_coordinates,
                                                  const float * __restrict__             sm_coefficients,
                                                  float * __restrict__                   sm_theta,
                                                  int * __restrict__                     sm_gridlineIndices)
{
    /* Global memory pointers for output */
    float * __restrict__ gm_theta           = kernelParams.atoms.d_theta;
    float * __restrict__ gm_dtheta          = kernelParams.atoms.d_dtheta;
    int * __restrict__   gm_gridlineIndices = kernelParams.atoms.d_gridlineIndices;

    const int atomsPerWarp = c_pmeSpreadGatherAtomsPerWarp;

    /* Fractional coordinates */
    __shared__ float sm_fractCoords[atomsPerBlock * DIM];

    /* Thread index w.r.t. block */
    const int threadLocalId = (threadIdx.z * (blockDim.x * blockDim.y))
        + (threadIdx.y * blockDim.x) + threadIdx.x;
    /* Warp index w.r.t. block - could probably be obtained more easily? */
    const int warpIndex = threadLocalId / warp_size;
    /* Thread index w.r.t. warp */
    const int threadWarpIndex = threadLocalId % warp_size;
    /* Atom index w.r.t. warp - alternating 0 1 0 1 ... */
    const int atomWarpIndex = threadWarpIndex % atomsPerWarp;
    /* Atom index w.r.t. block/shared memory */
    const int atomIndexLocal = warpIndex * atomsPerWarp + atomWarpIndex;

    /* Atom index w.r.t. global memory */
    const int atomIndexGlobal = atomIndexOffset + atomIndexLocal;
    /* Spline contribution index in one dimension */
    const int orderIndex = threadWarpIndex / (atomsPerWarp * DIM);
    /* Dimension index */
    const int dimIndex = (threadWarpIndex - orderIndex * (atomsPerWarp * DIM)) / atomsPerWarp;

    /* Multi-purpose index of rvec/ivec atom data */
    const int sharedMemoryIndex = atomIndexLocal * DIM + dimIndex;
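
    /* Worked example of the decomposition above (assuming warp_size == 32 and atomsPerWarp == 2,
     * i.e. 16 threads per atom with order == 4; these are typical values but not fixed in this
     * file): threadWarpIndex == 3 gives orderIndex = 3 / 6 = 0, dimIndex = 3 / 2 = 1 (YY) and
     * atomWarpIndex = 3 % 2 = 1, so this thread handles dimension Y of the second atom of its warp.
     */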

    /* Spline parameters need a working buffer.
     * With PME_GPU_PARALLEL_SPLINE == 0 it is just a local array of (order) values for some of the threads, which is fine;
     * With PME_GPU_PARALLEL_SPLINE == 1 (order) times more threads are involved, so shared memory is used to avoid overhead.
     * The buffer's size, striding and indexing are adjusted accordingly.
     * The buffer is accessed with the SPLINE_DATA_PTR and SPLINE_DATA macros.
     */
#if PME_GPU_PARALLEL_SPLINE
    const int        splineDataStride = atomsPerBlock * DIM;
    const int        splineDataIndex  = sharedMemoryIndex;
    __shared__ float sm_splineData[splineDataStride * order];
    float           *splineDataPtr = sm_splineData;
#else
    const int        splineDataStride = 1;
    const int        splineDataIndex  = 0;
    float            splineData[splineDataStride * order];
    float           *splineDataPtr = splineData;
#endif

#define SPLINE_DATA_PTR(i) (splineDataPtr + ((i) * splineDataStride + splineDataIndex))
#define SPLINE_DATA(i)     (*SPLINE_DATA_PTR(i))
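
    /* Illustrative note: with PME_GPU_PARALLEL_SPLINE == 0, SPLINE_DATA(i) is simply splineData[i]
     * in the thread's local array; with PME_GPU_PARALLEL_SPLINE == 1 it resolves to
     * sm_splineData[i * atomsPerBlock * DIM + sharedMemoryIndex], i.e. (order) slabs of the shared
     * buffer, each slab holding one float per (atom, dimension) pair of the block.
     */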

    const int localCheck  = (dimIndex < DIM) && (orderIndex < (PME_GPU_PARALLEL_SPLINE ? order : 1));
    const int globalCheck = pme_gpu_check_atom_data_index(atomIndexGlobal, kernelParams.atoms.nAtoms);

    if (localCheck && globalCheck)
    {
        /* Indices interpolation */

        if (orderIndex == 0)
        {
            int          tableIndex, tInt;
            float        n, t;
            const float3 x = sm_coordinates[atomIndexLocal];
            /* Accessing fields in fshOffset/nXYZ/recipbox/... with dimIndex offset
             * puts them into local memory(!) instead of accessing the constant memory directly.
             * That's the reason for the switch, to unroll explicitly.
             * The commented parts correspond to the 0 components of the recipbox.
             */
            switch (dimIndex)
            {
                case XX:
                    tableIndex = kernelParams.grid.tablesOffsets[XX];
                    n          = kernelParams.grid.realGridSizeFP[XX];
                    t          = x.x * kernelParams.current.recipBox[dimIndex][XX] + x.y * kernelParams.current.recipBox[dimIndex][YY] + x.z * kernelParams.current.recipBox[dimIndex][ZZ];
                    break;

                case YY:
                    tableIndex = kernelParams.grid.tablesOffsets[YY];
                    n          = kernelParams.grid.realGridSizeFP[YY];
                    t          = /*x.x * kernelParams.current.recipbox[dimIndex][XX] + */ x.y * kernelParams.current.recipBox[dimIndex][YY] + x.z * kernelParams.current.recipBox[dimIndex][ZZ];
                    break;

                case ZZ:
                    tableIndex = kernelParams.grid.tablesOffsets[ZZ];
                    n          = kernelParams.grid.realGridSizeFP[ZZ];
                    t          = /*x.x * kernelParams.current.recipbox[dimIndex][XX] + x.y * kernelParams.current.recipbox[dimIndex][YY] + */ x.z * kernelParams.current.recipBox[dimIndex][ZZ];
                    break;
            }
            const float shift = c_pmeMaxUnitcellShift;
            /* Fractional coordinates along box vectors, adding a positive shift to ensure t is positive for triclinic boxes */
            t    = (t + shift) * n;
            tInt = (int)t;
            sm_fractCoords[sharedMemoryIndex] = t - tInt;
            tableIndex                       += tInt;
            assert(tInt >= 0);
            assert(tInt < c_pmeNeighborUnitcellCount * n);

            // TODO have shared table for both parameters to share the fetch, as index is always same?
            // TODO compare texture/LDG performance
            sm_fractCoords[sharedMemoryIndex] +=
                fetchFromParamLookupTable(kernelParams.grid.d_fractShiftsTable,
                                          kernelParams.fractShiftsTableTexture,
                                          tableIndex);
            sm_gridlineIndices[sharedMemoryIndex] =
                fetchFromParamLookupTable(kernelParams.grid.d_gridlineIndicesTable,
                                          kernelParams.gridlineIndicesTableTexture,
                                          tableIndex);
            gm_gridlineIndices[atomIndexOffset * DIM + sharedMemoryIndex] = sm_gridlineIndices[sharedMemoryIndex];
        }
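
        /* Worked example of the index computation above (the shift value is an assumption here,
         * not fixed by this file): with shift == 2 and n == 64, a fractional coordinate of 0.3
         * along this box vector gives t = (0.3 + 2) * 64 = 147.2, so tInt == 147 advances
         * tableIndex to the matching gridline entry and ~0.2 is kept as the intra-cell fraction,
         * before the fractional-shift correction is added from the lookup table.
         */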

        /* B-spline calculation */

        const int chargeCheck = pme_gpu_check_atom_charge(sm_coefficients[atomIndexLocal]);
        if (chargeCheck)
        {
            float       div;
            int         o = orderIndex; // This is an index that is set once for PME_GPU_PARALLEL_SPLINE == 1

            const float dr = sm_fractCoords[sharedMemoryIndex];
            assert(isfinite(dr));

            /* dr is relative offset from lower cell limit */
            *SPLINE_DATA_PTR(order - 1) = 0.0f;
            *SPLINE_DATA_PTR(1)         = dr;
            *SPLINE_DATA_PTR(0)         = 1.0f - dr;
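
            /* Illustrative note: the loop below is the standard B-spline order-raising recursion
             * (the same one as in the CPU make_bsplines()). With SPLINE_DATA(j) holding
             * M_k(dr + k - 1 - j), each pass applies
             *     M_k(u) = ( u * M_{k-1}(u) + (k - u) * M_{k-1}(u - 1) ) / (k - 1),
             * so for order == 4 it runs once (k == 3). The final order-raising pass is performed
             * further down, after dtheta has been computed from the order (order - 1) values,
             * since dM_n(u)/du = M_{n-1}(u) - M_{n-1}(u - 1).
             */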
            for (int k = 3; k < order; k++)
            {
                div                     = 1.0f / (k - 1.0f);
                *SPLINE_DATA_PTR(k - 1) = div * dr * SPLINE_DATA(k - 2);
                for (int l = 1; l < (k - 1); l++)
                {
                    *SPLINE_DATA_PTR(k - l - 1) = div * ((dr + l) * SPLINE_DATA(k - l - 2) + (k - l - dr) * SPLINE_DATA(k - l - 1));
                }
                *SPLINE_DATA_PTR(0) = div * (1.0f - dr) * SPLINE_DATA(0);
            }

            const int thetaIndexBase        = getSplineParamIndexBase<order, atomsPerWarp>(warpIndex, atomWarpIndex);
            const int thetaGlobalOffsetBase = atomIndexOffset * DIM * order;

            /* Differentiation and storing the spline derivatives (dtheta) */
#if !PME_GPU_PARALLEL_SPLINE
            // With PME_GPU_PARALLEL_SPLINE == 1, o is already set to orderIndex;
            // With PME_GPU_PARALLEL_SPLINE == 0, we loop o over range(order).
            for (o = 0; o < order; o++)
#endif
            {
                const int   thetaIndex       = getSplineParamIndex<order, atomsPerWarp>(thetaIndexBase, dimIndex, o);
                const int   thetaGlobalIndex = thetaGlobalOffsetBase + thetaIndex;

                const float dtheta = ((o > 0) ? SPLINE_DATA(o - 1) : 0.0f) - SPLINE_DATA(o);
                assert(isfinite(dtheta));
                gm_dtheta[thetaGlobalIndex] = dtheta;
            }

            div                         = 1.0f / (order - 1.0f);
            *SPLINE_DATA_PTR(order - 1) = div * dr * SPLINE_DATA(order - 2);
            for (int k = 1; k < (order - 1); k++)
            {
                *SPLINE_DATA_PTR(order - k - 1) = div * ((dr + k) * SPLINE_DATA(order - k - 2) + (order - k - dr) * SPLINE_DATA(order - k - 1));
            }
            *SPLINE_DATA_PTR(0) = div * (1.0f - dr) * SPLINE_DATA(0);

            /* Storing the spline values (theta) */
#if !PME_GPU_PARALLEL_SPLINE
            // See comment for the loop above
            for (o = 0; o < order; o++)
#endif
            {
                const int thetaIndex       = getSplineParamIndex<order, atomsPerWarp>(thetaIndexBase, dimIndex, o);
                const int thetaGlobalIndex = thetaGlobalOffsetBase + thetaIndex;

                sm_theta[thetaIndex]       = SPLINE_DATA(o);
                assert(isfinite(sm_theta[thetaIndex]));
                gm_theta[thetaGlobalIndex] = SPLINE_DATA(o);
            }
        }
    }
}

/*! \brief
 * Charge spreading onto the grid.
 * This corresponds to the CPU function spread_coefficients_bsplines_thread().
 * Second stage of the whole kernel.
 *
 * \tparam[in] order                PME interpolation order.
 * \tparam[in] wrapX                A boolean which tells if the grid overlap in dimension X should be wrapped.
 * \tparam[in] wrapY                A boolean which tells if the grid overlap in dimension Y should be wrapped.
 * \param[in]  kernelParams         Input PME CUDA data in constant memory.
 * \param[in]  atomIndexOffset      Starting atom index for the execution block w.r.t. global memory.
 * \param[in]  sm_coefficients      Atom coefficients/charges in the shared memory.
 * \param[in]  sm_gridlineIndices   Atom gridline indices in the shared memory.
 * \param[in]  sm_theta             Atom spline values in the shared memory.
 */
template <const int order, const bool wrapX, const bool wrapY>
__device__ __forceinline__ void spread_charges(const PmeGpuCudaKernelParams           kernelParams,
                                               const int                              atomIndexOffset,
                                               const float * __restrict__             sm_coefficients,
                                               const int * __restrict__               sm_gridlineIndices,
                                               const float * __restrict__             sm_theta)
{
    /* Global memory pointer to the output grid */
    float * __restrict__ gm_grid = kernelParams.grid.d_realGrid;

    const int atomsPerWarp = c_pmeSpreadGatherAtomsPerWarp;

    const int nx  = kernelParams.grid.realGridSize[XX];
    const int ny  = kernelParams.grid.realGridSize[YY];
    const int nz  = kernelParams.grid.realGridSize[ZZ];
    const int pny = kernelParams.grid.realGridSizePadded[YY];
    const int pnz = kernelParams.grid.realGridSizePadded[ZZ];

    const int offx = 0, offy = 0, offz = 0; // unused for now

    const int atomIndexLocal  = threadIdx.z;
    const int atomIndexGlobal = atomIndexOffset + atomIndexLocal;

    const int globalCheck = pme_gpu_check_atom_data_index(atomIndexGlobal, kernelParams.atoms.nAtoms);
    const int chargeCheck = pme_gpu_check_atom_charge(sm_coefficients[atomIndexLocal]);
    if (chargeCheck & globalCheck)
    {
        // Spline Y/Z coordinates
        const int ithy   = threadIdx.y;
        const int ithz   = threadIdx.x;
        const int ixBase = sm_gridlineIndices[atomIndexLocal * DIM + XX] - offx;
        int       iy     = sm_gridlineIndices[atomIndexLocal * DIM + YY] - offy + ithy;
        if (wrapY & (iy >= ny))
        {
            iy -= ny;
        }
        int iz = sm_gridlineIndices[atomIndexLocal * DIM + ZZ] - offz + ithz;
        if (iz >= nz)
        {
            iz -= nz;
        }

        /* Atom index w.r.t. warp - alternating 0 1 0 1 ... */
        const int atomWarpIndex = atomIndexLocal % atomsPerWarp;
        /* Warp index w.r.t. block - could probably be obtained more easily? */
        const int warpIndex = atomIndexLocal / atomsPerWarp;

        const int   splineIndexBase = getSplineParamIndexBase<order, atomsPerWarp>(warpIndex, atomWarpIndex);
        const int   splineIndexZ    = getSplineParamIndex<order, atomsPerWarp>(splineIndexBase, ZZ, ithz);
        const float thetaZ          = sm_theta[splineIndexZ];
        const int   splineIndexY    = getSplineParamIndex<order, atomsPerWarp>(splineIndexBase, YY, ithy);
        const float thetaY          = sm_theta[splineIndexY];
        const float constVal        = thetaZ * thetaY * sm_coefficients[atomIndexLocal];
        assert(isfinite(constVal));
        const int   constOffset     = iy * pnz + iz;
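
        /* gridIndexGlobal below is a row-major index into the padded real grid:
         * ix * pny * pnz + iy * pnz + iz. Worked example with hypothetical padded sizes
         * pny == 64, pnz == 72: the cell (ix, iy, iz) == (3, 5, 7) maps to
         * 3 * 64 * 72 + 5 * 72 + 7 = 14191.
         */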
        for (int ithx = 0; (ithx < order); ithx++)
        {
            int ix = ixBase + ithx;
            if (wrapX & (ix >= nx))
            {
                ix -= nx;
            }
            const int   gridIndexGlobal = ix * pny * pnz + constOffset;
            const int   splineIndexX    = getSplineParamIndex<order, atomsPerWarp>(splineIndexBase, XX, ithx);
            const float thetaX          = sm_theta[splineIndexX];
            assert(isfinite(thetaX));
            assert(isfinite(gm_grid[gridIndexGlobal]));
            atomicAdd(gm_grid + gridIndexGlobal, thetaX * constVal);
        }
    }
}

/*! \brief
 * A spline computation and charge spreading kernel function.
 *
 * \tparam[in] order                PME interpolation order.
 * \tparam[in] computeSplines       A boolean which tells if the spline parameter and
 *                                  gridline indices' computation should be performed.
 * \tparam[in] spreadCharges        A boolean which tells if the charge spreading should be performed.
 * \tparam[in] wrapX                A boolean which tells if the grid overlap in dimension X should be wrapped.
 * \tparam[in] wrapY                A boolean which tells if the grid overlap in dimension Y should be wrapped.
 * \param[in]  kernelParams         Input PME CUDA data in constant memory.
 */
template <const int order,
          const bool computeSplines,
          const bool spreadCharges,
          const bool wrapX,
          const bool wrapY>
__launch_bounds__(c_spreadMaxThreadsPerBlock)
CLANG_DISABLE_OPTIMIZATION_ATTRIBUTE
__global__ void pme_spline_and_spread_kernel(const PmeGpuCudaKernelParams kernelParams)
{
    const int        atomsPerBlock = c_spreadMaxThreadsPerBlock / c_pmeSpreadGatherThreadsPerAtom;
    // Gridline indices, ivec
    __shared__ int   sm_gridlineIndices[atomsPerBlock * DIM];
    // Charges
    __shared__ float sm_coefficients[atomsPerBlock];
    // Spline values
    __shared__ float sm_theta[atomsPerBlock * DIM * order];

    const int blockIndex      = blockIdx.y * gridDim.x + blockIdx.x;
    const int atomIndexOffset = blockIndex * atomsPerBlock;

    /* Early return for fully empty blocks at the end
     * (should only happen for billions of input atoms)
     */
    if (atomIndexOffset >= kernelParams.atoms.nAtoms)
    {
        return;
    }
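
    /* Illustrative sizing note (the constant values are assumptions, not fixed by this file):
     * with c_spreadMaxThreadsPerBlock == 256 and c_pmeSpreadGatherThreadsPerAtom == order * order == 16,
     * atomsPerBlock == 16, so the shared arrays above occupy 16 * 3 ints + 16 floats + 16 * 3 * 4 floats,
     * i.e. 1 KiB of shared memory per block for order == 4.
     */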
    /* Staging coefficients/charges for both spline and spread */
    pme_gpu_stage_atom_data<float, atomsPerBlock, 1>(kernelParams, sm_coefficients, kernelParams.atoms.d_coefficients);

    if (computeSplines)
    {
        /* Staging coordinates */
        __shared__ float sm_coordinates[DIM * atomsPerBlock];
        pme_gpu_stage_atom_data<float, atomsPerBlock, DIM>(kernelParams, sm_coordinates, kernelParams.atoms.d_coordinates);

        __syncthreads();
        calculate_splines<order, atomsPerBlock>(kernelParams, atomIndexOffset, (const float3 *)sm_coordinates,
                                                sm_coefficients, sm_theta, sm_gridlineIndices);
        /* Make the shared spline data written above visible within the warp before spreading */
        __syncwarp();
    }
    else
    {
        /* Staging the data for spread
         * (the data is assumed to be in GPU global memory with proper layout already,
         * as in after running the spline kernel)
         */
        /* Spline data - only thetas (dthetas will only be needed in gather) */
        pme_gpu_stage_atom_data<float, atomsPerBlock, DIM * order>(kernelParams, sm_theta, kernelParams.atoms.d_theta);
        /* Gridline indices */
        pme_gpu_stage_atom_data<int, atomsPerBlock, DIM>(kernelParams, sm_gridlineIndices, kernelParams.atoms.d_gridlineIndices);

        __syncthreads();
    }

    if (spreadCharges)
    {
        spread_charges<order, wrapX, wrapY>(kernelParams, atomIndexOffset, sm_coefficients, sm_gridlineIndices, sm_theta);
    }
}

//! Kernel instantiations
template __global__ void pme_spline_and_spread_kernel<4, true, true, true, true>(const PmeGpuCudaKernelParams);
template __global__ void pme_spline_and_spread_kernel<4, true, false, true, true>(const PmeGpuCudaKernelParams);
template __global__ void pme_spline_and_spread_kernel<4, false, true, true, true>(const PmeGpuCudaKernelParams);
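
/* Illustrative host-side launch sketch (an assumption for documentation purposes; the actual
 * launch is performed by the GROMACS PME GPU host code, which is not part of this file).
 * The kernel expects blockDim == (order, order, atomsPerBlock), so that threadIdx.x/threadIdx.y
 * enumerate the Z/Y spline components in spread_charges() and threadIdx.z enumerates the atoms
 * of the block, with enough blocks to cover kernelParams.atoms.nAtoms:
 *
 *     const int atomsPerBlock = c_spreadMaxThreadsPerBlock / c_pmeSpreadGatherThreadsPerAtom;
 *     dim3      blockSize(4, 4, atomsPerBlock);
 *     dim3      gridSize((nAtoms + atomsPerBlock - 1) / atomsPerBlock, 1, 1);
 *     pme_spline_and_spread_kernel<4, true, true, true, true>
 *             <<<gridSize, blockSize, 0, stream>>>(kernelParams);
 */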