/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2018, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 * \brief Implements PME OpenCL spline parameter computation and charge spread kernels.
 * When including this and other PME OpenCL kernel files, plenty of common
 * constants/macros are expected to be defined (such as "order", the PME interpolation order).
 * For details, please see how pme-program.cl is compiled in pme-gpu-program-impl-ocl.cpp.
 *
 * This file's kernels specifically expect the following definitions:
 *
 * - atomsPerBlock which expresses how many atoms are processed by a single work group;
 * - order which is the PME interpolation order;
 * - computeSplines and spreadCharges must evaluate to either true or false to specify which
 *   kernel flavor is being compiled;
 * - wrapX and wrapY must evaluate to either true or false to specify whether the grid overlap
 *   in dimension X/Y is to be used.
 *
 * \author Aleksei Iupinov <a.yupinov@gmail.com>
 */
#include "pme-gpu-types.h"
#include "pme-gpu-utils.clh"
#include "gromacs/gpu_utils/vectype_ops.clh"
/*
 * This define affects the spline calculation behaviour in the kernel.
 * 0: a single GPU thread handles a single dimension of a single particle (calculating and storing
 *    (order) spline values and derivatives).
 * 1: (order) threads do redundant work on this same task, each one storing only a single theta and
 *    a single dtheta into global arrays.
 * The only efficiency difference is fewer global store operations, countered by more redundant
 * spline computation.
 *
 * TODO: estimate if this should be a boolean parameter (and add it to the unit test if so).
 */
#define PME_GPU_PARALLEL_SPLINE 0
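
/* For illustration, assuming order == 4 and DIM == 3 (order is not fixed by this file): the
 * spline work of one atom spans 3 dimensions x 4 spline orders = 12 (theta, dtheta) pairs.
 * With PME_GPU_PARALLEL_SPLINE == 0, 3 threads per atom each compute and store all 4 values of
 * one dimension; with PME_GPU_PARALLEL_SPLINE == 1, 12 threads per atom run the same recursion
 * redundantly, but each issues only a single theta and a single dtheta global store.
 */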
#ifndef COMPILE_SPREAD_HELPERS_ONCE
#define COMPILE_SPREAD_HELPERS_ONCE
/*! \brief
 * General purpose function for loading atom-related data from global to shared memory.
 *
 * \param[in]  kernelParams      Input PME GPU data in constant memory.
 * \param[out] sm_destination    Local memory array for output.
 * \param[in]  gm_source         Global memory array for input.
 * \param[in]  dataCountPerAtom  Number of data elements per single atom (e.g. DIM for an rvec coordinates array).
 */
inline void pme_gpu_stage_atom_data(const struct PmeOpenCLKernelParams  kernelParams,
                                    __local float * __restrict__        sm_destination,
                                    __global const float * __restrict__ gm_source,
                                    const int                           dataCountPerAtom)
{
    const size_t threadLocalIndex = ((get_local_id(2) * get_local_size(1) + get_local_id(1)) * get_local_size(0) + get_local_id(0));
    const size_t localIndex       = threadLocalIndex;
    const size_t globalIndexBase  = get_group_id(XX) * atomsPerBlock * dataCountPerAtom;
    const size_t globalIndex      = globalIndexBase + localIndex;
    const int    globalCheck      = pme_gpu_check_atom_data_index(globalIndex, kernelParams.atoms.nAtoms * dataCountPerAtom);
    if ((localIndex < atomsPerBlock * dataCountPerAtom) & globalCheck)
    {
        assert(isfinite(gm_source[globalIndex]));
        sm_destination[localIndex] = gm_source[globalIndex];
    }
}
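
/* A worked example of the staging indexing above (sizes are an assumption, for illustration
 * only): with atomsPerBlock == 8 and dataCountPerAtom == DIM == 3, work group g copies the
 * 24 floats gm_source[24 * g .. 24 * g + 23] into sm_destination[0 .. 23], one float per work
 * item; work items with localIndex >= 24 (or past nAtoms * dataCountPerAtom) simply do nothing.
 */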
/*! \brief
 * PME GPU spline parameter and gridline indices calculation.
 * This corresponds to the CPU functions calc_interpolation_idx() and make_bsplines().
 * First stage of the whole kernel.
 *
 * \param[in]  kernelParams             Input PME GPU data in constant memory.
 * \param[in]  atomIndexOffset          Starting atom index for the execution block w.r.t. global memory.
 * \param[in]  sm_coordinates           Atom coordinates in the shared memory (rvec).
 * \param[in]  sm_coefficients          Atom charges/coefficients in the shared memory.
 * \param[out] sm_theta                 Atom spline values in the shared memory.
 * \param[out] sm_gridlineIndices       Atom gridline indices in the shared memory.
 * \param[out] sm_fractCoords           Atom fractional coordinates (rvec).
 * \param[out] gm_theta                 Atom spline parameter values.
 * \param[out] gm_dtheta                Atom spline parameter derivatives.
 * \param[out] gm_gridlineIndices       Same as \p sm_gridlineIndices but in global memory.
 * \param[in]  gm_fractShiftsTable      Atom fractional coordinates correction table.
 * \param[in]  gm_gridlineIndicesTable  Table of gridline indices matching the fractional coordinates correction table.
 */
inline void calculate_splines(const struct PmeOpenCLKernelParams  kernelParams,
                              const int                           atomIndexOffset,
                              __local const float * __restrict__  sm_coordinates,
                              __local const float * __restrict__  sm_coefficients,
                              __local float * __restrict__        sm_theta,
                              __local int * __restrict__          sm_gridlineIndices,
                              __local float * __restrict__        sm_fractCoords,
                              __global float * __restrict__       gm_theta,
                              __global float * __restrict__       gm_dtheta,
                              __global int * __restrict__         gm_gridlineIndices,
                              __global const float * __restrict__ gm_fractShiftsTable,
                              __global const int * __restrict__   gm_gridlineIndicesTable)
{
    /* Thread index w.r.t. block */
    const int threadLocalIndex = ((get_local_id(2) * get_local_size(1) + get_local_id(1)) * get_local_size(0) + get_local_id(0));
    /* Warp index w.r.t. block - could probably be obtained easier? */
    const int warpIndex = threadLocalIndex / warp_size;
    /* Thread index w.r.t. warp */
    const int threadWarpIndex = threadLocalIndex % warp_size;
    /* Atom index w.r.t. warp - alternating 0 1 0 1 ... */
    const int atomWarpIndex = threadWarpIndex % atomsPerWarp;
    /* Atom index w.r.t. block/shared memory */
    const int atomIndexLocal = warpIndex * atomsPerWarp + atomWarpIndex;

    /* Atom index w.r.t. global memory */
    const int atomIndexGlobal = atomIndexOffset + atomIndexLocal;
    /* Spline contribution index in one dimension */
    const int orderIndex = threadWarpIndex / (atomsPerWarp * DIM);
    /* Dimension index */
    const int dimIndex = (threadWarpIndex - orderIndex * (atomsPerWarp * DIM)) / atomsPerWarp;

    /* Multi-purpose index of rvec/ivec atom data */
    const int sharedMemoryIndex = atomIndexLocal * DIM + dimIndex;
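
    /* A sketch of the warp decomposition above, under the assumption warp_size == 32 and
     * atomsPerWarp == 2 (neither is guaranteed by this file; both come from outer definitions):
     * threadWarpIndex = orderIndex * 6 + dimIndex * 2 + atomWarpIndex, so lanes 0..5 cover
     * (orderIndex 0) x (dimIndex 0..2) x (atomWarpIndex 0..1), lanes 6..11 cover orderIndex 1,
     * and so on up to orderIndex 5.
     */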
    /* Spline parameters need a working buffer.
     * With PME_GPU_PARALLEL_SPLINE == 0 it is just a local array of (order) values for some of the threads, which is fine;
     * With PME_GPU_PARALLEL_SPLINE == 1 (order) times more threads are involved, so the shared memory is used to avoid overhead.
     * The buffer's size, striding and indexing are adjusted accordingly.
     * The buffer is accessed with the SPLINE_DATA_PTR and SPLINE_DATA macros.
     */
#if PME_GPU_PARALLEL_SPLINE
    #define splineDataStride (atomsPerBlock * DIM)
    const int      splineDataIndex = sharedMemoryIndex;
    __local float  sm_splineData[splineDataStride * order];
    __local float *splineDataPtr = sm_splineData;
#else
    #define splineDataStride 1
    const int      splineDataIndex = 0;
    float          splineData[splineDataStride * order];
    float         *splineDataPtr = splineData;
#endif

#define SPLINE_DATA_PTR(i) (splineDataPtr + ((i) * splineDataStride + splineDataIndex))
#define SPLINE_DATA(i)     (*SPLINE_DATA_PTR(i))
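
/* For example (illustrative only): with PME_GPU_PARALLEL_SPLINE == 0, SPLINE_DATA(i) expands to
 * splineDataPtr[i], a private per-thread array access; with PME_GPU_PARALLEL_SPLINE == 1 it
 * expands to sm_splineData[i * atomsPerBlock * DIM + sharedMemoryIndex], so the (order) values of
 * one (atom, dimension) pair sit strided across the shared buffer, one slot per cooperating thread.
 */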
    const int localCheck  = (dimIndex < DIM) && (orderIndex < (PME_GPU_PARALLEL_SPLINE ? order : 1));
    const int globalCheck = pme_gpu_check_atom_data_index(atomIndexGlobal, kernelParams.atoms.nAtoms);

    if (localCheck && globalCheck)
    {
        /* Indices interpolation */
        if (orderIndex == 0)
        {
            int          tableIndex, tInt;
            float        n, t;
            const float3 x = vload3(atomIndexLocal, sm_coordinates);

            /* Accessing fields in fshOffset/nXYZ/recipbox/... with dimIndex offset
             * puts them into local memory(!) instead of accessing the constant memory directly.
             * That's the reason for the switch, to unroll explicitly.
             * The commented parts correspond to the 0 components of the recipBox.
             */
            switch (dimIndex)
            {
                case XX:
                    tableIndex = kernelParams.grid.tablesOffsets[XX];
                    n          = kernelParams.grid.realGridSizeFP[XX];
                    t          = x.x * kernelParams.current.recipBox[dimIndex][XX] + x.y * kernelParams.current.recipBox[dimIndex][YY] + x.z * kernelParams.current.recipBox[dimIndex][ZZ];
                    break;

                case YY:
                    tableIndex = kernelParams.grid.tablesOffsets[YY];
                    n          = kernelParams.grid.realGridSizeFP[YY];
                    t          = /*x.x * kernelParams.current.recipBox[dimIndex][XX] + */ x.y * kernelParams.current.recipBox[dimIndex][YY] + x.z * kernelParams.current.recipBox[dimIndex][ZZ];
                    break;

                case ZZ:
                    tableIndex = kernelParams.grid.tablesOffsets[ZZ];
                    n          = kernelParams.grid.realGridSizeFP[ZZ];
                    t          = /*x.x * kernelParams.current.recipBox[dimIndex][XX] + x.y * kernelParams.current.recipBox[dimIndex][YY] + */ x.z * kernelParams.current.recipBox[dimIndex][ZZ];
                    break;
            }
            const float shift = c_pmeMaxUnitcellShift;

            /* Fractional coordinates along box vectors, adding a positive shift to ensure t is positive for triclinic boxes */
            t    = (t + shift) * n;
            tInt = (int)t;
            sm_fractCoords[sharedMemoryIndex] = t - tInt;
            tableIndex += tInt;
            assert(tInt >= 0);
            assert(tInt < c_pmeNeighborUnitcellCount * n);
            sm_fractCoords[sharedMemoryIndex] += gm_fractShiftsTable[tableIndex];
            sm_gridlineIndices[sharedMemoryIndex] = gm_gridlineIndicesTable[tableIndex];
            gm_gridlineIndices[atomIndexOffset * DIM + sharedMemoryIndex] = sm_gridlineIndices[sharedMemoryIndex];
        }
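
        /* A numeric sketch of the index interpolation (all values are assumptions, for
         * illustration only): for a rectangular box 4 nm long along X, recipBox[XX][XX] is
         * 0.25 nm^-1; an atom at x = 3.1 nm with a 64-point grid and a shift of 2 gives
         * t = (0.775 + 2) * 64 = 177.6, so tInt = 177 and the fractional coordinate is 0.6
         * before the fractional-shift correction is added.
         */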
        /* B-spline calculation */

        const int chargeCheck = pme_gpu_check_atom_charge(sm_coefficients[atomIndexLocal]);
        if (chargeCheck)
        {
            float div;
            int   o = orderIndex; // This is an index that is set once for PME_GPU_PARALLEL_SPLINE == 1

            const float dr = sm_fractCoords[sharedMemoryIndex];
            assert(isfinite(dr));

            /* dr is relative offset from lower cell limit */
            *SPLINE_DATA_PTR(order - 1) = 0.0f;
            *SPLINE_DATA_PTR(1)         = dr;
            *SPLINE_DATA_PTR(0)         = 1.0f - dr;

            for (int k = 3; k < order; k++)
            {
                div                     = 1.0f / (k - 1.0f);
                *SPLINE_DATA_PTR(k - 1) = div * dr * SPLINE_DATA(k - 2);
                for (int l = 1; l < (k - 1); l++)
                {
                    *SPLINE_DATA_PTR(k - l - 1) = div * ((dr + l) * SPLINE_DATA(k - l - 2) + (k - l - dr) * SPLINE_DATA(k - l - 1));
                }
                *SPLINE_DATA_PTR(0) = div * (1.0f - dr) * SPLINE_DATA(0);
            }
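
            /* A worked pass of the recursion (illustrative): starting from the linear weights
             * (1 - dr, dr), the k == 3 step yields the quadratic B-spline values
             *   ((1 - dr)^2 / 2,  ((dr + 1)(1 - dr) + (2 - dr) dr) / 2,  dr^2 / 2),
             * which sum to 1, as interpolation weights must. For order == 4 the loop stops here,
             * and the final raising to cubic order happens below, after dtheta is extracted.
             */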
            const int thetaIndexBase        = getSplineParamIndexBase(warpIndex, atomWarpIndex);
            const int thetaGlobalOffsetBase = atomIndexOffset * DIM * order;

            /* Differentiation and storing the spline derivatives (dtheta) */
#if !PME_GPU_PARALLEL_SPLINE
            // With PME_GPU_PARALLEL_SPLINE == 1, o is already set to orderIndex;
            // with PME_GPU_PARALLEL_SPLINE == 0, we loop o over range(order).
            for (o = 0; o < order; o++)
#endif
            {
                const int thetaIndex       = getSplineParamIndex(thetaIndexBase, dimIndex, o);
                const int thetaGlobalIndex = thetaGlobalOffsetBase + thetaIndex;

                const float dtheta = ((o > 0) ? SPLINE_DATA(o - 1) : 0.0f) - SPLINE_DATA(o);
                assert(isfinite(dtheta));
                gm_dtheta[thetaGlobalIndex] = dtheta;
            }
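
            /* The difference above implements the standard B-spline derivative relation
             * dM_n(u)/du = M_(n-1)(u) - M_(n-1)(u - 1) (Essmann et al., J. Chem. Phys. 103,
             * 8577 (1995)): the derivatives of the order-n spline are read off from the
             * order-(n-1) values, which is why dtheta is stored before the last recursion
             * step below.
             */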
            div                         = 1.0f / (order - 1.0f);
            *SPLINE_DATA_PTR(order - 1) = div * dr * SPLINE_DATA(order - 2);
            for (int k = 1; k < (order - 1); k++)
            {
                *SPLINE_DATA_PTR(order - k - 1) = div * ((dr + k) * SPLINE_DATA(order - k - 2) + (order - k - dr) * SPLINE_DATA(order - k - 1));
            }
            *SPLINE_DATA_PTR(0) = div * (1.0f - dr) * SPLINE_DATA(0);

            /* Storing the spline values (theta) */
#if !PME_GPU_PARALLEL_SPLINE
            // See comment for the loop above
            for (o = 0; o < order; o++)
#endif
            {
                const int thetaIndex       = getSplineParamIndex(thetaIndexBase, dimIndex, o);
                const int thetaGlobalIndex = thetaGlobalOffsetBase + thetaIndex;

                sm_theta[thetaIndex] = SPLINE_DATA(o);
                assert(isfinite(sm_theta[thetaIndex]));
                gm_theta[thetaGlobalIndex] = SPLINE_DATA(o);
            }
        }
    }
}
/*! \brief
 * Charge spreading onto the grid.
 * This corresponds to the CPU function spread_coefficients_bsplines_thread().
 * Second stage of the whole kernel.
 *
 * \param[in]  kernelParams        Input PME GPU data in constant memory.
 * \param[in]  atomIndexOffset     Starting atom index for the execution block w.r.t. global memory.
 * \param[in]  sm_coefficients     Atom coefficients/charges in the shared memory.
 * \param[in]  sm_gridlineIndices  Atom gridline indices in the shared memory.
 * \param[in]  sm_theta            Atom spline values in the shared memory.
 * \param[out] gm_grid             Global 3D grid for spreading.
 */
inline void spread_charges(const struct PmeOpenCLKernelParams kernelParams,
                           const int                          atomIndexOffset,
                           __local const float * __restrict__ sm_coefficients,
                           __local const int * __restrict__   sm_gridlineIndices,
                           __local const float * __restrict__ sm_theta,
                           __global float * __restrict__      gm_grid)
{
    const int nx  = kernelParams.grid.realGridSize[XX];
    const int ny  = kernelParams.grid.realGridSize[YY];
    const int nz  = kernelParams.grid.realGridSize[ZZ];
    const int pny = kernelParams.grid.realGridSizePadded[YY];
    const int pnz = kernelParams.grid.realGridSizePadded[ZZ];

    const int offx = 0, offy = 0, offz = 0; // unused for now
    const int atomIndexLocal  = get_local_id(ZZ);
    const int atomIndexGlobal = atomIndexOffset + atomIndexLocal;

    const int globalCheck = pme_gpu_check_atom_data_index(atomIndexGlobal, kernelParams.atoms.nAtoms);
    const int chargeCheck = pme_gpu_check_atom_charge(sm_coefficients[atomIndexLocal]);
    if (chargeCheck & globalCheck)
    {
        // Spline Y/Z coordinates
        const int ithy   = get_local_id(YY);
        const int ithz   = get_local_id(XX);
        const int ixBase = sm_gridlineIndices[atomIndexLocal * DIM + XX] - offx;
        int       iy     = sm_gridlineIndices[atomIndexLocal * DIM + YY] - offy + ithy;
        if (wrapY & (iy >= ny))
        {
            iy -= ny;
        }
        int iz = sm_gridlineIndices[atomIndexLocal * DIM + ZZ] - offz + ithz;
        if (iz >= nz)
        {
            iz -= nz;
        }
        /* Atom index w.r.t. warp - alternating 0 1 0 1 ... */
        const int atomWarpIndex = atomIndexLocal % atomsPerWarp;
        /* Warp index w.r.t. block - could probably be obtained easier? */
        const int warpIndex = atomIndexLocal / atomsPerWarp;

        const int   splineIndexBase = getSplineParamIndexBase(warpIndex, atomWarpIndex);
        const int   splineIndexZ    = getSplineParamIndex(splineIndexBase, ZZ, ithz);
        const float thetaZ          = sm_theta[splineIndexZ];
        const int   splineIndexY    = getSplineParamIndex(splineIndexBase, YY, ithy);
        const float thetaY          = sm_theta[splineIndexY];
        const float constVal        = thetaZ * thetaY * sm_coefficients[atomIndexLocal];
        assert(isfinite(constVal));
        const int constOffset = iy * pnz + iz;
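
        /* The grid is laid out row-major as [x][y][z] with padded Y/Z extents, so the flat
         * index of grid point (ix, iy, iz) is ix * pny * pnz + iy * pnz + iz. For example
         * (sizes are an assumption): with pny == 36 and pnz == 36, point (2, 3, 5) maps to
         * 2 * 1296 + 3 * 36 + 5.
         */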
        for (int ithx = 0; (ithx < order); ithx++)
        {
            int ix = ixBase + ithx;
            if (wrapX & (ix >= nx))
            {
                ix -= nx;
            }
            const int   gridIndexGlobal = ix * pny * pnz + constOffset;
            const int   splineIndexX    = getSplineParamIndex(splineIndexBase, XX, ithx);
            const float thetaX          = sm_theta[splineIndexX];
            assert(isfinite(thetaX));
            assert(isfinite(gm_grid[gridIndexGlobal]));
            /* Several atoms can contribute to the same grid point, hence the atomic update */
            atomicAdd_g_f(gm_grid + gridIndexGlobal, thetaX * constVal);
        }
    }
}

#endif //COMPILE_SPREAD_HELPERS_ONCE
/*! \brief
 * A spline computation and/or charge spreading kernel function.
 * Please see the file description for additional defines which this kernel expects.
 *
 * \param[in]     kernelParams             Input PME GPU data in constant memory.
 * \param[in,out] gm_theta                 Atom spline parameter values.
 * \param[out]    gm_dtheta                Atom spline parameter derivatives.
 * \param[in,out] gm_gridlineIndices       Atom gridline indices (ivec).
 * \param[out]    gm_grid                  Global 3D grid for charge spreading.
 * \param[in]     gm_fractShiftsTable      Atom fractional coordinates correction table.
 * \param[in]     gm_gridlineIndicesTable  Table of gridline indices matching the fractional coordinates correction table.
 * \param[in]     gm_coefficients          Atom charges/coefficients.
 * \param[in]     gm_coordinates           Atom coordinates (rvec).
 */
__attribute__((reqd_work_group_size(order, order, atomsPerBlock)))
__kernel void CUSTOMIZED_KERNEL_NAME(pme_spline_and_spread_kernel)(const struct PmeOpenCLKernelParams  kernelParams,
                                                                   __global float * __restrict__       gm_theta,
                                                                   __global float * __restrict__       gm_dtheta,
                                                                   __global int * __restrict__         gm_gridlineIndices,
                                                                   __global float * __restrict__       gm_grid,
                                                                   __global const float * __restrict__ gm_fractShiftsTable,
                                                                   __global const int * __restrict__   gm_gridlineIndicesTable,
                                                                   __global const float * __restrict__ gm_coefficients,
                                                                   __global const float * __restrict__ gm_coordinates)
{
    // Gridline indices, ivec
    __local int   sm_gridlineIndices[atomsPerBlock * DIM];
    // Charges
    __local float sm_coefficients[atomsPerBlock];
    // Spline values
    __local float sm_theta[atomsPerBlock * DIM * order];
    // Fractional coordinates - only for spline computation
    __local float sm_fractCoords[atomsPerBlock * DIM];
    // Staging coordinates - only for spline computation
    __local float sm_coordinates[DIM * atomsPerBlock];
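
    /* Shared memory budget sketch (sizes are an assumption, for illustration only): with
     * atomsPerBlock == 8, order == 4 and DIM == 3, this totals 24 ints plus 8 + 96 + 24 + 24
     * floats, i.e. 704 bytes of __local memory per work group - far below typical 32-64 KiB
     * local memory limits.
     */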
    const int atomIndexOffset = get_group_id(XX) * atomsPerBlock;

    /* Staging coefficients/charges for both spline and spread */
    pme_gpu_stage_atom_data(kernelParams, sm_coefficients, gm_coefficients, 1);

    if (computeSplines)
    {
        /* Staging coordinates */
        pme_gpu_stage_atom_data(kernelParams, sm_coordinates, gm_coordinates, DIM);

        barrier(CLK_LOCAL_MEM_FENCE);
        calculate_splines(kernelParams, atomIndexOffset, sm_coordinates,
                          sm_coefficients, sm_theta, sm_gridlineIndices,
                          sm_fractCoords, gm_theta, gm_dtheta, gm_gridlineIndices,
                          gm_fractShiftsTable, gm_gridlineIndicesTable);
#if !defined(_AMD_SOURCE_) && !defined(_NVIDIA_SOURCE_)
        /* This is only here for execution of e.g. 32-sized warps on 16-wide hardware; this was
         * gmx_syncwarp() in CUDA.
         */
        barrier(CLK_LOCAL_MEM_FENCE);
#endif
    }
    else
    {
        /* Staging the data for spread
         * (the data is assumed to be in GPU global memory with proper layout already,
         * as in after running the spline kernel)
         */
        /* Spline data - only thetas (dthetas will only be needed in gather) */
        pme_gpu_stage_atom_data(kernelParams, sm_theta, gm_theta, DIM * order);
        /* Gridline indices - they're actually int and not float, but C99 is angry about overloads */
        pme_gpu_stage_atom_data(kernelParams, (__local float *)sm_gridlineIndices, (__global const float *)gm_gridlineIndices, DIM);

        barrier(CLK_LOCAL_MEM_FENCE);
    }

    /* Spreading */
    if (spreadCharges)
    {
        spread_charges(kernelParams, atomIndexOffset, sm_coefficients, sm_gridlineIndices, sm_theta, gm_grid);
    }
}
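
/* Host-side launch sketch (a non-authoritative illustration; the actual enqueue lives in the PME
 * GPU host code): reqd_work_group_size above pins the local size to {order, order, atomsPerBlock},
 * e.g. {4, 4, 8} = 128 work items for order == 4 and atomsPerBlock == 8, and the global size along
 * dimension XX must cover ceil(nAtoms / atomsPerBlock) work groups, since atomIndexOffset is
 * get_group_id(XX) * atomsPerBlock.
 */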