/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2018, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

/*! \internal \file
 * \brief Implements PME OpenCL solving kernel.
 * When including this and other PME OpenCL kernel files, plenty of common
 * constants/macros are expected to be defined.
 * For details, please see how pme-program.cl is compiled in pme-gpu-program-impl-ocl.cpp.
 *
 * This file's solving kernel specifically expects the following definitions for its flavors:
 *
 * - gridOrdering must be defined to either XYZ or YZX
 *   and corresponds to the dimension order of the grid (GridOrdering enum in CUDA kernels);
 * - computeEnergyAndVirial must evaluate to true or false, and expresses
 *   whether the energy/virial reduction is performed.
 *
 * \author Aleksei Iupinov <a.yupinov@gmail.com>
 */
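
/* The flavor-specific definitions are assembled on the host side (see how pme-program.cl
 * is compiled in pme-gpu-program-impl-ocl.cpp). Purely as an illustrative sketch, and not
 * the authoritative list of defines, a preamble selecting one flavor of this kernel could
 * look roughly like:
 *
 *   #define gridOrdering            YZX
 *   #define computeEnergyAndVirial  true
 *
 * in addition to the common PME constants/macros shared by all PME OpenCL kernel files.
 */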
52 #include "pme-gpu-types.h"
53 #include "gromacs/gpu_utils/vectype_ops.clh"

/*! \brief
 * PME complex grid solver kernel function.
 * Please see the file description for additional defines which this kernel expects.
 *
 * \param[in]     kernelParams        Input PME GPU data in constant memory.
 * \param[in]     gm_splineModuli     B-Spline moduli.
 * \param[out]    gm_virialAndEnergy  Reduced virial and energy (only used with computeEnergyAndVirial == true).
 * \param[in,out] gm_grid             Fourier grid to transform.
 */
__attribute__((work_group_size_hint(c_solveMaxWorkGroupSize, 1, 1)))
__kernel void CUSTOMIZED_KERNEL_NAME(pme_solve_kernel)(const struct PmeOpenCLKernelParams kernelParams,
                                                       __global const float * __restrict__  gm_splineModuli,
                                                       __global float * __restrict__        gm_virialAndEnergy,
                                                       __global float2 * __restrict__       gm_grid)
{
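    /* Overall flow: each thread loads one complex grid cell, scales it by the
     * reciprocal-space Ewald factor (built from the B-spline moduli, the reciprocal
     * box and the Ewald exponential), writes it back, and - when computeEnergyAndVirial
     * is set - accumulates its energy/virial contribution for the reduction at the end.
     */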
    /* This kernel supports 2 different grid dimension orderings: YZX and XYZ */
    int majorDim, middleDim, minorDim;
    if (gridOrdering == YZX)
    {
        majorDim  = YY;
        middleDim = ZZ;
        minorDim  = XX;
    }
    if (gridOrdering == XYZ)
    {
        majorDim  = XX;
        middleDim = YY;
        minorDim  = ZZ;
    }

    __global const float * __restrict__ gm_splineValueMajor  = gm_splineModuli + kernelParams.grid.splineValuesOffset[majorDim];
    __global const float * __restrict__ gm_splineValueMiddle = gm_splineModuli + kernelParams.grid.splineValuesOffset[middleDim];
    __global const float * __restrict__ gm_splineValueMinor  = gm_splineModuli + kernelParams.grid.splineValuesOffset[minorDim];

    /* Various grid sizes and indices */
    const int localOffsetMinor = 0, localOffsetMajor = 0, localOffsetMiddle = 0; // unused
    const int localSizeMinor   = kernelParams.grid.complexGridSizePadded[minorDim];
    const int localSizeMiddle  = kernelParams.grid.complexGridSizePadded[middleDim];
    const int localCountMiddle = kernelParams.grid.complexGridSize[middleDim];
    const int localCountMinor  = kernelParams.grid.complexGridSize[minorDim];
    const int nMajor           = kernelParams.grid.realGridSize[majorDim];
    const int nMiddle          = kernelParams.grid.realGridSize[middleDim];
    const int nMinor           = kernelParams.grid.realGridSize[minorDim];
    const int maxkMajor        = (nMajor + 1) / 2;  // X or Y
    const int maxkMiddle       = (nMiddle + 1) / 2; // Y or Z => only check for !YZX
    const int maxkMinor        = (nMinor + 1) / 2;  // Z or X => only check for YZX
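    /* maxk* mark where the wrap-around happens when converting a grid index k into a
     * signed frequency m below: m = k for k < maxk, and m = k - n otherwise.
     */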

    /* Each thread works on one cell of the Fourier space complex 3D grid (gm_grid).
     * Each block handles up to c_solveMaxWorkGroupSize cells; depending on the size of the
     * contiguous grid dimension, this can range from part of a single gridline to several
     * complete gridlines.
     */
    const int threadLocalId     = get_local_id(XX);
    const int gridLineSize      = localCountMinor;
    const int gridLineIndex     = threadLocalId / gridLineSize;
    const int gridLineCellIndex = threadLocalId - gridLineSize * gridLineIndex;
    const int gridLinesPerBlock = get_local_size(XX) / gridLineSize;
    const int activeWarps       = (get_local_size(XX) / warp_size);
    const int indexMinor        = get_group_id(XX) * get_local_size(XX) + gridLineCellIndex;
    const int indexMiddle       = get_group_id(YY) * gridLinesPerBlock + gridLineIndex;
    const int indexMajor        = get_group_id(ZZ);

    /* Optional outputs */
    float energy = 0.0f;
    float virxx  = 0.0f;
    float virxy  = 0.0f;
    float virxz  = 0.0f;
    float viryy  = 0.0f;
    float viryz  = 0.0f;
    float virzz  = 0.0f;

    assert(indexMajor < kernelParams.grid.complexGridSize[majorDim]);
    if ((indexMiddle < localCountMiddle) & (indexMinor < localCountMinor) & (gridLineIndex < gridLinesPerBlock))
    {
        /* The offset should be equal to the global thread index for coalesced access */
        const int gridIndex = (indexMajor * localSizeMiddle + indexMiddle) * localSizeMinor + indexMinor;
        __global float2 * __restrict__ gm_gridCell = gm_grid + gridIndex;

        const int   kMajor = indexMajor + localOffsetMajor;
        /* Checking either X in XYZ, or Y in YZX cases */
        const float mMajor = (kMajor < maxkMajor) ? kMajor : (kMajor - nMajor);

        const int kMiddle = indexMiddle + localOffsetMiddle;
        float     mMiddle = kMiddle;
        /* Checking Y in XYZ case */
        if (gridOrdering == XYZ)
        {
            mMiddle = (kMiddle < maxkMiddle) ? kMiddle : (kMiddle - nMiddle);
        }
        const int kMinor = localOffsetMinor + indexMinor;
        float     mMinor = kMinor;
        /* Checking X in YZX case */
        if (gridOrdering == YZX)
        {
            mMinor = (kMinor < maxkMinor) ? kMinor : (kMinor - nMinor);
        }
        /* We should skip the k-space point (0,0,0) */
        const bool notZeroPoint = (kMinor > 0) | (kMajor > 0) | (kMiddle > 0);
        float mX, mY, mZ;

        if (gridOrdering == YZX)
        {
            mX = mMinor;
            mY = mMajor;
            mZ = mMiddle;
        }
        if (gridOrdering == XYZ)
        {
            mX = mMajor;
            mY = mMiddle;
            mZ = mMinor;
        }

        /* 0.5 correction factor for the first and last components of a Z dimension */
        float corner_fac = 1.0f;
        if (gridOrdering == YZX)
        {
            if ((kMiddle == 0) | (kMiddle == maxkMiddle))
            {
                corner_fac = 0.5f;
            }
        }
        if (gridOrdering == XYZ)
        {
            if ((kMinor == 0) | (kMinor == maxkMinor))
            {
                corner_fac = 0.5f;
            }
        }

        if (notZeroPoint)
        {
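            /* Convert the integer frequencies (mX, mY, mZ) into the Cartesian components of
             * the reciprocal-space vector m times the reciprocal box; the reciprocal box
             * matrix is triangular, hence the staggered sums below.
             */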
            const float mhxk = mX * kernelParams.current.recipBox[XX][XX];
            const float mhyk = mX * kernelParams.current.recipBox[XX][YY] + mY * kernelParams.current.recipBox[YY][YY];
            const float mhzk = mX * kernelParams.current.recipBox[XX][ZZ] + mY * kernelParams.current.recipBox[YY][ZZ] + mZ * kernelParams.current.recipBox[ZZ][ZZ];

            const float m2k = mhxk * mhxk + mhyk * mhyk + mhzk * mhzk;

            const float denom = m2k * M_PI_F * kernelParams.current.boxVolume * gm_splineValueMajor[kMajor] * gm_splineValueMiddle[kMiddle] * gm_splineValueMinor[kMinor];
            assert(isfinite(denom));
            assert(denom != 0.0f);
            const float tmp1   = exp(-kernelParams.grid.ewaldFactor * m2k);
            const float etermk = kernelParams.constants.elFactor * tmp1 / denom;
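
            /* Up to constants, etermk corresponds to the usual smooth-PME reciprocal-space
             * factor exp(-pi^2 m^2 / beta^2) / (pi V B(m) m^2): kernelParams.grid.ewaldFactor
             * carries the exponential prefactor, the B-spline moduli products give B(m), and
             * kernelParams.constants.elFactor is the electrostatic conversion factor.
             * (Interpretation added for readability; the authoritative definitions live in
             * pme-gpu-types.h and the host-side solve setup.)
             */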

            float2       gridValue    = *gm_gridCell;
            const float2 oldGridValue = gridValue;

            gridValue.x *= etermk;
            gridValue.y *= etermk;
            *gm_gridCell = gridValue;

            if (computeEnergyAndVirial)
            {
                const float tmp1k = 2.0f * (gridValue.x * oldGridValue.x + gridValue.y * oldGridValue.y);

                const float vfactor = (kernelParams.grid.ewaldFactor + 1.0f / m2k) * 2.0f;
                const float ets2    = corner_fac * tmp1k;
                energy = ets2;

                const float ets2vf = ets2 * vfactor;

                virxx = ets2vf * mhxk * mhxk - ets2;
                virxy = ets2vf * mhxk * mhyk;
                virxz = ets2vf * mhxk * mhzk;
                viryy = ets2vf * mhyk * mhyk - ets2;
                viryz = ets2vf * mhyk * mhzk;
                virzz = ets2vf * mhzk * mhzk - ets2;
            }
        }
    }

    // This is only for the reduction below. OpenCL 1.2: all local memory must be declared at kernel scope.
    __local float sm_virialAndEnergy[c_virialAndEnergyCount * warp_size];

    /* Optional energy/virial reduction */
    if (computeEnergyAndVirial)
    {
        // TODO: implement AMD intrinsics reduction, like with shuffles in CUDA version. #2514

        /* Shared memory reduction with atomics.
         * Each component is first reduced into warp_size positions in the shared memory;
         * then the first few warps reduce everything further and add to the global memory.
         * This can likely be improved, but is anyway faster than the previous straightforward reduction,
         * which was using too much shared memory (for storing all 7 floats on each thread).
         */

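        /* Layout used below: component c (0..6 = virxx, viryy, virzz, virxy, virxz, viryz, energy)
         * occupies sm_virialAndEnergy[c * warp_size .. c * warp_size + warp_size - 1], indexed by lane.
         */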
        const int  lane      = threadLocalId & (warp_size - 1);
        const int  warpIndex = threadLocalId / warp_size;
        const bool firstWarp = (warpIndex == 0);
        if (firstWarp)
        {
            sm_virialAndEnergy[0 * warp_size + lane] = virxx;
            sm_virialAndEnergy[1 * warp_size + lane] = viryy;
            sm_virialAndEnergy[2 * warp_size + lane] = virzz;
            sm_virialAndEnergy[3 * warp_size + lane] = virxy;
            sm_virialAndEnergy[4 * warp_size + lane] = virxz;
            sm_virialAndEnergy[5 * warp_size + lane] = viryz;
            sm_virialAndEnergy[6 * warp_size + lane] = energy;
        }
        barrier(CLK_LOCAL_MEM_FENCE);
        if (!firstWarp)
        {
            atomicAdd_l_f(sm_virialAndEnergy + 0 * warp_size + lane, virxx);
            atomicAdd_l_f(sm_virialAndEnergy + 1 * warp_size + lane, viryy);
            atomicAdd_l_f(sm_virialAndEnergy + 2 * warp_size + lane, virzz);
            atomicAdd_l_f(sm_virialAndEnergy + 3 * warp_size + lane, virxy);
            atomicAdd_l_f(sm_virialAndEnergy + 4 * warp_size + lane, virxz);
            atomicAdd_l_f(sm_virialAndEnergy + 5 * warp_size + lane, viryz);
            atomicAdd_l_f(sm_virialAndEnergy + 6 * warp_size + lane, energy);
        }
        barrier(CLK_LOCAL_MEM_FENCE);

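        /* Distribute the c_virialAndEnergyCount components over the active warps; each assigned
         * warp tree-reduces the warp_size partial sums of its component in shared memory and
         * then adds the result to the global output.
         */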
        const int numIter = (c_virialAndEnergyCount + activeWarps - 1) / activeWarps;
        for (int i = 0; i < numIter; i++)
        {
            const int componentIndex = i * activeWarps + warpIndex;
            if (componentIndex < c_virialAndEnergyCount)
            {
                const int targetIndex = componentIndex * warp_size + lane;

                for (int reductionStride = warp_size >> 1; reductionStride >= 1; reductionStride >>= 1)
                {
                    if (lane < reductionStride)
                    {
                        sm_virialAndEnergy[targetIndex] += sm_virialAndEnergy[targetIndex + reductionStride];
                    }
#ifdef _NVIDIA_SOURCE_
                    /* FIXME: this loop already executes within the execution width (warp), but NVIDIA OpenCL,
                     * of all things, fails without the memory barrier here. #2519
                     */
                    barrier(CLK_LOCAL_MEM_FENCE);
#endif
                }
                if (lane == 0)
                {
                    atomicAdd_g_f(gm_virialAndEnergy + componentIndex, sm_virialAndEnergy[targetIndex]);
                }
            }
        }
    }
}