2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 2018,2019, by the GROMACS development team, led by
5 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6 * and including many others, as listed in the AUTHORS file in the
7 * top-level source directory and at http://www.gromacs.org.
9 * GROMACS is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public License
11 * as published by the Free Software Foundation; either version 2.1
12 * of the License, or (at your option) any later version.
14 * GROMACS is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with GROMACS; if not, see
21 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24 * If you want to redistribute modifications to GROMACS, please
25 * consider that scientific software is very special. Version
26 * control is crucial - bugs must be traceable. We will be happy to
27 * consider code for inclusion in the official distribution, but
28 * derived work must not be called official GROMACS. Details are found
29 * in the README & COPYING files - if they are missing, get the
30 * official version at http://www.gromacs.org.
32 * To help us fund GROMACS development, we humbly ask that you cite
33 * the research papers on the package. Check out http://www.gromacs.org.
38 * Implements PmeGpuProgramImpl, which stores permanent PME GPU context-derived data,
39 * such as (compiled) kernel handles.
41 * \author Aleksei Iupinov <a.yupinov@gmail.com>
42 * \ingroup module_ewald
46 #include "gromacs/gpu_utils/gmxopencl.h"
47 #include "gromacs/gpu_utils/ocl_compiler.h"
48 #include "gromacs/utility/stringutil.h"
50 #include "pme_gpu_constants.h"
51 #include "pme_gpu_internal.h" // for GridOrdering enum
52 #include "pme_gpu_program_impl.h"
53 #include "pme_gpu_types_host.h"
56 PmeGpuProgramImpl::PmeGpuProgramImpl(const gmx_device_info_t
*deviceInfo
)
58 // Context creation (which should happen outside of this class: #2522)
59 cl_platform_id platformId
= deviceInfo
->ocl_gpu_id
.ocl_platform_id
;
60 cl_device_id deviceId
= deviceInfo
->ocl_gpu_id
.ocl_device_id
;
61 cl_context_properties contextProperties
[3];
62 contextProperties
[0] = CL_CONTEXT_PLATFORM
;
63 contextProperties
[1] = reinterpret_cast<cl_context_properties
>(platformId
);
64 contextProperties
[2] = 0; /* Terminates the list of properties */
67 context
= clCreateContext(contextProperties
, 1, &deviceId
, nullptr, nullptr, &clError
);
68 if (clError
!= CL_SUCCESS
)
70 const std::string errorString
= gmx::formatString("Failed to create context for PME on GPU #%s:\n OpenCL error %d: %s",
71 deviceInfo
->device_name
, clError
, ocl_get_error_string(clError
).c_str());
72 GMX_THROW(gmx::InternalError(errorString
));
76 warpSize
= gmx::ocl::getDeviceWarpSize(context
, deviceId
);
77 // TODO: for Intel ideally we'd want to set these based on the compiler warp size
78 // but given that we've done no tuning for Intel iGPU, this is as good as anything.
79 spreadWorkGroupSize
= std::min(c_spreadMaxWarpsPerBlock
* warpSize
,
80 deviceInfo
->maxWorkGroupSize
);
81 solveMaxWorkGroupSize
= std::min(c_solveMaxWarpsPerBlock
* warpSize
,
82 deviceInfo
->maxWorkGroupSize
);
83 gatherWorkGroupSize
= std::min(c_gatherMaxWarpsPerBlock
* warpSize
,
84 deviceInfo
->maxWorkGroupSize
);
86 compileKernels(deviceInfo
);
/*! \brief Releases all compiled kernel handles and the OpenCL context. */
PmeGpuProgramImpl::~PmeGpuProgramImpl()
{
    // TODO: log releasing errors
    cl_int gmx_used_in_debug stat = 0;
    // Release every kernel handle created in compileKernels(), then the context.
    const cl_kernel kernelsToRelease[] = {
        splineAndSpreadKernel,
        splineKernel,
        spreadKernel,
        gatherKernel,
        gatherReduceWithInputKernel,
        solveXYZKernel,
        solveXYZEnergyKernel,
        solveYZXKernel,
        solveYZXEnergyKernel,
    };
    for (cl_kernel kernelHandle : kernelsToRelease)
    {
        stat |= clReleaseKernel(kernelHandle);
    }
    stat |= clReleaseContext(context);
    GMX_ASSERT(stat == CL_SUCCESS,
               gmx::formatString("Failed to release PME OpenCL resources %d: %s",
                                 stat, ocl_get_error_string(stat).c_str()).c_str());
}
107 /*! \brief Ensure that spread/gather kernels have been compiled to a suitable warp size
109 * On Intel the exec width/warp is decided at compile-time and can be
110 * smaller than the minimum order^2 required in spread/gather ATM which
111 * we need to check for.
113 static void checkRequiredWarpSize(cl_kernel kernel
,
114 const char* kernelName
,
115 const gmx_device_info_t
*deviceInfo
)
117 if (deviceInfo
->vendor_e
== OCL_VENDOR_INTEL
)
119 size_t kernelWarpSize
= gmx::ocl::getKernelWarpSize(kernel
, deviceInfo
->ocl_gpu_id
.ocl_device_id
);
121 if (kernelWarpSize
< c_pmeSpreadGatherMinWarpSize
)
123 const std::string errorString
= gmx::formatString("PME OpenCL kernels require >=%d execution width, but the %s kernel "
124 "has been compiled for the device %s to a %zu width and therefore it can not execute correctly.",
125 c_pmeSpreadGatherMinWarpSize
, kernelName
,
126 deviceInfo
->device_name
, kernelWarpSize
);
127 GMX_THROW(gmx::InternalError(errorString
));
/*! \brief JIT-compiles the PME OpenCL program and maps each produced kernel,
 * by its function name, onto the matching member kernel handle.
 *
 * NOTE(review): this fragment appears to have lost lines in transit — the
 * opening/closing braces, the try blocks implied by the catch handlers below,
 * and several formatString/compileProgram arguments are not visible (the
 * visible argument list does not match the visible format string). The tokens
 * below are reproduced verbatim with commentary only; restore the full body
 * from version control before building.
 */
void PmeGpuProgramImpl::compileKernels(const gmx_device_info_t *deviceInfo)
// We might consider storing program as a member variable if it's needed later
cl_program program = nullptr;
/* Need to catch std::bad_alloc here and during compilation string handling. */
/* Here we pass macros and static const int variables defined in include
 * files outside as macros, to avoid including those files
 * in the JIT compilation that happens at runtime.
 */
// Build the -D define string handed to the OpenCL JIT compiler.
const std::string commonDefines = gmx::formatString(
        "-DatomsPerWarp=%zd "
        "-DthreadsPerAtom=%d "
        // forwarding from pme_grid.h, used for spline computation table sizes only
        "-Dc_pmeMaxUnitcellShift=%f "
        // forwarding PME behavior constants from pme_gpu_constants.h
        "-Dc_skipNeutralAtoms=%d "
        "-Dc_virialAndEnergyCount=%d "
        // forwarding kernel work sizes
        "-Dc_spreadWorkGroupSize=%zd "
        "-Dc_solveMaxWorkGroupSize=%zd "
        "-Dc_gatherWorkGroupSize=%zd "
        // forwarding from vectypes.h
        "-DDIM=%d -DXX=%d -DYY=%d -DZZ=%d "
        // decomposition parameter placeholders
        "-DwrapX=true -DwrapY=true ",
        warpSize / c_pmeSpreadGatherThreadsPerAtom,
        c_pmeSpreadGatherThreadsPerAtom,
        static_cast<float>(c_pmeMaxUnitcellShift),
        static_cast<int>(c_usePadding),
        static_cast<int>(c_skipNeutralAtoms),
        c_virialAndEnergyCount,
        // NOTE(review): further arguments (spread/gather work-group sizes,
        // DIM/XX/YY/ZZ) appear to be missing here relative to the format
        // string above — TODO confirm against version control.
        solveMaxWorkGroupSize,
/* TODO when we have a proper MPI-aware logging module,
   the log output here should be written there */
// Compile the single PME program containing every kernel; build log goes to stderr.
program = gmx::ocl::compileProgram(stderr,
        // NOTE(review): the source path/defines/context arguments appear to be
        // missing here — TODO confirm against version control.
        deviceInfo->ocl_gpu_id.ocl_device_id,
        deviceInfo->vendor_e);
// Add GPU identification to any compilation failure before rethrowing.
catch (gmx::GromacsException &e)
e.prependContext(gmx::formatString("Failed to compile PME kernels for GPU #%s\n",
                                   deviceInfo->device_name));
GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
constexpr cl_uint expectedKernelCount = 9;
// Has to be equal or larger than the number of kernel instances.
// If it is not, CL_INVALID_VALUE will be thrown.
std::vector<cl_kernel> kernels(expectedKernelCount, nullptr);
cl_uint actualKernelCount = 0;
cl_int clError = clCreateKernelsInProgram(program, kernels.size(), kernels.data(), &actualKernelCount);
if (clError != CL_SUCCESS)
const std::string errorString = gmx::formatString("Failed to create kernels for PME on GPU #%s:\n OpenCL error %d: %s",
                                                  deviceInfo->device_name, clError, ocl_get_error_string(clError).c_str());
GMX_THROW(gmx::InternalError(errorString));
// Drop the unused trailing slots so the loop below only visits real kernels.
kernels.resize(actualKernelCount);
std::array<char, 100> kernelNamesBuffer;
for (const auto &kernel : kernels)
// Query each kernel's function name to map it onto the right member handle.
clError = clGetKernelInfo(kernel, CL_KERNEL_FUNCTION_NAME,
                          kernelNamesBuffer.size(), kernelNamesBuffer.data(), nullptr);
if (clError != CL_SUCCESS)
const std::string errorString = gmx::formatString("Failed to parse kernels for PME on GPU #%s:\n OpenCL error %d: %s",
                                                  deviceInfo->device_name, clError, ocl_get_error_string(clError).c_str());
GMX_THROW(gmx::InternalError(errorString));
// The names below must correspond to those defined in pme_program.cl
// TODO use a map with string key instead?
if (!strcmp(kernelNamesBuffer.data(), "pmeSplineKernel"))
splineKernel = kernel;
else if (!strcmp(kernelNamesBuffer.data(), "pmeSplineAndSpreadKernel"))
splineAndSpreadKernel = kernel;
// Spread/gather kernels must meet the minimum execution width (Intel check).
checkRequiredWarpSize(splineAndSpreadKernel, kernelNamesBuffer.data(), deviceInfo);
else if (!strcmp(kernelNamesBuffer.data(), "pmeSpreadKernel"))
spreadKernel = kernel;
checkRequiredWarpSize(spreadKernel, kernelNamesBuffer.data(), deviceInfo);
else if (!strcmp(kernelNamesBuffer.data(), "pmeGatherKernel"))
gatherKernel = kernel;
checkRequiredWarpSize(gatherKernel, kernelNamesBuffer.data(), deviceInfo);
else if (!strcmp(kernelNamesBuffer.data(), "pmeGatherReduceWithInputKernel"))
gatherReduceWithInputKernel = kernel;
checkRequiredWarpSize(gatherReduceWithInputKernel, kernelNamesBuffer.data(), deviceInfo);
// Solve kernels have no width constraint, so no warp-size check below.
else if (!strcmp(kernelNamesBuffer.data(), "pmeSolveYZXKernel"))
solveYZXKernel = kernel;
else if (!strcmp(kernelNamesBuffer.data(), "pmeSolveYZXEnergyKernel"))
solveYZXEnergyKernel = kernel;
else if (!strcmp(kernelNamesBuffer.data(), "pmeSolveXYZKernel"))
solveXYZKernel = kernel;
else if (!strcmp(kernelNamesBuffer.data(), "pmeSolveXYZEnergyKernel"))
solveXYZEnergyKernel = kernel;
// The kernel objects hold their own references; the program can be released now.
clReleaseProgram(program);