/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2014,2015,2016,2017 by the GROMACS development team.
 * Copyright (c) 2018,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#ifndef GMX_GPU_UTILS_CUDAUTILS_CUH
#define GMX_GPU_UTILS_CUDAUTILS_CUH
44 #include "gromacs/gpu_utils/device_stream.h"
45 #include "gromacs/gpu_utils/gputraits.cuh"
46 #include "gromacs/math/vec.h"
47 #include "gromacs/math/vectypes.h"
48 #include "gromacs/utility/exceptions.h"
49 #include "gromacs/utility/fatalerror.h"
50 #include "gromacs/utility/gmxassert.h"
51 #include "gromacs/utility/stringutil.h"
namespace gmx
{
namespace
{

/*! \brief Helper function to ensure no pending error silently
 * disrupts error handling.
 *
 * Asserts in a debug build if an unhandled error is present. Issues a
 * warning at run time otherwise.
 *
 * \todo This is similar to CU_CHECK_PREV_ERR, which should be
 * consolidated.
 */
static inline void ensureNoPendingCudaError(const char* errorMessage)
{
    // Ensure there is no pending error that would otherwise affect
    // the behaviour of future error handling.
    cudaError_t stat = cudaGetLastError();
    if (stat == cudaSuccess)
    {
        return;
    }

    // If we would find an error in a release build, we do not know
    // what is appropriate to do about it, so assert only for debug
    // builds.
    auto fullMessage = formatString(
            "%s An unhandled error from a previous CUDA operation was detected. %s: %s",
            errorMessage, cudaGetErrorName(stat), cudaGetErrorString(stat));
    GMX_ASSERT(stat == cudaSuccess, fullMessage.c_str());
    // TODO When we evolve a better logging framework, use that
    // for release-build error reporting.
    gmx_warning("%s", fullMessage.c_str());
}

} // namespace
} // namespace gmx
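// A minimal usage sketch (illustrative only; the message text is hypothetical).
// Call this before issuing CUDA work whose error handling must not be confused
// by a stale, unhandled error left behind by earlier code:
//
//     gmx::ensureNoPendingCudaError("Detected before launching the kernels.");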
enum class GpuApiCallBehavior;
/* TODO error checking needs to be rewritten. We have 2 types of error checks needed
   based on where they occur in the code:
   - non performance-critical: these errors are unsafe to ignore and must
     _always_ be checked for, e.g. initializations
   - performance critical: handling errors might hurt performance, so care needs to be
     taken when/if we should check for them at all, e.g. in cu_upload_X. However, we
     should be able to turn the check for these errors on!

   Probably we'll need two sets of the macros below...
*/
#define CHECK_CUDA_ERRORS

#ifdef CHECK_CUDA_ERRORS
/*! Check for CUDA error on the return status of a CUDA RT API call. */
#    define CU_RET_ERR(status, msg) \
        do \
        { \
            if (status != cudaSuccess) \
            { \
                gmx_fatal(FARGS, "%s: %s\n", msg, cudaGetErrorString(status)); \
            } \
        } while (0)
/*! Check for any previously occurred uncaught CUDA error. */
#    define CU_CHECK_PREV_ERR() \
        do \
        { \
            cudaError_t _CU_CHECK_PREV_ERR_status = cudaGetLastError(); \
            if (_CU_CHECK_PREV_ERR_status != cudaSuccess) \
            { \
                gmx_warning( \
                        "Just caught a previously occurred CUDA error (%s), will try to " \
                        "continue.", \
                        cudaGetErrorString(_CU_CHECK_PREV_ERR_status)); \
            } \
        } while (0)
#else /* CHECK_CUDA_ERRORS */

#    define CU_RET_ERR(status, msg) \
        do \
        { \
        } while (0)

#    define CU_CHECK_PREV_ERR() \
        do \
        { \
        } while (0)

#endif /* CHECK_CUDA_ERRORS */
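// A minimal usage sketch (illustrative; the buffer and stream names are
// hypothetical):
//
//     cudaError_t stat = cudaMemcpyAsync(d_buf, h_buf, numBytes,
//                                        cudaMemcpyHostToDevice, stream);
//     CU_RET_ERR(stat, "cudaMemcpyAsync of h_buf failed");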
// TODO: the 2 functions below are pretty much a constructor/destructor of a simple
// GPU table object. There is also almost self-contained fetchFromParamLookupTable()
// in cuda_kernel_utils.cuh. They could all live in a separate class/struct file.
/*! \brief Add the triplet stored in a float3 to an rvec variable.
 *
 * \param[out]  a Rvec to increment
 * \param[in]   b Float triplet to increment with.
 */
static inline void rvec_inc(rvec a, const float3 b)
{
    rvec tmp = { b.x, b.y, b.z };
    rvec_inc(a, tmp); // dispatches to the rvec overload from gromacs/math/vec.h
}
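// For example (illustrative; the names are hypothetical), accumulating a float3
// force downloaded from the GPU into host-side rvec storage:
//
//     rvec_inc(forces[atomIndex], gpuForces[atomIndex]);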
/*! \brief Returns true if all tasks in \p deviceStream have completed.
 *
 * \param[in] deviceStream CUDA stream to check.
 *
 * \returns True if all tasks enqueued in the stream \p deviceStream (at the time of this call) have completed.
 */
static inline bool haveStreamTasksCompleted(const DeviceStream& deviceStream)
{
    cudaError_t stat = cudaStreamQuery(deviceStream.stream());

    if (stat == cudaErrorNotReady)
    {
        // work is still in progress in the stream
        return false;
    }

    GMX_ASSERT(stat != cudaErrorInvalidResourceHandle, "Stream identifier not valid");

    // cudaSuccess and cudaErrorNotReady are the expected return values
    CU_RET_ERR(stat, "Unexpected cudaStreamQuery failure");

    GMX_ASSERT(stat == cudaSuccess,
               "Values other than cudaSuccess should have been explicitly handled");

    return true;
}
/* Kernel launch helpers */
/*! \brief
 * A function for setting up a single CUDA kernel argument.
 * This is the tail of the compile-time recursive function below.
 * It has to be seen by the compiler first.
 *
 * \tparam        totalArgsCount  Number of the kernel arguments
 * \tparam        KernelPtr       Kernel function handle type
 * \param[in]     argIndex        Index of the current argument
 */
template<size_t totalArgsCount, typename KernelPtr>
void prepareGpuKernelArgument(KernelPtr /*kernel*/,
                              std::array<void*, totalArgsCount>* /* kernelArgsPtr */,
                              size_t gmx_used_in_debug argIndex)
{
    GMX_ASSERT(argIndex == totalArgsCount, "Tail expansion");
}
/*! \brief
 * Compile-time recursive function for setting up a single CUDA kernel argument.
 * This function copies a kernel argument pointer \p argPtr into \p kernelArgsPtr,
 * and calls itself on the next argument, eventually calling the tail function above.
 *
 * \tparam        CurrentArg      Type of the current argument
 * \tparam        RemainingArgs   Types of remaining arguments after the current one
 * \tparam        totalArgsCount  Number of the kernel arguments
 * \tparam        KernelPtr       Kernel function handle type
 * \param[in]     kernel          Kernel function handle
 * \param[in,out] kernelArgsPtr   Pointer to the argument array to be filled in
 * \param[in]     argIndex        Index of the current argument
 * \param[in]     argPtr          Pointer to the current argument
 * \param[in]     otherArgsPtrs   Pack of pointers to arguments remaining to process after the current one
 */
template<typename CurrentArg, typename... RemainingArgs, size_t totalArgsCount, typename KernelPtr>
void prepareGpuKernelArgument(KernelPtr kernel,
                              std::array<void*, totalArgsCount>* kernelArgsPtr,
                              size_t argIndex,
                              const CurrentArg* argPtr,
                              const RemainingArgs*... otherArgsPtrs)
{
    (*kernelArgsPtr)[argIndex] = (void*)argPtr;
    prepareGpuKernelArgument(kernel, kernelArgsPtr, argIndex + 1, otherArgsPtrs...);
}
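// Illustration (not part of the original file): for a kernel taking
// (float3* forces, int numAtoms), the call
//     prepareGpuKernelArgument(kernel, &kernelArgs, 0, &d_forces, &numAtoms);
// expands at compile time to
//     kernelArgs[0] = &d_forces;  // recurse with argIndex == 1
//     kernelArgs[1] = &numAtoms;  // recurse with argIndex == 2
//     // tail overload: GMX_ASSERT(argIndex == totalArgsCount)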
/*! \brief
 * A wrapper function for setting up all the CUDA kernel arguments.
 * Calls the recursive functions above.
 *
 * \tparam    KernelPtr  Kernel function handle type
 * \tparam    Args       Types of all the kernel arguments
 * \param[in] kernel     Kernel function handle
 * \param[in] argsPtrs   Pointers to all the kernel arguments
 * \returns A prepared parameter pack to be used with launchGpuKernel() as the last argument.
 */
template<typename KernelPtr, typename... Args>
std::array<void*, sizeof...(Args)> prepareGpuKernelArguments(KernelPtr kernel,
                                                             const KernelLaunchConfig& /* config */,
                                                             const Args*... argsPtrs)
{
    std::array<void*, sizeof...(Args)> kernelArgs;
    prepareGpuKernelArgument(kernel, &kernelArgs, 0, argsPtrs...);
    return kernelArgs;
}
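// A minimal end-to-end sketch (illustrative; the kernel, its arguments and the
// launch geometry are hypothetical), using prepareGpuKernelArguments() together
// with launchGpuKernel() below:
//
//     KernelLaunchConfig config;
//     config.blockSize[0] = 64;
//     config.gridSize[0]  = (numAtoms + 63) / 64;
//     const auto kernelArgs = prepareGpuKernelArguments(scaleKernel, config,
//                                                       &d_forces, &numAtoms);
//     launchGpuKernel(scaleKernel, config, deviceStream, nullptr, "scaleKernel",
//                     kernelArgs);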
/*! \brief Launches the CUDA kernel and handles the errors.
 *
 * \tparam    Args         Types of all the kernel arguments
 * \param[in] kernel       Kernel function handle
 * \param[in] config       Kernel configuration for launching
 * \param[in] deviceStream GPU stream to launch kernel in
 * \param[in] kernelName   Human readable kernel description, for error handling only
 * \param[in] kernelArgs   Array of the pointers to the kernel arguments, prepared by
 *                         prepareGpuKernelArguments()
 * \throws gmx::InternalError on kernel launch failure
 */
template<typename... Args>
void launchGpuKernel(void (*kernel)(Args...),
                     const KernelLaunchConfig&                 config,
                     const DeviceStream&                       deviceStream,
                     CommandEvent* /*timingEvent */,
                     const char*                               kernelName,
                     const std::array<void*, sizeof...(Args)>& kernelArgs)
{
    dim3 blockSize(config.blockSize[0], config.blockSize[1], config.blockSize[2]);
    dim3 gridSize(config.gridSize[0], config.gridSize[1], config.gridSize[2]);
    cudaLaunchKernel((void*)kernel, gridSize, blockSize, const_cast<void**>(kernelArgs.data()),
                     config.sharedMemorySize, deviceStream.stream());

    cudaError_t status = cudaGetLastError();
    if (cudaSuccess != status)
    {
        const std::string errorMessage =
                "GPU kernel (" + std::string(kernelName)
                + ") failed to launch: " + std::string(cudaGetErrorString(status));
        GMX_THROW(gmx::InternalError(errorMessage));
    }
}

#endif