/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2010, The GROMACS development team.
 * Copyright (c) 2012,2013,2014,2015,2016 by the GROMACS development team.
 * Copyright (c) 2017,2018,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \libinternal \file
 * \brief Declare functions for detection and initialization for GPU devices.
 *
 * \author Szilard Pall <pall.szilard@gmail.com>
 * \author Mark Abraham <mark.j.abraham@gmail.com>
 */
#ifndef GMX_GPU_UTILS_GPU_UTILS_H
#define GMX_GPU_UTILS_GPU_UTILS_H

#include <string>
#include <vector>

#include "gromacs/gpu_utils/gpu_macros.h"
#include "gromacs/utility/basedefinitions.h"
namespace gmx
{
class MDLogger;
}

struct DeviceInformation;
struct gmx_gpu_info_t;
//! Enum which is only used to describe transfer calls at the moment
enum class GpuApiCallBehavior
{
    Sync,
    Async
};

//! Types of actions associated to waiting or checking the completion of GPU tasks
enum class GpuTaskCompletion
{
    Wait, /*<< Issue a blocking wait for the task */
    Check /*<< Only check whether the task has completed */
};
/*! \brief Return whether GPUs can be detected
 *
 * Returns true when this is a build of \Gromacs configured to support
 * GPU usage, GPU detection is not disabled by an environment variable
 * and a valid device driver, ICD, and/or runtime was detected.
 */
bool canPerformGpuDetection();
/*! \brief Return whether GPU detection is functioning correctly
 *
 * Returns true when this is a build of \Gromacs configured to support
 * GPU usage, and a valid device driver, ICD, and/or runtime was detected.
 *
 * This function is not intended to be called from build
 * configurations that do not support GPUs, and there will be no
 * descriptive message in that case.
 *
 * \param[out] errorMessage When returning false on a build configured with
 *                          GPU support and non-nullptr was passed,
 *                          the string contains a descriptive message about
 *                          why GPUs cannot be detected.
 */
GPU_FUNC_QUALIFIER
bool isGpuDetectionFunctional(std::string* GPU_FUNC_ARGUMENT(errorMessage)) GPU_FUNC_TERM_WITH_RETURN(false);
/*! \brief Find all GPUs in the system.
 *
 * Will detect every GPU supported by the device driver in use.
 * Must only be called if canPerformGpuDetection() has returned true.
 * This routine also checks the compatibility of each device and fills the
 * gpu_info->deviceInfo array with the required information on each device:
 * ID, device properties, status.
 *
 * Note that this function leaves the GPU runtime API error state clean;
 * this is implemented ATM in the CUDA flavor.
 * TODO: check if errors do propagate in OpenCL as they do in CUDA and
 * whether there is a mechanism to "clear" them.
 *
 * \param[in] gpu_info pointer to structure holding GPU information.
 *
 * \throws InternalError if a GPU API returns an unexpected failure (because
 *         the call to canPerformGpuDetection() should always prevent this occurring)
 */
GPU_FUNC_QUALIFIER
void findGpus(gmx_gpu_info_t* GPU_FUNC_ARGUMENT(gpu_info)) GPU_FUNC_TERM;
/*! \brief Return a container of the detected GPUs that are compatible.
 *
 * This function filters the result of the detection for compatible
 * GPUs, based on the previously run compatibility tests.
 *
 * \param[in] gpu_info Information detected about GPUs, including compatibility.
 * \return Vector of IDs of GPUs already recorded as compatible.
 */
std::vector<int> getCompatibleGpus(const gmx_gpu_info_t& gpu_info);
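
/* The declarations above form the typical detection flow. The following is an
 * illustrative sketch only (not part of the GROMACS API), assuming a
 * caller-owned gmx_gpu_info_t instance named gpuInfo whose full definition
 * comes from elsewhere in the gpu_utils module:
 *
 *   if (canPerformGpuDetection())
 *   {
 *       findGpus(&gpuInfo);
 *       const std::vector<int> compatibleGpus = getCompatibleGpus(gpuInfo);
 *   }
 */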
/*! \brief Return a string describing how compatible the GPU with given \c index is.
 *
 * \param[in] gpu_info Information about detected GPUs
 * \param[in] index    index of GPU to ask about
 * \returns A null-terminated C string describing the compatibility status, useful for error messages.
 */
const char* getGpuCompatibilityDescription(const gmx_gpu_info_t& gpu_info, int index);
/*! \brief Frees the gpu_dev and dev_use array fields of \p gpu_info.
 *
 * \param[in] gpu_info pointer to structure holding GPU information
 */
void free_gpu_info(const gmx_gpu_info_t* gpu_info);
/*! \brief Initializes the GPU described by \c deviceInfo.
 *
 * TODO Doxygen complains about these - probably a Doxygen bug, since
 * the patterns here are the same as elsewhere in this header.
 *
 * \param[in] deviceInfo device info of the GPU to initialize
 *
 * Issues a fatal error for any critical errors that occur during
 * initialization.
 */
GPU_FUNC_QUALIFIER
void init_gpu(const DeviceInformation* GPU_FUNC_ARGUMENT(deviceInfo)) GPU_FUNC_TERM;
/*! \brief Frees up the CUDA GPU used by the active context at the time of calling.
 *
 * If \c deviceInfo is nullptr, then it is understood that no device
 * was selected so no context is active to be freed. Otherwise, the
 * context is explicitly destroyed and therefore all data uploaded to
 * the GPU is lost. This must only be called when none of this data is
 * required anymore, because subsequent attempts to free memory
 * associated with the context will otherwise fail.
 *
 * Calls gmx_warning upon errors.
 *
 * \param[in] deviceInfo device info of the GPU to clean up for
 */
CUDA_FUNC_QUALIFIER
void free_gpu(const DeviceInformation* CUDA_FUNC_ARGUMENT(deviceInfo)) CUDA_FUNC_TERM;
/*! \brief Return a pointer to the device info for \c deviceId
 *
 * \param[in] gpu_info GPU info of all detected devices in the system.
 * \param[in] deviceId ID for the GPU device requested.
 *
 * \returns Pointer to the device info for \c deviceId.
 */
GPU_FUNC_QUALIFIER
DeviceInformation* getDeviceInfo(const gmx_gpu_info_t& GPU_FUNC_ARGUMENT(gpu_info),
                                 int                   GPU_FUNC_ARGUMENT(deviceId)) GPU_FUNC_TERM_WITH_RETURN(nullptr);
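
/* Illustrative sketch only (not part of the GROMACS API): pairing device
 * lookup, initialization and cleanup, assuming gpuInfo was filled by
 * findGpus() and compatibleId is an ID returned by getCompatibleGpus():
 *
 *   const DeviceInformation* deviceInfo = getDeviceInfo(gpuInfo, compatibleId);
 *   init_gpu(deviceInfo);
 *   // ... upload data and run GPU tasks ...
 *   free_gpu(deviceInfo); // only once no data on this device is needed anymore
 */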
/*! \brief Returns the device ID of the CUDA GPU currently in use.
 *
 * The GPU used is the one that is active at the time of the call in the active context.
 *
 * \returns device ID of the GPU in use at the time of the call
 */
CUDA_FUNC_QUALIFIER
int get_current_cuda_gpu_device_id() CUDA_FUNC_TERM_WITH_RETURN(-1);
/*! \brief Formats and returns a device information string for a given GPU.
 *
 * Given an index *directly* into the array of available GPUs (gpu_dev),
 * returns a formatted info string for the respective GPU which includes
 * ID, name, compute capability, and detection status.
 *
 * \param[out] s        pointer to output string (has to be allocated externally)
 * \param[in]  gpu_info Information about detected GPUs
 * \param[in]  index    an index *directly* into the array of available GPUs
 */
GPU_FUNC_QUALIFIER
void get_gpu_device_info_string(char*                 GPU_FUNC_ARGUMENT(s),
                                const gmx_gpu_info_t& GPU_FUNC_ARGUMENT(gpu_info),
                                int                   GPU_FUNC_ARGUMENT(index)) GPU_FUNC_TERM;
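
/* Illustrative sketch only (not part of the GROMACS API): printing a
 * description of every compatible device found during detection. The buffer
 * size here is an arbitrary assumption; the caller must allocate it:
 *
 *   char stmp[256];
 *   for (int gpuId : getCompatibleGpus(gpuInfo))
 *   {
 *       get_gpu_device_info_string(stmp, gpuInfo, gpuId);
 *       fprintf(stderr, "  %s\n", stmp);
 *   }
 */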
/*! \brief Returns the size of the gpu_dev_info struct.
 *
 * The size of gpu_dev_info can be used for allocation and communication.
 *
 * \returns size in bytes of gpu_dev_info
 */
GPU_FUNC_QUALIFIER
size_t sizeof_gpu_dev_info() GPU_FUNC_TERM_WITH_RETURN(0);
//! Get status of device with specified index
int gpu_info_get_stat(const gmx_gpu_info_t& info, int index);
/*! \brief Check if GROMACS has been built with GPU support.
 *
 * \param[in] error Pointer to error string or nullptr.
 * \todo Move this to NB module once it exists.
 */
bool buildSupportsNonbondedOnGpu(std::string* error);
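
/* Illustrative sketch only (not part of the GROMACS API): checking for GPU
 * support in the build before attempting any GPU setup:
 *
 *   std::string errorMessage;
 *   if (!buildSupportsNonbondedOnGpu(&errorMessage))
 *   {
 *       // fall back to CPU kernels; errorMessage explains why GPUs cannot be used
 *   }
 */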
/*! \brief Starts the GPU profiler if mdrun is being profiled.
 *
 * When a profiler run is in progress (based on the presence of the NVPROF_ID
 * env. var.), the profiler is started to begin collecting data during the
 * rest of the run (or until stopGpuProfiler is called).
 *
 * Note that this is implemented only for the CUDA API.
 */
CUDA_FUNC_QUALIFIER
void startGpuProfiler() CUDA_FUNC_TERM;
/*! \brief Resets the GPU profiler if mdrun is being profiled.
 *
 * When a profiler run is in progress (based on the presence of the NVPROF_ID
 * env. var.), the profiler data is reset in order to eliminate the data collected
 * from the preceding part of the run.
 *
 * This function should typically be called at the mdrun counter reset time.
 *
 * Note that this is implemented only for the CUDA API.
 */
CUDA_FUNC_QUALIFIER
void resetGpuProfiler() CUDA_FUNC_TERM;
/*! \brief Stops the CUDA profiler if mdrun is being profiled.
 *
 * This function can be called at cleanup time, when it is desirable to stop
 * subsequent API calls from being traced/profiled, e.g. before uninitialization.
 *
 * Note that this is implemented only for the CUDA API.
 */
CUDA_FUNC_QUALIFIER
void stopGpuProfiler() CUDA_FUNC_TERM;
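
/* Illustrative sketch only (not part of the GROMACS API): the intended call
 * pattern for the profiler hooks above, assuming mdrun runs under a CUDA
 * profiler so that NVPROF_ID is set:
 *
 *   startGpuProfiler(); // start collecting data for the profiled region
 *   // ... simulation steps ...
 *   resetGpuProfiler(); // at mdrun counter reset time, discard earlier data
 *   // ... steps attributed to the profiled region ...
 *   stopGpuProfiler();  // at cleanup, before uninitialization
 */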
//! Tells whether the host buffer was pinned for non-blocking transfers. Only implemented for CUDA.
CUDA_FUNC_QUALIFIER
bool isHostMemoryPinned(const void* CUDA_FUNC_ARGUMENT(h_ptr)) CUDA_FUNC_TERM_WITH_RETURN(false);
/*! \brief Enable peer access between GPUs where supported
 *
 * \param[in] gpuIdsToUse List of GPU IDs in use
 * \param[in] mdlog       Logger object
 */
CUDA_FUNC_QUALIFIER
void setupGpuDevicePeerAccess(const std::vector<int>& CUDA_FUNC_ARGUMENT(gpuIdsToUse),
                              const gmx::MDLogger&    CUDA_FUNC_ARGUMENT(mdlog)) CUDA_FUNC_TERM;

#endif