 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2010,2011,2012,2013,2014,2015,2016, The GROMACS development team.
 * Copyright (c) 2017,2018,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 * \brief Define functions for detection and initialization for CUDA devices.
 *
 * \author Szilard Pall <pall.szilard@gmail.com>
 */
#include "gpu_utils.h"

#include <cuda_profiler_api.h>

#include "gromacs/gpu_utils/cudautils.cuh"
#include "gromacs/gpu_utils/device_context.h"
#include "gromacs/gpu_utils/device_stream.h"
#include "gromacs/gpu_utils/pmalloc_cuda.h"
#include "gromacs/hardware/device_information.h"
#include "gromacs/hardware/device_management.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/programcontext.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/utility/snprintf.h"
#include "gromacs/utility/stringutil.h"
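/*! \brief True when mdrun was launched under the nvprof profiler (nvprof sets the
 * NVPROF_ID environment variable), which enables the profiler start/stop/reset hooks below. */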
static bool cudaProfilerRun = ((getenv("NVPROF_ID") != nullptr));
bool isHostMemoryPinned(const void* h_ptr)
{
    cudaPointerAttributes memoryAttributes;
    cudaError_t           stat = cudaPointerGetAttributes(&memoryAttributes, h_ptr);

    bool isPinned = false;
    switch (stat)
    {
        case cudaSuccess:
            // In CUDA 11.0, the field called memoryType in
            // cudaPointerAttributes was replaced by a field called
            // type, along with a documented change of behavior when the
            // pointer passed to cudaPointerGetAttributes is to
            // non-registered host memory. That change means that this
            // code needs conditional compilation and different
            // execution paths to function with all supported versions.
#if CUDART_VERSION < 11 * 1000
            // Before CUDA 11, a success status for a host pointer implies registered (pinned) memory.
            isPinned = true;
#else
            isPinned = (memoryAttributes.type == cudaMemoryTypeHost);
#endif
            break;
        case cudaErrorInvalidValue:
            // If the buffer was not pinned, then it will not be recognized by CUDA at all
            isPinned = false;
            // Reset the last error status
            cudaGetLastError();
            break;
        default: CU_RET_ERR(stat, "Unexpected CUDA error");
    }
    return isPinned;
}
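/* A minimal sketch of the expected behaviour (the buffers below are hypothetical and
 * serve only as an illustration): memory the CUDA runtime knows as page-locked is
 * reported as pinned, ordinary pageable memory is not.
 *
 *     void* pinned = nullptr;
 *     cudaMallocHost(&pinned, 1024);   // page-locked (pinned) host allocation
 *     GMX_ASSERT(isHostMemoryPinned(pinned), "cudaMallocHost buffers should be pinned");
 *
 *     std::vector<char> pageable(1024);
 *     GMX_ASSERT(!isHostMemoryPinned(pageable.data()), "pageable buffers should not be pinned");
 *
 *     cudaFreeHost(pinned);
 */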
void startGpuProfiler(void)
{
    /* The NVPROF_ID environment variable is set by nvprof and indicates that
       mdrun is executed in the CUDA profiler.
       If nvprof was run with "--profile-from-start off", the profiler will
       be started here. This way we can avoid tracing the CUDA events from the
       first part of the run. Starting the profiler again does nothing.
    */
    if (cudaProfilerRun)
    {
        cudaError_t stat = cudaProfilerStart();
        CU_RET_ERR(stat, "cudaProfilerStart failed");
    }
}
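/* A typical invocation that relies on this hook (a sketch; the mdrun options are up to
 * the user) starts nvprof with profiling disabled, so that only the region between
 * startGpuProfiler() and stopGpuProfiler() appears in the trace:
 *
 *     nvprof --profile-from-start off gmx mdrun [options]
 */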
void stopGpuProfiler(void)
{
    /* Stopping the NVIDIA profiler here allows us to eliminate the subsequent
       API calls from the trace, e.g. uninitialization and cleanup. */
    if (cudaProfilerRun)
    {
        cudaError_t stat = cudaProfilerStop();
        CU_RET_ERR(stat, "cudaProfilerStop failed");
    }
}
void resetGpuProfiler(void)
{
    /* With CUDA <=7.5 the profiler can't be properly reset; we can only start the
     * profiling here (can't stop it), which achieves the desired effect if the run
     * was started with the profiling disabled.
     * TODO: add a stop (or replace it with reset) when this works correctly in CUDA. */
    if (cudaProfilerRun)
    {
        startGpuProfiler();
    }
}
/*! \brief Check status returned from peer access CUDA call, and error out or warn appropriately
 * \param[in] stat           CUDA call return status
 * \param[in] gpuA           ID for GPU initiating peer access call
 * \param[in] gpuB           ID for remote GPU
 * \param[in] mdlog          Logger object
 * \param[in] cudaCallName   name of CUDA peer access call
 */
static void peerAccessCheckStat(const cudaError_t    stat,
                                const int            gpuA,
                                const int            gpuB,
                                const gmx::MDLogger& mdlog,
                                const char*          cudaCallName)
{
    if ((stat == cudaErrorInvalidDevice) || (stat == cudaErrorInvalidValue))
    {
        std::string errorString =
                gmx::formatString("%s from GPU %d to GPU %d failed", cudaCallName, gpuA, gpuB);
        CU_RET_ERR(stat, errorString.c_str());
    }
    if (stat != cudaSuccess)
    {
        GMX_LOG(mdlog.warning)
                .asParagraph()
                .appendTextFormatted(
                        "GPU peer access not enabled between GPUs %d and %d due to unexpected "
                        "return value from %s: %s",
                        gpuA, gpuB, cudaCallName, cudaGetErrorString(stat));
    }
}
void setupGpuDevicePeerAccess(const std::vector<int>& gpuIdsToUse, const gmx::MDLogger& mdlog)
{
    cudaError_t stat;

    // take a note of currently-set GPU
    int currentGpu;
    stat = cudaGetDevice(&currentGpu);
    CU_RET_ERR(stat, "cudaGetDevice in setupGpuDevicePeerAccess failed");

    std::string message = gmx::formatString(
            "Note: Peer access enabled between the following GPU pairs in the node:\n ");
    bool peerAccessEnabled = false;

    for (unsigned int i = 0; i < gpuIdsToUse.size(); i++)
    {
        int gpuA = gpuIdsToUse[i];
        stat     = cudaSetDevice(gpuA);
        if (stat != cudaSuccess)
        {
            GMX_LOG(mdlog.warning)
                    .asParagraph()
                    .appendTextFormatted(
                            "GPU peer access not enabled due to unexpected return value from "
                            "cudaSetDevice(%d): %s",
                            gpuA, cudaGetErrorString(stat));
            return;
        }
        for (unsigned int j = 0; j < gpuIdsToUse.size(); j++)
        {
            if (j != i)
            {
                int gpuB          = gpuIdsToUse[j];
                int canAccessPeer = 0;
                stat              = cudaDeviceCanAccessPeer(&canAccessPeer, gpuA, gpuB);
                peerAccessCheckStat(stat, gpuA, gpuB, mdlog, "cudaDeviceCanAccessPeer");

                if (canAccessPeer)
                {
                    stat = cudaDeviceEnablePeerAccess(gpuB, 0);
                    peerAccessCheckStat(stat, gpuA, gpuB, mdlog, "cudaDeviceEnablePeerAccess");

                    message           = gmx::formatString("%s%d->%d ", message.c_str(), gpuA, gpuB);
                    peerAccessEnabled = true;
                }
            }
        }
    }

    // re-set GPU to that originally set
    stat = cudaSetDevice(currentGpu);
    if (stat != cudaSuccess)
    {
        CU_RET_ERR(stat, "cudaSetDevice in setupGpuDevicePeerAccess failed");
    }

    if (peerAccessEnabled)
    {
        GMX_LOG(mdlog.info).asParagraph().appendTextFormatted("%s", message.c_str());
    }
}
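/* A minimal sketch of how a caller is expected to use this (the id list and logger are
 * assumed to come from the caller's hardware detection and logging setup):
 *
 *     std::vector<int> gpuIdsToUse = { 0, 1 };
 *     setupGpuDevicePeerAccess(gpuIdsToUse, mdlog);
 *
 * Pairs that cannot access each other are simply skipped, so on hardware without
 * peer-to-peer capability no peer access is enabled and no error is raised.
 */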