/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2014,2015,2016, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
38 #include "cudautils.cuh"
42 #include "gromacs/utility/smalloc.h"
/*** Generic CUDA data operation wrappers ***/

/*! Launches synchronous or asynchronous device to host memory copy.
 *
 *  The copy is launched in stream s or if not specified, in stream 0.
 */
static int cu_copy_D2H_generic(void * h_dest, void * d_src, size_t bytes,
                               bool bAsync = false, cudaStream_t s = 0)
    if (h_dest == NULL || d_src == NULL || bytes == 0)
        stat = cudaMemcpyAsync(h_dest, d_src, bytes, cudaMemcpyDeviceToHost, s);
        CU_RET_ERR(stat, "DtoH cudaMemcpyAsync failed");
        stat = cudaMemcpy(h_dest, d_src, bytes, cudaMemcpyDeviceToHost);
        CU_RET_ERR(stat, "DtoH cudaMemcpy failed");
int cu_copy_D2H(void * h_dest, void * d_src, size_t bytes)
{
    return cu_copy_D2H_generic(h_dest, d_src, bytes, false);
}
/*! Launches asynchronous device to host memory copy.
 *
 *  The copy is launched in stream s or if not specified, in stream 0.
 */
int cu_copy_D2H_async(void * h_dest, void * d_src, size_t bytes, cudaStream_t s = 0)
{
    return cu_copy_D2H_generic(h_dest, d_src, bytes, true, s);
}
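/* Illustrative usage sketch (not part of the original file): the async variant
 * only enqueues the copy, so the caller must synchronize on the stream (or on
 * an event recorded in it) before reading the host buffer. The names below are
 * hypothetical, and h_buf is assumed to be page-locked (e.g. allocated with
 * cudaMallocHost), otherwise the copy is not guaranteed to overlap host work.
 *
 *   cu_copy_D2H_async(h_buf, d_buf, nbytes, s);
 *   // ... independent CPU work ...
 *   cudaStreamSynchronize(s);   // h_buf is now valid
 *
 * The blocking variant needs no explicit synchronization:
 *
 *   cu_copy_D2H(h_buf, d_buf, nbytes);
 */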
/*! Launches synchronous or asynchronous host to device memory copy.
 *
 *  The copy is launched in stream s or if not specified, in stream 0.
 */
static int cu_copy_H2D_generic(void * d_dest, void * h_src, size_t bytes,
                               bool bAsync = false, cudaStream_t s = 0)
    if (d_dest == NULL || h_src == NULL || bytes == 0)
        stat = cudaMemcpyAsync(d_dest, h_src, bytes, cudaMemcpyHostToDevice, s);
        CU_RET_ERR(stat, "HtoD cudaMemcpyAsync failed");
        stat = cudaMemcpy(d_dest, h_src, bytes, cudaMemcpyHostToDevice);
        CU_RET_ERR(stat, "HtoD cudaMemcpy failed");
int cu_copy_H2D(void * d_dest, void * h_src, size_t bytes)
{
    return cu_copy_H2D_generic(d_dest, h_src, bytes, false);
}
/*! Launches asynchronous host to device memory copy.
 *
 *  The copy is launched in stream s or if not specified, in stream 0.
 */
int cu_copy_H2D_async(void * d_dest, void * h_src, size_t bytes, cudaStream_t s = 0)
{
    return cu_copy_H2D_generic(d_dest, h_src, bytes, true, s);
}
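/* Illustrative usage sketch (not part of the original file): an upload issued
 * with cu_copy_H2D_async in the same stream as a later kernel launch is ordered
 * before that kernel by the stream, so the kernel sees the new data without any
 * extra synchronization. The kernel and buffer names below are hypothetical.
 *
 *   cu_copy_H2D_async(d_coords, h_coords, natoms * sizeof(float4), s);
 *   my_kernel<<<nblocks, nthreads, 0, s>>>(d_coords);
 */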
float cu_event_elapsed(cudaEvent_t start, cudaEvent_t end)
    stat = cudaEventElapsedTime(&t, start, end);
    CU_RET_ERR(stat, "cudaEventElapsedTime failed in cu_event_elapsed");
int cu_wait_event(cudaEvent_t e)
    s = cudaEventSynchronize(e);
    CU_RET_ERR(s, "cudaEventSynchronize failed in cu_wait_event");
/*! Waits for the end event to complete.
 *
 *  If time != NULL it also calculates the time elapsed between start and end
 *  and returns it in milliseconds.
 */
int cu_wait_event_time(cudaEvent_t end, cudaEvent_t start, float *time)
    s = cudaEventSynchronize(end);
    CU_RET_ERR(s, "cudaEventSynchronize failed in cu_wait_event_time");
        *time = cu_event_elapsed(start, end);
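/* Illustrative timing sketch (not part of the original file): a pair of events
 * recorded around work enqueued in a stream can be timed with
 * cu_wait_event_time. The names below are hypothetical.
 *
 *   cudaEvent_t ev_start, ev_end;
 *   float       ms = 0;
 *
 *   cudaEventCreate(&ev_start);
 *   cudaEventCreate(&ev_end);
 *
 *   cudaEventRecord(ev_start, s);
 *   // ... enqueue kernels / copies in stream s ...
 *   cudaEventRecord(ev_end, s);
 *
 *   // blocks until ev_end is reached; ms then holds the elapsed time
 *   cu_wait_event_time(ev_end, ev_start, &ms);
 */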
/**** Operation on buffered arrays (arrays with "over-allocation" in gmx wording) *****/

/*! Frees the device memory pointed to by d_ptr and resets the size variables.
 *
 *  If the pointers to the size variables are NULL no resetting happens.
 */
void cu_free_buffered(void *d_ptr, int *n, int *nalloc)
    stat = cudaFree(d_ptr);
    CU_RET_ERR(stat, "cudaFree failed");
/*! Reallocates the device memory pointed to by d_dest and copies data to it
 *  from the host-side location pointed to by h_src.
 *
 *  The allocation is buffered, so freeing is only needed if the previously
 *  allocated space is not enough.
 *  The H2D copy is launched in stream s and can be done synchronously or
 *  asynchronously (the default is the latter).
 */
void cu_realloc_buffered(void **d_dest, void *h_src,
                         int *curr_size, int *curr_alloc_size,
    if (d_dest == NULL || req_size < 0)
    /* reallocate only if the data does not fit: the allocation size is
       smaller than the current requested size */
    if (req_size > *curr_alloc_size)
        /* only free if the array has already been initialized */
        if (*curr_alloc_size >= 0)
            cu_free_buffered(*d_dest, curr_size, curr_alloc_size);

        *curr_alloc_size = over_alloc_large(req_size);

        stat = cudaMalloc(d_dest, *curr_alloc_size * type_size);
        CU_RET_ERR(stat, "cudaMalloc failed in cu_realloc_buffered");
    /* size could have changed without actual reallocation */
    *curr_size = req_size;

    /* upload to device */
        cu_copy_H2D_async(*d_dest, h_src, *curr_size * type_size, s);
        cu_copy_H2D(*d_dest, h_src, *curr_size * type_size);
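/* Illustrative caller-side sketch (not part of the original file): buffers
 * managed with cu_realloc_buffered/cu_free_buffered are expected to start with
 * their allocation size set to -1 ("not yet allocated"), so the first call
 * always allocates. The full argument list of cu_realloc_buffered is elided
 * above, so the call below only indicates the intent; the names are
 * hypothetical.
 *
 *   float *d_xyz  = NULL;
 *   int    n      = 0;    // number of elements currently in use
 *   int    nalloc = -1;   // allocation size, -1 = not yet allocated
 *
 *   // each step: grow the device buffer if needed and upload the host data
 *   cu_realloc_buffered((void **)&d_xyz, h_xyz, ..., &n, &nalloc, ...);
 *
 *   // at teardown
 *   cu_free_buffered(d_xyz, &n, &nalloc);
 */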