/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *  \brief Define OpenCL implementation of nbnxn_gpu_data_mgmt.h
 *
 *  \author Anca Hamuraru <anca@streamcomputing.eu>
 *  \author Dimitrios Karkoulis <dimitris.karkoulis@gmail.com>
 *  \author Teemu Virolainen <teemu@streamcomputing.eu>
 *  \author Szilárd Páll <pall.szilard@gmail.com>
 */
#include "gmxpre.h"

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "gromacs/gpu_utils/gpu_utils.h"
#include "gromacs/gpu_utils/oclutils.h"
#include "gromacs/hardware/gpu_hw_info.h"
#include "gromacs/math/vectypes.h"
#include "gromacs/mdlib/force_flags.h"
#include "gromacs/mdlib/nb_verlet.h"
#include "gromacs/mdlib/nbnxn_consts.h"
#include "gromacs/mdlib/nbnxn_gpu.h"
#include "gromacs/mdlib/nbnxn_gpu_common_utils.h"
#include "gromacs/mdlib/nbnxn_gpu_data_mgmt.h"
#include "gromacs/mdlib/nbnxn_gpu_jit_support.h"
#include "gromacs/mdtypes/interaction_const.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/real.h"
#include "gromacs/utility/smalloc.h"

#include "nbnxn_ocl_internal.h"
#include "nbnxn_ocl_types.h"
/*! \brief This parameter should be determined heuristically from the
 * kernel execution times.
 *
 * This value is best for small systems on a single AMD Radeon R9 290X
 * (and about 5% faster than 40, which is the default for CUDA
 * devices). Larger simulation systems were quite insensitive to the
 * value of this parameter.
 */
static unsigned int gpu_min_ci_balanced_factor = 50;
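
/* Worked example (hypothetical device): nbnxn_gpu_min_ci_balanced() below
 * returns gpu_min_ci_balanced_factor * compute_units, so on a GPU with 64
 * compute units the pair search targets at least 50 * 64 = 3200 super-cluster
 * list entries to keep all compute units occupied. */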
/*! \brief Returns true if LJ combination rules are used in the non-bonded kernels.
 *
 * Full doc in nbnxn_ocl_internal.h */
bool useLjCombRule(int vdwType)
{
    return (vdwType == evdwOclCUTCOMBGEOM ||
            vdwType == evdwOclCUTCOMBLB);
}
/*! \brief Reallocates a device buffer and copies data to it if needed.
 *
 * Reallocation of the memory pointed to by d_dest and copying of the data from
 * the location pointed to by the h_src host-side pointer is done. Allocation is
 * buffered and therefore freeing is only needed if the previously allocated
 * space is not enough.
 * The H2D copy is launched in command queue s and can be done synchronously or
 * asynchronously (the default is the latter).
 * If copy_event is not NULL, on return it will contain an event object
 * identifying the H2D copy. The event can further be used to queue a wait
 * for this operation or to query profiling information.
 * OpenCL equivalent of cu_realloc_buffered.
 */
static void ocl_realloc_buffered(cl_mem *d_dest, void *h_src,
                                 size_t type_size,
                                 int *curr_size, int *curr_alloc_size,
                                 int req_size,
                                 cl_context context,
                                 cl_command_queue s,
                                 bool bAsync = true,
                                 cl_event *copy_event = NULL)
{
    if (d_dest == NULL || req_size < 0)
    {
        return;
    }

    /* reallocate only if the data does not fit, i.e. the allocation size is
       smaller than the current requested size */
    if (req_size > *curr_alloc_size)
    {
        cl_int gmx_unused cl_error;

        /* only free if the array has already been initialized */
        if (*curr_alloc_size >= 0)
        {
            freeDeviceBuffer(d_dest);
        }

        *curr_alloc_size = over_alloc_large(req_size);

        *d_dest = clCreateBuffer(context, CL_MEM_READ_WRITE, *curr_alloc_size * type_size, NULL, &cl_error);
        assert(cl_error == CL_SUCCESS);
        // TODO: handle errors, check clCreateBuffer flags
    }

    /* size could have changed without actual reallocation */
    *curr_size = req_size;

    /* upload to device */
    if (h_src)
    {
        if (bAsync)
        {
            ocl_copy_H2D_async(*d_dest, h_src, 0, *curr_size * type_size, s, copy_event);
        }
        else
        {
            ocl_copy_H2D_sync(*d_dest, h_src, 0, *curr_size * type_size, s);
        }
    }
}
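
/* Minimal usage sketch for ocl_realloc_buffered (the buffer, size variables and
 * queue names below are hypothetical): grow d_buf to hold n floats and upload
 * h_data asynchronously, recording the copy in ev so that a wait or a profiling
 * query can later be issued on it.
 *
 *   cl_event ev;
 *   ocl_realloc_buffered(&d_buf, h_data, sizeof(float),
 *                        &size, &alloc_size, n,
 *                        context, queue, true, &ev);
 */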
/*! \brief Tabulates the Ewald Coulomb force and initializes the size/scale
 *  and the table GPU array.
 *
 *  If called with an already allocated table, it just re-uploads the
 *  table.
 */
static void init_ewald_coulomb_force_table(const interaction_const_t       *ic,
                                           cl_nbparam_t                    *nbp,
                                           const gmx_device_runtime_data_t *runData)
{
    cl_mem coul_tab;
    cl_int cl_error;

    if (nbp->coulomb_tab_climg2d != NULL)
    {
        freeDeviceBuffer(&(nbp->coulomb_tab_climg2d));
    }

    /* Switched from using textures to using buffers */
    // TODO: decide which alternative is most efficient - textures or buffers.
    /*
       cl_image_format array_format;

       array_format.image_channel_data_type = CL_FLOAT;
       array_format.image_channel_order     = CL_R;

       coul_tab = clCreateImage2D(runData->context, CL_MEM_READ_WRITE | CL_MEM_COPY_HOST_PTR,
       &array_format, tabsize, 1, 0, ftmp, &cl_error);
     */

    coul_tab = clCreateBuffer(runData->context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR,
                              ic->tabq_size*sizeof(cl_float), ic->tabq_coul_F, &cl_error);
    assert(cl_error == CL_SUCCESS);
    // TODO: handle errors, check clCreateBuffer flags

    nbp->coulomb_tab_climg2d = coul_tab;
    nbp->coulomb_tab_scale   = ic->tabq_scale;
}
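
/* Note on the table layout (a sketch of the intended use, not verified against
 * the kernel source): the buffer holds ic->tabq_size tabulated Ewald force
 * values, and coulomb_tab_scale converts a pair distance r into a fractional
 * table index r*coulomb_tab_scale from which the kernels interpolate F(r). */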
/*! \brief Initializes the atomdata structure first time, it only gets filled at
    pair-search or domain decomposition.
 */
static void init_atomdata_first(cl_atomdata_t *ad, int ntypes, gmx_device_runtime_data_t *runData)
{
    cl_int cl_error;

    ad->ntypes = ntypes;

    /* An element of the shift_vec device buffer has the same size as one element
       of the host side shift_vec buffer. */
    ad->shift_vec_elem_size = sizeof(*(((nbnxn_atomdata_t*)0)->shift_vec));

    // TODO: handle errors, check clCreateBuffer flags
    ad->shift_vec = clCreateBuffer(runData->context, CL_MEM_READ_WRITE, SHIFTS * ad->shift_vec_elem_size, NULL, &cl_error);
    assert(cl_error == CL_SUCCESS);
    ad->bShiftVecUploaded = false;

    /* An element of the fshift device buffer has the same size as one element
       of the host side fshift buffer. */
    ad->fshift_elem_size = sizeof(*(((cl_nb_staging_t*)0)->fshift));

    ad->fshift = clCreateBuffer(runData->context, CL_MEM_READ_WRITE, SHIFTS * ad->fshift_elem_size, NULL, &cl_error);
    assert(cl_error == CL_SUCCESS);
    // TODO: handle errors, check clCreateBuffer flags

    ad->e_lj = clCreateBuffer(runData->context, CL_MEM_READ_WRITE, sizeof(float), NULL, &cl_error);
    assert(cl_error == CL_SUCCESS);
    // TODO: handle errors, check clCreateBuffer flags

    ad->e_el = clCreateBuffer(runData->context, CL_MEM_READ_WRITE, sizeof(float), NULL, &cl_error);
    assert(cl_error == CL_SUCCESS);
    // TODO: handle errors, check clCreateBuffer flags

    /* initialize to NULL pointers to data that is not allocated here and will
       need reallocation in nbnxn_gpu_init_atomdata */
    ad->xq = NULL;
    ad->f  = NULL;

    /* size -1 indicates that the respective array hasn't been initialized yet */
    ad->natoms = -1;
    ad->nalloc = -1;
}
/*! \brief Copies all parameters related to the cut-off from ic to nbp
 */
static void set_cutoff_parameters(cl_nbparam_t              *nbp,
                                  const interaction_const_t *ic,
                                  const NbnxnListParameters *listParams)
{
    nbp->ewald_beta        = ic->ewaldcoeff_q;
    nbp->sh_ewald          = ic->sh_ewald;
    nbp->epsfac            = ic->epsfac;
    nbp->two_k_rf          = 2.0 * ic->k_rf;
    nbp->c_rf              = ic->c_rf;
    nbp->rvdw_sq           = ic->rvdw * ic->rvdw;
    nbp->rcoulomb_sq       = ic->rcoulomb * ic->rcoulomb;
    nbp->rlistOuter_sq     = listParams->rlistOuter * listParams->rlistOuter;
    nbp->rlistInner_sq     = listParams->rlistInner * listParams->rlistInner;
    nbp->useDynamicPruning = listParams->useDynamicPruning;

    nbp->sh_lj_ewald       = ic->sh_lj_ewald;
    nbp->ewaldcoeff_lj     = ic->ewaldcoeff_lj;

    nbp->rvdw_switch       = ic->rvdw_switch;
    nbp->dispersion_shift  = ic->dispersion_shift;
    nbp->repulsion_shift   = ic->repulsion_shift;
    nbp->vdw_switch        = ic->vdw_switch;
}
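
/* Why two_k_rf and the squared cut-offs are precomputed (standard reaction-field
 * algebra, stated here for reference): with reaction field the pair energy is
 * q_i*q_j*epsfac*(1/r + k_rf*r^2 - c_rf) and the scalar force contains the term
 * 1/r^2 - 2*k_rf*r, so storing 2*k_rf saves a multiplication in the kernel
 * inner loop, while squared radii let the kernels test cut-offs against r^2
 * without taking a square root. */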
/*! \brief Returns the kinds of electrostatics and Vdw OpenCL
 *  kernels that will be used.
 *
 * Respectively, these values are from enum eelOcl and enum
 * evdwOcl. */
static void
map_interaction_types_to_gpu_kernel_flavors(const interaction_const_t *ic,
                                            int                        combRule,
                                            int                       *gpu_eeltype,
                                            int                       *gpu_vdwtype)
{
    if (ic->vdwtype == evdwCUT)
    {
        switch (ic->vdw_modifier)
        {
            case eintmodNONE:
            case eintmodPOTSHIFT:
                switch (combRule)
                {
                    case ljcrNONE:
                        *gpu_vdwtype = evdwOclCUT;
                        break;
                    case ljcrGEOM:
                        *gpu_vdwtype = evdwOclCUTCOMBGEOM;
                        break;
                    case ljcrLB:
                        *gpu_vdwtype = evdwOclCUTCOMBLB;
                        break;
                    default:
                        gmx_incons("The requested LJ combination rule is not implemented in the OpenCL GPU accelerated kernels!");
                }
                break;
            case eintmodFORCESWITCH:
                *gpu_vdwtype = evdwOclFSWITCH;
                break;
            case eintmodPOTSWITCH:
                *gpu_vdwtype = evdwOclPSWITCH;
                break;
            default:
                gmx_incons("The requested VdW interaction modifier is not implemented in the GPU accelerated kernels!");
        }
    }
    else if (ic->vdwtype == evdwPME)
    {
        if (ic->ljpme_comb_rule == ljcrGEOM)
        {
            *gpu_vdwtype = evdwOclEWALDGEOM;
        }
        else
        {
            *gpu_vdwtype = evdwOclEWALDLB;
        }
    }
    else
    {
        gmx_incons("The requested VdW type is not implemented in the GPU accelerated kernels!");
    }

    if (ic->eeltype == eelCUT)
    {
        *gpu_eeltype = eelOclCUT;
    }
    else if (EEL_RF(ic->eeltype))
    {
        *gpu_eeltype = eelOclRF;
    }
    else if ((EEL_PME(ic->eeltype) || ic->eeltype == eelEWALD))
    {
        /* Initially rcoulomb == rvdw, so it's surely not twin cut-off. */
        *gpu_eeltype = nbnxn_gpu_pick_ewald_kernel_type(false);
    }
    else
    {
        /* Shouldn't happen, as this is checked when choosing Verlet-scheme */
        gmx_incons("The requested electrostatics type is not implemented in the GPU accelerated kernels!");
    }
}
/*! \brief Initializes the nonbonded parameter data structure.
 */
static void init_nbparam(cl_nbparam_t                    *nbp,
                         const interaction_const_t       *ic,
                         const NbnxnListParameters       *listParams,
                         const nbnxn_atomdata_t          *nbat,
                         const gmx_device_runtime_data_t *runData)
{
    int    ntypes, nnbfp, nnbfp_comb;
    cl_int cl_error;

    ntypes = nbat->ntype;

    set_cutoff_parameters(nbp, ic, listParams);

    map_interaction_types_to_gpu_kernel_flavors(ic,
                                                nbat->comb_rule,
                                                &(nbp->eeltype),
                                                &(nbp->vdwtype));

    if (ic->vdwtype == evdwPME)
    {
        if (ic->ljpme_comb_rule == ljcrGEOM)
        {
            assert(nbat->comb_rule == ljcrGEOM);
        }
        else
        {
            assert(nbat->comb_rule == ljcrLB);
        }
    }
    /* generate table for PME */
    nbp->coulomb_tab_climg2d = NULL;
    if (nbp->eeltype == eelOclEWALD_TAB || nbp->eeltype == eelOclEWALD_TAB_TWIN)
    {
        init_ewald_coulomb_force_table(ic, nbp, runData);
    }
    else
    {
        // TODO: improvement needed.
        // The image2d is created here even if eeltype is not eelOclEWALD_TAB or eelOclEWALD_TAB_TWIN
        // because the OpenCL kernels don't accept NULL values for image2D parameters.

        /* Switched from using textures to using buffers */
        // TODO: decide which alternative is most efficient - textures or buffers.
        /*
           cl_image_format array_format;

           array_format.image_channel_data_type = CL_FLOAT;
           array_format.image_channel_order     = CL_R;

           nbp->coulomb_tab_climg2d = clCreateImage2D(runData->context, CL_MEM_READ_WRITE,
            &array_format, 1, 1, 0, NULL, &cl_error);
         */

        nbp->coulomb_tab_climg2d = clCreateBuffer(runData->context, CL_MEM_READ_ONLY, sizeof(cl_float), NULL, &cl_error);
        // TODO: handle errors
    }

    nnbfp      = 2*ntypes*ntypes;
    nnbfp_comb = 2*ntypes;

    /* Switched from using textures to using buffers */
    // TODO: decide which alternative is most efficient - textures or buffers.
    /*
       cl_image_format array_format;

       array_format.image_channel_data_type = CL_FLOAT;
       array_format.image_channel_order     = CL_R;

       nbp->nbfp_climg2d = clCreateImage2D(runData->context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR,
        &array_format, nnbfp, 1, 0, nbat->nbfp, &cl_error);
     */

    nbp->nbfp_climg2d = clCreateBuffer(runData->context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, nnbfp*sizeof(cl_float), nbat->nbfp, &cl_error);
    assert(cl_error == CL_SUCCESS);
    // TODO: handle errors

    if (ic->vdwtype == evdwPME)
    {
        /* Switched from using textures to using buffers */
        // TODO: decide which alternative is most efficient - textures or buffers.
        /* nbp->nbfp_comb_climg2d = clCreateImage2D(runData->context, CL_MEM_READ_WRITE | CL_MEM_COPY_HOST_PTR,
            &array_format, nnbfp_comb, 1, 0, nbat->nbfp_comb, &cl_error);*/
        nbp->nbfp_comb_climg2d = clCreateBuffer(runData->context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, nnbfp_comb*sizeof(cl_float), nbat->nbfp_comb, &cl_error);
        assert(cl_error == CL_SUCCESS);
        // TODO: handle errors
    }
    else
    {
        // TODO: improvement needed.
        // The image2d is created here even if vdwtype is not evdwPME because the OpenCL kernels
        // don't accept NULL values for image2D parameters.
        /* Switched from using textures to using buffers */
        // TODO: decide which alternative is most efficient - textures or buffers.
        /* nbp->nbfp_comb_climg2d = clCreateImage2D(runData->context, CL_MEM_READ_WRITE,
            &array_format, 1, 1, 0, NULL, &cl_error);*/
        nbp->nbfp_comb_climg2d = clCreateBuffer(runData->context, CL_MEM_READ_ONLY, sizeof(cl_float), NULL, &cl_error);
        assert(cl_error == CL_SUCCESS);
        // TODO: handle errors
    }
}
//! This function is documented in the header file
void nbnxn_gpu_pme_loadbal_update_param(const nonbonded_verlet_t  *nbv,
                                        const interaction_const_t *ic,
                                        const NbnxnListParameters *listParams)
{
    if (!nbv || nbv->grp[0].kernel_type != nbnxnk8x8x8_GPU)
    {
        return;
    }
    gmx_nbnxn_ocl_t *nb  = nbv->gpu_nbv;
    cl_nbparam_t    *nbp = nb->nbparam;

    set_cutoff_parameters(nbp, ic, listParams);

    nbp->eeltype = nbnxn_gpu_pick_ewald_kernel_type(ic->rcoulomb != ic->rvdw);

    init_ewald_coulomb_force_table(ic, nb->nbparam, nb->dev_rundata);
}
/*! \brief Initializes the pair list data structure.
 */
static void init_plist(cl_plist_t *pl)
{
    /* initialize to NULL pointers to data that is not allocated here and will
       need reallocation in nbnxn_gpu_init_pairlist */
    pl->sci   = NULL;
    pl->cj4   = NULL;
    pl->imask = NULL;
    pl->excl  = NULL;

    /* size -1 indicates that the respective array hasn't been initialized yet */
    pl->na_c          = -1;
    pl->nsci          = -1;
    pl->sci_nalloc    = -1;
    pl->ncj4          = -1;
    pl->cj4_nalloc    = -1;
    pl->nimask        = -1;
    pl->imask_nalloc  = -1;
    pl->nexcl         = -1;
    pl->excl_nalloc   = -1;
    pl->haveFreshList = false;
}
/*! \brief Initializes the timer data structure.
 */
static void init_timers(cl_timers_t *t,
                        bool         bUseTwoStreams)
{
    for (int i = 0; i <= (bUseTwoStreams ? 1 : 0); i++)
    {
        t->didPairlistH2D[i]  = false;
        t->didPrune[i]        = false;
        t->didRollingPrune[i] = false;
    }
}
/*! \brief Initializes the timings data structure.
 */
static void init_timings(gmx_wallclock_gpu_nbnxn_t *t)
{
    int i, j;

    t->nb_h2d_t = 0.0;
    t->nb_d2h_t = 0.0;
    t->nb_c     = 0;
    t->pl_h2d_t = 0.0;
    t->pl_h2d_c = 0;
    for (i = 0; i < 2; i++)
    {
        for (j = 0; j < 2; j++)
        {
            t->ktime[i][j].t = 0.0;
            t->ktime[i][j].c = 0;
        }
    }

    t->pruneTime.c        = 0;
    t->pruneTime.t        = 0.0;
    t->dynamicPruneTime.c = 0;
    t->dynamicPruneTime.t = 0.0;
}
/*! \brief Creates context for OpenCL GPU given by \p mygpu
 *
 * A fatal error results if creation fails.
 *
 * \param[inout] runtimeData runtime data including program and context
 * \param[in]    devInfo     device info struct
 * \param[in]    rank        MPI rank (for error reporting)
 */
static void
nbnxn_gpu_create_context(gmx_device_runtime_data_t *runtimeData,
                         const gmx_device_info_t   *devInfo,
                         int                        rank)
{
    cl_context_properties context_properties[3];
    cl_platform_id        platform_id;
    cl_device_id          device_id;
    cl_context            context;
    cl_int                cl_error;

    assert(runtimeData != NULL);
    assert(devInfo != NULL);

    platform_id = devInfo->ocl_gpu_id.ocl_platform_id;
    device_id   = devInfo->ocl_gpu_id.ocl_device_id;

    context_properties[0] = CL_CONTEXT_PLATFORM;
    context_properties[1] = (cl_context_properties) platform_id;
    context_properties[2] = 0; /* Terminates the list of properties */

    context = clCreateContext(context_properties, 1, &device_id, NULL, NULL, &cl_error);
    if (CL_SUCCESS != cl_error)
    {
        gmx_fatal(FARGS, "On rank %d failed to create context for GPU #%s:\n OpenCL error %d: %s",
                  rank,
                  devInfo->device_name,
                  cl_error, ocl_get_error_string(cl_error).c_str());
    }

    runtimeData->context = context;
}
/*! \brief Creates an OpenCL kernel with the given name from the program held in \p nb. */
static cl_kernel nbnxn_gpu_create_kernel(gmx_nbnxn_ocl_t *nb,
                                         const char      *kernel_name)
{
    cl_kernel kernel;
    cl_int    cl_error;

    kernel = clCreateKernel(nb->dev_rundata->program, kernel_name, &cl_error);
    if (CL_SUCCESS != cl_error)
    {
        gmx_fatal(FARGS, "Failed to create kernel '%s' for GPU #%s: OpenCL error %d",
                  kernel_name,
                  nb->dev_info->device_name,
                  cl_error);
    }

    return kernel;
}
/*! \brief Clears nonbonded shift force output array and energy outputs on the GPU.
 */
static void
nbnxn_ocl_clear_e_fshift(gmx_nbnxn_ocl_t *nb)
{
    cl_int           cl_error;
    cl_atomdata_t *  adat = nb->atdat;
    cl_command_queue ls   = nb->stream[eintLocal];

    size_t           local_work_size[3]  = {1, 1, 1};
    size_t           global_work_size[3] = {1, 1, 1};

    cl_int           shifts = SHIFTS*3;

    cl_int           arg_no;

    cl_kernel        zero_e_fshift = nb->kernel_zero_e_fshift;

    local_work_size[0] = 64;
    // Round the total number of threads up from the array size
    global_work_size[0] = ((shifts + local_work_size[0] - 1)/local_work_size[0])*local_work_size[0];

    arg_no    = 0;
    cl_error  = clSetKernelArg(zero_e_fshift, arg_no++, sizeof(cl_mem), &(adat->fshift));
    cl_error |= clSetKernelArg(zero_e_fshift, arg_no++, sizeof(cl_mem), &(adat->e_lj));
    cl_error |= clSetKernelArg(zero_e_fshift, arg_no++, sizeof(cl_mem), &(adat->e_el));
    cl_error |= clSetKernelArg(zero_e_fshift, arg_no++, sizeof(cl_uint), &shifts);
    GMX_ASSERT(cl_error == CL_SUCCESS, ocl_get_error_string(cl_error).c_str());

    cl_error = clEnqueueNDRangeKernel(ls, zero_e_fshift, 3, NULL, global_work_size, local_work_size, 0, NULL, NULL);
    GMX_ASSERT(cl_error == CL_SUCCESS, ocl_get_error_string(cl_error).c_str());
}
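
/* Work-size arithmetic in nbnxn_ocl_clear_e_fshift above, worked through: with
 * SHIFTS = 45 shift vectors there are 45*3 = 135 floats to clear; rounding up
 * to the work-group size of 64 gives global_work_size[0] = ((135 + 63)/64)*64
 * = 192, i.e. three work-groups, with the kernel expected to bounds-check its
 * global index against the array length passed as the last argument. */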
/*! \brief Initializes the OpenCL kernel pointers of the nbnxn_ocl_ptr_t input data structure. */
static void nbnxn_gpu_init_kernels(gmx_nbnxn_ocl_t *nb)
{
    /* Init to 0 main kernel arrays */
    /* They will be later on initialized in select_nbnxn_kernel */
    // TODO: consider always creating all variants of the kernels here so that there is no
    // need for late call to clCreateKernel -- if that gives any advantage?
    memset(nb->kernel_ener_noprune_ptr, 0, sizeof(nb->kernel_ener_noprune_ptr));
    memset(nb->kernel_ener_prune_ptr, 0, sizeof(nb->kernel_ener_prune_ptr));
    memset(nb->kernel_noener_noprune_ptr, 0, sizeof(nb->kernel_noener_noprune_ptr));
    memset(nb->kernel_noener_prune_ptr, 0, sizeof(nb->kernel_noener_prune_ptr));

    /* Init pruning kernels
     *
     * TODO: we could avoid creating kernels if dynamic pruning is turned off,
     * but ATM that depends on force flags not passed into the initialization.
     */
    nb->kernel_pruneonly[epruneFirst]   = nbnxn_gpu_create_kernel(nb, "nbnxn_kernel_prune_opencl");
    nb->kernel_pruneonly[epruneRolling] = nbnxn_gpu_create_kernel(nb, "nbnxn_kernel_prune_rolling_opencl");

    /* Init auxiliary kernels */
    nb->kernel_memset_f      = nbnxn_gpu_create_kernel(nb, "memset_f");
    nb->kernel_memset_f2     = nbnxn_gpu_create_kernel(nb, "memset_f2");
    nb->kernel_memset_f3     = nbnxn_gpu_create_kernel(nb, "memset_f3");
    nb->kernel_zero_e_fshift = nbnxn_gpu_create_kernel(nb, "zero_e_fshift");
}
/*! \brief Initializes simulation constant data.
 *
 *  Initializes members of the atomdata and nbparam structs and
 *  clears e/fshift output buffers.
 */
static void nbnxn_ocl_init_const(gmx_nbnxn_ocl_t           *nb,
                                 const interaction_const_t *ic,
                                 const NbnxnListParameters *listParams,
                                 const nbnxn_atomdata_t    *nbat)
{
    init_atomdata_first(nb->atdat, nbat->ntype, nb->dev_rundata);
    init_nbparam(nb->nbparam, ic, listParams, nbat, nb->dev_rundata);
}
//! This function is documented in the header file
void nbnxn_gpu_init(gmx_nbnxn_ocl_t          **p_nb,
                    const gmx_device_info_t   *deviceInfo,
                    const interaction_const_t *ic,
                    const NbnxnListParameters *listParams,
                    const nbnxn_atomdata_t    *nbat,
                    int                        rank,
                    gmx_bool                   bLocalAndNonlocal)
{
    gmx_nbnxn_ocl_t            *nb;
    cl_int                      cl_error;
    cl_command_queue_properties queue_properties;

    assert(ic);

    if (p_nb == NULL)
    {
        return;
    }

    snew(nb, 1);
    snew(nb->atdat, 1);
    snew(nb->nbparam, 1);
    snew(nb->plist[eintLocal], 1);
    if (bLocalAndNonlocal)
    {
        snew(nb->plist[eintNonlocal], 1);
    }

    nb->bUseTwoStreams = bLocalAndNonlocal;

    nb->timers = new cl_timers_t();
    snew(nb->timings, 1);

    /* set device info, just point it to the right GPU among the detected ones */
    nb->dev_info = deviceInfo;
    snew(nb->dev_rundata, 1);

    /* init nbst */
    ocl_pmalloc((void**)&nb->nbst.e_lj, sizeof(*nb->nbst.e_lj));
    ocl_pmalloc((void**)&nb->nbst.e_el, sizeof(*nb->nbst.e_el));
    ocl_pmalloc((void**)&nb->nbst.fshift, SHIFTS * sizeof(*nb->nbst.fshift));

    init_plist(nb->plist[eintLocal]);

    /* OpenCL timing disabled if GMX_DISABLE_GPU_TIMING is defined. */
    nb->bDoTime = (getenv("GMX_DISABLE_GPU_TIMING") == NULL);

    /* Create queues only after bDoTime has been initialized */
    if (nb->bDoTime)
    {
        queue_properties = CL_QUEUE_PROFILING_ENABLE;
    }
    else
    {
        queue_properties = 0;
    }

    nbnxn_gpu_create_context(nb->dev_rundata, nb->dev_info, rank);

    /* local/non-local GPU streams */
    nb->stream[eintLocal] = clCreateCommandQueue(nb->dev_rundata->context, nb->dev_info->ocl_gpu_id.ocl_device_id, queue_properties, &cl_error);
    if (CL_SUCCESS != cl_error)
    {
        gmx_fatal(FARGS, "On rank %d failed to create command queue for GPU #%s: OpenCL error %d",
                  rank,
                  nb->dev_info->device_name,
                  cl_error);
    }

    if (nb->bUseTwoStreams)
    {
        init_plist(nb->plist[eintNonlocal]);

        nb->stream[eintNonlocal] = clCreateCommandQueue(nb->dev_rundata->context, nb->dev_info->ocl_gpu_id.ocl_device_id, queue_properties, &cl_error);
        if (CL_SUCCESS != cl_error)
        {
            gmx_fatal(FARGS, "On rank %d failed to create command queue for GPU #%s: OpenCL error %d",
                      rank,
                      nb->dev_info->device_name,
                      cl_error);
        }
    }

    if (nb->bDoTime)
    {
        init_timers(nb->timers, nb->bUseTwoStreams);
        init_timings(nb->timings);
    }

    nbnxn_ocl_init_const(nb, ic, listParams, nbat);

    /* Enable LJ param manual prefetch for AMD or if we request through env. var.
     * TODO: decide about NVIDIA
     */
    nb->bPrefetchLjParam =
        (getenv("GMX_OCL_DISABLE_I_PREFETCH") == NULL) &&
        ((nb->dev_info->vendor_e == OCL_VENDOR_AMD) || (getenv("GMX_OCL_ENABLE_I_PREFETCH") != NULL));

    /* NOTE: in CUDA we pick L1 cache configuration for the nbnxn kernels here,
     * but sadly this is not supported in OpenCL (yet?). Consider adding it if
     * it becomes supported.
     */
    nbnxn_gpu_compile_kernels(nb);
    nbnxn_gpu_init_kernels(nb);

    /* clear energy and shift force outputs */
    nbnxn_ocl_clear_e_fshift(nb);

    *p_nb = nb;

    if (debug)
    {
        fprintf(debug, "Initialized OpenCL data structures.\n");
    }
}
/*! \brief Clears the first natoms_clear elements of the GPU nonbonded force output array.
 */
static void nbnxn_ocl_clear_f(gmx_nbnxn_ocl_t *nb, int natoms_clear)
{
    if (natoms_clear == 0)
    {
        return;
    }

    cl_atomdata_t *  adat  = nb->atdat;
    cl_command_queue ls    = nb->stream[eintLocal];
    cl_float         value = 0.0f;

    size_t           local_work_size[3]  = {1, 1, 1};
    size_t           global_work_size[3] = {1, 1, 1};

    cl_int           arg_no;

    cl_kernel        memset_f = nb->kernel_memset_f;

    cl_uint          natoms_flat = natoms_clear * (sizeof(rvec)/sizeof(real));

    local_work_size[0] = 64;
    // Round the total number of threads up from the array size
    global_work_size[0] = ((natoms_flat + local_work_size[0] - 1)/local_work_size[0])*local_work_size[0];

    cl_int gmx_used_in_debug cl_error;

    arg_no    = 0;
    cl_error  = clSetKernelArg(memset_f, arg_no++, sizeof(cl_mem), &(adat->f));
    cl_error |= clSetKernelArg(memset_f, arg_no++, sizeof(cl_float), &value);
    cl_error |= clSetKernelArg(memset_f, arg_no++, sizeof(cl_uint), &natoms_flat);
    assert(cl_error == CL_SUCCESS);

    cl_error = clEnqueueNDRangeKernel(ls, memset_f, 3, NULL, global_work_size, local_work_size, 0, NULL, NULL);
    assert(cl_error == CL_SUCCESS);
}
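
/* Flattening example for nbnxn_ocl_clear_f above: an rvec holds three reals, so
 * natoms_flat = natoms_clear*3 floats are cleared; e.g. clearing 1000 atoms
 * zeroes 3000 floats with the global size rounded up to ((3000 + 63)/64)*64 =
 * 3008 work-items. */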
//! This function is documented in the header file
void
nbnxn_gpu_clear_outputs(gmx_nbnxn_ocl_t *nb,
                        int              flags)
{
    nbnxn_ocl_clear_f(nb, nb->atdat->natoms);
    /* clear shift force array and energies if the outputs were
       used in the current step */
    if (flags & GMX_FORCE_VIRIAL)
    {
        nbnxn_ocl_clear_e_fshift(nb);
    }

    /* kick off buffer clearing kernel to ensure concurrency with constraints/update */
    cl_int gmx_unused cl_error;
    cl_error = clFlush(nb->stream[eintLocal]);
    assert(CL_SUCCESS == cl_error);
}
//! This function is documented in the header file
void nbnxn_gpu_init_pairlist(gmx_nbnxn_ocl_t        *nb,
                             const nbnxn_pairlist_t *h_plist,
                             int                     iloc)
{
    if (canSkipWork(nb, iloc))
    {
        return;
    }

    char             sbuf[STRLEN];
    bool             bDoTime = nb->bDoTime;
    cl_command_queue stream  = nb->stream[iloc];
    cl_plist_t      *d_plist = nb->plist[iloc];

    if (d_plist->na_c < 0)
    {
        d_plist->na_c = h_plist->na_ci;
    }
    else
    {
        if (d_plist->na_c != h_plist->na_ci)
        {
            sprintf(sbuf, "In nbnxn_gpu_init_pairlist: the #atoms per cell has changed (from %d to %d)",
                    d_plist->na_c, h_plist->na_ci);
            gmx_incons(sbuf);
        }
    }

    if (bDoTime)
    {
        nb->timers->pl_h2d[iloc].openTimingRegion(stream);
        nb->timers->didPairlistH2D[iloc] = true;
    }

    ocl_realloc_buffered(&d_plist->sci, h_plist->sci, sizeof(nbnxn_sci_t),
                         &d_plist->nsci, &d_plist->sci_nalloc,
                         h_plist->nsci,
                         nb->dev_rundata->context,
                         stream, true, bDoTime ? nb->timers->pl_h2d[iloc].fetchNextEvent() : nullptr);

    ocl_realloc_buffered(&d_plist->cj4, h_plist->cj4, sizeof(nbnxn_cj4_t),
                         &d_plist->ncj4, &d_plist->cj4_nalloc,
                         h_plist->ncj4,
                         nb->dev_rundata->context,
                         stream, true, bDoTime ? nb->timers->pl_h2d[iloc].fetchNextEvent() : nullptr);

    /* this call only allocates space on the device (no data is transferred) - no timing as well! */
    ocl_realloc_buffered(&d_plist->imask, NULL, sizeof(unsigned int),
                         &d_plist->nimask, &d_plist->imask_nalloc,
                         h_plist->ncj4*c_nbnxnGpuClusterpairSplit,
                         nb->dev_rundata->context,
                         stream);

    ocl_realloc_buffered(&d_plist->excl, h_plist->excl, sizeof(nbnxn_excl_t),
                         &d_plist->nexcl, &d_plist->excl_nalloc,
                         h_plist->nexcl,
                         nb->dev_rundata->context,
                         stream, true, bDoTime ? nb->timers->pl_h2d[iloc].fetchNextEvent() : nullptr);

    if (bDoTime)
    {
        nb->timers->pl_h2d[iloc].closeTimingRegion(stream);
    }

    /* need to prune the pair list during the next step */
    d_plist->haveFreshList = true;
}
//! This function is documented in the header file
void nbnxn_gpu_upload_shiftvec(gmx_nbnxn_ocl_t        *nb,
                               const nbnxn_atomdata_t *nbatom)
{
    cl_atomdata_t   *adat = nb->atdat;
    cl_command_queue ls   = nb->stream[eintLocal];

    /* only if we have a dynamic box */
    if (nbatom->bDynamicBox || !adat->bShiftVecUploaded)
    {
        ocl_copy_H2D_async(adat->shift_vec, nbatom->shift_vec, 0,
                           SHIFTS * adat->shift_vec_elem_size, ls, NULL);
        adat->bShiftVecUploaded = true;
    }
}
//! This function is documented in the header file
void nbnxn_gpu_init_atomdata(gmx_nbnxn_ocl_t        *nb,
                             const nbnxn_atomdata_t *nbat)
{
    cl_int           cl_error;
    int              nalloc, natoms;
    bool             realloced;
    bool             bDoTime = nb->bDoTime;
    cl_timers_t     *timers  = nb->timers;
    cl_atomdata_t   *d_atdat = nb->atdat;
    cl_command_queue ls      = nb->stream[eintLocal];

    natoms    = nbat->natoms;
    realloced = false;

    if (bDoTime)
    {
        /* time async copy */
        timers->atdat.openTimingRegion(ls);
    }

    /* need to reallocate if we have to copy more atoms than the amount of space
       available and only allocate if we haven't initialized yet, i.e d_atdat->natoms == -1 */
    if (natoms > d_atdat->nalloc)
    {
        nalloc = over_alloc_small(natoms);

        /* free up first if the arrays have already been initialized */
        if (d_atdat->nalloc != -1)
        {
            freeDeviceBuffer(&d_atdat->f);
            freeDeviceBuffer(&d_atdat->xq);
            freeDeviceBuffer(&d_atdat->lj_comb);
            freeDeviceBuffer(&d_atdat->atom_types);
        }

        d_atdat->f_elem_size = sizeof(rvec);

        // TODO: handle errors, check clCreateBuffer flags
        d_atdat->f = clCreateBuffer(nb->dev_rundata->context, CL_MEM_READ_WRITE, nalloc * d_atdat->f_elem_size, NULL, &cl_error);
        assert(CL_SUCCESS == cl_error);

        // TODO: change the flag to read-only
        d_atdat->xq = clCreateBuffer(nb->dev_rundata->context, CL_MEM_READ_WRITE, nalloc * sizeof(cl_float4), NULL, &cl_error);
        assert(CL_SUCCESS == cl_error);
        // TODO: handle errors, check clCreateBuffer flags

        if (useLjCombRule(nb->nbparam->vdwtype))
        {
            // TODO: change the flag to read-only
            d_atdat->lj_comb = clCreateBuffer(nb->dev_rundata->context, CL_MEM_READ_WRITE, nalloc * sizeof(cl_float2), NULL, &cl_error);
            assert(CL_SUCCESS == cl_error);
            // TODO: handle errors, check clCreateBuffer flags
        }
        else
        {
            // TODO: change the flag to read-only
            d_atdat->atom_types = clCreateBuffer(nb->dev_rundata->context, CL_MEM_READ_WRITE, nalloc * sizeof(int), NULL, &cl_error);
            assert(CL_SUCCESS == cl_error);
            // TODO: handle errors, check clCreateBuffer flags
        }

        d_atdat->nalloc = nalloc;
        realloced       = true;
    }

    d_atdat->natoms       = natoms;
    d_atdat->natoms_local = nbat->natoms_local;

    /* need to clear GPU f output if realloc happened */
    if (realloced)
    {
        nbnxn_ocl_clear_f(nb, nalloc);
    }

    if (useLjCombRule(nb->nbparam->vdwtype))
    {
        ocl_copy_H2D_async(d_atdat->lj_comb, nbat->lj_comb, 0,
                           natoms*sizeof(cl_float2), ls, bDoTime ? timers->atdat.fetchNextEvent() : nullptr);
    }
    else
    {
        ocl_copy_H2D_async(d_atdat->atom_types, nbat->type, 0,
                           natoms*sizeof(int), ls, bDoTime ? timers->atdat.fetchNextEvent() : nullptr);
    }

    if (bDoTime)
    {
        timers->atdat.closeTimingRegion(ls);
    }

    /* kick off the tasks enqueued above to ensure concurrency with the search */
    cl_error = clFlush(ls);
    assert(CL_SUCCESS == cl_error);
}
/*! \brief Releases an OpenCL kernel pointer */
static void free_kernel(cl_kernel *kernel_ptr)
{
    cl_int gmx_unused cl_error;

    assert(NULL != kernel_ptr);

    if (*kernel_ptr)
    {
        cl_error = clReleaseKernel(*kernel_ptr);
        assert(cl_error == CL_SUCCESS);

        *kernel_ptr = NULL;
    }
}
/*! \brief Releases a list of OpenCL kernel pointers */
static void free_kernels(cl_kernel *kernels, int count)
{
    int i;

    for (i = 0; i < count; i++)
    {
        free_kernel(kernels + i);
    }
}
/*! \brief Free the OpenCL runtime data (context and program).
 *
 *  The function releases the OpenCL context and program associated with the
 *  device that the calling PP rank is running on.
 *
 *  \param runData [in]  pointer to the structure with runtime data.
 */
static void free_gpu_device_runtime_data(gmx_device_runtime_data_t *runData)
{
    if (runData == NULL)
    {
        return;
    }

    cl_int gmx_unused cl_error;

    if (runData->context)
    {
        cl_error         = clReleaseContext(runData->context);
        runData->context = NULL;
        assert(CL_SUCCESS == cl_error);
    }

    if (runData->program)
    {
        cl_error         = clReleaseProgram(runData->program);
        runData->program = NULL;
        assert(CL_SUCCESS == cl_error);
    }
}
//! This function is documented in the header file
void nbnxn_gpu_free(gmx_nbnxn_ocl_t *nb)
{
    if (nb == NULL)
    {
        return;
    }

    /* Free kernels */
    int kernel_count = sizeof(nb->kernel_ener_noprune_ptr) / sizeof(nb->kernel_ener_noprune_ptr[0][0]);
    free_kernels((cl_kernel *)nb->kernel_ener_noprune_ptr, kernel_count);

    kernel_count = sizeof(nb->kernel_ener_prune_ptr) / sizeof(nb->kernel_ener_prune_ptr[0][0]);
    free_kernels((cl_kernel *)nb->kernel_ener_prune_ptr, kernel_count);

    kernel_count = sizeof(nb->kernel_noener_noprune_ptr) / sizeof(nb->kernel_noener_noprune_ptr[0][0]);
    free_kernels((cl_kernel *)nb->kernel_noener_noprune_ptr, kernel_count);

    kernel_count = sizeof(nb->kernel_noener_prune_ptr) / sizeof(nb->kernel_noener_prune_ptr[0][0]);
    free_kernels((cl_kernel *)nb->kernel_noener_prune_ptr, kernel_count);

    free_kernel(&(nb->kernel_memset_f));
    free_kernel(&(nb->kernel_memset_f2));
    free_kernel(&(nb->kernel_memset_f3));
    free_kernel(&(nb->kernel_zero_e_fshift));

    /* Free atdat */
    freeDeviceBuffer(&(nb->atdat->xq));
    freeDeviceBuffer(&(nb->atdat->f));
    freeDeviceBuffer(&(nb->atdat->e_lj));
    freeDeviceBuffer(&(nb->atdat->e_el));
    freeDeviceBuffer(&(nb->atdat->fshift));
    freeDeviceBuffer(&(nb->atdat->lj_comb));
    freeDeviceBuffer(&(nb->atdat->atom_types));
    freeDeviceBuffer(&(nb->atdat->shift_vec));
    sfree(nb->atdat);

    /* Free nbparam */
    freeDeviceBuffer(&(nb->nbparam->nbfp_climg2d));
    freeDeviceBuffer(&(nb->nbparam->nbfp_comb_climg2d));
    freeDeviceBuffer(&(nb->nbparam->coulomb_tab_climg2d));
    sfree(nb->nbparam);

    /* Free plist */
    auto *plist = nb->plist[eintLocal];
    freeDeviceBuffer(&plist->sci);
    freeDeviceBuffer(&plist->cj4);
    freeDeviceBuffer(&plist->imask);
    freeDeviceBuffer(&plist->excl);
    sfree(plist);
    if (nb->bUseTwoStreams)
    {
        auto *plist_nl = nb->plist[eintNonlocal];
        freeDeviceBuffer(&plist_nl->sci);
        freeDeviceBuffer(&plist_nl->cj4);
        freeDeviceBuffer(&plist_nl->imask);
        freeDeviceBuffer(&plist_nl->excl);
        sfree(plist_nl);
    }

    /* Free nbst */
    ocl_pfree(nb->nbst.e_lj);
    nb->nbst.e_lj = NULL;

    ocl_pfree(nb->nbst.e_el);
    nb->nbst.e_el = NULL;

    ocl_pfree(nb->nbst.fshift);
    nb->nbst.fshift = NULL;

    /* Free command queues */
    clReleaseCommandQueue(nb->stream[eintLocal]);
    nb->stream[eintLocal] = NULL;
    if (nb->bUseTwoStreams)
    {
        clReleaseCommandQueue(nb->stream[eintNonlocal]);
        nb->stream[eintNonlocal] = NULL;
    }
    /* Free other events */
    if (nb->nonlocal_done)
    {
        clReleaseEvent(nb->nonlocal_done);
        nb->nonlocal_done = NULL;
    }
    if (nb->misc_ops_and_local_H2D_done)
    {
        clReleaseEvent(nb->misc_ops_and_local_H2D_done);
        nb->misc_ops_and_local_H2D_done = NULL;
    }

    free_gpu_device_runtime_data(nb->dev_rundata);
    sfree(nb->dev_rundata);

    /* Free timers and timings */
    delete nb->timers;
    sfree(nb->timings);
    sfree(nb);

    if (debug)
    {
        fprintf(debug, "Cleaned up OpenCL data structures.\n");
    }
}
//! This function is documented in the header file
gmx_wallclock_gpu_nbnxn_t *nbnxn_gpu_get_timings(gmx_nbnxn_ocl_t *nb)
{
    return (nb != nullptr && nb->bDoTime) ? nb->timings : nullptr;
}
//! This function is documented in the header file
void nbnxn_gpu_reset_timings(nonbonded_verlet_t* nbv)
{
    if (nbv->gpu_nbv && nbv->gpu_nbv->bDoTime)
    {
        init_timings(nbv->gpu_nbv->timings);
    }
}
//! This function is documented in the header file
int nbnxn_gpu_min_ci_balanced(gmx_nbnxn_ocl_t *nb)
{
    return nb != NULL ?
           gpu_min_ci_balanced_factor * nb->dev_info->compute_units : 0;
}
//! This function is documented in the header file
gmx_bool nbnxn_gpu_is_kernel_ewald_analytical(const gmx_nbnxn_ocl_t *nb)
{
    return ((nb->nbparam->eeltype == eelOclEWALD_ANA) ||
            (nb->nbparam->eeltype == eelOclEWALD_ANA_TWIN));
}