/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2016,2017,2018,2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *  \brief OpenCL non-bonded kernel.
 *
 *  OpenCL 1.2 support is expected.
 *
 *  \author Anca Hamuraru <anca@streamcomputing.eu>
 *  \author Szilárd Páll <pall.szilard@gmail.com>
 *  \ingroup module_nbnxm
 */
/* Currently we enable CJ prefetch for AMD/NVIDIA and disable it for the "nowarp" kernel.
 * Note that this should precede the kernel_utils include.
 */
#include "nbnxm_ocl_kernel_utils.clh"
/////////////////////////////////////////////////////////////////////////////////////////////////
#if defined EL_EWALD_ANA || defined EL_EWALD_TAB
/* Note: convenience macro, needs to be undef-ed at the end of the file. */
#    define EL_EWALD_ANY
#endif
#if defined EL_EWALD_ANY || defined EL_RF || defined LJ_EWALD || (defined EL_CUTOFF && defined CALC_ENERGIES)
/* Macro to control the calculation of exclusion forces in the kernel.
 * We do that with Ewald (elec/vdw) and RF. Cut-off only has exclusion
 * energy terms.
 *
 * Note: convenience macro, needs to be undef-ed at the end of the file.
 */
#    define EXCLUSION_FORCES
#endif
#if defined LJ_EWALD_COMB_GEOM || defined LJ_EWALD_COMB_LB
/* Note: convenience macro, needs to be undef-ed at the end of the file. */
#    define LJ_EWALD
#endif
#if defined LJ_COMB_GEOM || defined LJ_COMB_LB
/* Note: convenience macro, needs to be undef-ed at the end of the file. */
#    define LJ_COMB
#endif
/*
   Kernel launch parameters:
    - #blocks  = #pair lists, blockId = pair list Id
    - #threads = CL_SIZE^2
    - shmem    = CL_SIZE^2 * sizeof(float)

    Each thread calculates an i force-component taking one pair of i-j atoms.

   TODO: implement 128 threads/wavefront by porting over the NTHREAD_Z/j4 loop
   "horizontal splitting" over threads.

   NOTE: NB_KERNEL_FUNC_NAME differs from its CUDA equivalent: OpenCL has no support for
   variadic macros, so this version takes exactly two arguments. Thus, if more strings
   need to be appended, a new macro must be written or the strings must be appended here
   directly.
 */
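/* Illustration (not part of the build; the concrete names and host calls below are assumed for
 * the example only): with the electrostatics/VdW flavor macros set by the per-kernel defines,
 * NB_KERNEL_FUNC_NAME(nbnxn_kernel, _VF_opencl) pastes a kernel name of the form
 *     nbnxn_kernel_<ElecFlavor>_<VdwFlavor>_VF_opencl
 * e.g. nbnxn_kernel_ElecCut_VdwLJ_VF_opencl. A host-side launch matching the geometry described
 * above would look roughly like:
 *
 *     size_t local[3]  = { CL_SIZE, CL_SIZE, 1 };
 *     size_t global[3] = { CL_SIZE * numSci, CL_SIZE, 1 };  // one work-group per pair-list entry
 *     clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global, local, 0, NULL, NULL);
 *
 * where numSci stands for the number of i super-cluster (pl_sci) entries.
 */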
__attribute__((reqd_work_group_size(CL_SIZE, CL_SIZE, 1)))
#ifdef cl_intel_required_subgroup_size
__attribute__((intel_reqd_sub_group_size(SUBGROUP_SIZE)))
#endif
#ifdef PRUNE_NBL
#    ifdef CALC_ENERGIES
__kernel void NB_KERNEL_FUNC_NAME(nbnxn_kernel, _VF_prune_opencl)
#    else
__kernel void NB_KERNEL_FUNC_NAME(nbnxn_kernel, _F_prune_opencl)
#    endif
#else
#    ifdef CALC_ENERGIES
__kernel void NB_KERNEL_FUNC_NAME(nbnxn_kernel, _VF_opencl)
#    else
__kernel void NB_KERNEL_FUNC_NAME(nbnxn_kernel, _F_opencl)
#    endif
#endif
(
    int ntypes,                                              /* IN */
    cl_nbparam_params_t nbparam_params,                      /* IN */
    const __global float4 *restrict xq,                      /* IN */
    __global float *restrict f,                              /* OUT stores float3 values */
    __global float *restrict gmx_unused e_lj,                /* OUT */
    __global float *restrict gmx_unused e_el,                /* OUT */
    __global float *restrict fshift,                         /* OUT stores float3 values */
    const __global float2 *restrict lj_comb,                 /* IN stores float2 values */
    const __global int *restrict atom_types,                 /* IN */
    const __global float *restrict shift_vec,                /* IN stores float3 values */
    __constant const float* gmx_unused nbfp_climg2d,         /* IN */
    __constant const float* gmx_unused nbfp_comb_climg2d,    /* IN */
    __constant const float* gmx_unused coulomb_tab_climg2d,  /* IN */
    const __global nbnxn_sci_t* pl_sci,                      /* IN */
    __global nbnxn_cj4_t* pl_cj4,                            /* OUT / IN */
    const __global nbnxn_excl_t* excl,                       /* IN */
    int bCalcFshift,                                         /* IN */
    __local float4 *xqib                                     /* Pointer to dyn alloc'ed shmem */
)
{
    /* convenience variables */
    const cl_nbparam_params_t *const nbparam = &nbparam_params;

    const float rcoulomb_sq = nbparam->rcoulomb_sq;
#ifdef VDW_CUTOFF_CHECK
    const float rvdw_sq = nbparam_params.rvdw_sq;
#endif
    const float two_k_rf         = nbparam->two_k_rf;
    const float coulomb_tab_scale = nbparam->coulomb_tab_scale;
    const float beta2            = nbparam->ewald_beta * nbparam->ewald_beta;
    const float beta3            = nbparam->ewald_beta * nbparam->ewald_beta * nbparam->ewald_beta;
    const float rlist_sq         = nbparam->rlistOuter_sq;

#ifdef CALC_ENERGIES
#    ifdef EL_EWALD_ANY
    const float beta        = nbparam->ewald_beta;
    const float ewald_shift = nbparam->sh_ewald;
#    else
    const float gmx_unused c_rf = nbparam->c_rf;
#    endif /* EL_EWALD_ANY */
#endif     /* CALC_ENERGIES */
    /* thread/block/warp id-s */
    const unsigned int tidxi = get_local_id(0);
    const unsigned int tidxj = get_local_id(1);
    const unsigned int tidx  = get_local_id(1) * get_local_size(0) + get_local_id(0);
    const unsigned int bidx  = get_group_id(0);
    const unsigned int widx  = tidx / WARP_SIZE; /* warp index */
    /*! i-cluster interaction mask for a super-cluster with all NCL_PER_SUPERCL=8 bits set */
    const unsigned superClInteractionMask = ((1U << NCL_PER_SUPERCL) - 1U);
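    /* For reference (derived from how imask/mask_ji are used below): with NCL_PER_SUPERCL == 8
     * this mask is 0xFF, and bit (jm * NCL_PER_SUPERCL + i) of a cj4 entry's imask flags whether
     * i-cluster i of the current super-cluster interacts with the jm-th j-cluster of that entry.
     */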
#define LOCAL_OFFSET (xqib + NCL_PER_SUPERCL * CL_SIZE)
    __local int *cjs;
#if USE_CJ_PREFETCH
    /* shmem buffer for cj, for both warps separately */
    cjs = (__local int *)(LOCAL_OFFSET);
#    undef LOCAL_OFFSET
#    define LOCAL_OFFSET (cjs + 2 * c_nbnxnGpuJgroupSize)
#endif // USE_CJ_PREFETCH
    /* shmem buffer for i atom-type pre-loading */
    __local int *atib = (__local int *)(LOCAL_OFFSET); // NOLINT(google-readability-casting)
#undef LOCAL_OFFSET
#define LOCAL_OFFSET (atib + NCL_PER_SUPERCL * CL_SIZE)

    __local float2 *ljcpib = (__local float2 *)(LOCAL_OFFSET);
#undef LOCAL_OFFSET
#define LOCAL_OFFSET (ljcpib + NCL_PER_SUPERCL * CL_SIZE)
#if !REDUCE_SHUFFLE
    /* shmem j force buffer */
    __local float *f_buf = (__local float *)(LOCAL_OFFSET);
#    undef LOCAL_OFFSET
#    define LOCAL_OFFSET (f_buf + CL_SIZE * CL_SIZE * 3)
#else
    __local float *f_buf = 0;
#endif
#if !USE_SUBGROUP_ANY
    /* Local buffer used to implement the __any warp vote function from CUDA.
       volatile is used to avoid compiler optimizations for AMD builds. */
    volatile __local uint *warp_any = (__local uint *)(LOCAL_OFFSET);
#else
    __local uint gmx_unused *warp_any = 0;
#endif
#undef LOCAL_OFFSET
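    /* Sizing note (a sketch, not authoritative): the host must pass a __local allocation for xqib
     * at least as large as the buffers carved out above via LOCAL_OFFSET, i.e. roughly
     *     NCL_PER_SUPERCL * CL_SIZE * sizeof(float4)                       (xqib)
     *   + 2 * c_nbnxnGpuJgroupSize * sizeof(int)                           (cjs, if USE_CJ_PREFETCH)
     *   + NCL_PER_SUPERCL * CL_SIZE * (sizeof(int) + sizeof(float2))       (atib / ljcpib)
     *   + CL_SIZE * CL_SIZE * 3 * sizeof(float)                            (f_buf, if used)
     *   + space for warp_any                                               (if !USE_SUBGROUP_ANY)
     */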
    const nbnxn_sci_t nb_sci     = pl_sci[bidx];          /* my i super-cluster's index = current bidx */
    const int         sci        = nb_sci.sci;            /* super-cluster */
    const int         cij4_start = nb_sci.cj4_ind_start;  /* first ...*/
    const int         cij4_end   = nb_sci.cj4_ind_end;    /* and last index of j clusters */
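    /* For reference (fields as used in this kernel; the struct itself comes from
     * nbnxm_ocl_kernel_utils.clh): nb_sci.sci is the i super-cluster index, nb_sci.shift selects
     * the periodic shift vector in shift_vec, and [cj4_ind_start, cj4_ind_end) is the range of
     * cj4 entries iterated by the j4 loop below. */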
    for (int i = 0; i < NCL_PER_SUPERCL; i += CL_SIZE)
    {
        /* Pre-load i-atom x and q into shared memory */
        const int ci = sci * NCL_PER_SUPERCL + tidxj + i;
        const int ai = ci * CL_SIZE + tidxi;

        float4 xqbuf = xq[ai] + (float4)(shift_vec[3 * nb_sci.shift], shift_vec[3 * nb_sci.shift + 1],
                                         shift_vec[3 * nb_sci.shift + 2], 0.0f);
        xqbuf.w *= nbparam->epsfac;
        xqib[(tidxj + i) * CL_SIZE + tidxi] = xqbuf;

        /* Pre-load the i-atom types into shared memory */
        atib[(tidxj + i) * CL_SIZE + tidxi]   = atom_types[ai];
        ljcpib[(tidxj + i) * CL_SIZE + tidxi] = lj_comb[ai];
    }
#if !USE_SUBGROUP_ANY
    /* Initialise warp vote (an 8x8 block is 2 warps on NVIDIA). */
    if (tidx == 0 || tidx == WARP_SIZE)
    {
        warp_any[widx] = 0;
    }
#endif
    barrier(CLK_LOCAL_MEM_FENCE);
    float3 fci_buf[NCL_PER_SUPERCL]; /* i force buffer */
    for (int ci_offset = 0; ci_offset < NCL_PER_SUPERCL; ci_offset++)
    {
        fci_buf[ci_offset] = (float3)(0.0f);
    }
#ifdef LJ_EWALD
    /* TODO: we are trading registers for flops by keeping the lje_coeff-s; try re-calculating them later. */
    const float lje_coeff2   = nbparam->ewaldcoeff_lj * nbparam->ewaldcoeff_lj;
    const float lje_coeff6_6 = lje_coeff2 * lje_coeff2 * lje_coeff2 * ONE_SIXTH_F;
#endif /* LJ_EWALD */
#ifdef CALC_ENERGIES
    float E_lj = 0.0f;
    float E_el = 0.0f;

#    if defined EXCLUSION_FORCES /* Ewald or RF */
    if (nb_sci.shift == CENTRAL && pl_cj4[cij4_start].cj[0] == sci * NCL_PER_SUPERCL)
    {
        /* we have the diagonal: add the charge and LJ self interaction energy term */
        for (int i = 0; i < NCL_PER_SUPERCL; i++)
        {
#        if defined EL_EWALD_ANY || defined EL_RF || defined EL_CUTOFF
            const float qi = xqib[i * CL_SIZE + tidxi].w;
            E_el += qi * qi;
#        endif
#        if defined LJ_EWALD
            E_lj += nbfp_climg2d[atom_types[(sci * NCL_PER_SUPERCL + i) * CL_SIZE + tidxi] * (ntypes + 1) * 2];
#        endif /* LJ_EWALD */
        }

        /* divide the self term(s) equally over the j-threads, then multiply with the coefficients. */
#        if defined LJ_EWALD
        E_lj /= CL_SIZE;
        E_lj *= 0.5f * ONE_SIXTH_F * lje_coeff6_6;
#        endif /* LJ_EWALD */

#        if defined EL_EWALD_ANY || defined EL_RF || defined EL_CUTOFF
        /* Correct for epsfac^2 due to adding qi^2 */
        E_el /= nbparam->epsfac * CL_SIZE;
#        if defined EL_RF || defined EL_CUTOFF
        E_el *= -0.5f * c_rf;
#        else
        E_el *= -beta * M_FLOAT_1_SQRTPI; /* last factor 1/sqrt(pi) */
#        endif
#        endif /* EL_EWALD_ANY || defined EL_RF || defined EL_CUTOFF */
    }
#    endif /* EXCLUSION_FORCES */
#endif /* CALC_ENERGIES */
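    /* Summary of the correction above (for reference): the i charges were pre-scaled by epsfac
     * when loaded into xqib, so each j-thread accumulates epsfac^2 * sum_i q_i^2, hence the
     * division by epsfac * CL_SIZE. The remaining factor yields the self/exclusion term, e.g. for
     * Ewald E_self = -epsfac * beta / sqrt(pi) * sum_i q_i^2. */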
#ifdef EXCLUSION_FORCES
    const int nonSelfInteraction = !(nb_sci.shift == CENTRAL & tidxj <= tidxi);
#endif
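    /* Note on the expression above (the single '&' is deliberate, branchless code):
     * nonSelfInteraction is 0 only for threads with tidxj <= tidxi in the central image. Combined
     * with (ci != cj) in the cutoff check below, this makes the in-cluster (ci == cj) pairs be
     * processed only once per atom pair and skips the r = 0 self pair; with exclusion forces this
     * cannot be delegated to the exclusion mask (int_bit), because excluded pairs still receive
     * the Ewald/RF correction terms. */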
    /* loop over the j clusters seen by any of the atoms in the current super-cluster */
    for (int j4 = cij4_start; j4 < cij4_end; j4++)
    {
        const int          wexcl_idx = pl_cj4[j4].imei[widx].excl_ind;
        unsigned int       imask     = pl_cj4[j4].imei[widx].imask;
        const unsigned int wexcl     = excl[wexcl_idx].pair[(tidx) & (WARP_SIZE - 1)];

        preloadCj4(&cjs, pl_cj4[j4].cj, tidxi, tidxj, imask != 0u);
        /* Unrolling this loop improves performance without pruning, but
         * with pruning it leads to slowdown.
         *
         * Tested with driver 1800.5.
         *
         * TODO: check loop unrolling with NVIDIA OpenCL
         */
#if !defined PRUNE_NBL && !defined _NVIDIA_SOURCE_
#pragma unroll 4
#endif
        for (int jm = 0; jm < c_nbnxnGpuJgroupSize; jm++)
        {
            if (imask & (superClInteractionMask << (jm * NCL_PER_SUPERCL)))
            {
                unsigned int mask_ji = (1U << (jm * NCL_PER_SUPERCL));

                const int cj = loadCj(cjs, pl_cj4[j4].cj, jm, tidxi, tidxj);
                const int aj = cj * CL_SIZE + tidxj;

                /* load j atom data */
                const float4 xjqbuf = xq[aj];
                const float3 xj     = (float3)(xjqbuf.xyz);
                const float  qj_f   = xjqbuf.w;
                const int    typej  = atom_types[aj];
                const float2 ljcp_j = lj_comb[aj];

                float3 fcj_buf = (float3)(0.0f);
#if !defined PRUNE_NBL
#pragma unroll 8
#endif
                for (int i = 0; i < NCL_PER_SUPERCL; i++)
                {
                    if (imask & mask_ji)
                    {
                        const int gmx_unused ci = sci * NCL_PER_SUPERCL + i; /* i cluster index */

                        /* all threads load an atom from i cluster ci into shmem! */
                        const float4 xiqbuf = xqib[i * CL_SIZE + tidxi];
                        const float3 xi     = (float3)(xiqbuf.xyz);

                        /* distance between i and j atoms */
                        const float3 rv = xi - xj;
                        float        r2 = norm2(rv);

#ifdef PRUNE_NBL
                        if (!gmx_sub_group_any(warp_any, widx, r2 < rlist_sq))
                        {
                            imask &= ~mask_ji;
                        }
#endif
                        const float int_bit = (wexcl & mask_ji) ? 1.0f : 0.0f;

                        /* cutoff & exclusion check */
#ifdef EXCLUSION_FORCES
                        if ((r2 < rcoulomb_sq) * (nonSelfInteraction | (ci != cj)))
#else
                        if ((r2 < rcoulomb_sq) * int_bit)
#endif
                        {
                            /* load the rest of the i-atom parameters */
                            const float qi = xiqbuf.w;

#ifdef IATYPE_SHMEM
                            const int    typei  = atib[i * CL_SIZE + tidxi];
                            const float2 ljcp_i = ljcpib[i * CL_SIZE + tidxi];
#else /* IATYPE_SHMEM */
                            const int ai = ci * CL_SIZE + tidxi; /* i atom index */

                            const int    typei  = atom_types[ai];
                            const float2 ljcp_i = lj_comb[ai];
#endif /* IATYPE_SHMEM */
                            /* LJ 6*C6 and 12*C12 */
#ifndef LJ_COMB
                            const float c6  = nbfp_climg2d[2 * (ntypes * typei + typej)];
                            const float c12 = nbfp_climg2d[2 * (ntypes * typei + typej) + 1];
#else /* LJ_COMB */
#    ifdef LJ_COMB_GEOM
                            const float c6  = ljcp_i.x * ljcp_j.x;
                            const float c12 = ljcp_i.y * ljcp_j.y;
#    else
                            /* LJ 2^(1/6)*sigma and 12*epsilon */
                            const float sigma   = ljcp_i.x + ljcp_j.x;
                            const float epsilon = ljcp_i.y * ljcp_j.y;
#        if defined CALC_ENERGIES || defined LJ_FORCE_SWITCH || defined LJ_POT_SWITCH
                            float c6, c12;
                            convert_sigma_epsilon_to_c6_c12(sigma, epsilon, &c6, &c12);
#        endif
#    endif /* LJ_COMB_GEOM */
#endif     /* LJ_COMB */
                            // Ensure the distance does not become so small that r^-12 overflows
                            r2 = max(r2, NBNXN_MIN_RSQ);

                            const float inv_r  = rsqrt(r2);
                            const float inv_r2 = inv_r * inv_r;
#if !defined LJ_COMB_LB || defined CALC_ENERGIES
                            float inv_r6 = inv_r2 * inv_r2 * inv_r2;
#    if defined EXCLUSION_FORCES
                            /* We could mask inv_r2, but with Ewald
                             * masking both inv_r6 and F_invr is faster */
                            inv_r6 *= int_bit;
#    endif /* EXCLUSION_FORCES */

                            float F_invr = inv_r6 * (c12 * inv_r6 - c6) * inv_r2;
#    if defined CALC_ENERGIES || defined LJ_POT_SWITCH
                            float E_lj_p = int_bit
                                           * (c12 * (inv_r6 * inv_r6 + nbparam->repulsion_shift.cpot) * ONE_TWELVETH_F
                                              - c6 * (inv_r6 + nbparam->dispersion_shift.cpot) * ONE_SIXTH_F);
#    endif
#else /* ! LJ_COMB_LB || CALC_ENERGIES */
                            const float sig_r  = sigma * inv_r;
                            const float sig_r2 = sig_r * sig_r;
                            float       sig_r6 = sig_r2 * sig_r2 * sig_r2;
#    if defined EXCLUSION_FORCES
                            sig_r6 *= int_bit;
#    endif /* EXCLUSION_FORCES */

                            float F_invr = epsilon * sig_r6 * (sig_r6 - 1.0f) * inv_r2;
#endif /* ! LJ_COMB_LB || CALC_ENERGIES */
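                            /* For reference: with the 6*C6 / 12*C12 convention noted above, both
                             * branches compute F_invr = F/r = (12*C12*r^-12 - 6*C6*r^-6) / r^2
                             * (equivalently epsilon*sig_r6*(sig_r6 - 1)/r^2 in the LB branch), so
                             * multiplying by the distance vector rv below yields the force. */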
#ifdef LJ_FORCE_SWITCH
#    ifdef CALC_ENERGIES
                            calculate_force_switch_F_E(nbparam, c6, c12, inv_r, r2, &F_invr, &E_lj_p);
#    else
                            calculate_force_switch_F(nbparam, c6, c12, inv_r, r2, &F_invr);
#    endif /* CALC_ENERGIES */
#endif /* LJ_FORCE_SWITCH */
#ifdef LJ_EWALD
#    ifdef LJ_EWALD_COMB_GEOM
#        ifdef CALC_ENERGIES
                            calculate_lj_ewald_comb_geom_F_E(nbfp_comb_climg2d, nbparam, typei, typej, r2, inv_r2, lje_coeff2, lje_coeff6_6, int_bit, &F_invr, &E_lj_p);
#        else
                            calculate_lj_ewald_comb_geom_F(nbfp_comb_climg2d, typei, typej, r2, inv_r2, lje_coeff2, lje_coeff6_6, &F_invr);
#        endif /* CALC_ENERGIES */
#    elif defined LJ_EWALD_COMB_LB
                            calculate_lj_ewald_comb_LB_F_E(nbfp_comb_climg2d, nbparam, typei, typej, r2, inv_r2, lje_coeff2, lje_coeff6_6,
#        ifdef CALC_ENERGIES
                                                           int_bit, true, &F_invr, &E_lj_p
#        else
                                                           0, false, &F_invr, 0
#        endif /* CALC_ENERGIES */
                            );
#    endif /* LJ_EWALD_COMB_GEOM */
#endif /* LJ_EWALD */
#ifdef LJ_POT_SWITCH
#    ifdef CALC_ENERGIES
                            calculate_potential_switch_F_E(nbparam, inv_r, r2, &F_invr, &E_lj_p);
#    else
                            calculate_potential_switch_F(nbparam, inv_r, r2, &F_invr, &E_lj_p);
#    endif /* CALC_ENERGIES */
#endif /* LJ_POT_SWITCH */
#ifdef VDW_CUTOFF_CHECK
                            /* Separate VDW cut-off check to enable twin-range cut-offs
                             * (rvdw < rcoulomb <= rlist)
                             */
                            const float vdw_in_range = (r2 < rvdw_sq) ? 1.0f : 0.0f;
                            F_invr *= vdw_in_range;
#    ifdef CALC_ENERGIES
                            E_lj_p *= vdw_in_range;
#    endif
#endif /* VDW_CUTOFF_CHECK */
#ifdef EL_CUTOFF
#    ifdef EXCLUSION_FORCES
                            F_invr += qi * qj_f * int_bit * inv_r2 * inv_r;
#    else
                            F_invr += qi * qj_f * inv_r2 * inv_r;
#    endif
#endif
#ifdef EL_RF
                            F_invr += qi * qj_f * (int_bit * inv_r2 * inv_r - two_k_rf);
#endif
#if defined EL_EWALD_ANA
                            F_invr += qi * qj_f * (int_bit * inv_r2 * inv_r + pmecorrF(beta2 * r2) * beta3);
#elif defined EL_EWALD_TAB
                            F_invr += qi * qj_f
                                      * (int_bit * inv_r2
                                         - interpolate_coulomb_force_r(coulomb_tab_climg2d, r2 * inv_r, coulomb_tab_scale))
                                      * inv_r;
#endif /* EL_EWALD_ANA/TAB */
#ifdef CALC_ENERGIES
#    ifdef EL_CUTOFF
                            E_el += qi * qj_f * (int_bit * inv_r - c_rf);
#    endif
#    ifdef EL_RF
                            E_el += qi * qj_f * (int_bit * inv_r + 0.5f * two_k_rf * r2 - c_rf);
#    endif
#    ifdef EL_EWALD_ANY
                            /* 1.0f - erff is faster than erfcf */
                            E_el += qi * qj_f * (inv_r * (int_bit - erf(r2 * inv_r * beta)) - int_bit * ewald_shift);
#    endif /* EL_EWALD_ANY */
#endif /* CALC_ENERGIES */
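                            /* For reference: since erfc(x) = 1 - erf(x), the Ewald line above is
                             * qi*qj*(erfc(beta*r)/r - ewald_shift) for included pairs (int_bit = 1),
                             * while for excluded pairs (int_bit = 0) it reduces to -qi*qj*erf(beta*r)/r,
                             * i.e. the correction that removes the reciprocal-space contribution. */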
                            const float3 f_ij = rv * F_invr;

                            /* accumulate j forces in registers */
                            fcj_buf -= f_ij;

                            /* accumulate i forces in registers */
                            fci_buf[i] += f_ij;
                        }
                    }

                    /* shift the mask bit by 1 */
                    mask_ji += mask_ji;
                }

                /* reduce j forces */
                reduce_force_j(f_buf, fcj_buf, f, tidxi, tidxj, aj);
            }
        }
#ifdef PRUNE_NBL
        /* Update the imask with the new one which does not contain the
         * out-of-range clusters anymore. */
        pl_cj4[j4].imei[widx].imask = imask;
#endif
    }
    /* skip central shifts when summing shift forces */
    if (nb_sci.shift == CENTRAL)
    {
        bCalcFshift = 0;
    }

    /* reduce i forces */
    reduce_force_i_and_shift(f_buf, fci_buf, f, bCalcFshift != 0, tidxi, tidxj, sci,
                             nb_sci.shift, fshift);

#ifdef CALC_ENERGIES
    reduce_energy(f_buf, E_lj, E_el, e_lj, e_el, tidx);
#endif
}
#undef EXCLUSION_FORCES
#undef EL_EWALD_ANY
#undef LJ_EWALD
#undef LJ_COMB

#undef USE_CJ_PREFETCH