/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016 by the GROMACS development team.
 * Copyright (c) 2017,2018,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
    /* Unpack pointers for output */
    real* f      = out->f.data();
    real* fshift = out->fshift.data();

    real* Vvdw = out->VSvdw.data();
    real* Vc   = out->VSc.data();

    real* Vvdw = out->Vvdw.data();
    real* Vc   = out->Vc.data();
    /* Pointer to the flattened j-cluster list of the pair list */
    const nbnxn_cj_t* l_cj;

    /* Interaction flags and j-cluster index range for the current i-cluster */
    gmx_bool do_LJ, half_LJ, do_coul;
    int      cjind0, cjind1, cjind;

    /* Energy-group extraction shifts/masks and output strides */
    int egps_ishift, egps_imask;
    int egps_jshift, egps_jmask, egps_jstride;

    /* Per-i-atom pointers into the energy-group resolved LJ output buffer */
    real* vvdwtp[UNROLLI];

    /* i-cluster coordinates (broadcast per i-atom) and force accumulators */
    SimdReal ix_S0, iy_S0, iz_S0;
    SimdReal ix_S1, iy_S1, iz_S1;
    SimdReal ix_S2, iy_S2, iz_S2;
    SimdReal ix_S3, iy_S3, iz_S3;
    SimdReal fix_S0, fiy_S0, fiz_S0;
    SimdReal fix_S1, fiy_S1, fiz_S1;
    SimdReal fix_S2, fiy_S2, fiz_S2;
    SimdReal fix_S3, fiy_S3, fiz_S3;

    /* j - i differences, used below to build the in-cluster diagonal masks */
    SimdReal diagonal_jmi_S;
#if UNROLLI == UNROLLJ
    SimdBool diagonal_mask_S0, diagonal_mask_S1, diagonal_mask_S2, diagonal_mask_S3;

    SimdBool diagonal_mask0_S0, diagonal_mask0_S1, diagonal_mask0_S2, diagonal_mask0_S3;
    SimdBool diagonal_mask1_S0, diagonal_mask1_S1, diagonal_mask1_S2, diagonal_mask1_S3;

    SimdBitMask filter_S0, filter_S1, filter_S2, filter_S3;

    SimdReal iq_S0 = setZero();
    SimdReal iq_S1 = setZero();
    SimdReal iq_S2 = setZero();
    SimdReal iq_S3 = setZero();
#    ifdef CALC_ENERGIES
    SimdReal hrc_3_S, moh_rc_S;

    /* Coulomb table variables */
    const real* tab_coul_F;
#    if defined CALC_ENERGIES && !defined TAB_FDV0
    const real gmx_unused* tab_coul_V;
#    ifdef CALC_ENERGIES

#ifdef CALC_COUL_EWALD
    SimdReal beta2_S, beta_S;

#if defined CALC_ENERGIES && (defined CALC_COUL_EWALD || defined CALC_COUL_TAB)

#if defined LJ_CUT && defined CALC_ENERGIES
    SimdReal p6_cpot_S, p12_cpot_S;

    SimdReal swV3_S, swV4_S, swV5_S;
    SimdReal swF2_S, swF3_S, swF4_S;

#ifdef LJ_FORCE_SWITCH
    SimdReal p6_fc2_S, p6_fc3_S;
    SimdReal p12_fc2_S, p12_fc3_S;
#    ifdef CALC_ENERGIES
    SimdReal p6_vc3_S, p6_vc4_S;
    SimdReal p12_vc3_S, p12_vc4_S;
    SimdReal p6_6cpot_S, p12_12cpot_S;

    real     lj_ewaldcoeff2, lj_ewaldcoeff6_6;
    SimdReal half_S, lje_c2_S, lje_c6_6_S;

    SimdReal hsig_i_S0, seps_i_S0;
    SimdReal hsig_i_S1, seps_i_S1;
    SimdReal hsig_i_S2, seps_i_S2;
    SimdReal hsig_i_S3, seps_i_S3;
#endif /* LJ_COMB_LB */

#ifdef VDW_CUTOFF_CHECK
    const nbnxn_atomdata_t::Params& nbatParams = nbat->params();

#if defined LJ_COMB_GEOM || defined LJ_COMB_LB || defined LJ_EWALD_GEOM
    const real* gmx_restrict ljc = nbatParams.lj_comb.data();

#if !(defined LJ_COMB_GEOM || defined LJ_COMB_LB || defined FIX_LJ_C)
    /* No combination rule used */
    const real* gmx_restrict nbfp_ptr = nbatParams.nbfp_aligned.data();
    const int* gmx_restrict  type     = nbatParams.type.data();
    /* Load j-i for the first i */
    diagonal_jmi_S = load<SimdReal>(nbat->simdMasks.diagonal_4xn_j_minus_i.data());
    /* Generate all the diagonal masks as comparison results */
#if UNROLLI == UNROLLJ
    diagonal_mask_S0 = (zero_S < diagonal_jmi_S);
    diagonal_jmi_S   = diagonal_jmi_S - one_S;
    diagonal_mask_S1 = (zero_S < diagonal_jmi_S);
    diagonal_jmi_S   = diagonal_jmi_S - one_S;
    diagonal_mask_S2 = (zero_S < diagonal_jmi_S);
    diagonal_jmi_S   = diagonal_jmi_S - one_S;
    diagonal_mask_S3 = (zero_S < diagonal_jmi_S);

#    if UNROLLI == 2 * UNROLLJ || 2 * UNROLLI == UNROLLJ
    diagonal_mask0_S0 = (zero_S < diagonal_jmi_S);
    diagonal_jmi_S    = diagonal_jmi_S - one_S;
    diagonal_mask0_S1 = (zero_S < diagonal_jmi_S);
    diagonal_jmi_S    = diagonal_jmi_S - one_S;
    diagonal_mask0_S2 = (zero_S < diagonal_jmi_S);
    diagonal_jmi_S    = diagonal_jmi_S - one_S;
    diagonal_mask0_S3 = (zero_S < diagonal_jmi_S);
    diagonal_jmi_S    = diagonal_jmi_S - one_S;

#        if UNROLLI == 2 * UNROLLJ
    /* Load j-i for the second half of the j-cluster */
    diagonal_jmi_S = load<SimdReal>(nbat->simdMasks.diagonal_4xn_j_minus_i.data() + UNROLLJ);

    diagonal_mask1_S0 = (zero_S < diagonal_jmi_S);
    diagonal_jmi_S    = diagonal_jmi_S - one_S;
    diagonal_mask1_S1 = (zero_S < diagonal_jmi_S);
    diagonal_jmi_S    = diagonal_jmi_S - one_S;
    diagonal_mask1_S2 = (zero_S < diagonal_jmi_S);
    diagonal_jmi_S    = diagonal_jmi_S - one_S;
    diagonal_mask1_S3 = (zero_S < diagonal_jmi_S);
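
    /* Added explanatory note (a sketch of the intent): diagonal_4xn_j_minus_i
     * is assumed to encode j - i for the first i-row of a cluster pair.
     * The comparison (zero_S < diagonal_jmi_S) then keeps only j > i, and
     * subtracting one_S turns the same data into the mask for the next i-row,
     * so when a cluster is paired with itself each atom pair is processed
     * once and self-interactions are masked out.
     */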
#if GMX_DOUBLE && !GMX_SIMD_HAVE_INT32_LOGICAL
    const std::uint64_t* gmx_restrict exclusion_filter = nbat->simdMasks.exclusion_filter64.data();
    const std::uint32_t* gmx_restrict exclusion_filter = nbat->simdMasks.exclusion_filter.data();

    /* Here we cast the exclusion filters from unsigned * to int * or real *.
     * Since we only check bits, the actual value they represent does not
     * matter, as long as both filter and mask data are treated the same way.
     */
#if GMX_SIMD_HAVE_INT32_LOGICAL
    filter_S0 = load<SimdBitMask>(reinterpret_cast<const int*>(exclusion_filter + 0 * UNROLLJ));
    filter_S1 = load<SimdBitMask>(reinterpret_cast<const int*>(exclusion_filter + 1 * UNROLLJ));
    filter_S2 = load<SimdBitMask>(reinterpret_cast<const int*>(exclusion_filter + 2 * UNROLLJ));
    filter_S3 = load<SimdBitMask>(reinterpret_cast<const int*>(exclusion_filter + 3 * UNROLLJ));
    filter_S0 = load<SimdBitMask>(reinterpret_cast<const real*>(exclusion_filter + 0 * UNROLLJ));
    filter_S1 = load<SimdBitMask>(reinterpret_cast<const real*>(exclusion_filter + 1 * UNROLLJ));
    filter_S2 = load<SimdBitMask>(reinterpret_cast<const real*>(exclusion_filter + 2 * UNROLLJ));
    filter_S3 = load<SimdBitMask>(reinterpret_cast<const real*>(exclusion_filter + 3 * UNROLLJ));
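
    /* Added explanatory note (a sketch of the intended use; the inner kernel
     * itself is not part of this excerpt): filter_Si is assumed to hold the
     * interaction bit for i-atom i replicated across the j-lanes, so the
     * inner loop can AND the pair-list exclusion word of a j-cluster with
     * filter_Si and turn "bit set" into a per-lane interaction mask; pairs
     * whose bit is cleared then contribute nothing.
     */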
    /* Reaction-field constants */
    mrc_3_S = SimdReal(-2 * ic->k_rf);
#    ifdef CALC_ENERGIES
    hrc_3_S  = SimdReal(ic->k_rf);
    moh_rc_S = SimdReal(-ic->c_rf);
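
    /* Added explanatory note (standard reaction-field form, stated here for
     * clarity): per pair the RF potential is q_i*q_j*(1/r + k_rf*r^2 - c_rf)
     * and the scalar force factor contains 1/r^3 - 2*k_rf, so mrc_3_S = -2*k_rf
     * enters the force while hrc_3_S = k_rf and moh_rc_S = -c_rf enter the energy.
     */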
    invtsp_S = SimdReal(ic->coulombEwaldTables->scale);
#    ifdef CALC_ENERGIES
    mhalfsp_S = SimdReal(-0.5_real / ic->coulombEwaldTables->scale);

    tab_coul_F = ic->coulombEwaldTables->tableFDV0.data();
    tab_coul_F = ic->coulombEwaldTables->tableF.data();
#        ifdef CALC_ENERGIES
    tab_coul_V = ic->coulombEwaldTables->tableV.data();
#endif /* CALC_COUL_TAB */
#ifdef CALC_COUL_EWALD
    beta2_S = SimdReal(ic->ewaldcoeff_q * ic->ewaldcoeff_q);
    beta_S  = SimdReal(ic->ewaldcoeff_q);

#if (defined CALC_COUL_TAB || defined CALC_COUL_EWALD) && defined CALC_ENERGIES
    sh_ewald_S = SimdReal(ic->sh_ewald);
    /* LJ function constants */
#if defined CALC_ENERGIES || defined LJ_POT_SWITCH
    SimdReal sixth_S(1.0 / 6.0);
    SimdReal twelveth_S(1.0 / 12.0);

#if defined LJ_CUT && defined CALC_ENERGIES
    /* We shift the potential by cpot, which can be zero */
    p6_cpot_S  = SimdReal(ic->dispersion_shift.cpot);
    p12_cpot_S = SimdReal(ic->repulsion_shift.cpot);

    rswitch_S = SimdReal(ic->rvdw_switch);
    swV3_S    = SimdReal(ic->vdw_switch.c3);
    swV4_S    = SimdReal(ic->vdw_switch.c4);
    swV5_S    = SimdReal(ic->vdw_switch.c5);
    swF2_S    = SimdReal(3 * ic->vdw_switch.c3);
    swF3_S    = SimdReal(4 * ic->vdw_switch.c4);
    swF4_S    = SimdReal(5 * ic->vdw_switch.c5);
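
    /* Added explanatory note (assuming the usual GROMACS potential-switch
     * polynomial): with x = r - rvdw_switch the potential is scaled by
     * S(x) = 1 + c3*x^3 + c4*x^4 + c5*x^5, and the force needs
     * dS/dx = 3*c3*x^2 + 4*c4*x^3 + 5*c5*x^4, which is why swF2..swF4 are
     * the switch coefficients premultiplied by 3, 4 and 5.
     */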
#ifdef LJ_FORCE_SWITCH
    rswitch_S = SimdReal(ic->rvdw_switch);
    p6_fc2_S  = SimdReal(ic->dispersion_shift.c2);
    p6_fc3_S  = SimdReal(ic->dispersion_shift.c3);
    p12_fc2_S = SimdReal(ic->repulsion_shift.c2);
    p12_fc3_S = SimdReal(ic->repulsion_shift.c3);
#    ifdef CALC_ENERGIES
    SimdReal mthird_S(-1.0 / 3.0);
    SimdReal mfourth_S(-1.0 / 4.0);

    p6_vc3_S     = mthird_S * p6_fc2_S;
    p6_vc4_S     = mfourth_S * p6_fc3_S;
    p6_6cpot_S   = SimdReal(ic->dispersion_shift.cpot / 6);
    p12_vc3_S    = mthird_S * p12_fc2_S;
    p12_vc4_S    = mfourth_S * p12_fc3_S;
    p12_12cpot_S = SimdReal(ic->repulsion_shift.cpot / 12);
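
    /* Added explanatory note (an interpretation of the constants above): the
     * -1/3 and -1/4 factors are what one obtains by integrating the
     * force-switch polynomial c2*x^2 + c3*x^3 (x = r - rvdw_switch) to get
     * the matching potential terms, and cpot/6 and cpot/12 appear to be the
     * potential shifts prescaled for the kernel's 1/6 and 1/12 dispersion
     * and repulsion energy prefactors.
     */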
    half_S           = SimdReal(0.5);
    lj_ewaldcoeff2   = ic->ewaldcoeff_lj * ic->ewaldcoeff_lj;
    lj_ewaldcoeff6_6 = lj_ewaldcoeff2 * lj_ewaldcoeff2 * lj_ewaldcoeff2 / 6;
    lje_c2_S         = SimdReal(lj_ewaldcoeff2);
    lje_c6_6_S       = SimdReal(lj_ewaldcoeff6_6);
#    ifdef CALC_ENERGIES
    /* Determine the grid potential at the cut-off */
    SimdReal lje_vc_S(ic->sh_lj_ewald);
    /* The kernel either supports rcoulomb = rvdw or rcoulomb >= rvdw */
    rc2_S = SimdReal(ic->rcoulomb * ic->rcoulomb);
#ifdef VDW_CUTOFF_CHECK
    rcvdw2_S = SimdReal(ic->rvdw * ic->rvdw);

    minRsq_S = SimdReal(c_nbnxnMinDistanceSquared);

    const real* gmx_restrict q        = nbatParams.q.data();
    const real               facel    = ic->epsfac;
    const real* gmx_restrict shiftvec = shift_vec[0];
    const real* gmx_restrict x        = nbat->x().data();
    alignas(GMX_SIMD_ALIGNMENT) real pvdw_c6[2 * UNROLLI * UNROLLJ];
    real*                            pvdw_c12 = pvdw_c6 + UNROLLI * UNROLLJ;

    for (int jp = 0; jp < UNROLLJ; jp++)
        pvdw_c6[0 * UNROLLJ + jp] = nbat->nbfp[0 * 2];
        pvdw_c6[1 * UNROLLJ + jp] = nbat->nbfp[0 * 2];
        pvdw_c6[2 * UNROLLJ + jp] = nbat->nbfp[0 * 2];
        pvdw_c6[3 * UNROLLJ + jp] = nbat->nbfp[0 * 2];

        pvdw_c12[0 * UNROLLJ + jp] = nbat->nbfp[0 * 2 + 1];
        pvdw_c12[1 * UNROLLJ + jp] = nbat->nbfp[0 * 2 + 1];
        pvdw_c12[2 * UNROLLJ + jp] = nbat->nbfp[0 * 2 + 1];
        pvdw_c12[3 * UNROLLJ + jp] = nbat->nbfp[0 * 2 + 1];

    SimdReal c6_S0 = load<SimdReal>(pvdw_c6 + 0 * UNROLLJ);
    SimdReal c6_S1 = load<SimdReal>(pvdw_c6 + 1 * UNROLLJ);
    SimdReal c6_S2 = load<SimdReal>(pvdw_c6 + 2 * UNROLLJ);
    SimdReal c6_S3 = load<SimdReal>(pvdw_c6 + 3 * UNROLLJ);

    SimdReal c12_S0 = load<SimdReal>(pvdw_c12 + 0 * UNROLLJ);
    SimdReal c12_S1 = load<SimdReal>(pvdw_c12 + 1 * UNROLLJ);
    SimdReal c12_S2 = load<SimdReal>(pvdw_c12 + 2 * UNROLLJ);
    SimdReal c12_S3 = load<SimdReal>(pvdw_c12 + 3 * UNROLLJ);
#endif /* FIX_LJ_C */
    egps_ishift  = nbatParams.neg_2log;
    egps_imask   = (1 << egps_ishift) - 1;
    egps_jshift  = 2 * nbatParams.neg_2log;
    egps_jmask   = (1 << egps_jshift) - 1;
    egps_jstride = (UNROLLJ >> 1) * UNROLLJ;
    /* Major division is over i-particle energy groups, determine the stride */
    Vstride_i = nbatParams.nenergrp * (1 << nbatParams.neg_2log) * egps_jstride;
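
    /* Added explanatory note (a worked example under the assumption that
     * neg_2log is the number of bits used per packed group index): with
     * 4 energy groups, neg_2log = 2, and UNROLLJ = 4, egps_jstride is
     * (4 >> 1) * 4 = 8 and Vstride_i = 4 * (1 << 2) * 8 = 128 reals between
     * consecutive i-group blocks in the VSvdw/VSc buffers.
     */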
    l_cj = nbl->cj.data();
    for (const nbnxn_ci_t& ciEntry : nbl->ci)
        ish    = (ciEntry.shift & NBNXN_CI_SHIFT);
        cjind0 = ciEntry.cj_ind_start;
        cjind1 = ciEntry.cj_ind_end;
        ci_sh  = (ish == CENTRAL ? ci : -1);

        shX_S = SimdReal(shiftvec[ish3]);
        shY_S = SimdReal(shiftvec[ish3 + 1]);
        shZ_S = SimdReal(shiftvec[ish3 + 2]);

        int sci  = ci * STRIDE;
        int scix = sci * DIM;
#    if defined LJ_COMB_LB || defined LJ_COMB_GEOM || defined LJ_EWALD_GEOM

        int sci  = (ci >> 1) * STRIDE;
        int scix = sci * DIM + (ci & 1) * (STRIDE >> 1);
#    if defined LJ_COMB_LB || defined LJ_COMB_GEOM || defined LJ_EWALD_GEOM
        int sci2 = sci * 2 + (ci & 1) * (STRIDE >> 1);

        sci += (ci & 1) * (STRIDE >> 1);
        /* We have 5 LJ/C combinations, but use only three inner loops,
         * as the other combinations are unlikely and/or not much faster:
         * inner half-LJ + C for half-LJ + C / no-LJ + C
         * inner LJ + C      for full-LJ + C
         * inner LJ          for full-LJ + no-C / half-LJ + no-C
         */
        do_LJ   = ((ciEntry.shift & NBNXN_CI_DO_LJ(0)) != 0);
        do_coul = ((ciEntry.shift & NBNXN_CI_DO_COUL(0)) != 0);
        half_LJ = (((ciEntry.shift & NBNXN_CI_HALF_LJ(0)) != 0) || !do_LJ) && do_coul;

        egps_i = nbatParams.energrp[ci];

        for (ia = 0; ia < UNROLLI; ia++)
            egp_ia     = (egps_i >> (ia * egps_ishift)) & egps_imask;
            vvdwtp[ia] = Vvdw + egp_ia * Vstride_i;
            vctp[ia]   = Vc + egp_ia * Vstride_i;
#    ifdef LJ_EWALD_GEOM
        gmx_bool do_self = TRUE;
        gmx_bool do_self = do_coul;

        if (do_self && l_cj[ciEntry.cj_ind_start].cj == ci_sh)
        if (do_self && l_cj[ciEntry.cj_ind_start].cj == (ci_sh << 1))
        if (do_self && l_cj[ciEntry.cj_ind_start].cj == (ci_sh >> 1))

                Vc_sub_self = 0.5 * ic->c_rf;
#    ifdef CALC_COUL_TAB
                Vc_sub_self = 0.5 * tab_coul_F[2];
                Vc_sub_self = 0.5 * tab_coul_V[0];
#    ifdef CALC_COUL_EWALD
                Vc_sub_self = 0.5 * ic->ewaldcoeff_q * M_2_SQRTPI;
                for (ia = 0; ia < UNROLLI; ia++)
#    ifdef ENERGY_GROUPS
                    vctp[ia][((egps_i >> (ia * egps_ishift)) & egps_imask) * egps_jstride]
                            -= facel * qi * qi * Vc_sub_self;

#    ifdef LJ_EWALD_GEOM
                for (ia = 0; ia < UNROLLI; ia++)
                    c6_i = nbatParams.nbfp[nbatParams.type[sci + ia] * (nbatParams.numTypes + 1) * 2]
#        ifdef ENERGY_GROUPS
                    vvdwtp[ia][((egps_i >> (ia * egps_ishift)) & egps_imask) * egps_jstride]
                            += 0.5 * c6_i * lj_ewaldcoeff6_6;
#    endif /* LJ_EWALD_GEOM */
        /* Load i atom data */
        int sciy = scix + STRIDE;
        int sciz = sciy + STRIDE;
        ix_S0    = SimdReal(x[scix]) + shX_S;
        ix_S1    = SimdReal(x[scix + 1]) + shX_S;
        ix_S2    = SimdReal(x[scix + 2]) + shX_S;
        ix_S3    = SimdReal(x[scix + 3]) + shX_S;
        iy_S0    = SimdReal(x[sciy]) + shY_S;
        iy_S1    = SimdReal(x[sciy + 1]) + shY_S;
        iy_S2    = SimdReal(x[sciy + 2]) + shY_S;
        iy_S3    = SimdReal(x[sciy + 3]) + shY_S;
        iz_S0    = SimdReal(x[sciz]) + shZ_S;
        iz_S1    = SimdReal(x[sciz + 1]) + shZ_S;
        iz_S2    = SimdReal(x[sciz + 2]) + shZ_S;
        iz_S3    = SimdReal(x[sciz + 3]) + shZ_S;
            iq_S0 = SimdReal(facel * q[sci]);
            iq_S1 = SimdReal(facel * q[sci + 1]);
            iq_S2 = SimdReal(facel * q[sci + 2]);
            iq_S3 = SimdReal(facel * q[sci + 3]);
        hsig_i_S0 = SimdReal(ljc[sci2 + 0]);
        hsig_i_S1 = SimdReal(ljc[sci2 + 1]);
        hsig_i_S2 = SimdReal(ljc[sci2 + 2]);
        hsig_i_S3 = SimdReal(ljc[sci2 + 3]);
        seps_i_S0 = SimdReal(ljc[sci2 + STRIDE + 0]);
        seps_i_S1 = SimdReal(ljc[sci2 + STRIDE + 1]);
        seps_i_S2 = SimdReal(ljc[sci2 + STRIDE + 2]);
        seps_i_S3 = SimdReal(ljc[sci2 + STRIDE + 3]);
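
        /* Added explanatory note (assuming lj_comb stores sigma/2 and
         * sqrt(epsilon) per atom for the Lorentz-Berthelot rule): the pair
         * parameters then follow as sigma_ij = hsig_i + hsig_j and
         * eps_ij = seps_i * seps_j in the inner kernel.
         */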
        SimdReal c6s_S0 = SimdReal(ljc[sci2 + 0]);
        SimdReal c6s_S1 = SimdReal(ljc[sci2 + 1]);
        SimdReal c6s_S2, c6s_S3;
            c6s_S2 = SimdReal(ljc[sci2 + 2]);
            c6s_S3 = SimdReal(ljc[sci2 + 3]);

        SimdReal c12s_S0 = SimdReal(ljc[sci2 + STRIDE + 0]);
        SimdReal c12s_S1 = SimdReal(ljc[sci2 + STRIDE + 1]);
        SimdReal c12s_S2, c12s_S3;
            c12s_S2 = SimdReal(ljc[sci2 + STRIDE + 2]);
            c12s_S3 = SimdReal(ljc[sci2 + STRIDE + 3]);
        const int   numTypes = nbatParams.numTypes;
        const real* nbfp0    = nbfp_ptr + type[sci] * numTypes * c_simdBestPairAlignment;
        const real* nbfp1    = nbfp_ptr + type[sci + 1] * numTypes * c_simdBestPairAlignment;
        const real *nbfp2 = nullptr, *nbfp3 = nullptr;
            nbfp2 = nbfp_ptr + type[sci + 2] * numTypes * c_simdBestPairAlignment;
            nbfp3 = nbfp_ptr + type[sci + 3] * numTypes * c_simdBestPairAlignment;
        /* We need the geometrically combined C6 for the PME grid correction */
        SimdReal c6s_S0 = SimdReal(ljc[sci2 + 0]);
        SimdReal c6s_S1 = SimdReal(ljc[sci2 + 1]);
        SimdReal c6s_S2, c6s_S3;
            c6s_S2 = SimdReal(ljc[sci2 + 2]);
            c6s_S3 = SimdReal(ljc[sci2 + 3]);
        /* Zero the potential energy for this list */
        SimdReal Vvdwtot_S = setZero();
        SimdReal vctot_S   = setZero();

        /* Clear i atom forces */

        /* Currently all kernels use (at least half) LJ */

            /* Coulomb: all i-atoms, LJ: first half i-atoms */
            while (cjind < cjind1 && nbl->cj[cjind].excl != NBNXN_INTERACTION_MASK_ALL)
#include "kernel_inner.h"
            for (; (cjind < cjind1); cjind++)
#include "kernel_inner.h"

            /* Coulomb: all i-atoms, LJ: all i-atoms */
            while (cjind < cjind1 && nbl->cj[cjind].excl != NBNXN_INTERACTION_MASK_ALL)
#include "kernel_inner.h"
            for (; (cjind < cjind1); cjind++)
#include "kernel_inner.h"

            /* Coulomb: none, LJ: all i-atoms */
            while (cjind < cjind1 && nbl->cj[cjind].excl != NBNXN_INTERACTION_MASK_ALL)
#include "kernel_inner.h"
            for (; (cjind < cjind1); cjind++)
#include "kernel_inner.h"

        ninner += cjind1 - cjind0;
        /* Add accumulated i-forces to the force array */
        real fShiftX = reduceIncr4ReturnSum(f + scix, fix_S0, fix_S1, fix_S2, fix_S3);
        real fShiftY = reduceIncr4ReturnSum(f + sciy, fiy_S0, fiy_S1, fiy_S2, fiy_S3);
        real fShiftZ = reduceIncr4ReturnSum(f + sciz, fiz_S0, fiz_S1, fiz_S2, fiz_S3);
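
        /* Added explanatory note: reduceIncr4ReturnSum reduces each of the
         * four per-i-atom accumulators to a scalar, adds those four sums to
         * f[] at the given offset, and returns their total, i.e. the net
         * force on this i-cluster, which is used below as the shift-force
         * contribution for periodic image ish.
         */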
#ifdef CALC_SHIFTFORCES
        fshift[ish3 + 0] += fShiftX;
        fshift[ish3 + 1] += fShiftY;
        fshift[ish3 + 2] += fShiftZ;

            *Vc += reduce(vctot_S);

        *Vvdw += reduce(Vvdwtot_S);

        /* Outer loop uses 6 flops/iteration */

    printf("atom pairs %d\n", npair);