/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016 by the GROMACS development team.
 * Copyright (c) 2017,2018,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/* Unpack pointers for output */
real* f      = out->f.data();
real* fshift = out->fshift.data();
#ifdef ENERGY_GROUPS
real* Vvdw = out->VSvdw.data();
real* Vc   = out->VSc.data();
#else
real* Vvdw = out->Vvdw.data();
real* Vc   = out->Vc.data();
#endif
const nbnxn_cj_t* l_cj;

gmx_bool do_LJ, half_LJ, do_coul;
int      cjind0, cjind1, cjind;

int egps_ishift, egps_imask;
int egps_jshift, egps_jmask, egps_jstride;

real* vvdwtp[UNROLLI];

SimdReal ix_S0, iy_S0, iz_S0;
SimdReal ix_S2, iy_S2, iz_S2;
SimdReal fix_S0, fiy_S0, fiz_S0;
SimdReal fix_S2, fiy_S2, fiz_S2;

SimdReal diagonal_jmi_S;
#if UNROLLI == UNROLLJ
SimdBool diagonal_mask_S0, diagonal_mask_S2;
#else
SimdBool diagonal_mask0_S0, diagonal_mask0_S2;
SimdBool diagonal_mask1_S0, diagonal_mask1_S2;
#endif

SimdBitMask filter_S0, filter_S2;

SimdReal iq_S0 = setZero();
SimdReal iq_S2 = setZero();

SimdReal hrc_3_S, moh_rc_S;
/* Coulomb table variables */
const real* tab_coul_F;
# if defined CALC_ENERGIES && !defined TAB_FDV0
const real* tab_coul_V;
# endif

# ifdef CALC_ENERGIES

#ifdef CALC_COUL_EWALD
SimdReal beta2_S, beta_S;

#if defined CALC_ENERGIES && (defined CALC_COUL_EWALD || defined CALC_COUL_TAB)

#if defined LJ_CUT && defined CALC_ENERGIES
SimdReal p6_cpot_S, p12_cpot_S;

SimdReal swV3_S, swV4_S, swV5_S;
SimdReal swF2_S, swF3_S, swF4_S;

#ifdef LJ_FORCE_SWITCH

SimdReal p6_fc2_S, p6_fc3_S;
SimdReal p12_fc2_S, p12_fc3_S;
# ifdef CALC_ENERGIES
SimdReal p6_vc3_S, p6_vc4_S;
SimdReal p12_vc3_S, p12_vc4_S;
SimdReal p6_6cpot_S, p12_12cpot_S;

real     lj_ewaldcoeff2, lj_ewaldcoeff6_6;
SimdReal half_S, lje_c2_S, lje_c6_6_S;

#ifdef LJ_COMB_LB
SimdReal hsig_i_S0, seps_i_S0;
SimdReal hsig_i_S2, seps_i_S2;
#else
# ifdef FIX_LJ_C
alignas(GMX_SIMD_ALIGNMENT) real pvdw_c6[2 * UNROLLI * UNROLLJ];
real*                            pvdw_c12 = pvdw_c6 + UNROLLI * UNROLLJ;
# endif
#endif /* LJ_COMB_LB */
#ifdef VDW_CUTOFF_CHECK

const nbnxn_atomdata_t::Params& nbatParams = nbat->params();

#if defined LJ_COMB_GEOM || defined LJ_COMB_LB || defined LJ_EWALD_GEOM
const real* gmx_restrict ljc = nbatParams.lj_comb.data();
#endif
#if !(defined LJ_COMB_GEOM || defined LJ_COMB_LB || defined FIX_LJ_C)
/* No combination rule used */
const real* gmx_restrict nbfp_ptr = nbatParams.nbfp_aligned.data();
const int* gmx_restrict  type     = nbatParams.type.data();
#endif
/* Load j-i for the first i */
diagonal_jmi_S = load<SimdReal>(nbat->simdMasks.diagonal_2xnn_j_minus_i.data());
/* Generate all the diagonal masks as comparison results */
#if UNROLLI == UNROLLJ
diagonal_mask_S0 = (zero_S < diagonal_jmi_S);
diagonal_jmi_S   = diagonal_jmi_S - one_S;
diagonal_jmi_S   = diagonal_jmi_S - one_S;
diagonal_mask_S2 = (zero_S < diagonal_jmi_S);
#else
# if 2 * UNROLLI == UNROLLJ
diagonal_mask0_S0 = (zero_S < diagonal_jmi_S);
diagonal_jmi_S    = diagonal_jmi_S - one_S;
diagonal_jmi_S    = diagonal_jmi_S - one_S;
diagonal_mask0_S2 = (zero_S < diagonal_jmi_S);
diagonal_jmi_S    = diagonal_jmi_S - one_S;
diagonal_jmi_S    = diagonal_jmi_S - one_S;
diagonal_mask1_S0 = (zero_S < diagonal_jmi_S);
diagonal_jmi_S    = diagonal_jmi_S - one_S;
diagonal_jmi_S    = diagonal_jmi_S - one_S;
diagonal_mask1_S2 = (zero_S < diagonal_jmi_S);
# endif
#endif
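
/* Note on the masks above: when an i-cluster is paired with itself, only
 * pairs with j > i should interact, so each lane keeps the result of
 * (j - i > 0). In the 2xNN layout a register covers two i-atoms, each
 * replicated over half the SIMD width, which is why the masks come as an
 * _S0/_S2 pair (i-atoms 0,1 and 2,3) and diagonal_jmi_S is decremented by
 * two between comparisons.
 */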
/* Load masks for topology exclusion masking. filter_stride is
   static const, so the conditional will be optimized away. */
#if GMX_DOUBLE && !GMX_SIMD_HAVE_INT32_LOGICAL
const std::uint64_t* gmx_restrict exclusion_filter = nbat->simdMasks.exclusion_filter64.data();
#else
const std::uint32_t* gmx_restrict exclusion_filter = nbat->simdMasks.exclusion_filter.data();
#endif

/* Here we cast the exclusion filters from unsigned * to int * or real *.
 * Since we only check bits, the actual value they represent does not
 * matter, as long as both filter and mask data are treated the same way.
 */
#if GMX_SIMD_HAVE_INT32_LOGICAL
filter_S0 = load<SimdBitMask>(reinterpret_cast<const int*>(exclusion_filter + 0 * UNROLLJ));
filter_S2 = load<SimdBitMask>(reinterpret_cast<const int*>(exclusion_filter + 2 * UNROLLJ));
#else
filter_S0 = load<SimdBitMask>(reinterpret_cast<const real*>(exclusion_filter + 0 * UNROLLJ));
filter_S2 = load<SimdBitMask>(reinterpret_cast<const real*>(exclusion_filter + 2 * UNROLLJ));
#endif
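
/* The filters give every i-atom within a cluster its own bit (lane-wise).
 * In the inner loop the interaction mask stored with each j-cluster is
 * tested against filter_S0/filter_S2 with a bitwise AND, and the result is
 * turned into a per-pair interaction bool, conceptually
 *     interact = ((excl_bits & filter) != 0),
 * which zeroes out excluded pairs. (Sketch only; the exact SIMD operations
 * live in kernel_inner.h.)
 */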
/* Reaction-field constants */
mrc_3_S = SimdReal(-2 * ic->k_rf);
# ifdef CALC_ENERGIES
hrc_3_S  = SimdReal(ic->k_rf);
moh_rc_S = SimdReal(-ic->c_rf);
# endif
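
/* These correspond to the usual reaction-field expressions, roughly
 *     V_RF(r)   = qq * (1/r + k_rf * r^2 - c_rf)
 *     F_RF(r)/r = qq * (1/r^3 - 2 * k_rf)
 * with qq the scaled charge product, so mrc_3_S = -2*k_rf enters the force
 * while hrc_3_S = k_rf and moh_rc_S = -c_rf enter the energy.
 */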
#ifdef CALC_COUL_TAB
invtsp_S = SimdReal(ic->coulombEwaldTables->scale);
# ifdef CALC_ENERGIES
mhalfsp_S = SimdReal(-0.5_real / ic->coulombEwaldTables->scale);
# endif

# ifdef TAB_FDV0
tab_coul_F = ic->coulombEwaldTables->tableFDV0.data();
# else
tab_coul_F = ic->coulombEwaldTables->tableF.data();
# ifdef CALC_ENERGIES
tab_coul_V = ic->coulombEwaldTables->tableV.data();
# endif
# endif
#endif /* CALC_COUL_TAB */
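
/* The Ewald correction force is tabulated on a uniform grid with spacing
 * 1/scale; invtsp_S converts a distance into a fractional table index and
 * the inner loop interpolates between neighbouring entries, with mhalfsp_S
 * (-0.5/scale) entering the energy interpolation. TAB_FDV0 selects the
 * packed four-reals-per-point table layout, otherwise separate force and
 * potential tables are used.
 */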
#ifdef CALC_COUL_EWALD
beta2_S = SimdReal(ic->ewaldcoeff_q * ic->ewaldcoeff_q);
beta_S  = SimdReal(ic->ewaldcoeff_q);
#endif

#if (defined CALC_COUL_TAB || defined CALC_COUL_EWALD) && defined CALC_ENERGIES
sh_ewald_S = SimdReal(ic->sh_ewald);
#endif

/* LJ function constants */
#if defined CALC_ENERGIES || defined LJ_POT_SWITCH
SimdReal sixth_S    = SimdReal(1.0 / 6.0);
SimdReal twelveth_S = SimdReal(1.0 / 12.0);
#endif

#if defined LJ_CUT && defined CALC_ENERGIES
/* We shift the potential by cpot, which can be zero */
p6_cpot_S  = SimdReal(ic->dispersion_shift.cpot);
p12_cpot_S = SimdReal(ic->repulsion_shift.cpot);
#endif

#ifdef LJ_POT_SWITCH
rswitch_S = SimdReal(ic->rvdw_switch);
swV3_S    = SimdReal(ic->vdw_switch.c3);
swV4_S    = SimdReal(ic->vdw_switch.c4);
swV5_S    = SimdReal(ic->vdw_switch.c5);
swF2_S    = SimdReal(3 * ic->vdw_switch.c3);
swF3_S    = SimdReal(4 * ic->vdw_switch.c4);
swF4_S    = SimdReal(5 * ic->vdw_switch.c5);
#endif
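
/* Potential-switch sketch: for r > rvdw_switch, with d = r - rvdw_switch,
 *     S(d)  = 1 + c3*d^3 + c4*d^4 + c5*d^5
 *     S'(d) =     3*c3*d^2 + 4*c4*d^3 + 5*c5*d^4
 * which is why the force coefficients above are simply 3*c3, 4*c4 and 5*c5.
 */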
#ifdef LJ_FORCE_SWITCH
rswitch_S = SimdReal(ic->rvdw_switch);
p6_fc2_S  = SimdReal(ic->dispersion_shift.c2);
p6_fc3_S  = SimdReal(ic->dispersion_shift.c3);
p12_fc2_S = SimdReal(ic->repulsion_shift.c2);
p12_fc3_S = SimdReal(ic->repulsion_shift.c3);
# ifdef CALC_ENERGIES
SimdReal mthird_S  = SimdReal(-1.0 / 3.0);
SimdReal mfourth_S = SimdReal(-1.0 / 4.0);

p6_vc3_S     = mthird_S * p6_fc2_S;
p6_vc4_S     = mfourth_S * p6_fc3_S;
p6_6cpot_S   = SimdReal(ic->dispersion_shift.cpot / 6);
p12_vc3_S    = mthird_S * p12_fc2_S;
p12_vc4_S    = mfourth_S * p12_fc3_S;
p12_12cpot_S = SimdReal(ic->repulsion_shift.cpot / 12);
# endif
#endif
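
/* Force-switch sketch: beyond rvdw_switch the dispersion and repulsion
 * forces get an added polynomial fc2*d^2 + fc3*d^3 with d = r - rvdw_switch;
 * integrating it gives the matching potential terms, which is where
 * vc3 = -fc2/3, vc4 = -fc3/4 and the constant shifts cpot/6 and cpot/12
 * set above come from.
 */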
half_S           = SimdReal(0.5);
lj_ewaldcoeff2   = ic->ewaldcoeff_lj * ic->ewaldcoeff_lj;
lj_ewaldcoeff6_6 = lj_ewaldcoeff2 * lj_ewaldcoeff2 * lj_ewaldcoeff2 / 6;
lje_c2_S         = SimdReal(lj_ewaldcoeff2);
lje_c6_6_S       = SimdReal(lj_ewaldcoeff6_6);
# ifdef CALC_ENERGIES
/* Determine the grid potential at the cut-off */
SimdReal lje_vc_S = SimdReal(ic->sh_lj_ewald);
# endif
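
/* For LJ-PME with geometric combination rules the inner loop adds back the
 * long-range (grid) part of the r^-6 interaction inside the cut-off, which
 * uses ewaldcoeff_lj^2 and ewaldcoeff_lj^6/6 as prepared above; lje_vc_S is
 * the grid potential at the cut-off used to shift the energy.
 */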
/* The kernel either supports rcoulomb = rvdw or rcoulomb >= rvdw */
rc2_S = SimdReal(ic->rcoulomb * ic->rcoulomb);
#ifdef VDW_CUTOFF_CHECK
rcvdw2_S = SimdReal(ic->rvdw * ic->rvdw);
#endif

minRsq_S = SimdReal(c_nbnxnMinDistanceSquared);

const real* gmx_restrict q        = nbatParams.q.data();
const real               facel    = ic->epsfac;
const real* gmx_restrict shiftvec = shift_vec[0];
const real* gmx_restrict x        = nbat->x().data();
#ifdef FIX_LJ_C
for (int jp = 0; jp < UNROLLJ; jp++)
{
    pvdw_c6[0 * UNROLLJ + jp] = nbat->nbfp[0 * 2];
    pvdw_c6[1 * UNROLLJ + jp] = nbat->nbfp[0 * 2];
    pvdw_c6[2 * UNROLLJ + jp] = nbat->nbfp[0 * 2];
    pvdw_c6[3 * UNROLLJ + jp] = nbat->nbfp[0 * 2];

    pvdw_c12[0 * UNROLLJ + jp] = nbat->nbfp[0 * 2 + 1];
    pvdw_c12[1 * UNROLLJ + jp] = nbat->nbfp[0 * 2 + 1];
    pvdw_c12[2 * UNROLLJ + jp] = nbat->nbfp[0 * 2 + 1];
    pvdw_c12[3 * UNROLLJ + jp] = nbat->nbfp[0 * 2 + 1];
}

SimdReal c6_S0 = load<SimdReal>(pvdw_c6 + 0 * UNROLLJ);
SimdReal c6_S1 = load<SimdReal>(pvdw_c6 + 1 * UNROLLJ);
SimdReal c6_S2 = load<SimdReal>(pvdw_c6 + 2 * UNROLLJ);
SimdReal c6_S3 = load<SimdReal>(pvdw_c6 + 3 * UNROLLJ);

SimdReal c12_S0 = load<SimdReal>(pvdw_c12 + 0 * UNROLLJ);
SimdReal c12_S1 = load<SimdReal>(pvdw_c12 + 1 * UNROLLJ);
SimdReal c12_S2 = load<SimdReal>(pvdw_c12 + 2 * UNROLLJ);
SimdReal c12_S3 = load<SimdReal>(pvdw_c12 + 3 * UNROLLJ);
#endif /* FIX_LJ_C */
#ifdef ENERGY_GROUPS
egps_ishift  = nbatParams.neg_2log;
egps_imask   = (1 << egps_ishift) - 1;
egps_jshift  = 2 * nbatParams.neg_2log;
egps_jmask   = (1 << egps_jshift) - 1;
egps_jstride = (UNROLLJ >> 1) * UNROLLJ;
/* Major division is over i-particle energy groups, determine the stride */
Vstride_i = nbatParams.nenergrp * (1 << nbatParams.neg_2log) * egps_jstride;
#endif
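
/* nbatParams.energrp[ci] packs the energy-group index of each of the UNROLLI
 * i-atoms in neg_2log bits; egps_ishift/egps_imask decode it per atom (see
 * the loop over ia further down), while egps_jshift/egps_jmask extract two
 * packed j-group indices at a time. Energies are accumulated per
 * (i-group, j-group) pair in blocks of egps_jstride reals, and Vstride_i is
 * the distance between consecutive i-group blocks.
 */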
l_cj = nbl->cj.data();

for (const nbnxn_ci_t& ciEntry : nbl->ci)
{
    ish    = (ciEntry.shift & NBNXN_CI_SHIFT);

    cjind0 = ciEntry.cj_ind_start;
    cjind1 = ciEntry.cj_ind_end;

    ci_sh = (ish == CENTRAL ? ci : -1);

    shX_S = SimdReal(shiftvec[ish3]);
    shY_S = SimdReal(shiftvec[ish3 + 1]);
    shZ_S = SimdReal(shiftvec[ish3 + 2]);

    int sci  = ci * STRIDE;
    int scix = sci * DIM;
# if defined LJ_COMB_LB || defined LJ_COMB_GEOM || defined LJ_EWALD_GEOM

    int sci  = (ci >> 1) * STRIDE;
    int scix = sci * DIM + (ci & 1) * (STRIDE >> 1);
# if defined LJ_COMB_LB || defined LJ_COMB_GEOM || defined LJ_EWALD_GEOM
    int sci2 = sci * 2 + (ci & 1) * (STRIDE >> 1);

    sci += (ci & 1) * (STRIDE >> 1);

    /* We have 5 LJ/C combinations, but use only three inner loops,
     * as the other combinations are unlikely and/or not much faster:
     * inner half-LJ + C for half-LJ + C / no-LJ + C
     * inner LJ + C for full-LJ + C
     * inner LJ for full-LJ + no-C / half-LJ + no-C
     */
    do_LJ   = ((ciEntry.shift & NBNXN_CI_DO_LJ(0)) != 0);
    do_coul = ((ciEntry.shift & NBNXN_CI_DO_COUL(0)) != 0);
    half_LJ = (((ciEntry.shift & NBNXN_CI_HALF_LJ(0)) != 0) || !do_LJ) && do_coul;
    egps_i = nbatParams.energrp[ci];

    for (int ia = 0; ia < UNROLLI; ia++)
    {
        egp_ia     = (egps_i >> (ia * egps_ishift)) & egps_imask;
        vvdwtp[ia] = Vvdw + egp_ia * Vstride_i;
        vctp[ia]   = Vc + egp_ia * Vstride_i;
    }

# ifdef LJ_EWALD_GEOM
    gmx_bool do_self = TRUE;
# else
    gmx_bool do_self = do_coul;
# endif

    if (do_self && l_cj[ciEntry.cj_ind_start].cj == ci_sh)

    if (do_self && l_cj[ciEntry.cj_ind_start].cj == (ci_sh >> 1))
    Vc_sub_self = 0.5 * ic->c_rf;

# ifdef CALC_COUL_TAB

    Vc_sub_self = 0.5 * tab_coul_F[2];

    Vc_sub_self = 0.5 * tab_coul_V[0];

# ifdef CALC_COUL_EWALD

    Vc_sub_self = 0.5 * ic->ewaldcoeff_q * M_2_SQRTPI;

    for (ia = 0; ia < UNROLLI; ia++)

# ifdef ENERGY_GROUPS
    vctp[ia][((egps_i >> (ia * egps_ishift)) & egps_imask) * egps_jstride]

            -= facel * qi * qi * Vc_sub_self;

# ifdef LJ_EWALD_GEOM

    for (ia = 0; ia < UNROLLI; ia++)

    c6_i = nbatParams.nbfp[nbatParams.type[sci + ia] * (nbatParams.numTypes + 1) * 2]

# ifdef ENERGY_GROUPS
    vvdwtp[ia][((egps_i >> (ia * egps_ishift)) & egps_imask) * egps_jstride]

            += 0.5 * c6_i * lj_ewaldcoeff6_6;

# endif /* LJ_EWALD */
    /* Load i atom data */
    int sciy = scix + STRIDE;
    int sciz = sciy + STRIDE;
    ix_S0 = loadU1DualHsimd(x + scix);
    ix_S2 = loadU1DualHsimd(x + scix + 2);
    iy_S0 = loadU1DualHsimd(x + sciy);
    iy_S2 = loadU1DualHsimd(x + sciy + 2);
    iz_S0 = loadU1DualHsimd(x + sciz);
    iz_S2 = loadU1DualHsimd(x + sciz + 2);
    ix_S0 = ix_S0 + shX_S;
    ix_S2 = ix_S2 + shX_S;
    iy_S0 = iy_S0 + shY_S;
    iy_S2 = iy_S2 + shY_S;
    iz_S0 = iz_S0 + shZ_S;
    iz_S2 = iz_S2 + shZ_S;
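
    /* loadU1DualHsimd() loads two adjacent reals and duplicates each one over
     * half of a SIMD register: the *_S0 registers hold the coordinates of
     * i-atoms 0 and 1 and the *_S2 registers those of i-atoms 2 and 3, each
     * already shifted into the periodic image belonging to this list entry.
     */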
    facel_S = SimdReal(facel);

    iq_S0 = loadU1DualHsimd(q + sci);
    iq_S2 = loadU1DualHsimd(q + sci + 2);
    iq_S0 = facel_S * iq_S0;
    iq_S2 = facel_S * iq_S2;

    hsig_i_S0 = loadU1DualHsimd(ljc + sci2);
    hsig_i_S2 = loadU1DualHsimd(ljc + sci2 + 2);
    seps_i_S0 = loadU1DualHsimd(ljc + sci2 + STRIDE);
    seps_i_S2 = loadU1DualHsimd(ljc + sci2 + STRIDE + 2);

    SimdReal c6s_S0, c12s_S0;
    SimdReal c6s_S2, c12s_S2;
    c6s_S0 = loadU1DualHsimd(ljc + sci2);

    c6s_S2 = loadU1DualHsimd(ljc + sci2 + 2);

    c12s_S0 = loadU1DualHsimd(ljc + sci2 + STRIDE);

    c12s_S2 = loadU1DualHsimd(ljc + sci2 + STRIDE + 2);

# elif !defined LJ_COMB_LB && !defined FIX_LJ_C
    const int   numTypes = nbatParams.numTypes;
    const real* nbfp0    = nbfp_ptr + type[sci] * numTypes * c_simdBestPairAlignment;
    const real* nbfp1    = nbfp_ptr + type[sci + 1] * numTypes * c_simdBestPairAlignment;
    const real *nbfp2 = nullptr, *nbfp3 = nullptr;

    nbfp2 = nbfp_ptr + type[sci + 2] * numTypes * c_simdBestPairAlignment;
    nbfp3 = nbfp_ptr + type[sci + 3] * numTypes * c_simdBestPairAlignment;

    /* We need the geometrically combined C6 for the PME grid correction */
    SimdReal c6s_S0, c6s_S2;
    c6s_S0 = loadU1DualHsimd(ljc + sci2);

    c6s_S2 = loadU1DualHsimd(ljc + sci2 + 2);
    /* Zero the potential energy for this list */
    SimdReal Vvdwtot_S = setZero();
    SimdReal vctot_S   = setZero();

    /* Clear i atom forces */

    /* Currently all kernels use (at least half) LJ */

    /* Coulomb: all i-atoms, LJ: first half i-atoms */
    while (cjind < cjind1 && nbl->cj[cjind].excl != NBNXN_INTERACTION_MASK_ALL)
#include "kernel_inner.h"
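
    /* The cj entries of a ci entry are ordered such that cluster pairs that
     * need exclusion masking come first; the while loop above handles those
     * (in the complete kernel the inner loop is compiled with exclusion
     * checking enabled there), while the plain loop below reuses
     * kernel_inner.h for the remaining, fully interacting cluster pairs
     * without loading interaction masks.
     */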
    for (; (cjind < cjind1); cjind++)
#include "kernel_inner.h"

    /* Coulomb: all i-atoms, LJ: all i-atoms */
    while (cjind < cjind1 && nbl->cj[cjind].excl != NBNXN_INTERACTION_MASK_ALL)
#include "kernel_inner.h"

    for (; (cjind < cjind1); cjind++)
#include "kernel_inner.h"

    /* Coulomb: none, LJ: all i-atoms */
    while (cjind < cjind1 && nbl->cj[cjind].excl != NBNXN_INTERACTION_MASK_ALL)
#include "kernel_inner.h"

    for (; (cjind < cjind1); cjind++)
#include "kernel_inner.h"

    ninner += cjind1 - cjind0;
    /* Add accumulated i-forces to the force array */
    real fShiftX = reduceIncr4ReturnSumHsimd(f + scix, fix_S0, fix_S2);
    real fShiftY = reduceIncr4ReturnSumHsimd(f + sciy, fiy_S0, fiy_S2);
    real fShiftZ = reduceIncr4ReturnSumHsimd(f + sciz, fiz_S0, fiz_S2);
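
    /* reduceIncr4ReturnSumHsimd() sums the half-register accumulators into
     * the four per-i-atom force components, adds them to f[] at the given
     * offset and returns their total, which becomes the shift force below.
     */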
#ifdef CALC_SHIFTFORCES
    fshift[ish3 + 0] += fShiftX;
    fshift[ish3 + 1] += fShiftY;
    fshift[ish3 + 2] += fShiftZ;
#endif

    *Vc += reduce(vctot_S);

    *Vvdw += reduce(Vvdwtot_S);
}

/* Outer loop uses 6 flops/iteration */

printf("atom pairs %d\n", npair);