/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

#include "gmxpre.h"

#include "force.h"

#include "config.h"

#include <cassert>
#include <cmath>
#include <cstring>

#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/ewald/ewald.h"
#include "gromacs/ewald/long-range-correction.h"
#include "gromacs/ewald/pme.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/gmxlib/nonbonded/nonbonded.h"
#include "gromacs/listed-forces/listed-forces.h"
#include "gromacs/math/vec.h"
#include "gromacs/math/vecdump.h"
#include "gromacs/mdlib/force_flags.h"
#include "gromacs/mdlib/forcerec-threading.h"
#include "gromacs/mdlib/mdrun.h"
#include "gromacs/mdlib/ns.h"
#include "gromacs/mdlib/qmmm.h"
#include "gromacs/mdlib/rf_util.h"
#include "gromacs/mdlib/wall.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/enerdata.h"
#include "gromacs/mdtypes/forceoutput.h"
#include "gromacs/mdtypes/forcerec.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/pbcutil/mshift.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/smalloc.h"
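
/* Do neighbor searching for the group cut-off scheme, initializing
 * the neighbor lists on first use and optionally dumping them for
 * debugging.
 */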
void ns(FILE               *fp,
        t_forcerec         *fr,
        matrix              box,
        const gmx_groups_t *groups,
        gmx_localtop_t     *top,
        const t_mdatoms    *md,
        const t_commrec    *cr,
        t_nrnb             *nrnb,
        gmx_bool            bFillGrid)
{
    int nsearch;

    if (!fr->ns->nblist_initialized)
    {
        init_neighbor_list(fp, fr, md->homenr);
    }

    nsearch = search_neighbours(fp, fr, box, top, groups, cr, nrnb, md,
                                bFillGrid);
    if (debug)
    {
        fprintf(debug, "nsearch = %d\n", nsearch);
    }

    /* Check whether we have to do dynamic load balancing */
    /*if ((nsb->nstDlb > 0) && (mod(step,nsb->nstDlb) == 0))
       count_nb(cr,nsb,&(top->blocks[ebCGS]),nns,fr->nlr,
       &(top->idef),opts->ngener);
     */
    if (fr->ns->dump_nl > 0)
    {
        dump_nblist(fp, cr, fr, fr->ns->dump_nl);
    }
}
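
/* Zero the per-thread Ewald correction accumulators: the correction
 * energies, dV/dlambda contributions and virial matrices.
 */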
static void clearEwaldThreadOutput(ewald_corr_thread_t *ewc_t)
{
    ewc_t->Vcorr_q        = 0;
    ewc_t->Vcorr_lj       = 0;
    ewc_t->dvdl[efptCOUL] = 0;
    ewc_t->dvdl[efptVDW]  = 0;
    clear_mat(ewc_t->vir_q);
    clear_mat(ewc_t->vir_lj);
}
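
/* Reduce the Ewald correction output of threads 1..nthreads-1 into
 * the accumulators of thread 0.
 */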
static void reduceEwaldThreadOutput(int nthreads, ewald_corr_thread_t *ewc_t)
{
    ewald_corr_thread_t &dest = ewc_t[0];

    for (int t = 1; t < nthreads; t++)
    {
        dest.Vcorr_q        += ewc_t[t].Vcorr_q;
        dest.Vcorr_lj       += ewc_t[t].Vcorr_lj;
        dest.dvdl[efptCOUL] += ewc_t[t].dvdl[efptCOUL];
        dest.dvdl[efptVDW]  += ewc_t[t].dvdl[efptVDW];
        m_add(dest.vir_q, ewc_t[t].vir_q, dest.vir_q);
        m_add(dest.vir_lj, ewc_t[t].vir_lj, dest.vir_lj);
    }
}
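
/* Compute the lower-level force contributions for this step: QM/MM,
 * walls, group-scheme non-bonded kernels, listed interactions and
 * full-range electrostatics/LJ-PME with the associated corrections.
 */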
void do_force_lowlevel(t_forcerec           *fr,
                       const t_inputrec     *ir,
                       const t_idef         *idef,
                       const t_commrec      *cr,
                       const gmx_multisim_t *ms,
                       t_nrnb               *nrnb,
                       gmx_wallcycle_t       wcycle,
                       const t_mdatoms      *md,
                       rvec                  x[],
                       history_t            *hist,
                       rvec                 *forceForUseWithShiftForces,
                       gmx::ForceWithVirial *forceWithVirial,
                       gmx_enerdata_t       *enerd,
                       t_fcdata             *fcd,
                       matrix                box,
                       t_lambda             *fepvals,
                       real                 *lambda,
                       const t_graph        *graph,
                       const t_blocka       *excl,
                       rvec                  mu_tot[],
                       int                   flags,
                       float                *cycles_pme)
{
    int    i, j;
    int    donb_flags;
    int    pme_flags;
    t_pbc  pbc;
    real   dvdl_dum[efptNR], dvdl_nb[efptNR];

#if GMX_MPI
    double t0 = 0.0, t1, t2, t3; /* time measurement for coarse load balancing */
#endif

    set_pbc(&pbc, fr->ePBC, box);

    /* reset free energy components */
    for (i = 0; i < efptNR; i++)
    {
        dvdl_nb[i]  = 0;
        dvdl_dum[i] = 0;
    }

    /* do QMMM first if requested */
    if (fr->bQMMM)
    {
        enerd->term[F_EQM] = calculate_QMMM(cr, forceForUseWithShiftForces, fr);
    }

    /* Call the short range functions all in one go. */

#if GMX_MPI
    /*#define TAKETIME ((cr->npmenodes) && (fr->timesteps < 12))*/
#define TAKETIME FALSE
    if (TAKETIME)
    {
        MPI_Barrier(cr->mpi_comm_mygroup);
        t0 = MPI_Wtime();
    }
#endif

    if (ir->nwall)
    {
        /* foreign lambda component for walls */
        real dvdl_walls = do_walls(*ir, *fr, box, *md, x,
                                   forceWithVirial, lambda[efptVDW],
                                   enerd->grpp.ener[egLJSR], nrnb);
        enerd->dvdl_lin[efptVDW] += dvdl_walls;
    }

    /* We only do the non-bonded calculation with the group scheme here;
     * the Verlet calls are done from do_force_cutsVERLET(). */
    if (fr->cutoff_scheme == ecutsGROUP && (flags & GMX_FORCE_NONBONDED))
    {
        donb_flags = 0;
        /* Add short-range interactions */
        donb_flags |= GMX_NONBONDED_DO_SR;

        /* Currently all group scheme kernels always calculate (shift-)forces */
        if (flags & GMX_FORCE_FORCES)
        {
            donb_flags |= GMX_NONBONDED_DO_FORCE;
        }
        if (flags & GMX_FORCE_VIRIAL)
        {
            donb_flags |= GMX_NONBONDED_DO_SHIFTFORCE;
        }
        if (flags & GMX_FORCE_ENERGY)
        {
            donb_flags |= GMX_NONBONDED_DO_POTENTIAL;
        }

        wallcycle_sub_start(wcycle, ewcsNONBONDED);
        do_nonbonded(fr, x, forceForUseWithShiftForces, md, excl,
                     &enerd->grpp, nrnb,
                     lambda, dvdl_nb, -1, -1, donb_flags);

        /* If we do foreign lambda and we have soft-core interactions
         * we have to recalculate the (non-linear) energy contributions.
         */
        if (fepvals->n_lambda > 0 && (flags & GMX_FORCE_DHDL) && fepvals->sc_alpha != 0)
        {
            for (i = 0; i < enerd->n_lambda; i++)
            {
                real lam_i[efptNR];

                for (j = 0; j < efptNR; j++)
                {
                    lam_i[j] = (i == 0 ? lambda[j] : fepvals->all_lambda[j][i-1]);
                }
                reset_foreign_enerdata(enerd);
                do_nonbonded(fr, x, forceForUseWithShiftForces, md, excl,
                             &(enerd->foreign_grpp), nrnb,
                             lam_i, dvdl_dum, -1, -1,
                             (donb_flags & ~GMX_NONBONDED_DO_FORCE) | GMX_NONBONDED_DO_FOREIGNLAMBDA);
                sum_epot(&(enerd->foreign_grpp), enerd->foreign_term);
                enerd->enerpart_lambda[i] += enerd->foreign_term[F_EPOT];
            }
        }
        wallcycle_sub_stop(wcycle, ewcsNONBONDED);
    }

#if GMX_MPI
    if (TAKETIME)
    {
        t1          = MPI_Wtime();
        fr->t_fnbf += t1-t0;
    }
#endif

    if (fepvals->sc_alpha != 0)
    {
        enerd->dvdl_nonlin[efptVDW] += dvdl_nb[efptVDW];
    }
    else
    {
        enerd->dvdl_lin[efptVDW] += dvdl_nb[efptVDW];
    }

    if (fepvals->sc_alpha != 0)
    {
        /* even though the coulomb part is linear, we already added it,
           because we need to go through the vdw calculation anyway */
        enerd->dvdl_nonlin[efptCOUL] += dvdl_nb[efptCOUL];
    }
    else
    {
        enerd->dvdl_lin[efptCOUL] += dvdl_nb[efptCOUL];
    }

    if (debug)
    {
        pr_rvecs(debug, 0, "fshift after SR", fr->fshift, SHIFTS);
    }

    /* Shift the coordinates. Must be done before listed forces and PPPM,
     * but is also necessary for SHAKE and update, therefore it can NOT
     * go when no listed forces have to be evaluated.
     *
     * The shifting and PBC code is deliberately not timed, since with
     * the Verlet scheme it only takes non-zero time with triclinic
     * boxes, and even then the time is around a factor of 100 less
     * than the next smallest counter.
     */

    /* Here sometimes we would not need to shift with NBFonly,
     * but we do so anyhow for consistency of the returned coordinates.
     */
    if (graph)
    {
        shift_self(graph, box, x);
        if (TRICLINIC(box))
        {
            inc_nrnb(nrnb, eNR_SHIFTX, 2*graph->nnodes);
        }
        else
        {
            inc_nrnb(nrnb, eNR_SHIFTX, graph->nnodes);
        }
    }
    /* Check whether we need to do listed interactions or correct for exclusions */
    if (fr->bMolPBC &&
        ((flags & GMX_FORCE_LISTED)
         || EEL_RF(fr->ic->eeltype) || EEL_FULL(fr->ic->eeltype) || EVDW_PME(fr->ic->vdwtype)))
    {
        /* TODO There are no electrostatics methods that require this
           transformation, when using the Verlet scheme, so update the
           above conditional. */
        /* Since all atoms are in the rectangular or triclinic unit-cell,
         * only single box vector shifts (2 in x) are required.
         */
        set_pbc_dd(&pbc, fr->ePBC, DOMAINDECOMP(cr) ? cr->dd->nc : nullptr,
                   TRUE, box);
    }

    do_force_listed(wcycle, box, ir->fepvals, cr, ms,
                    idef, x, hist,
                    forceForUseWithShiftForces, forceWithVirial,
                    fr, &pbc, graph, enerd, nrnb, lambda, md, fcd,
                    DOMAINDECOMP(cr) ? cr->dd->globalAtomIndices.data() : nullptr,
                    flags);

    *cycles_pme = 0;

    /* Do long-range electrostatics and/or LJ-PME, including related short-range
     * corrections.
     */
    if (EEL_FULL(fr->ic->eeltype) || EVDW_PME(fr->ic->vdwtype))
    {
        int  status = 0;
        real Vlr_q  = 0, Vlr_lj = 0;

        /* We reduce all virial, dV/dlambda and energy contributions, except
         * for the reciprocal energies (Vlr_q, Vlr_lj) into the same struct.
         */
        ewald_corr_thread_t &ewaldOutput = fr->ewc_t[0];
        clearEwaldThreadOutput(&ewaldOutput);

        if (EEL_PME_EWALD(fr->ic->eeltype) || EVDW_PME(fr->ic->vdwtype))
        {
            /* With the Verlet scheme exclusion forces are calculated
             * in the non-bonded kernel.
             */
            /* The TPI molecule does not have exclusions with the rest
             * of the system and no intra-molecular PME grid
             * contributions will be calculated in
             * gmx_pme_calc_energy.
             */
            if ((ir->cutoff_scheme == ecutsGROUP && fr->n_tpi == 0) ||
                ir->ewald_geometry != eewg3D ||
                ir->epsilon_surface != 0)
            {
                int nthreads, t;

                wallcycle_sub_start(wcycle, ewcsEWALD_CORRECTION);

                if (fr->n_tpi > 0)
                {
                    gmx_fatal(FARGS, "TPI with PME currently only works in a 3D geometry with tin-foil boundary conditions");
                }

                nthreads = fr->nthread_ewc;
#pragma omp parallel for num_threads(nthreads) schedule(static)
                for (t = 0; t < nthreads; t++)
                {
                    try
                    {
                        ewald_corr_thread_t &ewc_t = fr->ewc_t[t];
                        if (t > 0)
                        {
                            clearEwaldThreadOutput(&ewc_t);
                        }

                        /* Threading is only supported with the Verlet cut-off
                         * scheme and then only single particle forces (no
                         * exclusion forces) are calculated, so we can store
                         * the forces in the normal, single forceWithVirial->force_ array.
                         */
                        ewald_LRcorrection(md->homenr, cr, nthreads, t, fr, ir,
                                           md->chargeA, md->chargeB,
                                           md->sqrt_c6A, md->sqrt_c6B,
                                           md->sigmaA, md->sigmaB,
                                           md->sigma3A, md->sigma3B,
                                           (md->nChargePerturbed != 0) || (md->nTypePerturbed != 0),
                                           ir->cutoff_scheme != ecutsVERLET,
                                           excl, x, box, mu_tot,
                                           ir->ewald_geometry,
                                           ir->epsilon_surface,
                                           as_rvec_array(forceWithVirial->force_.data()),
                                           ewc_t.vir_q, ewc_t.vir_lj,
                                           &ewc_t.Vcorr_q, &ewc_t.Vcorr_lj,
                                           lambda[efptCOUL], lambda[efptVDW],
                                           &ewc_t.dvdl[efptCOUL], &ewc_t.dvdl[efptVDW]);
                    }
                    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
                }
                if (nthreads > 1)
                {
                    reduceEwaldThreadOutput(nthreads, fr->ewc_t);
                }
                wallcycle_sub_stop(wcycle, ewcsEWALD_CORRECTION);
            }

            if (EEL_PME_EWALD(fr->ic->eeltype) && fr->n_tpi == 0)
            {
                /* This is not in a subcounter because it takes a
                   negligible and constant-sized amount of time */
                ewaldOutput.Vcorr_q +=
                    ewald_charge_correction(cr, fr, lambda[efptCOUL], box,
                                            &ewaldOutput.dvdl[efptCOUL],
                                            ewaldOutput.vir_q);
            }

            if ((EEL_PME(fr->ic->eeltype) || EVDW_PME(fr->ic->vdwtype)) &&
                thisRankHasDuty(cr, DUTY_PME) && (pme_run_mode(fr->pmedata) == PmeRunMode::CPU))
            {
                /* Do reciprocal PME for Coulomb and/or LJ. */
                assert(fr->n_tpi >= 0);
                if (fr->n_tpi == 0 || (flags & GMX_FORCE_STATECHANGED))
                {
                    pme_flags = GMX_PME_SPREAD | GMX_PME_SOLVE;

                    if (flags & GMX_FORCE_FORCES)
                    {
                        pme_flags |= GMX_PME_CALC_F;
                    }
                    if (flags & GMX_FORCE_VIRIAL)
                    {
                        pme_flags |= GMX_PME_CALC_ENER_VIR;
                    }
                    if (fr->n_tpi > 0)
                    {
                        /* We don't calculate f, but we do want the potential */
                        pme_flags |= GMX_PME_CALC_POT;
                    }

                    /* With domain decomposition we close the CPU side load
                     * balancing region here, because PME does global
                     * communication that acts as a global barrier.
                     */
                    if (DOMAINDECOMP(cr))
                    {
                        ddCloseBalanceRegionCpu(cr->dd);
                    }

                    wallcycle_start(wcycle, ewcPMEMESH);
                    status = gmx_pme_do(fr->pmedata,
                                        0, md->homenr - fr->n_tpi,
                                        x,
                                        as_rvec_array(forceWithVirial->force_.data()),
                                        md->chargeA, md->chargeB,
                                        md->sqrt_c6A, md->sqrt_c6B,
                                        md->sigmaA, md->sigmaB,
                                        box, cr,
                                        DOMAINDECOMP(cr) ? dd_pme_maxshift_x(cr->dd) : 0,
                                        DOMAINDECOMP(cr) ? dd_pme_maxshift_y(cr->dd) : 0,
                                        nrnb, wcycle,
                                        ewaldOutput.vir_q, ewaldOutput.vir_lj,
                                        &Vlr_q, &Vlr_lj,
                                        lambda[efptCOUL], lambda[efptVDW],
                                        &ewaldOutput.dvdl[efptCOUL],
                                        &ewaldOutput.dvdl[efptVDW],
                                        pme_flags);
                    *cycles_pme = wallcycle_stop(wcycle, ewcPMEMESH);
                    if (status != 0)
                    {
                        gmx_fatal(FARGS, "Error %d in reciprocal PME routine", status);
                    }

                    /* We should try to do as little computation after
                     * this as possible, because parallel PME synchronizes
                     * the nodes, so we want all load imbalance of the
                     * rest of the force calculation to be before the PME
                     * call. DD load balancing is done on the whole time
                     * of the force call (without PME).
                     */
                }
                if (fr->n_tpi > 0)
                {
                    if (EVDW_PME(ir->vdwtype))
                    {
                        gmx_fatal(FARGS, "Test particle insertion not implemented with LJ-PME");
                    }
                    /* Determine the PME grid energy of the test molecule
                     * with the PME grid potential of the other charges.
                     */
                    gmx_pme_calc_energy(fr->pmedata, fr->n_tpi,
                                        x + md->homenr - fr->n_tpi,
                                        md->chargeA + md->homenr - fr->n_tpi,
                                        &Vlr_q);
                }
            }
        }

        if (!EEL_PME(fr->ic->eeltype) && EEL_PME_EWALD(fr->ic->eeltype))
        {
            Vlr_q = do_ewald(ir, x, as_rvec_array(forceWithVirial->force_.data()),
                             md->chargeA, md->chargeB,
                             box, cr, md->homenr,
                             ewaldOutput.vir_q, fr->ic->ewaldcoeff_q,
                             lambda[efptCOUL], &ewaldOutput.dvdl[efptCOUL],
                             fr->ewald_table);
        }

        /* Note that with separate PME nodes we get the real energies later */
        forceWithVirial->addVirialContribution(ewaldOutput.vir_q);
        forceWithVirial->addVirialContribution(ewaldOutput.vir_lj);
        enerd->dvdl_lin[efptCOUL] += ewaldOutput.dvdl[efptCOUL];
        enerd->dvdl_lin[efptVDW]  += ewaldOutput.dvdl[efptVDW];
        enerd->term[F_COUL_RECIP]  = Vlr_q + ewaldOutput.Vcorr_q;
        enerd->term[F_LJ_RECIP]    = Vlr_lj + ewaldOutput.Vcorr_lj;

        if (debug)
        {
            fprintf(debug, "Vlr_q = %g, Vcorr_q = %g, Vlr_corr_q = %g\n",
                    Vlr_q, ewaldOutput.Vcorr_q, enerd->term[F_COUL_RECIP]);
            pr_rvecs(debug, 0, "vir_el_recip after corr", ewaldOutput.vir_q, DIM);
            pr_rvecs(debug, 0, "fshift after LR Corrections", fr->fshift, SHIFTS);
            fprintf(debug, "Vlr_lj: %g, Vcorr_lj = %g, Vlr_corr_lj = %g\n",
                    Vlr_lj, ewaldOutput.Vcorr_lj, enerd->term[F_LJ_RECIP]);
            pr_rvecs(debug, 0, "vir_lj_recip after corr", ewaldOutput.vir_lj, DIM);
        }
    }
    else
    {
        /* Is there a reaction-field exclusion correction needed?
         * With the Verlet scheme, exclusion forces are calculated
         * in the non-bonded kernel.
         */
        if (ir->cutoff_scheme != ecutsVERLET && EEL_RF(fr->ic->eeltype))
        {
            real dvdl_rf_excl      = 0;
            enerd->term[F_RF_EXCL] =
                RF_excl_correction(fr, graph, md, excl, DOMAINDECOMP(cr),
                                   x, forceForUseWithShiftForces,
                                   fr->fshift, &pbc, lambda[efptCOUL], &dvdl_rf_excl);

            enerd->dvdl_lin[efptCOUL] += dvdl_rf_excl;
        }
    }

    if (debug)
    {
        print_nrnb(debug, nrnb);
    }

#if GMX_MPI
    if (TAKETIME)
    {
        t2 = MPI_Wtime();
        MPI_Barrier(cr->mpi_comm_mygroup);
        t3          = MPI_Wtime();
        fr->t_wait += t3-t2;
        if (fr->timesteps == 11)
        {
            char buf[22];
            fprintf(stderr, "* PP load balancing info: rank %d, step %s, rel wait time=%3.0f%% , load string value: %7.2f\n",
                    cr->nodeid, gmx_step_str(fr->timesteps, buf),
                    100*fr->t_wait/(fr->t_wait+fr->t_fnbf),
                    (fr->t_fnbf+fr->t_wait)/fr->t_fnbf);
        }
        fr->timesteps++;
    }
#endif

    if (debug)
    {
        pr_rvecs(debug, 0, "fshift after bondeds", fr->fshift, SHIFTS);
    }
}
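
/* Allocate and zero the energy data: the ngener x ngener group-pair
 * energy matrices and, when foreign lambdas are requested, the
 * foreign-lambda energy array (entry 0 holds the current lambda).
 */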
void init_enerdata(int ngener, int n_lambda, gmx_enerdata_t *enerd)
{
    int i, n2;

    for (i = 0; i < F_NRE; i++)
    {
        enerd->term[i]         = 0;
        enerd->foreign_term[i] = 0;
    }

    for (i = 0; i < efptNR; i++)
    {
        enerd->dvdl_lin[i]    = 0;
        enerd->dvdl_nonlin[i] = 0;
    }

    n2 = ngener*ngener;
    if (debug)
    {
        fprintf(debug, "Creating %d sized group matrix for energies\n", n2);
    }
    enerd->grpp.nener         = n2;
    enerd->foreign_grpp.nener = n2;
    for (i = 0; (i < egNR); i++)
    {
        snew(enerd->grpp.ener[i], n2);
        snew(enerd->foreign_grpp.ener[i], n2);
    }

    if (n_lambda)
    {
        enerd->n_lambda = 1 + n_lambda;
        snew(enerd->enerpart_lambda, enerd->n_lambda);
    }
    else
    {
        enerd->n_lambda = 0;
    }
}
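
/* Free the group-pair energy matrices and the foreign-lambda array
 * allocated by init_enerdata().
 */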
void destroy_enerdata(gmx_enerdata_t *enerd)
{
    int i;

    for (i = 0; (i < egNR); i++)
    {
        sfree(enerd->grpp.ener[i]);
    }

    for (i = 0; (i < egNR); i++)
    {
        sfree(enerd->foreign_grpp.ener[i]);
    }

    if (enerd->n_lambda)
    {
        sfree(enerd->enerpart_lambda);
    }
}
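
/* Return the sum of the n elements of v */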
static real sum_v(int n, const real v[])
{
    real t;
    int  i;

    t = 0.0;
    for (i = 0; (i < n); i++)
    {
        t = t + v[i];
    }

    return t;
}
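
/* Sum the per-group-pair energies into the corresponding potential
 * energy terms and accumulate the total potential in epot[F_EPOT].
 */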
void sum_epot(gmx_grppairener_t *grpp, real *epot)
{
    int i;

    /* Accumulate energies */
    epot[F_COUL_SR] = sum_v(grpp->nener, grpp->ener[egCOULSR]);
    epot[F_LJ]      = sum_v(grpp->nener, grpp->ener[egLJSR]);
    epot[F_LJ14]    = sum_v(grpp->nener, grpp->ener[egLJ14]);
    epot[F_COUL14]  = sum_v(grpp->nener, grpp->ener[egCOUL14]);

    /* lattice part of LR doesn't belong to any group
     * and has been added earlier
     */
    epot[F_BHAM] = sum_v(grpp->nener, grpp->ener[egBHAMSR]);

    epot[F_EPOT] = 0;
    for (i = 0; (i < F_EPOT); i++)
    {
        if (i != F_DISRESVIOL && i != F_ORIRESDEV)
        {
            epot[F_EPOT] += epot[i];
        }
    }
}
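
/* Collect the accumulated dV/dlambda contributions into the (possibly
 * separate) dH/dlambda energy terms, and add the linear-in-lambda part
 * of the free energy differences to the foreign-lambda energies.
 */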
void sum_dhdl(gmx_enerdata_t *enerd, gmx::ArrayRef<const real> lambda, t_lambda *fepvals)
{
    int    index;
    double dlam;

    enerd->dvdl_lin[efptVDW] += enerd->term[F_DVDL_VDW]; /* include dispersion correction */
    enerd->term[F_DVDL]       = 0.0;
    for (int i = 0; i < efptNR; i++)
    {
        if (fepvals->separate_dvdl[i])
        {
            /* could this be done more readably/compactly? */
            switch (i)
            {
                case (efptMASS):
                    index = F_DKDL;
                    break;
                case (efptCOUL):
                    index = F_DVDL_COUL;
                    break;
                case (efptVDW):
                    index = F_DVDL_VDW;
                    break;
                case (efptBONDED):
                    index = F_DVDL_BONDED;
                    break;
                case (efptRESTRAINT):
                    index = F_DVDL_RESTRAINT;
                    break;
                default:
                    index = F_DVDL;
                    break;
            }
            enerd->term[index] = enerd->dvdl_lin[i] + enerd->dvdl_nonlin[i];
            if (debug)
            {
                fprintf(debug, "dvdl-%s[%2d]: %f: non-linear %f + linear %f\n",
                        efpt_names[i], i, enerd->term[index], enerd->dvdl_nonlin[i], enerd->dvdl_lin[i]);
            }
        }
        else
        {
            enerd->term[F_DVDL] += enerd->dvdl_lin[i] + enerd->dvdl_nonlin[i];
            if (debug)
            {
                fprintf(debug, "dvdl-%s[%2d]: %f: non-linear %f + linear %f\n",
                        efpt_names[0], i, enerd->term[F_DVDL], enerd->dvdl_nonlin[i], enerd->dvdl_lin[i]);
            }
        }
    }

    /* Notes on the foreign lambda free energy difference evaluation:
     * Adding the potential and ekin terms that depend linearly on lambda
     * as delta lam * dvdl to the energy differences is exact.
     * For the constraints this is not exact, but we have no other option
     * without literally changing the lengths and reevaluating the energies at each step.
     * (try to remedy this post 4.6 - MRS)
     */
    if (fepvals->separate_dvdl[efptBONDED])
    {
        enerd->term[F_DVDL_BONDED] += enerd->term[F_DVDL_CONSTR];
    }
    else
    {
        enerd->term[F_DVDL] += enerd->term[F_DVDL_CONSTR];
    }
    enerd->term[F_DVDL_CONSTR] = 0;

    for (int i = 0; i < fepvals->n_lambda; i++)
    {
        /* note we are iterating over fepvals here!
           For the current lam, dlam = 0 automatically,
           so we don't need to add anything to the
           enerd->enerpart_lambda[0] */

        /* we don't need to worry about dvdl_lin contributions to dE at
           current lambda, because the contributions to the current
           lambda are automatically zeroed */

        for (gmx::index j = 0; j < lambda.size(); j++)
        {
            /* Note that this loop is over all dhdl components, not just the separated ones */
            dlam = (fepvals->all_lambda[j][i] - lambda[j]);
            enerd->enerpart_lambda[i+1] += dlam*enerd->dvdl_lin[j];
            if (debug)
            {
                fprintf(debug, "enerdiff lam %g: (%15s), non-linear %f linear %f*%f\n",
                        fepvals->all_lambda[j][i], efpt_names[j],
                        (enerd->enerpart_lambda[i+1] - enerd->enerpart_lambda[0]),
                        dlam, enerd->dvdl_lin[j]);
            }
        }
    }
}
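
/* Zero all foreign-lambda energy components; called both from
 * reset_enerdata() and before each foreign-lambda evaluation in
 * do_force_lowlevel().
 */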
void reset_foreign_enerdata(gmx_enerdata_t *enerd)
{
    int i, j;

    /* First reset all foreign energy components. Foreign energies
       are always evaluated on neighbor search steps. */
    for (i = 0; (i < egNR); i++)
    {
        for (j = 0; (j < enerd->grpp.nener); j++)
        {
            enerd->foreign_grpp.ener[i][j] = 0.0;
        }
    }

    /* potential energy components */
    for (i = 0; (i <= F_EPOT); i++)
    {
        enerd->foreign_term[i] = 0.0;
    }
}
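
/* Zero all accumulated energy components, dV/dlambda terms and
 * foreign-lambda energies before the next force evaluation.
 */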
void reset_enerdata(gmx_enerdata_t *enerd)
{
    int i, j;

    /* First reset all energy components. */
    for (i = 0; (i < egNR); i++)
    {
        for (j = 0; (j < enerd->grpp.nener); j++)
        {
            enerd->grpp.ener[i][j] = 0.0;
        }
    }
    for (i = 0; i < efptNR; i++)
    {
        enerd->dvdl_lin[i]    = 0.0;
        enerd->dvdl_nonlin[i] = 0.0;
    }

    /* Normal potential energy components */
    for (i = 0; (i <= F_EPOT); i++)
    {
        enerd->term[i] = 0.0;
    }
    enerd->term[F_DVDL]           = 0.0;
    enerd->term[F_DVDL_COUL]      = 0.0;
    enerd->term[F_DVDL_VDW]       = 0.0;
    enerd->term[F_DVDL_BONDED]    = 0.0;
    enerd->term[F_DVDL_RESTRAINT] = 0.0;
    enerd->term[F_DKDL]           = 0.0;
    if (enerd->n_lambda > 0)
    {
        for (i = 0; i < enerd->n_lambda; i++)
        {
            enerd->enerpart_lambda[i] = 0.0;
        }
    }

    /* reset foreign energy data - separate function since we also call it elsewhere */
    reset_foreign_enerdata(enerd);
}