src/gromacs/mdrun/minimize.cpp
1 /*
2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5 * Copyright (c) 2001-2004, The GROMACS development team.
6 * Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
7 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
8 * and including many others, as listed in the AUTHORS file in the
9 * top-level source directory and at http://www.gromacs.org.
11 * GROMACS is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public License
13 * as published by the Free Software Foundation; either version 2.1
14 * of the License, or (at your option) any later version.
16 * GROMACS is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with GROMACS; if not, see
23 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
24 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
26 * If you want to redistribute modifications to GROMACS, please
27 * consider that scientific software is very special. Version
28 * control is crucial - bugs must be traceable. We will be happy to
29 * consider code for inclusion in the official distribution, but
30 * derived work must not be called official GROMACS. Details are found
31 * in the README & COPYING files - if they are missing, get the
32 * official version at http://www.gromacs.org.
34 * To help us fund GROMACS development, we humbly ask that you cite
35 * the research papers on the package. Check out http://www.gromacs.org.
37 /*! \internal \file
39 * \brief This file defines integrators for energy minimization
41 * \author Berk Hess <hess@kth.se>
42 * \author Erik Lindahl <erik@kth.se>
43 * \ingroup module_mdrun
45 #include "gmxpre.h"
47 #include "config.h"
49 #include <cmath>
50 #include <cstring>
51 #include <ctime>
53 #include <algorithm>
54 #include <vector>
56 #include "gromacs/commandline/filenm.h"
57 #include "gromacs/domdec/collect.h"
58 #include "gromacs/domdec/domdec.h"
59 #include "gromacs/domdec/domdec_struct.h"
60 #include "gromacs/domdec/partition.h"
61 #include "gromacs/ewald/pme.h"
62 #include "gromacs/fileio/confio.h"
63 #include "gromacs/fileio/mtxio.h"
64 #include "gromacs/gmxlib/network.h"
65 #include "gromacs/gmxlib/nrnb.h"
66 #include "gromacs/imd/imd.h"
67 #include "gromacs/linearalgebra/sparsematrix.h"
68 #include "gromacs/listed-forces/manage-threading.h"
69 #include "gromacs/math/functions.h"
70 #include "gromacs/math/vec.h"
71 #include "gromacs/mdlib/constr.h"
72 #include "gromacs/mdlib/force.h"
73 #include "gromacs/mdlib/forcerec.h"
74 #include "gromacs/mdlib/gmx_omp_nthreads.h"
75 #include "gromacs/mdlib/md_support.h"
76 #include "gromacs/mdlib/mdatoms.h"
77 #include "gromacs/mdlib/mdebin.h"
78 #include "gromacs/mdlib/mdrun.h"
79 #include "gromacs/mdlib/mdsetup.h"
80 #include "gromacs/mdlib/ns.h"
81 #include "gromacs/mdlib/shellfc.h"
82 #include "gromacs/mdlib/sim_util.h"
83 #include "gromacs/mdlib/tgroup.h"
84 #include "gromacs/mdlib/trajectory_writing.h"
85 #include "gromacs/mdlib/update.h"
86 #include "gromacs/mdlib/vsite.h"
87 #include "gromacs/mdtypes/commrec.h"
88 #include "gromacs/mdtypes/inputrec.h"
89 #include "gromacs/mdtypes/md_enums.h"
90 #include "gromacs/mdtypes/state.h"
91 #include "gromacs/pbcutil/mshift.h"
92 #include "gromacs/pbcutil/pbc.h"
93 #include "gromacs/timing/wallcycle.h"
94 #include "gromacs/timing/walltime_accounting.h"
95 #include "gromacs/topology/mtop_util.h"
96 #include "gromacs/topology/topology.h"
97 #include "gromacs/utility/cstringutil.h"
98 #include "gromacs/utility/exceptions.h"
99 #include "gromacs/utility/fatalerror.h"
100 #include "gromacs/utility/logger.h"
101 #include "gromacs/utility/smalloc.h"
103 #include "integrator.h"
105 //! Utility structure for manipulating states during EM
106 typedef struct {
107 //! Copy of the global state
108 t_state s;
109 //! Force array
110 PaddedVector<gmx::RVec> f;
111 //! Potential energy
112 real epot;
113 //! Norm of the force
114 real fnorm;
115 //! Maximum force
116 real fmax;
117 //! Atom index of the maximum force
118 int a_fmax;
119 } em_state_t;
121 //! Print the EM starting conditions
122 static void print_em_start(FILE *fplog,
123 const t_commrec *cr,
124 gmx_walltime_accounting_t walltime_accounting,
125 gmx_wallcycle_t wcycle,
126 const char *name)
128 walltime_accounting_start_time(walltime_accounting);
129 wallcycle_start(wcycle, ewcRUN);
130 print_start(fplog, cr, walltime_accounting, name);
133 //! Stop counting time for EM
134 static void em_time_end(gmx_walltime_accounting_t walltime_accounting,
135 gmx_wallcycle_t wcycle)
137 wallcycle_stop(wcycle, ewcRUN);
139 walltime_accounting_end_time(walltime_accounting);
142 //! Print a header to the log file and the console
143 static void sp_header(FILE *out, const char *minimizer, real ftol, int nsteps)
145 fprintf(out, "\n");
146 fprintf(out, "%s:\n", minimizer);
147 fprintf(out, " Tolerance (Fmax) = %12.5e\n", ftol);
148 fprintf(out, " Number of steps = %12d\n", nsteps);
151 //! Print warning message
152 static void warn_step(FILE *fp,
153 real ftol,
154 real fmax,
155 gmx_bool bLastStep,
156 gmx_bool bConstrain)
158 constexpr bool realIsDouble = GMX_DOUBLE;
159 char buffer[2048];
161 if (!std::isfinite(fmax))
163 sprintf(buffer,
164 "\nEnergy minimization has stopped because the force "
165 "on at least one atom is not finite. This usually means "
166 "atoms are overlapping. Modify the input coordinates to "
167 "remove atom overlap or use soft-core potentials with "
168 "the free energy code to avoid infinite forces.\n%s",
169 !realIsDouble ?
170 "You could also be lucky that switching to double precision "
171 "is sufficient to obtain finite forces.\n" :
172 "");
174 else if (bLastStep)
176 sprintf(buffer,
177 "\nEnergy minimization reached the maximum number "
178 "of steps before the forces reached the requested "
179 "precision Fmax < %g.\n", ftol);
181 else
183 sprintf(buffer,
184 "\nEnergy minimization has stopped, but the forces have "
185 "not converged to the requested precision Fmax < %g (which "
186 "may not be possible for your system). It stopped "
187 "because the algorithm tried to make a new step whose size "
188 "was too small, or there was no change in the energy since "
189 "last step. Either way, we regard the minimization as "
190 "converged to within the available machine precision, "
191 "given your starting configuration and EM parameters.\n%s%s",
192 ftol,
193 !realIsDouble ?
194 "\nDouble precision normally gives you higher accuracy, but "
195 "this is often not needed for preparing to run molecular "
196 "dynamics.\n" :
198 bConstrain ?
199 "You might need to increase your constraint accuracy, or turn\n"
200 "off constraints altogether (set constraints = none in mdp file)\n" :
201 "");
204 fputs(wrap_lines(buffer, 78, 0, FALSE), stderr);
205 fputs(wrap_lines(buffer, 78, 0, FALSE), fp);
208 //! Print message about convergence of the EM
209 static void print_converged(FILE *fp, const char *alg, real ftol,
210 int64_t count, gmx_bool bDone, int64_t nsteps,
211 const em_state_t *ems, double sqrtNumAtoms)
213 char buf[STEPSTRSIZE];
215 if (bDone)
217 fprintf(fp, "\n%s converged to Fmax < %g in %s steps\n",
218 alg, ftol, gmx_step_str(count, buf));
220 else if (count < nsteps)
222 fprintf(fp, "\n%s converged to machine precision in %s steps,\n"
223 "but did not reach the requested Fmax < %g.\n",
224 alg, gmx_step_str(count, buf), ftol);
226 else
228 fprintf(fp, "\n%s did not converge to Fmax < %g in %s steps.\n",
229 alg, ftol, gmx_step_str(count, buf));
232 #if GMX_DOUBLE
233 fprintf(fp, "Potential Energy = %21.14e\n", ems->epot);
234 fprintf(fp, "Maximum force = %21.14e on atom %d\n", ems->fmax, ems->a_fmax + 1);
235 fprintf(fp, "Norm of force = %21.14e\n", ems->fnorm/sqrtNumAtoms);
236 #else
237 fprintf(fp, "Potential Energy = %14.7e\n", ems->epot);
238 fprintf(fp, "Maximum force = %14.7e on atom %d\n", ems->fmax, ems->a_fmax + 1);
239 fprintf(fp, "Norm of force = %14.7e\n", ems->fnorm/sqrtNumAtoms);
240 #endif
243 //! Compute the norm and max of the force array in parallel
244 static void get_f_norm_max(const t_commrec *cr,
245 t_grpopts *opts, t_mdatoms *mdatoms, const rvec *f,
246 real *fnorm, real *fmax, int *a_fmax)
248 double fnorm2, *sum;
249 real fmax2, fam;
250 int la_max, a_max, start, end, i, m, gf;
252 /* This routine computes the norm of the force and finds the largest force.
253 * On parallel machines the global norm and maximum are determined.
255 fnorm2 = 0;
256 fmax2 = 0;
257 la_max = -1;
258 start = 0;
259 end = mdatoms->homenr;
260 if (mdatoms->cFREEZE)
262 for (i = start; i < end; i++)
264 gf = mdatoms->cFREEZE[i];
265 fam = 0;
266 for (m = 0; m < DIM; m++)
268 if (!opts->nFreeze[gf][m])
270 fam += gmx::square(f[i][m]);
273 fnorm2 += fam;
274 if (fam > fmax2)
276 fmax2 = fam;
277 la_max = i;
281 else
283 for (i = start; i < end; i++)
285 fam = norm2(f[i]);
286 fnorm2 += fam;
287 if (fam > fmax2)
289 fmax2 = fam;
290 la_max = i;
295 if (la_max >= 0 && DOMAINDECOMP(cr))
297 a_max = cr->dd->globalAtomIndices[la_max];
299 else
301 a_max = la_max;
303 if (PAR(cr))
305 snew(sum, 2*cr->nnodes+1);
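/* Layout of sum: entries 2*rank and 2*rank+1 hold each rank's local fmax^2 and the
 * corresponding global atom index; the last entry accumulates fnorm^2, so a single
 * gmx_sumd() call reduces everything at once. */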
306 sum[2*cr->nodeid] = fmax2;
307 sum[2*cr->nodeid+1] = a_max;
308 sum[2*cr->nnodes] = fnorm2;
309 gmx_sumd(2*cr->nnodes+1, sum, cr);
310 fnorm2 = sum[2*cr->nnodes];
311 /* Determine the global maximum */
312 for (i = 0; i < cr->nnodes; i++)
314 if (sum[2*i] > fmax2)
316 fmax2 = sum[2*i];
317 a_max = gmx::roundToInt(sum[2*i+1]);
320 sfree(sum);
323 if (fnorm)
325 *fnorm = sqrt(fnorm2);
327 if (fmax)
329 *fmax = sqrt(fmax2);
331 if (a_fmax)
333 *a_fmax = a_max;
337 //! Compute the norm and maximum of the force for an EM state
338 static void get_state_f_norm_max(const t_commrec *cr,
339 t_grpopts *opts, t_mdatoms *mdatoms,
340 em_state_t *ems)
342 get_f_norm_max(cr, opts, mdatoms, ems->f.rvec_array(),
343 &ems->fnorm, &ems->fmax, &ems->a_fmax);
346 //! Initialize the energy minimization
347 static void init_em(FILE *fplog,
348 const gmx::MDLogger &mdlog,
349 const char *title,
350 const t_commrec *cr,
351 const gmx_multisim_t *ms,
352 gmx::IMDOutputProvider *outputProvider,
353 t_inputrec *ir,
354 const MdrunOptions &mdrunOptions,
355 t_state *state_global, gmx_mtop_t *top_global,
356 em_state_t *ems, gmx_localtop_t **top,
357 t_nrnb *nrnb, rvec mu_tot,
358 t_forcerec *fr, gmx_enerdata_t **enerd,
359 t_graph **graph, gmx::MDAtoms *mdAtoms, gmx_global_stat_t *gstat,
360 gmx_vsite_t *vsite, gmx::Constraints *constr, gmx_shellfc_t **shellfc,
361 int nfile, const t_filenm fnm[],
362 gmx_mdoutf_t *outf, t_mdebin **mdebin,
363 gmx_wallcycle_t wcycle)
365 real dvdl_constr;
367 if (fplog)
369 fprintf(fplog, "Initiating %s\n", title);
372 if (MASTER(cr))
374 state_global->ngtc = 0;
376 /* Initialize lambda variables */
377 initialize_lambdas(fplog, ir, &(state_global->fep_state), state_global->lambda, nullptr);
380 init_nrnb(nrnb);
382 /* Interactive molecular dynamics */
383 init_IMD(ir, cr, ms, top_global, fplog, 1,
384 MASTER(cr) ? state_global->x.rvec_array() : nullptr,
385 nfile, fnm, nullptr, mdrunOptions);
387 if (ir->eI == eiNM)
389 GMX_ASSERT(shellfc != nullptr, "With NM we always support shells");
391 *shellfc = init_shell_flexcon(stdout,
392 top_global,
393 constr ? constr->numFlexibleConstraints() : 0,
394 ir->nstcalcenergy,
395 DOMAINDECOMP(cr));
397 else
399 GMX_ASSERT(EI_ENERGY_MINIMIZATION(ir->eI), "This else currently only handles energy minimizers, consider if your algorithm needs shell/flexible-constraint support");
401 /* With energy minimization, shells and flexible constraints are
402 * automatically minimized when treated like normal DOFs.
404 if (shellfc != nullptr)
406 *shellfc = nullptr;
410 auto mdatoms = mdAtoms->mdatoms();
411 if (DOMAINDECOMP(cr))
413 *top = dd_init_local_top(top_global);
415 dd_init_local_state(cr->dd, state_global, &ems->s);
417 /* Distribute the charge groups over the nodes from the master node */
418 dd_partition_system(fplog, mdlog, ir->init_step, cr, TRUE, 1,
419 state_global, top_global, ir,
420 &ems->s, &ems->f, mdAtoms, *top,
421 fr, vsite, constr,
422 nrnb, nullptr, FALSE);
423 dd_store_state(cr->dd, &ems->s);
425 *graph = nullptr;
427 else
429 state_change_natoms(state_global, state_global->natoms);
430 /* Just copy the state */
431 ems->s = *state_global;
432 state_change_natoms(&ems->s, ems->s.natoms);
433 ems->f.resizeWithPadding(ems->s.natoms);
435 snew(*top, 1);
436 mdAlgorithmsSetupAtomData(cr, ir, top_global, *top, fr,
437 graph, mdAtoms,
438 constr, vsite, shellfc ? *shellfc : nullptr);
440 if (vsite)
442 set_vsite_top(vsite, *top, mdatoms);
446 update_mdatoms(mdAtoms->mdatoms(), ems->s.lambda[efptMASS]);
448 if (constr)
450 // TODO how should this cross-module support dependency be managed?
451 if (ir->eConstrAlg == econtSHAKE &&
452 gmx_mtop_ftype_count(top_global, F_CONSTR) > 0)
454 gmx_fatal(FARGS, "Can not do energy minimization with %s, use %s\n",
455 econstr_names[econtSHAKE], econstr_names[econtLINCS]);
458 if (!ir->bContinuation)
460 /* Constrain the starting coordinates */
461 dvdl_constr = 0;
462 constr->apply(TRUE, TRUE,
463 -1, 0, 1.0,
464 ems->s.x.rvec_array(),
465 ems->s.x.rvec_array(),
466 nullptr,
467 ems->s.box,
468 ems->s.lambda[efptFEP], &dvdl_constr,
469 nullptr, nullptr, gmx::ConstraintVariable::Positions);
473 if (PAR(cr))
475 *gstat = global_stat_init(ir);
477 else
479 *gstat = nullptr;
482 *outf = init_mdoutf(fplog, nfile, fnm, mdrunOptions, cr, outputProvider, ir, top_global, nullptr, wcycle);
484 snew(*enerd, 1);
485 init_enerdata(top_global->groups.grps[egcENER].nr, ir->fepvals->n_lambda,
486 *enerd);
488 if (mdebin != nullptr)
490 /* Init bin for energy stuff */
491 *mdebin = init_mdebin(mdoutf_get_fp_ene(*outf), top_global, ir, nullptr);
494 clear_rvec(mu_tot);
495 calc_shifts(ems->s.box, fr->shift_vec);
498 //! Finalize the minimization
499 static void finish_em(const t_commrec *cr, gmx_mdoutf_t outf,
500 gmx_walltime_accounting_t walltime_accounting,
501 gmx_wallcycle_t wcycle)
503 if (!thisRankHasDuty(cr, DUTY_PME))
505 /* Tell the PME only node to finish */
506 gmx_pme_send_finish(cr);
509 done_mdoutf(outf);
511 em_time_end(walltime_accounting, wcycle);
514 //! Swap two different EM states during minimization
515 static void swap_em_state(em_state_t **ems1, em_state_t **ems2)
517 em_state_t *tmp;
519 tmp = *ems1;
520 *ems1 = *ems2;
521 *ems2 = tmp;
524 //! Save the EM trajectory
525 static void write_em_traj(FILE *fplog, const t_commrec *cr,
526 gmx_mdoutf_t outf,
527 gmx_bool bX, gmx_bool bF, const char *confout,
528 gmx_mtop_t *top_global,
529 t_inputrec *ir, int64_t step,
530 em_state_t *state,
531 t_state *state_global,
532 ObservablesHistory *observablesHistory)
534 int mdof_flags = 0;
536 if (bX)
538 mdof_flags |= MDOF_X;
540 if (bF)
542 mdof_flags |= MDOF_F;
545 /* If we want IMD output, set appropriate MDOF flag */
546 if (ir->bIMD)
548 mdof_flags |= MDOF_IMD;
551 mdoutf_write_to_trajectory_files(fplog, cr, outf, mdof_flags,
552 top_global, step, static_cast<double>(step),
553 &state->s, state_global, observablesHistory,
554 state->f);
556 if (confout != nullptr)
558 if (DOMAINDECOMP(cr))
560 /* If bX=true, x was collected to state_global in the call above */
561 if (!bX)
563 gmx::ArrayRef<gmx::RVec> globalXRef = MASTER(cr) ? makeArrayRef(state_global->x) : gmx::EmptyArrayRef();
564 dd_collect_vec(cr->dd, &state->s, makeArrayRef(state->s.x), globalXRef);
567 else
569 /* Copy the local state pointer */
570 state_global = &state->s;
573 if (MASTER(cr))
575 if (ir->ePBC != epbcNONE && !ir->bPeriodicMols && DOMAINDECOMP(cr))
577 /* Make molecules whole only for confout writing */
578 do_pbc_mtop(fplog, ir->ePBC, state->s.box, top_global,
579 state_global->x.rvec_array());
582 write_sto_conf_mtop(confout,
583 *top_global->name, top_global,
584 state_global->x.rvec_array(), nullptr, ir->ePBC, state->s.box);
589 //! \brief Do one minimization step
591 // \returns true when the step succeeded, false when a constraint error occurred
592 static bool do_em_step(const t_commrec *cr,
593 t_inputrec *ir, t_mdatoms *md,
594 em_state_t *ems1, real a, const PaddedVector<gmx::RVec> *force,
595 em_state_t *ems2,
596 gmx::Constraints *constr,
597 int64_t count)
600 t_state *s1, *s2;
601 int start, end;
602 real dvdl_constr;
603 int nthreads gmx_unused;
605 bool validStep = true;
607 s1 = &ems1->s;
608 s2 = &ems2->s;
610 if (DOMAINDECOMP(cr) && s1->ddp_count != cr->dd->ddp_count)
612 gmx_incons("state mismatch in do_em_step");
615 s2->flags = s1->flags;
617 if (s2->natoms != s1->natoms)
619 state_change_natoms(s2, s1->natoms);
620 ems2->f.resizeWithPadding(s2->natoms);
622 if (DOMAINDECOMP(cr) && s2->cg_gl.size() != s1->cg_gl.size())
624 s2->cg_gl.resize(s1->cg_gl.size());
627 copy_mat(s1->box, s2->box);
628 /* Copy free energy state */
629 s2->lambda = s1->lambda;
630 copy_mat(s1->box, s2->box);
632 start = 0;
633 end = md->homenr;
635 nthreads = gmx_omp_nthreads_get(emntUpdate);
636 #pragma omp parallel num_threads(nthreads)
638 const rvec *x1 = s1->x.rvec_array();
639 rvec *x2 = s2->x.rvec_array();
640 const rvec *f = force->rvec_array();
642 int gf = 0;
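// Displace every non-frozen coordinate along the search direction: x2 = x1 + a*f;
// frozen dimensions simply copy x1.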
643 #pragma omp for schedule(static) nowait
644 for (int i = start; i < end; i++)
648 if (md->cFREEZE)
650 gf = md->cFREEZE[i];
652 for (int m = 0; m < DIM; m++)
654 if (ir->opts.nFreeze[gf][m])
656 x2[i][m] = x1[i][m];
658 else
660 x2[i][m] = x1[i][m] + a*f[i][m];
664 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
667 if (s2->flags & (1<<estCGP))
669 /* Copy the CG p vector */
670 const rvec *p1 = s1->cg_p.rvec_array();
671 rvec *p2 = s2->cg_p.rvec_array();
672 #pragma omp for schedule(static) nowait
673 for (int i = start; i < end; i++)
675 // Trivial OpenMP block that does not throw
676 copy_rvec(p1[i], p2[i]);
680 if (DOMAINDECOMP(cr))
682 s2->ddp_count = s1->ddp_count;
684 /* OpenMP does not support unsigned loop variables */
685 #pragma omp for schedule(static) nowait
686 for (int i = 0; i < static_cast<int>(s2->cg_gl.size()); i++)
688 s2->cg_gl[i] = s1->cg_gl[i];
690 s2->ddp_count_cg_gl = s1->ddp_count_cg_gl;
694 if (constr)
696 dvdl_constr = 0;
697 validStep =
698 constr->apply(TRUE, TRUE,
699 count, 0, 1.0,
700 s1->x.rvec_array(), s2->x.rvec_array(),
701 nullptr, s2->box,
702 s2->lambda[efptBONDED], &dvdl_constr,
703 nullptr, nullptr, gmx::ConstraintVariable::Positions);
705 if (cr->nnodes > 1)
707 /* This global reduction will affect performance at high
708 * parallelization, but we cannot really avoid it.
709 * But usually EM is not run at high parallelization.
711 int reductionBuffer = static_cast<int>(!validStep);
712 gmx_sumi(1, &reductionBuffer, cr);
713 validStep = (reductionBuffer == 0);
716 // We should move this check to the different minimizers
717 if (!validStep && ir->eI != eiSteep)
719 gmx_fatal(FARGS, "The coordinates could not be constrained. Minimizer '%s' can not handle constraint failures, use minimizer '%s' before using '%s'.",
720 EI(ir->eI), EI(eiSteep), EI(ir->eI));
724 return validStep;
727 //! Prepare EM for using domain decomposition parallelization
728 static void em_dd_partition_system(FILE *fplog,
729 const gmx::MDLogger &mdlog,
730 int step, const t_commrec *cr,
731 gmx_mtop_t *top_global, t_inputrec *ir,
732 em_state_t *ems, gmx_localtop_t *top,
733 gmx::MDAtoms *mdAtoms, t_forcerec *fr,
734 gmx_vsite_t *vsite, gmx::Constraints *constr,
735 t_nrnb *nrnb, gmx_wallcycle_t wcycle)
737 /* Repartition the domain decomposition */
738 dd_partition_system(fplog, mdlog, step, cr, FALSE, 1,
739 nullptr, top_global, ir,
740 &ems->s, &ems->f,
741 mdAtoms, top, fr, vsite, constr,
742 nrnb, wcycle, FALSE);
743 dd_store_state(cr->dd, &ems->s);
746 namespace
749 /*! \brief Class to handle the work of setting and doing an energy evaluation.
751 * This class is a mere aggregate of parameters to pass to evaluate an
752 * energy, so that future changes to names and types of them consume
753 * less time when refactoring other code.
755 * Aggregate initialization is used, for which the chief risk is that
756 * if a member is added at the end and not all initializer lists are
757 * updated, then the member will be value initialized, which will
758 * typically mean initialization to zero.
760 * We only want to construct one of these with an initializer list, so
761 * we explicitly delete the default constructor. */
762 class EnergyEvaluator
764 public:
765 //! We only intend to construct such objects with an initializer list.
766 #if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9)
767 // Aspects of the C++11 spec changed after GCC 4.8.5, and
768 // compilation of the initializer list construction in
769 // runner.cpp fails in GCC 4.8.5.
770 EnergyEvaluator() = delete;
771 #endif
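// Objects of this class are built with aggregate initialization at the call sites, e.g.
//   EnergyEvaluator energyEvaluator { fplog, mdlog, cr, ms, top_global, top,
//                                     inputrec, nrnb, wcycle, gstat,
//                                     vsite, constr, fcd, graph, mdAtoms, fr, enerd };
// as done in do_cg() and do_lbfgs() below.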
772 /*! \brief Evaluates an energy on the state in \c ems.
774 * \todo In practice, the same objects mu_tot, vir, and pres
775 * are always passed to this function, so we would rather have
776 * them as data members. However, their C-array types are
777 * unsuited for aggregate initialization. When the types
778 * improve, the call signature of this method can be reduced.
780 void run(em_state_t *ems, rvec mu_tot,
781 tensor vir, tensor pres,
782 int64_t count, gmx_bool bFirst);
783 //! Handles logging (deprecated).
784 FILE *fplog;
785 //! Handles logging.
786 const gmx::MDLogger &mdlog;
787 //! Handles communication.
788 const t_commrec *cr;
789 //! Coordinates multi-simulations.
790 const gmx_multisim_t *ms;
791 //! Holds the simulation topology.
792 gmx_mtop_t *top_global;
793 //! Holds the domain topology.
794 gmx_localtop_t *top;
795 //! User input options.
796 t_inputrec *inputrec;
797 //! Manages flop accounting.
798 t_nrnb *nrnb;
799 //! Manages wall cycle accounting.
800 gmx_wallcycle_t wcycle;
801 //! Coordinates global reduction.
802 gmx_global_stat_t gstat;
803 //! Handles virtual sites.
804 gmx_vsite_t *vsite;
805 //! Handles constraints.
806 gmx::Constraints *constr;
808 //! Data for special listed interactions such as restraints.
808 t_fcdata *fcd;
809 //! Molecular graph for SHAKE.
810 t_graph *graph;
811 //! Per-atom data for this domain.
812 gmx::MDAtoms *mdAtoms;
813 //! Handles how to calculate the forces.
814 t_forcerec *fr;
815 //! Stores the computed energies.
816 gmx_enerdata_t *enerd;
819 void
820 EnergyEvaluator::run(em_state_t *ems, rvec mu_tot,
821 tensor vir, tensor pres,
822 int64_t count, gmx_bool bFirst)
824 real t;
825 gmx_bool bNS;
826 tensor force_vir, shake_vir, ekin;
827 real dvdl_constr, prescorr, enercorr, dvdlcorr;
828 real terminate = 0;
830 /* Set the time to the initial time, the time does not change during EM */
831 t = inputrec->init_t;
833 if (bFirst ||
834 (DOMAINDECOMP(cr) && ems->s.ddp_count < cr->dd->ddp_count))
836 /* This is the first state or an old state used before the last ns */
837 bNS = TRUE;
839 else
841 bNS = FALSE;
842 if (inputrec->nstlist > 0)
844 bNS = TRUE;
848 if (vsite)
850 construct_vsites(vsite, ems->s.x.rvec_array(), 1, nullptr,
851 top->idef.iparams, top->idef.il,
852 fr->ePBC, fr->bMolPBC, cr, ems->s.box);
855 if (DOMAINDECOMP(cr) && bNS)
857 /* Repartition the domain decomposition */
858 em_dd_partition_system(fplog, mdlog, count, cr, top_global, inputrec,
859 ems, top, mdAtoms, fr, vsite, constr,
860 nrnb, wcycle);
863 /* Calc force & energy on new trial position */
864 /* do_force always puts the charge groups in the box and shifts again
865 * We do not unshift, so molecules are always whole in congrad.c
867 do_force(fplog, cr, ms, inputrec, nullptr, nullptr,
868 count, nrnb, wcycle, top, &top_global->groups,
869 ems->s.box, ems->s.x.arrayRefWithPadding(), &ems->s.hist,
870 ems->f.arrayRefWithPadding(), force_vir, mdAtoms->mdatoms(), enerd, fcd,
871 ems->s.lambda, graph, fr, vsite, mu_tot, t, nullptr,
872 GMX_FORCE_STATECHANGED | GMX_FORCE_ALLFORCES |
873 GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY |
874 (bNS ? GMX_FORCE_NS : 0),
875 DOMAINDECOMP(cr) ?
876 DdOpenBalanceRegionBeforeForceComputation::yes :
877 DdOpenBalanceRegionBeforeForceComputation::no,
878 DOMAINDECOMP(cr) ?
879 DdCloseBalanceRegionAfterForceComputation::yes :
880 DdCloseBalanceRegionAfterForceComputation::no);
882 /* Clear the unused shake virial and pressure */
883 clear_mat(shake_vir);
884 clear_mat(pres);
886 /* Communicate stuff when parallel */
887 if (PAR(cr) && inputrec->eI != eiNM)
889 wallcycle_start(wcycle, ewcMoveE);
891 global_stat(gstat, cr, enerd, force_vir, shake_vir, mu_tot,
892 inputrec, nullptr, nullptr, nullptr, 1, &terminate,
893 nullptr, FALSE,
894 CGLO_ENERGY |
895 CGLO_PRESSURE |
896 CGLO_CONSTRAINT);
898 wallcycle_stop(wcycle, ewcMoveE);
901 /* Calculate long range corrections to pressure and energy */
902 calc_dispcorr(inputrec, fr, ems->s.box, ems->s.lambda[efptVDW],
903 pres, force_vir, &prescorr, &enercorr, &dvdlcorr);
904 enerd->term[F_DISPCORR] = enercorr;
905 enerd->term[F_EPOT] += enercorr;
906 enerd->term[F_PRES] += prescorr;
907 enerd->term[F_DVDL] += dvdlcorr;
909 ems->epot = enerd->term[F_EPOT];
911 if (constr)
913 /* Project out the constraint components of the force */
914 dvdl_constr = 0;
915 rvec *f_rvec = ems->f.rvec_array();
916 constr->apply(FALSE, FALSE,
917 count, 0, 1.0,
918 ems->s.x.rvec_array(), f_rvec, f_rvec,
919 ems->s.box,
920 ems->s.lambda[efptBONDED], &dvdl_constr,
921 nullptr, &shake_vir, gmx::ConstraintVariable::ForceDispl);
922 enerd->term[F_DVDL_CONSTR] += dvdl_constr;
923 m_add(force_vir, shake_vir, vir);
925 else
927 copy_mat(force_vir, vir);
930 clear_mat(ekin);
931 enerd->term[F_PRES] =
932 calc_pres(fr->ePBC, inputrec->nwall, ems->s.box, ekin, vir, pres);
934 sum_dhdl(enerd, ems->s.lambda, inputrec->fepvals);
936 if (EI_ENERGY_MINIMIZATION(inputrec->eI))
938 get_state_f_norm_max(cr, &(inputrec->opts), mdAtoms->mdatoms(), ems);
942 } // namespace
944 //! Parallel utility computing the Polak-Ribiere force sum when the DD atom order differs between states
945 static double reorder_partsum(const t_commrec *cr, t_grpopts *opts, t_mdatoms *mdatoms,
946 gmx_mtop_t *top_global,
947 em_state_t *s_min, em_state_t *s_b)
949 t_block *cgs_gl;
950 int ncg, *cg_gl, *index, c, cg, i, a0, a1, a, gf, m;
951 double partsum;
952 unsigned char *grpnrFREEZE;
954 if (debug)
956 fprintf(debug, "Doing reorder_partsum\n");
959 const rvec *fm = s_min->f.rvec_array();
960 const rvec *fb = s_b->f.rvec_array();
962 cgs_gl = dd_charge_groups_global(cr->dd);
963 index = cgs_gl->index;
965 /* Collect fm in a global vector fmg.
966 * This conflicts with the spirit of domain decomposition,
967 * but to fully optimize this a much more complicated algorithm is required.
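 * The value returned is the Polak-Ribiere partial sum over the non-frozen degrees of
 * freedom, sum (f_b - f_min)*f_b, with f_min brought to the global atom order so it can
 * be paired with the locally stored f_b.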
969 rvec *fmg;
970 snew(fmg, top_global->natoms);
972 ncg = s_min->s.cg_gl.size();
973 cg_gl = s_min->s.cg_gl.data();
974 i = 0;
975 for (c = 0; c < ncg; c++)
977 cg = cg_gl[c];
978 a0 = index[cg];
979 a1 = index[cg+1];
980 for (a = a0; a < a1; a++)
982 copy_rvec(fm[i], fmg[a]);
983 i++;
986 gmx_sum(top_global->natoms*3, fmg[0], cr);
988 /* Now we will determine the part of the sum for the cgs in state s_b */
989 ncg = s_b->s.cg_gl.size();
990 cg_gl = s_b->s.cg_gl.data();
991 partsum = 0;
992 i = 0;
993 gf = 0;
994 grpnrFREEZE = top_global->groups.grpnr[egcFREEZE];
995 for (c = 0; c < ncg; c++)
997 cg = cg_gl[c];
998 a0 = index[cg];
999 a1 = index[cg+1];
1000 for (a = a0; a < a1; a++)
1002 if (mdatoms->cFREEZE && grpnrFREEZE)
1004 gf = grpnrFREEZE[i];
1006 for (m = 0; m < DIM; m++)
1008 if (!opts->nFreeze[gf][m])
1010 partsum += (fb[i][m] - fmg[a][m])*fb[i][m];
1013 i++;
1017 sfree(fmg);
1019 return partsum;
1022 //! Compute the Polak-Ribiere beta for the next conjugate-gradient search direction.
1023 static real pr_beta(const t_commrec *cr, t_grpopts *opts, t_mdatoms *mdatoms,
1024 gmx_mtop_t *top_global,
1025 em_state_t *s_min, em_state_t *s_b)
1027 double sum;
1029 /* This is just the classical Polak-Ribiere calculation of beta;
1030 * it looks a bit complicated since we take freeze groups into account,
1031 * and might have to sum it in parallel runs.
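 * Polak-Ribiere: beta = sum over non-frozen DOFs of (f_b - f_min)*f_b, divided by
 * |f_min|^2 (see the return statement at the end of this function).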
1034 if (!DOMAINDECOMP(cr) ||
1035 (s_min->s.ddp_count == cr->dd->ddp_count &&
1036 s_b->s.ddp_count == cr->dd->ddp_count))
1038 const rvec *fm = s_min->f.rvec_array();
1039 const rvec *fb = s_b->f.rvec_array();
1040 sum = 0;
1041 int gf = 0;
1042 /* This part of code can be incorrect with DD,
1043 * since the atom ordering in s_b and s_min might differ.
1045 for (int i = 0; i < mdatoms->homenr; i++)
1047 if (mdatoms->cFREEZE)
1049 gf = mdatoms->cFREEZE[i];
1051 for (int m = 0; m < DIM; m++)
1053 if (!opts->nFreeze[gf][m])
1055 sum += (fb[i][m] - fm[i][m])*fb[i][m];
1060 else
1062 /* We need to reorder cgs while summing */
1063 sum = reorder_partsum(cr, opts, mdatoms, top_global, s_min, s_b);
1065 if (PAR(cr))
1067 gmx_sumd(1, &sum, cr);
1070 return sum/gmx::square(s_min->fnorm);
1073 namespace gmx
1076 void
1077 Integrator::do_cg()
1079 const char *CG = "Polak-Ribiere Conjugate Gradients";
1081 gmx_localtop_t *top;
1082 gmx_enerdata_t *enerd;
1083 gmx_global_stat_t gstat;
1084 t_graph *graph;
1085 double tmp, minstep;
1086 real stepsize;
1087 real a, b, c, beta = 0.0;
1088 real epot_repl = 0;
1089 real pnorm;
1090 t_mdebin *mdebin;
1091 gmx_bool converged, foundlower;
1092 rvec mu_tot;
1093 gmx_bool do_log = FALSE, do_ene = FALSE, do_x, do_f;
1094 tensor vir, pres;
1095 int number_steps, neval = 0, nstcg = inputrec->nstcgsteep;
1096 gmx_mdoutf_t outf;
1097 int m, step, nminstep;
1098 auto mdatoms = mdAtoms->mdatoms();
1100 GMX_LOG(mdlog.info).asParagraph().
1101 appendText("Note that activating conjugate gradient energy minimization via the "
1102 "integrator .mdp option and the command gmx mdrun may "
1103 "be available in a different form in a future version of GROMACS, "
1104 "e.g. gmx minimize and an .mdp option.");
1106 step = 0;
1108 if (MASTER(cr))
1110 // In CG, the state is extended with a search direction
1111 state_global->flags |= (1<<estCGP);
1113 // Ensure the extra per-atom state array gets allocated
1114 state_change_natoms(state_global, state_global->natoms);
1116 // Initialize the search direction to zero
1117 for (RVec &cg_p : state_global->cg_p)
1119 cg_p = { 0, 0, 0 };
1123 /* Create 4 states on the stack and extract pointers that we will swap */
1124 em_state_t s0 {}, s1 {}, s2 {}, s3 {};
1125 em_state_t *s_min = &s0;
1126 em_state_t *s_a = &s1;
1127 em_state_t *s_b = &s2;
1128 em_state_t *s_c = &s3;
1130 /* Init em and store the local state in s_min */
1131 init_em(fplog, mdlog, CG, cr, ms, outputProvider, inputrec, mdrunOptions,
1132 state_global, top_global, s_min, &top,
1133 nrnb, mu_tot, fr, &enerd, &graph, mdAtoms, &gstat,
1134 vsite, constr, nullptr,
1135 nfile, fnm, &outf, &mdebin, wcycle);
1137 /* Print to log file */
1138 print_em_start(fplog, cr, walltime_accounting, wcycle, CG);
1140 /* Max number of steps */
1141 number_steps = inputrec->nsteps;
1143 if (MASTER(cr))
1145 sp_header(stderr, CG, inputrec->em_tol, number_steps);
1147 if (fplog)
1149 sp_header(fplog, CG, inputrec->em_tol, number_steps);
1152 EnergyEvaluator energyEvaluator {
1153 fplog, mdlog, cr, ms,
1154 top_global, top,
1155 inputrec, nrnb, wcycle, gstat,
1156 vsite, constr, fcd, graph,
1157 mdAtoms, fr, enerd
1159 /* Call the force routine and some auxiliary (neighboursearching etc.) */
1160 /* do_force always puts the charge groups in the box and shifts again
1161 * We do not unshift, so molecules are always whole in congrad.c
1163 energyEvaluator.run(s_min, mu_tot, vir, pres, -1, TRUE);
1165 if (MASTER(cr))
1167 /* Copy stuff to the energy bin for easy printing etc. */
1168 matrix nullBox = {};
1169 upd_mdebin(mdebin, FALSE, FALSE, static_cast<double>(step),
1170 mdatoms->tmass, enerd, nullptr, nullptr, nullptr, nullBox,
1171 nullptr, nullptr, vir, pres, nullptr, mu_tot, constr);
1173 print_ebin_header(fplog, step, step);
1174 print_ebin(mdoutf_get_fp_ene(outf), TRUE, FALSE, FALSE, fplog, step, step, eprNORMAL,
1175 mdebin, fcd, &(top_global->groups), &(inputrec->opts), nullptr);
1178 /* Estimate/guess the initial stepsize */
1179 stepsize = inputrec->em_stepsize/s_min->fnorm;
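/* Dividing by the force norm makes the norm of the first trial displacement,
 * stepsize*|f|, equal to em_stepsize. */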
1181 if (MASTER(cr))
1183 double sqrtNumAtoms = sqrt(static_cast<double>(state_global->natoms));
1184 fprintf(stderr, " F-max = %12.5e on atom %d\n",
1185 s_min->fmax, s_min->a_fmax+1);
1186 fprintf(stderr, " F-Norm = %12.5e\n",
1187 s_min->fnorm/sqrtNumAtoms);
1188 fprintf(stderr, "\n");
1189 /* and copy to the log file too... */
1190 fprintf(fplog, " F-max = %12.5e on atom %d\n",
1191 s_min->fmax, s_min->a_fmax+1);
1192 fprintf(fplog, " F-Norm = %12.5e\n",
1193 s_min->fnorm/sqrtNumAtoms);
1194 fprintf(fplog, "\n");
1196 /* Start the loop over CG steps.
1197 * Each successful step is counted, and we continue until
1198 * we either converge or reach the max number of steps.
1200 converged = FALSE;
1201 for (step = 0; (number_steps < 0 || step <= number_steps) && !converged; step++)
1204 /* start taking steps in a new direction
1205 * First time we enter the routine, beta=0, and the direction is
1206 * simply the negative gradient.
1209 /* Calculate the new direction in p, and the gradient in this direction, gpa */
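/* Conjugate-gradient update: p_new = f + beta*p_old, with f the negative gradient;
 * gpa accumulates -p.f, i.e. the directional derivative of the energy along p,
 * which is negative when p is a descent direction. */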
1210 rvec *pm = s_min->s.cg_p.rvec_array();
1211 const rvec *sfm = s_min->f.rvec_array();
1212 double gpa = 0;
1213 int gf = 0;
1214 for (int i = 0; i < mdatoms->homenr; i++)
1216 if (mdatoms->cFREEZE)
1218 gf = mdatoms->cFREEZE[i];
1220 for (m = 0; m < DIM; m++)
1222 if (!inputrec->opts.nFreeze[gf][m])
1224 pm[i][m] = sfm[i][m] + beta*pm[i][m];
1225 gpa -= pm[i][m]*sfm[i][m];
1226 /* f is negative gradient, thus the sign */
1228 else
1230 pm[i][m] = 0;
1235 /* Sum the gradient along the line across CPUs */
1236 if (PAR(cr))
1238 gmx_sumd(1, &gpa, cr);
1241 /* Calculate the norm of the search vector */
1242 get_f_norm_max(cr, &(inputrec->opts), mdatoms, pm, &pnorm, nullptr, nullptr);
1244 /* Just in case stepsize reaches zero due to numerical precision... */
1245 if (stepsize <= 0)
1247 stepsize = inputrec->em_stepsize/pnorm;
1251 * Double check the value of the derivative in the search direction.
1252 * If it is positive it must be due to the old information in the
1253 * CG formula, so just remove that and start over with beta=0.
1254 * This corresponds to a steepest descent step.
1256 if (gpa > 0)
1258 beta = 0;
1259 step--; /* Don't count this step since we are restarting */
1260 continue; /* Go back to the beginning of the big for-loop */
1263 /* Calculate minimum allowed stepsize, before the average (norm)
1264 * relative change in coordinate is smaller than precision
1266 minstep = 0;
1267 auto s_min_x = makeArrayRef(s_min->s.x);
1268 for (int i = 0; i < mdatoms->homenr; i++)
1270 for (m = 0; m < DIM; m++)
1272 tmp = fabs(s_min_x[i][m]);
1273 if (tmp < 1.0)
1275 tmp = 1.0;
1277 tmp = pm[i][m]/tmp;
1278 minstep += tmp*tmp;
1281 /* Add up from all CPUs */
1282 if (PAR(cr))
1284 gmx_sumd(1, &minstep, cr);
1287 minstep = GMX_REAL_EPS/sqrt(minstep/(3*top_global->natoms));
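/* At stepsize == minstep the average (RMS) relative coordinate change,
 * stepsize*sqrt(sum((p/x)^2)/(3N)), equals machine precision, so a smaller
 * step cannot change the coordinates meaningfully. */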
1289 if (stepsize < minstep)
1291 converged = TRUE;
1292 break;
1295 /* Write coordinates if necessary */
1296 do_x = do_per_step(step, inputrec->nstxout);
1297 do_f = do_per_step(step, inputrec->nstfout);
1299 write_em_traj(fplog, cr, outf, do_x, do_f, nullptr,
1300 top_global, inputrec, step,
1301 s_min, state_global, observablesHistory);
1303 /* Take a step downhill.
1304 * In theory, we should minimize the function along this direction.
1305 * That is quite possible, but it turns out to take 5-10 function evaluations
1306 * for each line. However, we don't really need to find the exact minimum -
1307 * it is much better to start a new CG step in a modified direction as soon
1308 * as we are close to it. This will save a lot of energy evaluations.
1310 * In practice, we just try to take a single step.
1311 * If it worked (i.e. lowered the energy), we increase the stepsize but
1312 * then continue straight to the next CG step without trying to find any minimum.
1313 * If it didn't work (higher energy), there must be a minimum somewhere between
1314 * the old position and the new one.
1316 * Due to the finite numerical accuracy, it turns out that it is a good idea
1317 * to even accept a SMALL increase in energy, if the derivative is still downhill.
1318 * This leads to lower final energies in the tests I've done. / Erik
1320 s_a->epot = s_min->epot;
1321 a = 0.0;
1322 c = a + stepsize; /* reference position along line is zero */
1324 if (DOMAINDECOMP(cr) && s_min->s.ddp_count < cr->dd->ddp_count)
1326 em_dd_partition_system(fplog, mdlog, step, cr, top_global, inputrec,
1327 s_min, top, mdAtoms, fr, vsite, constr,
1328 nrnb, wcycle);
1331 /* Take a trial step (new coords in s_c) */
1332 do_em_step(cr, inputrec, mdatoms, s_min, c, &s_min->s.cg_p, s_c,
1333 constr, -1);
1335 neval++;
1336 /* Calculate energy for the trial step */
1337 energyEvaluator.run(s_c, mu_tot, vir, pres, -1, FALSE);
1339 /* Calc derivative along line */
1340 const rvec *pc = s_c->s.cg_p.rvec_array();
1341 const rvec *sfc = s_c->f.rvec_array();
1342 double gpc = 0;
1343 for (int i = 0; i < mdatoms->homenr; i++)
1345 for (m = 0; m < DIM; m++)
1347 gpc -= pc[i][m]*sfc[i][m]; /* f is negative gradient, thus the sign */
1350 /* Sum the gradient along the line across CPUs */
1351 if (PAR(cr))
1353 gmx_sumd(1, &gpc, cr);
1356 /* This is the max amount of increase in energy we tolerate */
1357 tmp = std::sqrt(GMX_REAL_EPS)*fabs(s_a->epot);
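/* sqrt(GMX_REAL_EPS)*|Epot| is roughly the smallest relative energy difference
 * that can be resolved reliably at the working precision. */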
1359 /* Accept the step if the energy is lower, or if it is not significantly higher
1360 * and the line derivative is still negative.
1362 if (s_c->epot < s_a->epot || (gpc < 0 && s_c->epot < (s_a->epot + tmp)))
1364 foundlower = TRUE;
1365 /* Great, we found a better energy. Increase step for next iteration
1366 * if we are still going down, decrease it otherwise
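 * The factors 1.618034 and 0.618034 used below are the golden ratio phi = (1+sqrt(5))/2
 * and its inverse 1/phi, as in a golden-section line search.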
1368 if (gpc < 0)
1370 stepsize *= 1.618034; /* The golden section */
1372 else
1374 stepsize *= 0.618034; /* 1/golden section */
1377 else
1379 /* New energy is the same or higher. We will have to do some work
1380 * to find a smaller value in the interval. Take smaller step next time!
1382 foundlower = FALSE;
1383 stepsize *= 0.618034;
1389 /* OK, if we didn't find a lower value we will have to locate one now - there must
1390 * be one in the interval [a=0,c].
1391 * The same thing is valid here, though: Don't spend dozens of iterations to find
1392 * the line minimum. We try to interpolate based on the derivative at the endpoints,
1393 * and only continue until we find a lower value. In most cases this means 1-2 iterations.
1395 * I also have a safeguard for potentially really pathological functions so we never
1396 * take more than 20 steps before we give up ...
1398 * If we already found a lower value we just skip this step and continue to the update.
1400 double gpb;
1401 if (!foundlower)
1403 nminstep = 0;
1407 /* Select a new trial point.
1408 * If the derivatives at points a & c have different sign we interpolate to zero,
1409 * otherwise just do a bisection.
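 * With a linear model of the derivative between (a, gpa) and (c, gpc), its zero lies at
 * b = a + gpa*(a-c)/(gpc-gpa), which is the secant step taken below.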
1411 if (gpa < 0 && gpc > 0)
1413 b = a + gpa*(a-c)/(gpc-gpa);
1415 else
1417 b = 0.5*(a+c);
1420 /* safeguard if interpolation close to machine accuracy causes errors:
1421 * never go outside the interval
1423 if (b <= a || b >= c)
1425 b = 0.5*(a+c);
1428 if (DOMAINDECOMP(cr) && s_min->s.ddp_count != cr->dd->ddp_count)
1430 /* Reload the old state */
1431 em_dd_partition_system(fplog, mdlog, -1, cr, top_global, inputrec,
1432 s_min, top, mdAtoms, fr, vsite, constr,
1433 nrnb, wcycle);
1436 /* Take a trial step to this new point - new coords in s_b */
1437 do_em_step(cr, inputrec, mdatoms, s_min, b, &s_min->s.cg_p, s_b,
1438 constr, -1);
1440 neval++;
1441 /* Calculate energy for the trial step */
1442 energyEvaluator.run(s_b, mu_tot, vir, pres, -1, FALSE);
1444 /* p does not change within a step, but since the domain decomposition
1445 * might change, we have to use cg_p of s_b here.
1447 const rvec *pb = s_b->s.cg_p.rvec_array();
1448 const rvec *sfb = s_b->f.rvec_array();
1449 gpb = 0;
1450 for (int i = 0; i < mdatoms->homenr; i++)
1452 for (m = 0; m < DIM; m++)
1454 gpb -= pb[i][m]*sfb[i][m]; /* f is negative gradient, thus the sign */
1457 /* Sum the gradient along the line across CPUs */
1458 if (PAR(cr))
1460 gmx_sumd(1, &gpb, cr);
1463 if (debug)
1465 fprintf(debug, "CGE: EpotA %f EpotB %f EpotC %f gpb %f\n",
1466 s_a->epot, s_b->epot, s_c->epot, gpb);
1469 epot_repl = s_b->epot;
1471 /* Keep one of the intervals based on the value of the derivative at the new point */
1472 if (gpb > 0)
1474 /* Replace c endpoint with b */
1475 swap_em_state(&s_b, &s_c);
1476 c = b;
1477 gpc = gpb;
1479 else
1481 /* Replace a endpoint with b */
1482 swap_em_state(&s_b, &s_a);
1483 a = b;
1484 gpa = gpb;
1488 * Stop search as soon as we find a value smaller than the endpoints.
1489 * Never run more than 20 steps, no matter what.
1491 nminstep++;
1493 while ((epot_repl > s_a->epot || epot_repl > s_c->epot) &&
1494 (nminstep < 20));
1496 if (std::fabs(epot_repl - s_min->epot) < fabs(s_min->epot)*GMX_REAL_EPS ||
1497 nminstep >= 20)
1499 /* OK. We couldn't find a significantly lower energy.
1500 * If beta==0 this was steepest descent, and then we give up.
1501 * If not, set beta=0 and restart with steepest descent before quitting.
1503 if (beta == 0.0)
1505 /* Converged */
1506 converged = TRUE;
1507 break;
1509 else
1511 /* Reset memory before giving up */
1512 beta = 0.0;
1513 continue;
1517 /* Select min energy state of A & C, put the best in B.
1519 if (s_c->epot < s_a->epot)
1521 if (debug)
1523 fprintf(debug, "CGE: C (%f) is lower than A (%f), moving C to B\n",
1524 s_c->epot, s_a->epot);
1526 swap_em_state(&s_b, &s_c);
1527 gpb = gpc;
1529 else
1531 if (debug)
1533 fprintf(debug, "CGE: A (%f) is lower than C (%f), moving A to B\n",
1534 s_a->epot, s_c->epot);
1536 swap_em_state(&s_b, &s_a);
1537 gpb = gpa;
1541 else
1543 if (debug)
1545 fprintf(debug, "CGE: Found a lower energy %f, moving C to B\n",
1546 s_c->epot);
1548 swap_em_state(&s_b, &s_c);
1549 gpb = gpc;
1552 /* new search direction */
1553 /* beta = 0 means forget all memory and restart with steepest descents. */
1554 if (nstcg && ((step % nstcg) == 0))
1556 beta = 0.0;
1558 else
1560 /* s_min->fnorm cannot be zero, because then we would have converged
1561 * and broken out.
1564 /* Polak-Ribiere update.
1565 * Change to fnorm2/fnorm2_old for Fletcher-Reeves
1567 beta = pr_beta(cr, &inputrec->opts, mdatoms, top_global, s_min, s_b);
1569 /* Limit beta to prevent oscillations */
1570 if (fabs(beta) > 5.0)
1572 beta = 0.0;
1576 /* update positions */
1577 swap_em_state(&s_min, &s_b);
1578 gpa = gpb;
1580 /* Print it if necessary */
1581 if (MASTER(cr))
1583 if (mdrunOptions.verbose)
1585 double sqrtNumAtoms = sqrt(static_cast<double>(state_global->natoms));
1586 fprintf(stderr, "\rStep %d, Epot=%12.6e, Fnorm=%9.3e, Fmax=%9.3e (atom %d)\n",
1587 step, s_min->epot, s_min->fnorm/sqrtNumAtoms,
1588 s_min->fmax, s_min->a_fmax+1);
1589 fflush(stderr);
1591 /* Store the new (lower) energies */
1592 matrix nullBox = {};
1593 upd_mdebin(mdebin, FALSE, FALSE, static_cast<double>(step),
1594 mdatoms->tmass, enerd, nullptr, nullptr, nullptr, nullBox,
1595 nullptr, nullptr, vir, pres, nullptr, mu_tot, constr);
1597 do_log = do_per_step(step, inputrec->nstlog);
1598 do_ene = do_per_step(step, inputrec->nstenergy);
1600 /* Prepare IMD energy record, if bIMD is TRUE. */
1601 IMD_fill_energy_record(inputrec->bIMD, inputrec->imd, enerd, step, TRUE);
1603 if (do_log)
1605 print_ebin_header(fplog, step, step);
1607 print_ebin(mdoutf_get_fp_ene(outf), do_ene, FALSE, FALSE,
1608 do_log ? fplog : nullptr, step, step, eprNORMAL,
1609 mdebin, fcd, &(top_global->groups), &(inputrec->opts), nullptr);
1612 /* Send energies and positions to the IMD client if bIMD is TRUE. */
1613 if (MASTER(cr) && do_IMD(inputrec->bIMD, step, cr, TRUE, state_global->box, state_global->x.rvec_array(), inputrec, 0, wcycle))
1615 IMD_send_positions(inputrec->imd);
1618 /* Stop when the maximum force lies below tolerance.
1619 * If we have reached machine precision, converged is already set to true.
1621 converged = converged || (s_min->fmax < inputrec->em_tol);
1623 } /* End of the loop */
1625 /* IMD cleanup, if bIMD is TRUE. */
1626 IMD_finalize(inputrec->bIMD, inputrec->imd);
1628 if (converged)
1630 step--; /* we never took that last step in this case */
1633 if (s_min->fmax > inputrec->em_tol)
1635 if (MASTER(cr))
1637 warn_step(fplog, inputrec->em_tol, s_min->fmax,
1638 step-1 == number_steps, FALSE);
1640 converged = FALSE;
1643 if (MASTER(cr))
1645 /* If we printed energy and/or logfile last step (which was the last step)
1646 * we don't have to do it again, but otherwise print the final values.
1648 if (!do_log)
1650 /* Write final value to log since we didn't do anything the last step */
1651 print_ebin_header(fplog, step, step);
1653 if (!do_ene || !do_log)
1655 /* Write final energy file entries */
1656 print_ebin(mdoutf_get_fp_ene(outf), !do_ene, FALSE, FALSE,
1657 !do_log ? fplog : nullptr, step, step, eprNORMAL,
1658 mdebin, fcd, &(top_global->groups), &(inputrec->opts), nullptr);
1662 /* Print the final results */
1663 if (MASTER(cr))
1665 fprintf(stderr, "\nwriting lowest energy coordinates.\n");
1668 /* IMPORTANT!
1669 * For accurate normal mode calculation it is imperative that we
1670 * store the last conformation into the full precision binary trajectory.
1672 * However, we should only do it if we did NOT already write this step
1673 * above (which we did if do_x or do_f was true).
1675 /* Note that with 0 < nstfout != nstxout we can end up with two frames
1676 * in the trajectory with the same step number.
1678 do_x = !do_per_step(step, inputrec->nstxout);
1679 do_f = (inputrec->nstfout > 0 && !do_per_step(step, inputrec->nstfout));
1681 write_em_traj(fplog, cr, outf, do_x, do_f, ftp2fn(efSTO, nfile, fnm),
1682 top_global, inputrec, step,
1683 s_min, state_global, observablesHistory);
1686 if (MASTER(cr))
1688 double sqrtNumAtoms = sqrt(static_cast<double>(state_global->natoms));
1689 print_converged(stderr, CG, inputrec->em_tol, step, converged, number_steps,
1690 s_min, sqrtNumAtoms);
1691 print_converged(fplog, CG, inputrec->em_tol, step, converged, number_steps,
1692 s_min, sqrtNumAtoms);
1694 fprintf(fplog, "\nPerformed %d energy evaluations in total.\n", neval);
1697 finish_em(cr, outf, walltime_accounting, wcycle);
1699 /* To print the actual number of steps we needed somewhere */
1700 walltime_accounting_set_nsteps_done(walltime_accounting, step);
1704 void
1705 Integrator::do_lbfgs()
1707 static const char *LBFGS = "Low-Memory BFGS Minimizer";
1708 em_state_t ems;
1709 gmx_localtop_t *top;
1710 gmx_enerdata_t *enerd;
1711 gmx_global_stat_t gstat;
1712 t_graph *graph;
1713 int ncorr, nmaxcorr, point, cp, neval, nminstep;
1714 double stepsize, step_taken, gpa, gpb, gpc, tmp, minstep;
1715 real *rho, *alpha, *p, *s, **dx, **dg;
1716 real a, b, c, maxdelta, delta;
1717 real diag, Epot0;
1718 real dgdx, dgdg, sq, yr, beta;
1719 t_mdebin *mdebin;
1720 gmx_bool converged;
1721 rvec mu_tot;
1722 gmx_bool do_log, do_ene, do_x, do_f, foundlower, *frozen;
1723 tensor vir, pres;
1724 int start, end, number_steps;
1725 gmx_mdoutf_t outf;
1726 int i, k, m, n, gf, step;
1727 int mdof_flags;
1728 auto mdatoms = mdAtoms->mdatoms();
1730 GMX_LOG(mdlog.info).asParagraph().
1731 appendText("Note that activating L-BFGS energy minimization via the "
1732 "integrator .mdp option and the command gmx mdrun may "
1733 "be available in a different form in a future version of GROMACS, "
1734 "e.g. gmx minimize and an .mdp option.");
1736 if (PAR(cr))
1738 gmx_fatal(FARGS, "L-BFGS minimization only supports a single rank");
1741 if (nullptr != constr)
1743 gmx_fatal(FARGS, "The combination of constraints and L-BFGS minimization is not implemented. Either do not use constraints, or use another minimizer (e.g. steepest descent).");
1746 n = 3*state_global->natoms;
1747 nmaxcorr = inputrec->nbfgscorr;
1749 snew(frozen, n);
1751 snew(p, n);
1752 snew(rho, nmaxcorr);
1753 snew(alpha, nmaxcorr);
1755 snew(dx, nmaxcorr);
1756 for (i = 0; i < nmaxcorr; i++)
1758 snew(dx[i], n);
1761 snew(dg, nmaxcorr);
1762 for (i = 0; i < nmaxcorr; i++)
1764 snew(dg[i], n);
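// Working storage for the limited-memory update: dx[] keeps the stored search directions /
// position differences and dg[] the corresponding gradient differences for the last
// nbfgscorr steps, while rho[] and alpha[] hold the scalars used to combine them into a
// new search direction (the standard L-BFGS two-loop recursion).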
1767 step = 0;
1768 neval = 0;
1770 /* Init em */
1771 init_em(fplog, mdlog, LBFGS, cr, ms, outputProvider, inputrec, mdrunOptions,
1772 state_global, top_global, &ems, &top,
1773 nrnb, mu_tot, fr, &enerd, &graph, mdAtoms, &gstat,
1774 vsite, constr, nullptr,
1775 nfile, fnm, &outf, &mdebin, wcycle);
1777 start = 0;
1778 end = mdatoms->homenr;
1780 /* We need 4 working states */
1781 em_state_t s0 {}, s1 {}, s2 {}, s3 {};
1782 em_state_t *sa = &s0;
1783 em_state_t *sb = &s1;
1784 em_state_t *sc = &s2;
1785 em_state_t *last = &s3;
1786 /* Initialize by copying the state from ems (we could skip x and f here) */
1787 *sa = ems;
1788 *sb = ems;
1789 *sc = ems;
1791 /* Print to log file */
1792 print_em_start(fplog, cr, walltime_accounting, wcycle, LBFGS);
1794 do_log = do_ene = do_x = do_f = TRUE;
1796 /* Max number of steps */
1797 number_steps = inputrec->nsteps;
1799 /* Create a 3*natoms index to tell whether each degree of freedom is frozen */
1800 gf = 0;
1801 for (i = start; i < end; i++)
1803 if (mdatoms->cFREEZE)
1805 gf = mdatoms->cFREEZE[i];
1807 for (m = 0; m < DIM; m++)
1809 frozen[3*i+m] = (inputrec->opts.nFreeze[gf][m] != 0);
1812 if (MASTER(cr))
1814 sp_header(stderr, LBFGS, inputrec->em_tol, number_steps);
1816 if (fplog)
1818 sp_header(fplog, LBFGS, inputrec->em_tol, number_steps);
1821 if (vsite)
1823 construct_vsites(vsite, state_global->x.rvec_array(), 1, nullptr,
1824 top->idef.iparams, top->idef.il,
1825 fr->ePBC, fr->bMolPBC, cr, state_global->box);
1828 /* Call the force routine and some auxiliary (neighboursearching etc.) */
1829 /* do_force always puts the charge groups in the box and shifts again
1830 * We do not unshift, so molecules are always whole
1832 neval++;
1833 EnergyEvaluator energyEvaluator {
1834 fplog, mdlog, cr, ms,
1835 top_global, top,
1836 inputrec, nrnb, wcycle, gstat,
1837 vsite, constr, fcd, graph,
1838 mdAtoms, fr, enerd
1840 energyEvaluator.run(&ems, mu_tot, vir, pres, -1, TRUE);
1842 if (MASTER(cr))
1844 /* Copy stuff to the energy bin for easy printing etc. */
1845 matrix nullBox = {};
1846 upd_mdebin(mdebin, FALSE, FALSE, static_cast<double>(step),
1847 mdatoms->tmass, enerd, nullptr, nullptr, nullptr, nullBox,
1848 nullptr, nullptr, vir, pres, nullptr, mu_tot, constr);
1850 print_ebin_header(fplog, step, step);
1851 print_ebin(mdoutf_get_fp_ene(outf), TRUE, FALSE, FALSE, fplog, step, step, eprNORMAL,
1852 mdebin, fcd, &(top_global->groups), &(inputrec->opts), nullptr);
1855 /* Set the initial step.
1856 * since it will be multiplied by the non-normalized search direction
1857 * vector (force vector the first time), we scale it by the
1858 * norm of the force.
1861 if (MASTER(cr))
1863 double sqrtNumAtoms = sqrt(static_cast<double>(state_global->natoms));
1864 fprintf(stderr, "Using %d BFGS correction steps.\n\n", nmaxcorr);
1865 fprintf(stderr, " F-max = %12.5e on atom %d\n", ems.fmax, ems.a_fmax + 1);
1866 fprintf(stderr, " F-Norm = %12.5e\n", ems.fnorm/sqrtNumAtoms);
1867 fprintf(stderr, "\n");
1868 /* and copy to the log file too... */
1869 fprintf(fplog, "Using %d BFGS correction steps.\n\n", nmaxcorr);
1870 fprintf(fplog, " F-max = %12.5e on atom %d\n", ems.fmax, ems.a_fmax + 1);
1871 fprintf(fplog, " F-Norm = %12.5e\n", ems.fnorm/sqrtNumAtoms);
1872 fprintf(fplog, "\n");
1875 // Point is an index to the memory of search directions, where 0 is the first one.
1876 point = 0;
1878 // Set initial search direction to the force (-gradient), or 0 for frozen particles.
1879 real *fInit = static_cast<real *>(ems.f.rvec_array()[0]);
1880 for (i = 0; i < n; i++)
1882 if (!frozen[i])
1884 dx[point][i] = fInit[i]; /* Initial search direction */
1886 else
1888 dx[point][i] = 0;
1892 // Stepsize will be modified during the search, and actually it is not critical
1893 // (the main efficiency in the algorithm comes from changing directions), but
1894 // we still need an initial value, so estimate it as the inverse of the norm
1895 // so we take small steps where the potential fluctuates a lot.
1896 stepsize = 1.0/ems.fnorm;
1898 /* Start the loop over BFGS steps.
1899 * Each successful step is counted, and we continue until
1900 * we either converge or reach the max number of steps.
1903 ncorr = 0;
1905 /* Set the gradient from the force */
1906 converged = FALSE;
1907 for (step = 0; (number_steps < 0 || step <= number_steps) && !converged; step++)
1910 /* Write coordinates if necessary */
1911 do_x = do_per_step(step, inputrec->nstxout);
1912 do_f = do_per_step(step, inputrec->nstfout);
1914 mdof_flags = 0;
1915 if (do_x)
1917 mdof_flags |= MDOF_X;
1920 if (do_f)
1922 mdof_flags |= MDOF_F;
1925 if (inputrec->bIMD)
1927 mdof_flags |= MDOF_IMD;
1930 mdoutf_write_to_trajectory_files(fplog, cr, outf, mdof_flags,
1931 top_global, step, static_cast<real>(step), &ems.s, state_global, observablesHistory, ems.f);
1933 /* Do the linesearching in the direction dx[point][0..(n-1)] */
1935 /* make s a pointer to current search direction - point=0 first time we get here */
1936 s = dx[point];
1938 real *xx = static_cast<real *>(ems.s.x.rvec_array()[0]);
1939 real *ff = static_cast<real *>(ems.f.rvec_array()[0]);
1941 // calculate line gradient in position A
1942 for (gpa = 0, i = 0; i < n; i++)
1944 gpa -= s[i]*ff[i];
1947 /* Calculate minimum allowed stepsize along the line, before the average (norm)
1948 * relative change in coordinate is smaller than precision
1950 for (minstep = 0, i = 0; i < n; i++)
1952 tmp = fabs(xx[i]);
1953 if (tmp < 1.0)
1955 tmp = 1.0;
1957 tmp = s[i]/tmp;
1958 minstep += tmp*tmp;
1960 minstep = GMX_REAL_EPS/sqrt(minstep/n);
1962 if (stepsize < minstep)
1964 converged = TRUE;
1965 break;
1968 // Before taking any steps along the line, store the old position
1969 *last = ems;
1970 real *lastx = static_cast<real *>(last->s.x.data()[0]);
1971 real *lastf = static_cast<real *>(last->f.data()[0]);
1972 Epot0 = ems.epot;
1974 *sa = ems;
1976 /* Take a step downhill.
1977 * In theory, we should find the actual minimum of the function in this
1978 * direction, somewhere along the line.
1979 * That is quite possible, but it turns out to take 5-10 function evaluations
1980 * for each line. However, we don't really need to find the exact minimum -
1981 * it is much better to start a new BFGS step in a modified direction as soon
1982 * as we are close to it. This will save a lot of energy evaluations.
1984 * In practice, we just try to take a single step.
1985 * If it worked (i.e. lowered the energy), we increase the stepsize but
1986 * continue straight to the next BFGS step without trying to find any minimum,
1987 * i.e. we change the search direction too. If the line was smooth, it is
1988 * likely we are in a smooth region, and then it makes sense to take longer
1989 * steps in the modified search direction too.
1991 * If it didn't work (higher energy), there must be a minimum somewhere between
1992 * the old position and the new one. Then we need to start by finding a lower
1993 * value before we change search direction. Since the energy was apparently
1994 * quite rough, we need to decrease the step size.
1996 * Due to the finite numerical accuracy, it turns out that it is a good idea
1997 * to accept a SMALL increase in energy, if the derivative is still downhill.
1998 * This leads to lower final energies in the tests I've done. / Erik
2001 // State "A" is the first position along the line.
2002 // reference position along line is initially zero
2003 a = 0.0;
2005 // Check stepsize first. We do not allow displacements
2006 // larger than emstep.
2010 // Pick a new position C by adding stepsize to A.
2011 c = a + stepsize;
2013 // Calculate what the largest change in any individual coordinate
2014 // would be (translation along line * gradient along line)
2015 maxdelta = 0;
2016 for (i = 0; i < n; i++)
2018 delta = c*s[i];
2019 if (delta > maxdelta)
2021 maxdelta = delta;
2024 // If any displacement is larger than the stepsize limit, reduce the step
2025 if (maxdelta > inputrec->em_stepsize)
2027 stepsize *= 0.1;
2030 while (maxdelta > inputrec->em_stepsize);
2032 // Take a trial step and move the coordinate array xc[] to position C
2033 real *xc = static_cast<real *>(sc->s.x.rvec_array()[0]);
2034 for (i = 0; i < n; i++)
2036 xc[i] = lastx[i] + c*s[i];
2039 neval++;
2040 // Calculate energy for the trial step in position C
2041 energyEvaluator.run(sc, mu_tot, vir, pres, step, FALSE);
2043 // Calc line gradient in position C
2044 real *fc = static_cast<real *>(sc->f.rvec_array()[0]);
2045 for (gpc = 0, i = 0; i < n; i++)
2047 gpc -= s[i]*fc[i]; /* f is negative gradient, thus the sign */
2049 /* Sum the gradient along the line across CPUs */
2050 if (PAR(cr))
2052 gmx_sumd(1, &gpc, cr);
2055 // This is the max amount of increase in energy we tolerate.
2056 // By allowing VERY small changes (close to numerical precision) we
2057 // frequently find even better (lower) final energies.
2058 tmp = std::sqrt(GMX_REAL_EPS)*fabs(sa->epot);
2060 // Accept the step if the energy is lower in the new position C (compared to A),
2061 // or if it is not significantly higher and the line derivative is still negative.
2062 foundlower = sc->epot < sa->epot || (gpc < 0 && sc->epot < (sa->epot + tmp));
2063 // If true, great, we found a better energy. We no longer try to alter the
2064 // stepsize, but simply accept this new better position. Then we select a new
2065 // search direction instead, which will be much more efficient than continuing
2066 // to take smaller steps along a line. Set fnorm based on the new C position,
2067 // which will be used to update the stepsize to 1/fnorm further down.
2069 // If false, the energy is NOT lower in point C, i.e. it will be the same
2070 // or higher than in point A. In this case it is pointless to move to point C,
2071 // so we will have to do more iterations along the same line to find a smaller
2072 // value in the interval [A=0.0,C].
2073 // Here, A is still 0.0, but that will change when we do a search in the interval
2074 // [0.0,C] below. That search we will do by interpolation or bisection rather
2075 // than with the stepsize, so no need to modify it. For the next search direction
2076 // it will be reset to 1/fnorm anyway.
2078 if (!foundlower)
2080 // OK, if we didn't find a lower value we will have to locate one now - there must
2081 // be one in the interval [a,c].
2082 // The same thing is valid here, though: Don't spend dozens of iterations to find
2083 // the line minimum. We try to interpolate based on the derivative at the endpoints,
2084 // and only continue until we find a lower value. In most cases this means 1-2 iterations.
2085 // I also have a safeguard for potentially really pathological functions so we never
2086 // take more than 20 steps before we give up.
2087 // If we already found a lower value we just skip this step and continue to the update.
2088 real fnorm = 0;
2089 nminstep = 0;
2092 // Select a new trial point B in the interval [A,C].
2093 // If the derivatives at points a & c have different sign we interpolate to zero,
2094 // otherwise just do a bisection since there might be multiple minima/maxima
2095 // inside the interval.
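/* The interpolation below treats the line derivative as linear between the
 * endpoints: g(b) = gpa + (b-a)*(gpc-gpa)/(c-a) = 0 gives
 *   b = a - gpa*(c-a)/(gpc-gpa) = a + gpa*(a-c)/(gpc-gpa).
 */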
2096 if (gpa < 0 && gpc > 0)
2098 b = a + gpa*(a-c)/(gpc-gpa);
2100 else
2102 b = 0.5*(a+c);
2105 /* safeguard if interpolation close to machine accuracy causes errors:
2106 * never go outside the interval
2108 if (b <= a || b >= c)
2110 b = 0.5*(a+c);
2113 // Take a trial step to point B
2114 real *xb = static_cast<real *>(sb->s.x.rvec_array()[0]);
2115 for (i = 0; i < n; i++)
2117 xb[i] = lastx[i] + b*s[i];
2120 neval++;
2121 // Calculate energy for the trial step in point B
2122 energyEvaluator.run(sb, mu_tot, vir, pres, step, FALSE);
2123 fnorm = sb->fnorm;
2125 // Calculate gradient in point B
2126 real *fb = static_cast<real *>(sb->f.rvec_array()[0]);
2127 for (gpb = 0, i = 0; i < n; i++)
2129 gpb -= s[i]*fb[i]; /* f is negative gradient, thus the sign */
2132 /* Sum the gradient along the line across CPUs */
2133 if (PAR(cr))
2135 gmx_sumd(1, &gpb, cr);
2138 // Keep one of the intervals [A,B] or [B,C] based on the value of the derivative
2139 // at the new point B, and rename the endpoints of this new interval A and C.
2140 if (gpb > 0)
2142 /* Replace c endpoint with b */
2143 c = b;
2144 /* swap states b and c */
2145 swap_em_state(&sb, &sc);
2147 else
2149 /* Replace a endpoint with b */
2150 a = b;
2151 /* swap states a and b */
2152 swap_em_state(&sa, &sb);
2156 * Stop search as soon as we find a value smaller than the endpoints,
2157 * or if the tolerance is below machine precision.
2158 * Never run more than 20 steps, no matter what.
2160 nminstep++;
2162 while ((sb->epot > sa->epot || sb->epot > sc->epot) && (nminstep < 20));
2164 if (std::fabs(sb->epot - Epot0) < GMX_REAL_EPS || nminstep >= 20)
2166 /* OK. We couldn't find a significantly lower energy.
2167 * If ncorr==0 this was steepest descent, and then we give up.
2168 * If not, reset memory to restart as steepest descent before quitting.
2170 if (ncorr == 0)
2172 /* Converged */
2173 converged = TRUE;
2174 break;
2176 else
2178 /* Reset memory */
2179 ncorr = 0;
2180 /* Search in gradient direction */
2181 for (i = 0; i < n; i++)
2183 dx[point][i] = ff[i];
2185 /* Reset stepsize */
2186 stepsize = 1.0/fnorm;
2187 continue;
2191 /* Select min energy state of A & C, put the best in xx/ff/Epot
2193 if (sc->epot < sa->epot)
2195 /* Use state C */
2196 ems = *sc;
2197 step_taken = c;
2199 else
2201 /* Use state A */
2202 ems = *sa;
2203 step_taken = a;
2207 else
2209 /* found lower */
2210 /* Use state C */
2211 ems = *sc;
2212 step_taken = c;
2215 /* Update the memory information, and calculate a new
2216 * approximation of the inverse Hessian
2219 /* Have new data in Epot, xx, ff */
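/* This is the standard L-BFGS memory update and two-loop recursion: the step
 * s_k = x_new - x_old is stored in dx[point], the gradient change
 * y_k = g_new - g_old is stored in dg[point] (computed as lastf - ff, since
 * f is the negative gradient), rho[point] = 1/(y_k . s_k), and
 * diag = (s_k . y_k)/(y_k . y_k) scales the initial inverse-Hessian guess.
 * The recursion below then turns p = -g (i.e. ff) into p ~= H*(-g), the new
 * search direction, using at most nmaxcorr stored correction pairs.
 */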
2220 if (ncorr < nmaxcorr)
2222 ncorr++;
2225 for (i = 0; i < n; i++)
2227 dg[point][i] = lastf[i]-ff[i];
2228 dx[point][i] *= step_taken;
2231 dgdg = 0;
2232 dgdx = 0;
2233 for (i = 0; i < n; i++)
2235 dgdg += dg[point][i]*dg[point][i];
2236 dgdx += dg[point][i]*dx[point][i];
2239 diag = dgdx/dgdg;
2241 rho[point] = 1.0/dgdx;
2242 point++;
2244 if (point >= nmaxcorr)
2246 point = 0;
2249 /* Update */
2250 for (i = 0; i < n; i++)
2252 p[i] = ff[i];
2255 cp = point;
2257 /* Recursive update. First go back over the memory points */
2258 for (k = 0; k < ncorr; k++)
2260 cp--;
2261 if (cp < 0)
2263 cp = ncorr-1;
2266 sq = 0;
2267 for (i = 0; i < n; i++)
2269 sq += dx[cp][i]*p[i];
2272 alpha[cp] = rho[cp]*sq;
2274 for (i = 0; i < n; i++)
2276 p[i] -= alpha[cp]*dg[cp][i];
2280 for (i = 0; i < n; i++)
2282 p[i] *= diag;
2285 /* And then go forward again */
2286 for (k = 0; k < ncorr; k++)
2288 yr = 0;
2289 for (i = 0; i < n; i++)
2291 yr += p[i]*dg[cp][i];
2294 beta = rho[cp]*yr;
2295 beta = alpha[cp]-beta;
2297 for (i = 0; i < n; i++)
2299 p[i] += beta*dx[cp][i];
2302 cp++;
2303 if (cp >= ncorr)
2305 cp = 0;
2309 for (i = 0; i < n; i++)
2311 if (!frozen[i])
2313 dx[point][i] = p[i];
2315 else
2317 dx[point][i] = 0;
2321 /* Print it if necessary */
2322 if (MASTER(cr))
2324 if (mdrunOptions.verbose)
2326 double sqrtNumAtoms = sqrt(static_cast<double>(state_global->natoms));
2327 fprintf(stderr, "\rStep %d, Epot=%12.6e, Fnorm=%9.3e, Fmax=%9.3e (atom %d)\n",
2328 step, ems.epot, ems.fnorm/sqrtNumAtoms, ems.fmax, ems.a_fmax + 1);
2329 fflush(stderr);
2331 /* Store the new (lower) energies */
2332 matrix nullBox = {};
2333 upd_mdebin(mdebin, FALSE, FALSE, static_cast<double>(step),
2334 mdatoms->tmass, enerd, nullptr, nullptr, nullptr, nullBox,
2335 nullptr, nullptr, vir, pres, nullptr, mu_tot, constr);
2336 do_log = do_per_step(step, inputrec->nstlog);
2337 do_ene = do_per_step(step, inputrec->nstenergy);
2338 if (do_log)
2340 print_ebin_header(fplog, step, step);
2342 print_ebin(mdoutf_get_fp_ene(outf), do_ene, FALSE, FALSE,
2343 do_log ? fplog : nullptr, step, step, eprNORMAL,
2344 mdebin, fcd, &(top_global->groups), &(inputrec->opts), nullptr);
2347 /* Send x and E to IMD client, if bIMD is TRUE. */
2348 if (do_IMD(inputrec->bIMD, step, cr, TRUE, state_global->box, state_global->x.rvec_array(), inputrec, 0, wcycle) && MASTER(cr))
2350 IMD_send_positions(inputrec->imd);
2353 // Reset stepsize if we are doing more iterations
2354 stepsize = 1.0/ems.fnorm;
2356 /* Stop when the maximum force lies below tolerance.
2357 * If we have reached machine precision, converged is already set to true.
2359 converged = converged || (ems.fmax < inputrec->em_tol);
2361 } /* End of the loop */
2363 /* IMD cleanup, if bIMD is TRUE. */
2364 IMD_finalize(inputrec->bIMD, inputrec->imd);
2366 if (converged)
2368 step--; /* we never took that last step in this case */
2371 if (ems.fmax > inputrec->em_tol)
2373 if (MASTER(cr))
2375 warn_step(fplog, inputrec->em_tol, ems.fmax,
2376 step-1 == number_steps, FALSE);
2378 converged = FALSE;
2381 /* If we printed the energy and/or the log file on the last step (which was the final step),
2382 * we don't have to do it again; otherwise print the final values.
2384 if (!do_log) /* Write final value to log since we didn't do anything last step */
2386 print_ebin_header(fplog, step, step);
2388 if (!do_ene || !do_log) /* Write final energy file entries */
2390 print_ebin(mdoutf_get_fp_ene(outf), !do_ene, FALSE, FALSE,
2391 !do_log ? fplog : nullptr, step, step, eprNORMAL,
2392 mdebin, fcd, &(top_global->groups), &(inputrec->opts), nullptr);
2395 /* Print some stuff... */
2396 if (MASTER(cr))
2398 fprintf(stderr, "\nwriting lowest energy coordinates.\n");
2401 /* IMPORTANT!
2402 * For accurate normal mode calculation it is imperative that we
2403 * store the last conformation into the full precision binary trajectory.
2405 * However, we should only do it if we did NOT already write this step
2406 * above (which we did if do_x or do_f was true).
2408 do_x = !do_per_step(step, inputrec->nstxout);
2409 do_f = !do_per_step(step, inputrec->nstfout);
2410 write_em_traj(fplog, cr, outf, do_x, do_f, ftp2fn(efSTO, nfile, fnm),
2411 top_global, inputrec, step,
2412 &ems, state_global, observablesHistory);
2414 if (MASTER(cr))
2416 double sqrtNumAtoms = sqrt(static_cast<double>(state_global->natoms));
2417 print_converged(stderr, LBFGS, inputrec->em_tol, step, converged,
2418 number_steps, &ems, sqrtNumAtoms);
2419 print_converged(fplog, LBFGS, inputrec->em_tol, step, converged,
2420 number_steps, &ems, sqrtNumAtoms);
2422 fprintf(fplog, "\nPerformed %d energy evaluations in total.\n", neval);
2425 finish_em(cr, outf, walltime_accounting, wcycle);
2427 /* To print the actual number of steps we needed somewhere */
2428 walltime_accounting_set_nsteps_done(walltime_accounting, step);
2431 void
2432 Integrator::do_steep()
2434 const char *SD = "Steepest Descents";
2435 gmx_localtop_t *top;
2436 gmx_enerdata_t *enerd;
2437 gmx_global_stat_t gstat;
2438 t_graph *graph;
2439 real stepsize;
2440 real ustep;
2441 gmx_mdoutf_t outf;
2442 t_mdebin *mdebin;
2443 gmx_bool bDone, bAbort, do_x, do_f;
2444 tensor vir, pres;
2445 rvec mu_tot;
2446 int nsteps;
2447 int count = 0;
2448 int steps_accepted = 0;
2449 auto mdatoms = mdAtoms->mdatoms();
2451 GMX_LOG(mdlog.info).asParagraph().
2452 appendText("Note that activating steepest-descent energy minimization via the "
2453 "integrator .mdp option and the command gmx mdrun may "
2454 "be available in a different form in a future version of GROMACS, "
2455 "e.g. gmx minimize and an .mdp option.");
2457 /* Create 2 states on the stack and extract pointers that we will swap */
2458 em_state_t s0 {}, s1 {};
2459 em_state_t *s_min = &s0;
2460 em_state_t *s_try = &s1;
2462 /* Init em and store the local state in s_try */
2463 init_em(fplog, mdlog, SD, cr, ms, outputProvider, inputrec, mdrunOptions,
2464 state_global, top_global, s_try, &top,
2465 nrnb, mu_tot, fr, &enerd, &graph, mdAtoms, &gstat,
2466 vsite, constr, nullptr,
2467 nfile, fnm, &outf, &mdebin, wcycle);
2469 /* Print to log file */
2470 print_em_start(fplog, cr, walltime_accounting, wcycle, SD);
2472 /* Set variables for stepsize (in nm). This is the largest
2473 * step that we are going to make in any direction.
2475 ustep = inputrec->em_stepsize;
2476 stepsize = 0;
2478 /* Max number of steps */
2479 nsteps = inputrec->nsteps;
2481 if (MASTER(cr))
2483 /* Print to the screen */
2484 sp_header(stderr, SD, inputrec->em_tol, nsteps);
2486 if (fplog)
2488 sp_header(fplog, SD, inputrec->em_tol, nsteps);
2490 EnergyEvaluator energyEvaluator {
2491 fplog, mdlog, cr, ms,
2492 top_global, top,
2493 inputrec, nrnb, wcycle, gstat,
2494 vsite, constr, fcd, graph,
2495 mdAtoms, fr, enerd
2498 /**** HERE STARTS THE LOOP ****
2499 * count is the counter for the number of steps
2500 * bDone will be TRUE when the minimization has converged
2501 * bAbort will be TRUE when nsteps steps have been performed or when
2502 * the stepsize becomes smaller than is reasonable for machine precision
2504 count = 0;
2505 bDone = FALSE;
2506 bAbort = FALSE;
2507 while (!bDone && !bAbort)
2509 bAbort = (nsteps >= 0) && (count == nsteps);
2511 /* set new coordinates, except for first step */
2512 bool validStep = true;
2513 if (count > 0)
2515 validStep =
2516 do_em_step(cr, inputrec, mdatoms,
2517 s_min, stepsize, &s_min->f, s_try,
2518 constr, count);
2521 if (validStep)
2523 energyEvaluator.run(s_try, mu_tot, vir, pres, count, count == 0);
2525 else
2527 // Signal constraint error during stepping with energy=inf
2528 s_try->epot = std::numeric_limits<real>::infinity();
2531 if (MASTER(cr))
2533 print_ebin_header(fplog, count, count);
2536 if (count == 0)
2538 s_min->epot = s_try->epot;
2541 /* Print it if necessary */
2542 if (MASTER(cr))
2544 if (mdrunOptions.verbose)
2546 fprintf(stderr, "Step=%5d, Dmax= %6.1e nm, Epot= %12.5e Fmax= %11.5e, atom= %d%c",
2547 count, ustep, s_try->epot, s_try->fmax, s_try->a_fmax+1,
2548 ( (count == 0) || (s_try->epot < s_min->epot) ) ? '\n' : '\r');
2549 fflush(stderr);
2552 if ( (count == 0) || (s_try->epot < s_min->epot) )
2554 /* Store the new (lower) energies */
2555 matrix nullBox = {};
2556 upd_mdebin(mdebin, FALSE, FALSE, static_cast<double>(count),
2557 mdatoms->tmass, enerd, nullptr, nullptr, nullptr,
2558 nullBox, nullptr, nullptr, vir, pres, nullptr, mu_tot, constr);
2560 /* Prepare IMD energy record, if bIMD is TRUE. */
2561 IMD_fill_energy_record(inputrec->bIMD, inputrec->imd, enerd, count, TRUE);
2563 print_ebin(mdoutf_get_fp_ene(outf), TRUE,
2564 do_per_step(steps_accepted, inputrec->nstdisreout),
2565 do_per_step(steps_accepted, inputrec->nstorireout),
2566 fplog, count, count, eprNORMAL,
2567 mdebin, fcd, &(top_global->groups), &(inputrec->opts), nullptr);
2568 fflush(fplog);
2572 /* Now if the new energy is smaller than the previous...
2573 * or if this is the first step!
2574 * or if we did random steps!
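/* In short: an accepted step lets ustep grow by 20% (after the first step),
 * while a rejected step halves it. The update itself scales the force by
 * stepsize = ustep/fmax, so the atom with the largest force moves exactly
 * ustep and every other atom moves less.
 */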
2577 if ( (count == 0) || (s_try->epot < s_min->epot) )
2579 steps_accepted++;
2581 /* Test whether the convergence criterion is met... */
2582 bDone = (s_try->fmax < inputrec->em_tol);
2584 /* Copy the arrays for force, positions and energy */
2585 /* The 'Min' array always holds the coords and forces of the minimal
2586 sampled energy */
2587 swap_em_state(&s_min, &s_try);
2588 if (count > 0)
2590 ustep *= 1.2;
2593 /* Write to trn, if necessary */
2594 do_x = do_per_step(steps_accepted, inputrec->nstxout);
2595 do_f = do_per_step(steps_accepted, inputrec->nstfout);
2596 write_em_traj(fplog, cr, outf, do_x, do_f, nullptr,
2597 top_global, inputrec, count,
2598 s_min, state_global, observablesHistory);
2600 else
2602 /* If energy is not smaller make the step smaller... */
2603 ustep *= 0.5;
2605 if (DOMAINDECOMP(cr) && s_min->s.ddp_count != cr->dd->ddp_count)
2607 /* Reload the old state */
2608 em_dd_partition_system(fplog, mdlog, count, cr, top_global, inputrec,
2609 s_min, top, mdAtoms, fr, vsite, constr,
2610 nrnb, wcycle);
2614 /* Determine new step */
2615 stepsize = ustep/s_min->fmax;
2617 /* Check if stepsize is too small, with 1 nm as a characteristic length */
2618 #if GMX_DOUBLE
2619 if (count == nsteps || ustep < 1e-12)
2620 #else
2621 if (count == nsteps || ustep < 1e-6)
2622 #endif
2624 if (MASTER(cr))
2626 warn_step(fplog, inputrec->em_tol, s_min->fmax,
2627 count == nsteps, constr != nullptr);
2629 bAbort = TRUE;
2632 /* Send IMD energies and positions, if bIMD is TRUE. */
2633 if (do_IMD(inputrec->bIMD, count, cr, TRUE, state_global->box,
2634 MASTER(cr) ? state_global->x.rvec_array() : nullptr,
2635 inputrec, 0, wcycle) &&
2636 MASTER(cr))
2638 IMD_send_positions(inputrec->imd);
2641 count++;
2642 } /* End of the loop */
2644 /* IMD cleanup, if bIMD is TRUE. */
2645 IMD_finalize(inputrec->bIMD, inputrec->imd);
2647 /* Print some data... */
2648 if (MASTER(cr))
2650 fprintf(stderr, "\nwriting lowest energy coordinates.\n");
2652 write_em_traj(fplog, cr, outf, TRUE, inputrec->nstfout != 0, ftp2fn(efSTO, nfile, fnm),
2653 top_global, inputrec, count,
2654 s_min, state_global, observablesHistory);
2656 if (MASTER(cr))
2658 double sqrtNumAtoms = sqrt(static_cast<double>(state_global->natoms));
2660 print_converged(stderr, SD, inputrec->em_tol, count, bDone, nsteps,
2661 s_min, sqrtNumAtoms);
2662 print_converged(fplog, SD, inputrec->em_tol, count, bDone, nsteps,
2663 s_min, sqrtNumAtoms);
2666 finish_em(cr, outf, walltime_accounting, wcycle);
2668 /* To print the actual number of steps we needed somewhere */
2669 inputrec->nsteps = count;
2671 walltime_accounting_set_nsteps_done(walltime_accounting, count);
2674 void
2675 Integrator::do_nm()
2677 const char *NM = "Normal Mode Analysis";
2678 gmx_mdoutf_t outf;
2679 int nnodes, node;
2680 gmx_localtop_t *top;
2681 gmx_enerdata_t *enerd;
2682 gmx_global_stat_t gstat;
2683 t_graph *graph;
2684 tensor vir, pres;
2685 rvec mu_tot;
2686 rvec *dfdx;
2687 gmx_bool bSparse; /* use sparse matrix storage format */
2688 size_t sz;
2689 gmx_sparsematrix_t * sparse_matrix = nullptr;
2690 real * full_matrix = nullptr;
2692 /* added with respect to mdrun */
2693 int row, col;
2694 real der_range = 10.0*std::sqrt(GMX_REAL_EPS);
2695 real x_min;
2696 bool bIsMaster = MASTER(cr);
2697 auto mdatoms = mdAtoms->mdatoms();
2699 GMX_LOG(mdlog.info).asParagraph().
2700 appendText("Note that activating normal-mode analysis via the integrator "
2701 ".mdp option and the command gmx mdrun may "
2702 "be available in a different form in a future version of GROMACS, "
2703 "e.g. gmx normal-modes.");
2705 if (constr != nullptr)
2707 gmx_fatal(FARGS, "Constraints present with Normal Mode Analysis, this combination is not supported");
2710 gmx_shellfc_t *shellfc;
2712 em_state_t state_work {};
2714 /* Init em and store the local state in state_minimum */
2715 init_em(fplog, mdlog, NM, cr, ms, outputProvider, inputrec, mdrunOptions,
2716 state_global, top_global, &state_work, &top,
2717 nrnb, mu_tot, fr, &enerd, &graph, mdAtoms, &gstat,
2718 vsite, constr, &shellfc,
2719 nfile, fnm, &outf, nullptr, wcycle);
2721 std::vector<int> atom_index = get_atom_index(top_global);
2722 std::vector<gmx::RVec> fneg(atom_index.size(), {0, 0, 0});
2723 snew(dfdx, atom_index.size());
2725 #if !GMX_DOUBLE
2726 if (bIsMaster)
2728 fprintf(stderr,
2729 "NOTE: This version of GROMACS has been compiled in single precision,\n"
2730 " which MIGHT not be accurate enough for normal mode analysis.\n"
2731 " GROMACS now uses sparse matrix storage, so the memory requirements\n"
2732 " are fairly modest even if you recompile in double precision.\n\n");
2734 #endif
2736 /* Check if we can/should use sparse storage format.
2738 * Sparse format is only useful when the Hessian itself is sparse, which it
2739 * will be when we use a cutoff.
2740 * For small systems (n<1000) it is easier to always use full matrix format, though.
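/* For scale: the full format stores sz*sz = (3N)^2 reals, i.e. roughly 36 MB
 * for N = 1000 atoms in single precision but already ~3.6 GB at N = 10000,
 * which is why the sparse format matters for large cutoff systems.
 */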
2742 if (EEL_FULL(fr->ic->eeltype) || fr->rlist == 0.0)
2744 GMX_LOG(mdlog.warning).appendText("Non-cutoff electrostatics used, forcing full Hessian format.");
2745 bSparse = FALSE;
2747 else if (atom_index.size() < 1000)
2749 GMX_LOG(mdlog.warning).appendTextFormatted("Small system size (N=%zu), using full Hessian format.",
2750 atom_index.size());
2751 bSparse = FALSE;
2753 else
2755 GMX_LOG(mdlog.warning).appendText("Using compressed symmetric sparse Hessian format.");
2756 bSparse = TRUE;
2759 /* Number of dimensions, based on real atoms, that is, not vsites or shells */
2760 sz = DIM*atom_index.size();
2762 fprintf(stderr, "Allocating Hessian memory...\n\n");
2764 if (bSparse)
2766 sparse_matrix = gmx_sparsematrix_init(sz);
2767 sparse_matrix->compressed_symmetric = TRUE;
2769 else
2771 snew(full_matrix, sz*sz);
2774 init_nrnb(nrnb);
2777 /* Write start time and temperature */
2778 print_em_start(fplog, cr, walltime_accounting, wcycle, NM);
2780 /* fudge nr of steps to nr of atoms */
2781 inputrec->nsteps = atom_index.size()*2;
2783 if (bIsMaster)
2785 fprintf(stderr, "starting normal mode calculation '%s'\n%" PRId64 " steps.\n\n",
2786 *(top_global->name), inputrec->nsteps);
2789 nnodes = cr->nnodes;
2791 /* Make evaluate_energy do a single node force calculation */
2792 cr->nnodes = 1;
2793 EnergyEvaluator energyEvaluator {
2794 fplog, mdlog, cr, ms,
2795 top_global, top,
2796 inputrec, nrnb, wcycle, gstat,
2797 vsite, constr, fcd, graph,
2798 mdAtoms, fr, enerd
2800 energyEvaluator.run(&state_work, mu_tot, vir, pres, -1, TRUE);
2801 cr->nnodes = nnodes;
2803 /* if forces are not small, warn user */
2804 get_state_f_norm_max(cr, &(inputrec->opts), mdatoms, &state_work);
2806 GMX_LOG(mdlog.warning).appendTextFormatted("Maximum force:%12.5e", state_work.fmax);
2807 if (state_work.fmax > 1.0e-3)
2809 GMX_LOG(mdlog.warning).appendText(
2810 "The force is probably not small enough to "
2811 "ensure that you are at a minimum.\n"
2812 "Be aware that negative eigenvalues may occur\n"
2813 "when the resulting matrix is diagonalized.");
2816 /***********************************************************
2818 * Loop over all pairs in matrix
2820 * do_force called twice. Once with positive and
2821 * once with negative displacement
2823 ************************************************************/
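/* Each Hessian element follows from central differences: since f = -dE/dx,
 *   d2E/(dx_row dx_col) ~= -( f_col(x_row + h) - f_col(x_row - h) ) / (2*h)
 * with h = der_range, which is what gets accumulated into dfdx below.
 */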
2825 /* Steps are divided one by one over the nodes */
2826 bool bNS = true;
2827 auto state_work_x = makeArrayRef(state_work.s.x);
2828 auto state_work_f = makeArrayRef(state_work.f);
2829 for (unsigned int aid = cr->nodeid; aid < atom_index.size(); aid += nnodes)
2831 size_t atom = atom_index[aid];
2832 for (size_t d = 0; d < DIM; d++)
2834 int64_t step = 0;
2835 int force_flags = GMX_FORCE_STATECHANGED | GMX_FORCE_ALLFORCES;
2836 double t = 0;
2838 x_min = state_work_x[atom][d];
2840 for (unsigned int dx = 0; (dx < 2); dx++)
2842 if (dx == 0)
2844 state_work_x[atom][d] = x_min - der_range;
2846 else
2848 state_work_x[atom][d] = x_min + der_range;
2851 /* Make evaluate_energy do a single node force calculation */
2852 cr->nnodes = 1;
2853 if (shellfc)
2855 /* Now is the time to relax the shells */
2856 relax_shell_flexcon(fplog,
2859 mdrunOptions.verbose,
2860 nullptr,
2861 step,
2862 inputrec,
2863 bNS,
2864 force_flags,
2865 top,
2866 constr,
2867 enerd,
2868 fcd,
2869 &state_work.s,
2870 state_work.f.arrayRefWithPadding(),
2871 vir,
2872 mdatoms,
2873 nrnb,
2874 wcycle,
2875 graph,
2876 &top_global->groups,
2877 shellfc,
2880 mu_tot,
2881 vsite,
2882 DdOpenBalanceRegionBeforeForceComputation::no,
2883 DdCloseBalanceRegionAfterForceComputation::no);
2884 bNS = false;
2885 step++;
2887 else
2889 energyEvaluator.run(&state_work, mu_tot, vir, pres, aid*2+dx, FALSE);
2892 cr->nnodes = nnodes;
2894 if (dx == 0)
2896 std::copy(state_work_f.begin(), state_work_f.begin()+atom_index.size(), fneg.begin());
2900 /* x is restored to original */
2901 state_work_x[atom][d] = x_min;
2903 for (size_t j = 0; j < atom_index.size(); j++)
2905 for (size_t k = 0; (k < DIM); k++)
2907 dfdx[j][k] =
2908 -(state_work_f[atom_index[j]][k] - fneg[j][k])/(2*der_range);
2912 if (!bIsMaster)
2914 #if GMX_MPI
2915 #define mpi_type GMX_MPI_REAL
2916 MPI_Send(dfdx[0], atom_index.size()*DIM, mpi_type, MASTER(cr),
2917 cr->nodeid, cr->mpi_comm_mygroup);
2918 #endif
2920 else
2922 for (node = 0; (node < nnodes && aid+node < atom_index.size()); node++)
2924 if (node > 0)
2926 #if GMX_MPI
2927 MPI_Status stat;
2928 MPI_Recv(dfdx[0], atom_index.size()*DIM, mpi_type, node, node,
2929 cr->mpi_comm_mygroup, &stat);
2930 #undef mpi_type
2931 #endif
2934 row = (aid + node)*DIM + d;
2936 for (size_t j = 0; j < atom_index.size(); j++)
2938 for (size_t k = 0; k < DIM; k++)
2940 col = j*DIM + k;
2942 if (bSparse)
2944 if (col >= row && dfdx[j][k] != 0.0)
2946 gmx_sparsematrix_increment_value(sparse_matrix,
2947 row, col, dfdx[j][k]);
2950 else
2952 full_matrix[row*sz+col] = dfdx[j][k];
2959 if (mdrunOptions.verbose && fplog)
2961 fflush(fplog);
2964 /* write progress */
2965 if (bIsMaster && mdrunOptions.verbose)
2967 fprintf(stderr, "\rFinished step %d out of %d",
2968 static_cast<int>(std::min(atom+nnodes, atom_index.size())),
2969 static_cast<int>(atom_index.size()));
2970 fflush(stderr);
2974 if (bIsMaster)
2976 fprintf(stderr, "\n\nWriting Hessian...\n");
2977 gmx_mtxio_write(ftp2fn(efMTX, nfile, fnm), sz, sz, full_matrix, sparse_matrix);
2980 finish_em(cr, outf, walltime_accounting, wcycle);
2982 walltime_accounting_set_nsteps_done(walltime_accounting, atom_index.size()*2);
2985 } // namespace gmx