src/gromacs/mdrun/minimize.cpp
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017 The GROMACS development team.
 * Copyright (c) 2018,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief This file defines integrators for energy minimization
 *
 * \author Berk Hess <hess@kth.se>
 * \author Erik Lindahl <erik@kth.se>
 * \ingroup module_mdrun
 */
#include "gmxpre.h"

#include "config.h"

#include <cmath>
#include <cstring>
#include <ctime>

#include <algorithm>
#include <vector>

#include "gromacs/commandline/filenm.h"
#include "gromacs/domdec/collect.h"
#include "gromacs/domdec/dlbtiming.h"
#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/domdec/mdsetup.h"
#include "gromacs/domdec/partition.h"
#include "gromacs/ewald/pme_pp.h"
#include "gromacs/fileio/confio.h"
#include "gromacs/fileio/mtxio.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/imd/imd.h"
#include "gromacs/linearalgebra/sparsematrix.h"
#include "gromacs/listed_forces/listed_forces.h"
#include "gromacs/math/functions.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdlib/constr.h"
#include "gromacs/mdlib/coupling.h"
#include "gromacs/mdlib/dispersioncorrection.h"
#include "gromacs/mdlib/ebin.h"
#include "gromacs/mdlib/enerdata_utils.h"
#include "gromacs/mdlib/energyoutput.h"
#include "gromacs/mdlib/force.h"
#include "gromacs/mdlib/force_flags.h"
#include "gromacs/mdlib/forcerec.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdlib/md_support.h"
#include "gromacs/mdlib/mdatoms.h"
#include "gromacs/mdlib/stat.h"
#include "gromacs/mdlib/tgroup.h"
#include "gromacs/mdlib/trajectory_writing.h"
#include "gromacs/mdlib/update.h"
#include "gromacs/mdlib/vsite.h"
#include "gromacs/mdrunutility/handlerestart.h"
#include "gromacs/mdrunutility/printtime.h"
#include "gromacs/mdtypes/checkpointdata.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/forcebuffers.h"
#include "gromacs/mdtypes/forcerec.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/interaction_const.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/mdtypes/mdatom.h"
#include "gromacs/mdtypes/mdrunoptions.h"
#include "gromacs/mdtypes/state.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/timing/walltime_accounting.h"
#include "gromacs/topology/mtop_util.h"
#include "gromacs/topology/topology.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/smalloc.h"

#include "legacysimulator.h"
#include "shellfc.h"
using gmx::ArrayRef;
using gmx::MdrunScheduleWorkload;
using gmx::RVec;
using gmx::VirtualSitesHandler;

//! Utility structure for manipulating states during EM
typedef struct em_state
{
    //! Copy of the global state
    t_state s;
    //! Force array
    gmx::ForceBuffers f;
    //! Potential energy
    real epot;
    //! Norm of the force
    real fnorm;
    //! Maximum force
    real fmax;
    //! Atom index of the maximum force
    int a_fmax;
} em_state_t;
//! Print the EM starting conditions
static void print_em_start(FILE*                     fplog,
                           const t_commrec*          cr,
                           gmx_walltime_accounting_t walltime_accounting,
                           gmx_wallcycle_t           wcycle,
                           const char*               name)
{
    walltime_accounting_start_time(walltime_accounting);
    wallcycle_start(wcycle, ewcRUN);
    print_start(fplog, cr, walltime_accounting, name);
}
//! Stop counting time for EM
static void em_time_end(gmx_walltime_accounting_t walltime_accounting, gmx_wallcycle_t wcycle)
{
    wallcycle_stop(wcycle, ewcRUN);

    walltime_accounting_end_time(walltime_accounting);
}
//! Printing a log file and console header
static void sp_header(FILE* out, const char* minimizer, real ftol, int nsteps)
{
    fprintf(out, "\n");
    fprintf(out, "%s:\n", minimizer);
    fprintf(out, "   Tolerance (Fmax)   = %12.5e\n", ftol);
    fprintf(out, "   Number of steps    = %12d\n", nsteps);
}
//! Print warning message
static void warn_step(FILE* fp, real ftol, real fmax, gmx_bool bLastStep, gmx_bool bConstrain)
{
    constexpr bool realIsDouble = GMX_DOUBLE;
    char           buffer[2048];

    if (!std::isfinite(fmax))
    {
        sprintf(buffer,
                "\nEnergy minimization has stopped because the force "
                "on at least one atom is not finite. This usually means "
                "atoms are overlapping. Modify the input coordinates to "
                "remove atom overlap or use soft-core potentials with "
                "the free energy code to avoid infinite forces.\n%s",
                !realIsDouble ? "You could also be lucky that switching to double precision "
                                "is sufficient to obtain finite forces.\n"
                              : "");
    }
    else if (bLastStep)
    {
        sprintf(buffer,
                "\nEnergy minimization reached the maximum number "
                "of steps before the forces reached the requested "
                "precision Fmax < %g.\n",
                ftol);
    }
    else
    {
        sprintf(buffer,
                "\nEnergy minimization has stopped, but the forces have "
                "not converged to the requested precision Fmax < %g (which "
                "may not be possible for your system). It stopped "
                "because the algorithm tried to make a new step whose size "
                "was too small, or there was no change in the energy since "
                "last step. Either way, we regard the minimization as "
                "converged to within the available machine precision, "
                "given your starting configuration and EM parameters.\n%s%s",
                ftol,
                !realIsDouble ? "\nDouble precision normally gives you higher accuracy, but "
                                "this is often not needed for preparing to run molecular "
                                "dynamics.\n"
                              : "",
                bConstrain ? "You might need to increase your constraint accuracy, or turn\n"
                             "off constraints altogether (set constraints = none in mdp file)\n"
                           : "");
    }

    fputs(wrap_lines(buffer, 78, 0, FALSE), stderr);
    fputs(wrap_lines(buffer, 78, 0, FALSE), fp);
}
//! Print message about convergence of the EM
static void print_converged(FILE*             fp,
                            const char*       alg,
                            real              ftol,
                            int64_t           count,
                            gmx_bool          bDone,
                            int64_t           nsteps,
                            const em_state_t* ems,
                            double            sqrtNumAtoms)
{
    char buf[STEPSTRSIZE];

    if (bDone)
    {
        fprintf(fp, "\n%s converged to Fmax < %g in %s steps\n", alg, ftol, gmx_step_str(count, buf));
    }
    else if (count < nsteps)
    {
        fprintf(fp,
                "\n%s converged to machine precision in %s steps,\n"
                "but did not reach the requested Fmax < %g.\n",
                alg, gmx_step_str(count, buf), ftol);
    }
    else
    {
        fprintf(fp, "\n%s did not converge to Fmax < %g in %s steps.\n", alg, ftol,
                gmx_step_str(count, buf));
    }

#if GMX_DOUBLE
    fprintf(fp, "Potential Energy  = %21.14e\n", ems->epot);
    fprintf(fp, "Maximum force     = %21.14e on atom %d\n", ems->fmax, ems->a_fmax + 1);
    fprintf(fp, "Norm of force     = %21.14e\n", ems->fnorm / sqrtNumAtoms);
#else
    fprintf(fp, "Potential Energy  = %14.7e\n", ems->epot);
    fprintf(fp, "Maximum force     = %14.7e on atom %d\n", ems->fmax, ems->a_fmax + 1);
    fprintf(fp, "Norm of force     = %14.7e\n", ems->fnorm / sqrtNumAtoms);
#endif
}
259 //! Compute the norm and max of the force array in parallel
260 static void get_f_norm_max(const t_commrec* cr,
261 t_grpopts* opts,
262 t_mdatoms* mdatoms,
263 gmx::ArrayRef<const gmx::RVec> f,
264 real* fnorm,
265 real* fmax,
266 int* a_fmax)
268 double fnorm2, *sum;
269 real fmax2, fam;
270 int la_max, a_max, start, end, i, m, gf;
272 /* This routine finds the largest force and returns it.
273 * On parallel machines the global max is taken.
275 fnorm2 = 0;
276 fmax2 = 0;
277 la_max = -1;
278 start = 0;
279 end = mdatoms->homenr;
280 if (mdatoms->cFREEZE)
282 for (i = start; i < end; i++)
284 gf = mdatoms->cFREEZE[i];
285 fam = 0;
286 for (m = 0; m < DIM; m++)
288 if (!opts->nFreeze[gf][m])
290 fam += gmx::square(f[i][m]);
293 fnorm2 += fam;
294 if (fam > fmax2)
296 fmax2 = fam;
297 la_max = i;
301 else
303 for (i = start; i < end; i++)
305 fam = norm2(f[i]);
306 fnorm2 += fam;
307 if (fam > fmax2)
309 fmax2 = fam;
310 la_max = i;
315 if (la_max >= 0 && DOMAINDECOMP(cr))
317 a_max = cr->dd->globalAtomIndices[la_max];
319 else
321 a_max = la_max;
323 if (PAR(cr))
325 snew(sum, 2 * cr->nnodes + 1);
326 sum[2 * cr->nodeid] = fmax2;
327 sum[2 * cr->nodeid + 1] = a_max;
328 sum[2 * cr->nnodes] = fnorm2;
329 gmx_sumd(2 * cr->nnodes + 1, sum, cr);
330 fnorm2 = sum[2 * cr->nnodes];
331 /* Determine the global maximum */
332 for (i = 0; i < cr->nnodes; i++)
334 if (sum[2 * i] > fmax2)
336 fmax2 = sum[2 * i];
337 a_max = gmx::roundToInt(sum[2 * i + 1]);
340 sfree(sum);
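/* Note on the reduction above (restating what the code does): each rank packs its
 * local fmax^2 into sum[2*nodeid], the matching global atom index into
 * sum[2*nodeid + 1], and its fnorm^2 contribution into sum[2*nnodes]. Because the
 * array is zero-initialized and every rank writes only its own slots, a single
 * gmx_sumd() all-reduce both sums fnorm^2 and gathers every rank's maximum, which
 * the loop above then scans to pick the global Fmax and its atom index. */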
343 if (fnorm)
345 *fnorm = sqrt(fnorm2);
347 if (fmax)
349 *fmax = sqrt(fmax2);
351 if (a_fmax)
353 *a_fmax = a_max;
357 //! Compute the norm of the force
358 static void get_state_f_norm_max(const t_commrec* cr, t_grpopts* opts, t_mdatoms* mdatoms, em_state_t* ems)
360 get_f_norm_max(cr, opts, mdatoms, ems->f.view().force(), &ems->fnorm, &ems->fmax, &ems->a_fmax);
363 //! Initialize the energy minimization
364 static void init_em(FILE* fplog,
365 const gmx::MDLogger& mdlog,
366 const char* title,
367 const t_commrec* cr,
368 t_inputrec* ir,
369 gmx::ImdSession* imdSession,
370 pull_t* pull_work,
371 t_state* state_global,
372 const gmx_mtop_t* top_global,
373 em_state_t* ems,
374 gmx_localtop_t* top,
375 t_nrnb* nrnb,
376 t_forcerec* fr,
377 gmx::MDAtoms* mdAtoms,
378 gmx_global_stat_t* gstat,
379 VirtualSitesHandler* vsite,
380 gmx::Constraints* constr,
381 gmx_shellfc_t** shellfc)
383 real dvdl_constr;
385 if (fplog)
387 fprintf(fplog, "Initiating %s\n", title);
390 if (MASTER(cr))
392 state_global->ngtc = 0;
394 int* fep_state = MASTER(cr) ? &state_global->fep_state : nullptr;
395 gmx::ArrayRef<real> lambda = MASTER(cr) ? state_global->lambda : gmx::ArrayRef<real>();
396 initialize_lambdas(fplog, *ir, MASTER(cr), fep_state, lambda);
398 if (ir->eI == eiNM)
400 GMX_ASSERT(shellfc != nullptr, "With NM we always support shells");
402 *shellfc =
403 init_shell_flexcon(stdout, top_global, constr ? constr->numFlexibleConstraints() : 0,
404 ir->nstcalcenergy, DOMAINDECOMP(cr), thisRankHasDuty(cr, DUTY_PME));
406 else
408 GMX_ASSERT(EI_ENERGY_MINIMIZATION(ir->eI),
409 "This else currently only handles energy minimizers, consider if your algorithm "
410 "needs shell/flexible-constraint support");
412 /* With energy minimization, shells and flexible constraints are
413 * automatically minimized when treated like normal DOFS.
415 if (shellfc != nullptr)
417 *shellfc = nullptr;
421 if (DOMAINDECOMP(cr))
423 dd_init_local_state(cr->dd, state_global, &ems->s);
425 /* Distribute the charge groups over the nodes from the master node */
426 dd_partition_system(fplog, mdlog, ir->init_step, cr, TRUE, 1, state_global, *top_global, ir,
427 imdSession, pull_work, &ems->s, &ems->f, mdAtoms, top, fr, vsite,
428 constr, nrnb, nullptr, FALSE);
429 dd_store_state(cr->dd, &ems->s);
431 else
433 state_change_natoms(state_global, state_global->natoms);
434 /* Just copy the state */
435 ems->s = *state_global;
436 state_change_natoms(&ems->s, ems->s.natoms);
438 mdAlgorithmsSetupAtomData(cr, ir, *top_global, top, fr, &ems->f, mdAtoms, constr, vsite,
439 shellfc ? *shellfc : nullptr);
442 update_mdatoms(mdAtoms->mdatoms(), ems->s.lambda[efptMASS]);
444 if (constr)
446 // TODO how should this cross-module support dependency be managed?
447 if (ir->eConstrAlg == econtSHAKE && gmx_mtop_ftype_count(top_global, F_CONSTR) > 0)
449 gmx_fatal(FARGS, "Can not do energy minimization with %s, use %s\n",
450 econstr_names[econtSHAKE], econstr_names[econtLINCS]);
453 if (!ir->bContinuation)
455 /* Constrain the starting coordinates */
456 bool needsLogging = true;
457 bool computeEnergy = true;
458 bool computeVirial = false;
459 dvdl_constr = 0;
460 constr->apply(needsLogging, computeEnergy, -1, 0, 1.0, ems->s.x.arrayRefWithPadding(),
461 ems->s.x.arrayRefWithPadding(), ArrayRef<RVec>(), ems->s.box,
462 ems->s.lambda[efptFEP], &dvdl_constr, gmx::ArrayRefWithPadding<RVec>(),
463 computeVirial, nullptr, gmx::ConstraintVariable::Positions);
467 if (PAR(cr))
469 *gstat = global_stat_init(ir);
471 else
473 *gstat = nullptr;
476 calc_shifts(ems->s.box, fr->shift_vec);
479 //! Finalize the minimization
480 static void finish_em(const t_commrec* cr,
481 gmx_mdoutf_t outf,
482 gmx_walltime_accounting_t walltime_accounting,
483 gmx_wallcycle_t wcycle)
485 if (!thisRankHasDuty(cr, DUTY_PME))
487 /* Tell the PME only node to finish */
488 gmx_pme_send_finish(cr);
491 done_mdoutf(outf);
493 em_time_end(walltime_accounting, wcycle);
496 //! Swap two different EM states during minimization
497 static void swap_em_state(em_state_t** ems1, em_state_t** ems2)
499 em_state_t* tmp;
501 tmp = *ems1;
502 *ems1 = *ems2;
503 *ems2 = tmp;
506 //! Save the EM trajectory
507 static void write_em_traj(FILE* fplog,
508 const t_commrec* cr,
509 gmx_mdoutf_t outf,
510 gmx_bool bX,
511 gmx_bool bF,
512 const char* confout,
513 const gmx_mtop_t* top_global,
514 t_inputrec* ir,
515 int64_t step,
516 em_state_t* state,
517 t_state* state_global,
518 ObservablesHistory* observablesHistory)
520 int mdof_flags = 0;
522 if (bX)
524 mdof_flags |= MDOF_X;
526 if (bF)
528 mdof_flags |= MDOF_F;
531 /* If we want IMD output, set appropriate MDOF flag */
532 if (ir->bIMD)
534 mdof_flags |= MDOF_IMD;
537 gmx::WriteCheckpointDataHolder checkpointDataHolder;
538 mdoutf_write_to_trajectory_files(fplog, cr, outf, mdof_flags, top_global->natoms, step,
539 static_cast<double>(step), &state->s, state_global,
540 observablesHistory, state->f.view().force(), &checkpointDataHolder);
542 if (confout != nullptr)
544 if (DOMAINDECOMP(cr))
546 /* If bX=true, x was collected to state_global in the call above */
547 if (!bX)
549 auto globalXRef = MASTER(cr) ? state_global->x : gmx::ArrayRef<gmx::RVec>();
550 dd_collect_vec(cr->dd, state->s.ddp_count, state->s.ddp_count_cg_gl, state->s.cg_gl,
551 state->s.x, globalXRef);
554 else
556 /* Copy the local state pointer */
557 state_global = &state->s;
560 if (MASTER(cr))
562 if (ir->pbcType != PbcType::No && !ir->bPeriodicMols && DOMAINDECOMP(cr))
564 /* Make molecules whole only for confout writing */
565 do_pbc_mtop(ir->pbcType, state->s.box, top_global, state_global->x.rvec_array());
568 write_sto_conf_mtop(confout, *top_global->name, top_global,
569 state_global->x.rvec_array(), nullptr, ir->pbcType, state->s.box);
574 //! \brief Do one minimization step
576 // \returns true when the step succeeded, false when a constraint error occurred
577 static bool do_em_step(const t_commrec* cr,
578 t_inputrec* ir,
579 t_mdatoms* md,
580 em_state_t* ems1,
581 real a,
582 gmx::ArrayRefWithPadding<const gmx::RVec> force,
583 em_state_t* ems2,
584 gmx::Constraints* constr,
585 int64_t count)
588 t_state *s1, *s2;
589 int start, end;
590 real dvdl_constr;
591 int nthreads gmx_unused;
593 bool validStep = true;
595 s1 = &ems1->s;
596 s2 = &ems2->s;
598 if (DOMAINDECOMP(cr) && s1->ddp_count != cr->dd->ddp_count)
600 gmx_incons("state mismatch in do_em_step");
603 s2->flags = s1->flags;
605 if (s2->natoms != s1->natoms)
607 state_change_natoms(s2, s1->natoms);
608 ems2->f.resize(s2->natoms);
610 if (DOMAINDECOMP(cr) && s2->cg_gl.size() != s1->cg_gl.size())
612 s2->cg_gl.resize(s1->cg_gl.size());
615 copy_mat(s1->box, s2->box);
616 /* Copy free energy state */
617 s2->lambda = s1->lambda;
618 copy_mat(s1->box, s2->box);
620 start = 0;
621 end = md->homenr;
623 nthreads = gmx_omp_nthreads_get(emntUpdate);
624 #pragma omp parallel num_threads(nthreads)
626 const rvec* x1 = s1->x.rvec_array();
627 rvec* x2 = s2->x.rvec_array();
628 const rvec* f = as_rvec_array(force.unpaddedArrayRef().data());
630 int gf = 0;
631 #pragma omp for schedule(static) nowait
632 for (int i = start; i < end; i++)
636 if (md->cFREEZE)
638 gf = md->cFREEZE[i];
640 for (int m = 0; m < DIM; m++)
642 if (ir->opts.nFreeze[gf][m])
644 x2[i][m] = x1[i][m];
646 else
648 x2[i][m] = x1[i][m] + a * f[i][m];
652 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
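/* Note: the loop above displaces each non-frozen coordinate as
 *     x2[i][m] = x1[i][m] + a * f[i][m]
 * where 'force' is whatever search direction the caller supplies; in this file
 * do_cg() passes the conjugate-gradient direction cg_p, and the steepest-descent
 * path (elsewhere in this file, not shown in this excerpt) is expected to pass the
 * force itself. Frozen dimensions simply copy x1 unchanged. */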
655 if (s2->flags & (1 << estCGP))
657 /* Copy the CG p vector */
658 const rvec* p1 = s1->cg_p.rvec_array();
659 rvec* p2 = s2->cg_p.rvec_array();
660 #pragma omp for schedule(static) nowait
661 for (int i = start; i < end; i++)
663 // Trivial OpenMP block that does not throw
664 copy_rvec(p1[i], p2[i]);
668 if (DOMAINDECOMP(cr))
670 /* OpenMP does not support unsigned loop variables */
671 #pragma omp for schedule(static) nowait
672 for (gmx::index i = 0; i < gmx::ssize(s2->cg_gl); i++)
674 s2->cg_gl[i] = s1->cg_gl[i];
679 if (DOMAINDECOMP(cr))
681 s2->ddp_count = s1->ddp_count;
682 s2->ddp_count_cg_gl = s1->ddp_count_cg_gl;
685 if (constr)
687 dvdl_constr = 0;
688 validStep = constr->apply(
689 TRUE, TRUE, count, 0, 1.0, s1->x.arrayRefWithPadding(), s2->x.arrayRefWithPadding(),
690 ArrayRef<RVec>(), s2->box, s2->lambda[efptBONDED], &dvdl_constr,
691 gmx::ArrayRefWithPadding<RVec>(), false, nullptr, gmx::ConstraintVariable::Positions);
693 if (cr->nnodes > 1)
695 /* This global reduction will affect performance at high
696 * parallelization, but we can not really avoid it.
697 * But usually EM is not run at high parallelization.
699 int reductionBuffer = static_cast<int>(!validStep);
700 gmx_sumi(1, &reductionBuffer, cr);
701 validStep = (reductionBuffer == 0);
704 // We should move this check to the different minimizers
705 if (!validStep && ir->eI != eiSteep)
707 gmx_fatal(FARGS,
708 "The coordinates could not be constrained. Minimizer '%s' can not handle "
709 "constraint failures, use minimizer '%s' before using '%s'.",
710 EI(ir->eI), EI(eiSteep), EI(ir->eI));
714 return validStep;
717 //! Prepare EM for using domain decomposition parallelization
718 static void em_dd_partition_system(FILE* fplog,
719 const gmx::MDLogger& mdlog,
720 int step,
721 const t_commrec* cr,
722 const gmx_mtop_t* top_global,
723 t_inputrec* ir,
724 gmx::ImdSession* imdSession,
725 pull_t* pull_work,
726 em_state_t* ems,
727 gmx_localtop_t* top,
728 gmx::MDAtoms* mdAtoms,
729 t_forcerec* fr,
730 VirtualSitesHandler* vsite,
731 gmx::Constraints* constr,
732 t_nrnb* nrnb,
733 gmx_wallcycle_t wcycle)
735 /* Repartition the domain decomposition */
736 dd_partition_system(fplog, mdlog, step, cr, FALSE, 1, nullptr, *top_global, ir, imdSession, pull_work,
737 &ems->s, &ems->f, mdAtoms, top, fr, vsite, constr, nrnb, wcycle, FALSE);
738 dd_store_state(cr->dd, &ems->s);
741 namespace
744 /*! \brief Class to handle the work of setting and doing an energy evaluation.
746 * This class is a mere aggregate of parameters to pass to evaluate an
747 * energy, so that future changes to names and types of them consume
748 * less time when refactoring other code.
750 * Aggregate initialization is used, for which the chief risk is that
751 * if a member is added at the end and not all initializer lists are
752 * updated, then the member will be value initialized, which will
753 * typically mean initialization to zero.
755 * Use a braced initializer list to construct one of these. */
756 class EnergyEvaluator
758 public:
759 /*! \brief Evaluates an energy on the state in \c ems.
761 * \todo In practice, the same objects mu_tot, vir, and pres
762 * are always passed to this function, so we would rather have
763 * them as data members. However, their C-array types are
764 * unsuited for aggregate initialization. When the types
765 * improve, the call signature of this method can be reduced.
767 void run(em_state_t* ems, rvec mu_tot, tensor vir, tensor pres, int64_t count, gmx_bool bFirst);
768 //! Handles logging (deprecated).
769 FILE* fplog;
770 //! Handles logging.
771 const gmx::MDLogger& mdlog;
772 //! Handles communication.
773 const t_commrec* cr;
774 //! Coordinates multi-simulations.
775 const gmx_multisim_t* ms;
776 //! Holds the simulation topology.
777 const gmx_mtop_t* top_global;
778 //! Holds the domain topology.
779 gmx_localtop_t* top;
780 //! User input options.
781 t_inputrec* inputrec;
782 //! The Interactive Molecular Dynamics session.
783 gmx::ImdSession* imdSession;
784 //! The pull work object.
785 pull_t* pull_work;
786 //! Manages flop accounting.
787 t_nrnb* nrnb;
788 //! Manages wall cycle accounting.
789 gmx_wallcycle_t wcycle;
790 //! Coordinates global reduction.
791 gmx_global_stat_t gstat;
792 //! Handles virtual sites.
793 VirtualSitesHandler* vsite;
794 //! Handles constraints.
795 gmx::Constraints* constr;
796 //! Per-atom data for this domain.
797 gmx::MDAtoms* mdAtoms;
798 //! Handles how to calculate the forces.
799 t_forcerec* fr;
800 //! Schedule of force-calculation work each step for this task.
801 MdrunScheduleWorkload* runScheduleWork;
802 //! Stores the computed energies.
803 gmx_enerdata_t* enerd;
806 void EnergyEvaluator::run(em_state_t* ems, rvec mu_tot, tensor vir, tensor pres, int64_t count, gmx_bool bFirst)
808 real t;
809 gmx_bool bNS;
810 tensor force_vir, shake_vir, ekin;
811 real dvdl_constr;
812 real terminate = 0;
814 /* Set the time to the initial time, the time does not change during EM */
815 t = inputrec->init_t;
817 if (bFirst || (DOMAINDECOMP(cr) && ems->s.ddp_count < cr->dd->ddp_count))
819 /* This is the first state or an old state used before the last ns */
820 bNS = TRUE;
822 else
824 bNS = FALSE;
825 if (inputrec->nstlist > 0)
827 bNS = TRUE;
831 if (vsite)
833 vsite->construct(ems->s.x, 1, {}, ems->s.box);
836 if (DOMAINDECOMP(cr) && bNS)
838 /* Repartition the domain decomposition */
839 em_dd_partition_system(fplog, mdlog, count, cr, top_global, inputrec, imdSession, pull_work,
840 ems, top, mdAtoms, fr, vsite, constr, nrnb, wcycle);
843 /* Calc force & energy on new trial position */
844 /* do_force always puts the charge groups in the box and shifts again
845 * We do not unshift, so molecules are always whole in congrad.c
847 do_force(fplog, cr, ms, inputrec, nullptr, nullptr, imdSession, pull_work, count, nrnb, wcycle,
848 top, ems->s.box, ems->s.x.arrayRefWithPadding(), &ems->s.hist, &ems->f.view(), force_vir,
849 mdAtoms->mdatoms(), enerd, ems->s.lambda, fr, runScheduleWork, vsite, mu_tot, t, nullptr,
850 GMX_FORCE_STATECHANGED | GMX_FORCE_ALLFORCES | GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY
851 | (bNS ? GMX_FORCE_NS : 0),
852 DDBalanceRegionHandler(cr));
854 /* Clear the unused shake virial and pressure */
855 clear_mat(shake_vir);
856 clear_mat(pres);
858 /* Communicate stuff when parallel */
859 if (PAR(cr) && inputrec->eI != eiNM)
861 wallcycle_start(wcycle, ewcMoveE);
863 global_stat(gstat, cr, enerd, force_vir, shake_vir, inputrec, nullptr, nullptr, nullptr, 1,
864 &terminate, nullptr, FALSE, CGLO_ENERGY | CGLO_PRESSURE | CGLO_CONSTRAINT);
866 wallcycle_stop(wcycle, ewcMoveE);
869 if (fr->dispersionCorrection)
871 /* Calculate long range corrections to pressure and energy */
872 const DispersionCorrection::Correction correction =
873 fr->dispersionCorrection->calculate(ems->s.box, ems->s.lambda[efptVDW]);
875 enerd->term[F_DISPCORR] = correction.energy;
876 enerd->term[F_EPOT] += correction.energy;
877 enerd->term[F_PRES] += correction.pressure;
878 enerd->term[F_DVDL] += correction.dvdl;
880 else
882 enerd->term[F_DISPCORR] = 0;
885 ems->epot = enerd->term[F_EPOT];
887 if (constr)
889 /* Project out the constraint components of the force */
890 bool needsLogging = false;
891 bool computeEnergy = false;
892 bool computeVirial = true;
893 dvdl_constr = 0;
894 auto f = ems->f.view().forceWithPadding();
895 constr->apply(needsLogging, computeEnergy, count, 0, 1.0, ems->s.x.arrayRefWithPadding(), f,
896 f.unpaddedArrayRef(), ems->s.box, ems->s.lambda[efptBONDED], &dvdl_constr,
897 gmx::ArrayRefWithPadding<RVec>(), computeVirial, shake_vir,
898 gmx::ConstraintVariable::ForceDispl);
899 enerd->term[F_DVDL_CONSTR] += dvdl_constr;
900 m_add(force_vir, shake_vir, vir);
902 else
904 copy_mat(force_vir, vir);
907 clear_mat(ekin);
908 enerd->term[F_PRES] = calc_pres(fr->pbcType, inputrec->nwall, ems->s.box, ekin, vir, pres);
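/* Note: ekin was cleared just above, so during minimization calc_pres() reports the
 * virial-only pressure; per tensor component it is roughly proportional to
 * (Ekin_ij - Xi_ij)/V with Xi the virial, and the scalar pressure is one third of
 * the trace. The exact prefactor and unit conversion are an assumption here; see
 * calc_pres() in the coupling code for the authoritative expression. */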
910 if (inputrec->efep != efepNO)
912 accumulateKineticLambdaComponents(enerd, ems->s.lambda, *inputrec->fepvals);
915 if (EI_ENERGY_MINIMIZATION(inputrec->eI))
917 get_state_f_norm_max(cr, &(inputrec->opts), mdAtoms->mdatoms(), ems);
921 } // namespace
923 //! Parallel utility computing the Polak-Ribiere partial sum when domain decomposition has reordered atoms
924 static double reorder_partsum(const t_commrec* cr,
925 t_grpopts* opts,
926 const gmx_mtop_t* top_global,
927 const em_state_t* s_min,
928 const em_state_t* s_b)
930 if (debug)
932 fprintf(debug, "Doing reorder_partsum\n");
935 auto fm = s_min->f.view().force();
936 auto fb = s_b->f.view().force();
938 /* Collect fm in a global vector fmg.
939 * This conflicts with the spirit of domain decomposition,
940 * but to fully optimize this a much more complicated algorithm is required.
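 *
 * Under domain decomposition the local atom order of s_min and s_b can differ,
 * so fm is scattered into the global-size array fmg using the global atom
 * indices in cg_gl and summed over ranks (each rank fills only its home atoms,
 * the rest stay zero). The Polak-Ribiere partial sum below can then pair each
 * local force fb[i] of s_b with the matching fmg[globalIndex].
 */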
942 const int natoms = top_global->natoms;
943 rvec* fmg;
944 snew(fmg, natoms);
946 gmx::ArrayRef<const int> indicesMin = s_min->s.cg_gl;
947 int i = 0;
948 for (int a : indicesMin)
950 copy_rvec(fm[i], fmg[a]);
951 i++;
953 gmx_sum(top_global->natoms * 3, fmg[0], cr);
955 /* Now we will determine the part of the sum for the cgs in state s_b */
956 gmx::ArrayRef<const int> indicesB = s_b->s.cg_gl;
958 double partsum = 0;
959 i = 0;
960 int gf = 0;
961 gmx::ArrayRef<const unsigned char> grpnrFREEZE =
962 top_global->groups.groupNumbers[SimulationAtomGroupType::Freeze];
963 for (int a : indicesB)
965 if (!grpnrFREEZE.empty())
967 gf = grpnrFREEZE[i];
969 for (int m = 0; m < DIM; m++)
971 if (!opts->nFreeze[gf][m])
973 partsum += (fb[i][m] - fmg[a][m]) * fb[i][m];
976 i++;
979 sfree(fmg);
981 return partsum;
984 //! Compute the Polak-Ribiere beta used for the conjugate-gradient direction update
985 static real pr_beta(const t_commrec* cr,
986 t_grpopts* opts,
987 t_mdatoms* mdatoms,
988 const gmx_mtop_t* top_global,
989 const em_state_t* s_min,
990 const em_state_t* s_b)
992 double sum;
994 /* This is just the classical Polak-Ribiere calculation of beta;
995 * it looks a bit complicated since we take freeze groups into account,
996 * and might have to sum it in parallel runs.
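 *
 * In formula form (a restatement of the code in this function, with f = -grad E):
 *
 *     beta_PR = sum_i (f_b,i - f_min,i) . f_b,i / |f_min|^2
 *
 * where the sum skips frozen dimensions, s_min is the current lowest-energy state
 * and s_b the new trial state; the division by fnorm^2 happens at the return below.
 */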
999 if (!DOMAINDECOMP(cr)
1000 || (s_min->s.ddp_count == cr->dd->ddp_count && s_b->s.ddp_count == cr->dd->ddp_count))
1002 auto fm = s_min->f.view().force();
1003 auto fb = s_b->f.view().force();
1004 sum = 0;
1005 int gf = 0;
1006 /* This part of code can be incorrect with DD,
1007 * since the atom ordering in s_b and s_min might differ.
1009 for (int i = 0; i < mdatoms->homenr; i++)
1011 if (mdatoms->cFREEZE)
1013 gf = mdatoms->cFREEZE[i];
1015 for (int m = 0; m < DIM; m++)
1017 if (!opts->nFreeze[gf][m])
1019 sum += (fb[i][m] - fm[i][m]) * fb[i][m];
1024 else
1026 /* We need to reorder cgs while summing */
1027 sum = reorder_partsum(cr, opts, top_global, s_min, s_b);
1029 if (PAR(cr))
1031 gmx_sumd(1, &sum, cr);
1034 return sum / gmx::square(s_min->fnorm);
1037 namespace gmx
1040 void LegacySimulator::do_cg()
1042 const char* CG = "Polak-Ribiere Conjugate Gradients";
1044 gmx_localtop_t top(top_global->ffparams);
1045 gmx_global_stat_t gstat;
1046 double tmp, minstep;
1047 real stepsize;
1048 real a, b, c, beta = 0.0;
1049 real epot_repl = 0;
1050 real pnorm;
1051 gmx_bool converged, foundlower;
1052 rvec mu_tot = { 0 };
1053 gmx_bool do_log = FALSE, do_ene = FALSE, do_x, do_f;
1054 tensor vir, pres;
1055 int number_steps, neval = 0, nstcg = inputrec->nstcgsteep;
1056 int m, step, nminstep;
1057 auto mdatoms = mdAtoms->mdatoms();
1059 GMX_LOG(mdlog.info)
1060 .asParagraph()
1061 .appendText(
1062 "Note that activating conjugate gradient energy minimization via the "
1063 "integrator .mdp option and the command gmx mdrun may "
1064 "be available in a different form in a future version of GROMACS, "
1065 "e.g. gmx minimize and an .mdp option.");
1067 step = 0;
1069 if (MASTER(cr))
1071 // In CG, the state is extended with a search direction
1072 state_global->flags |= (1 << estCGP);
1074 // Ensure the extra per-atom state array gets allocated
1075 state_change_natoms(state_global, state_global->natoms);
1077 // Initialize the search direction to zero
1078 for (RVec& cg_p : state_global->cg_p)
1080 cg_p = { 0, 0, 0 };
1084 /* Create 4 states on the stack and extract pointers that we will swap */
1085 em_state_t s0{}, s1{}, s2{}, s3{};
1086 em_state_t* s_min = &s0;
1087 em_state_t* s_a = &s1;
1088 em_state_t* s_b = &s2;
1089 em_state_t* s_c = &s3;
1091 /* Init em and store the local state in s_min */
1092 init_em(fplog, mdlog, CG, cr, inputrec, imdSession, pull_work, state_global, top_global, s_min,
1093 &top, nrnb, fr, mdAtoms, &gstat, vsite, constr, nullptr);
1094 const bool simulationsShareState = false;
1095 gmx_mdoutf* outf = init_mdoutf(fplog, nfile, fnm, mdrunOptions, cr, outputProvider,
1096 mdModulesNotifier, inputrec, top_global, nullptr, wcycle,
1097 StartingBehavior::NewSimulation, simulationsShareState, ms);
1098 gmx::EnergyOutput energyOutput(mdoutf_get_fp_ene(outf), top_global, inputrec, pull_work,
1099 nullptr, false, StartingBehavior::NewSimulation,
1100 simulationsShareState, mdModulesNotifier);
1102 /* Print to log file */
1103 print_em_start(fplog, cr, walltime_accounting, wcycle, CG);
1105 /* Max number of steps */
1106 number_steps = inputrec->nsteps;
1108 if (MASTER(cr))
1110 sp_header(stderr, CG, inputrec->em_tol, number_steps);
1112 if (fplog)
1114 sp_header(fplog, CG, inputrec->em_tol, number_steps);
1117 EnergyEvaluator energyEvaluator{ fplog, mdlog, cr, ms, top_global, &top,
1118 inputrec, imdSession, pull_work, nrnb, wcycle, gstat,
1119 vsite, constr, mdAtoms, fr, runScheduleWork, enerd };
1120 /* Call the force routine and some auxiliary (neighboursearching etc.) */
1121 /* do_force always puts the charge groups in the box and shifts again
1122 * We do not unshift, so molecules are always whole in congrad.c
1124 energyEvaluator.run(s_min, mu_tot, vir, pres, -1, TRUE);
1126 if (MASTER(cr))
1128 /* Copy stuff to the energy bin for easy printing etc. */
1129 matrix nullBox = {};
1130 energyOutput.addDataAtEnergyStep(false, false, static_cast<double>(step), mdatoms->tmass,
1131 enerd, nullptr, nullptr, nullBox, PTCouplingArrays(), 0,
1132 nullptr, nullptr, vir, pres, nullptr, mu_tot, constr);
1134 EnergyOutput::printHeader(fplog, step, step);
1135 energyOutput.printStepToEnergyFile(mdoutf_get_fp_ene(outf), TRUE, FALSE, FALSE, fplog, step,
1136 step, fr->fcdata.get(), nullptr);
1139 /* Estimate/guess the initial stepsize */
1140 stepsize = inputrec->em_stepsize / s_min->fnorm;
1142 if (MASTER(cr))
1144 double sqrtNumAtoms = sqrt(static_cast<double>(state_global->natoms));
1145 fprintf(stderr, " F-max = %12.5e on atom %d\n", s_min->fmax, s_min->a_fmax + 1);
1146 fprintf(stderr, " F-Norm = %12.5e\n", s_min->fnorm / sqrtNumAtoms);
1147 fprintf(stderr, "\n");
1148 /* and copy to the log file too... */
1149 fprintf(fplog, " F-max = %12.5e on atom %d\n", s_min->fmax, s_min->a_fmax + 1);
1150 fprintf(fplog, " F-Norm = %12.5e\n", s_min->fnorm / sqrtNumAtoms);
1151 fprintf(fplog, "\n");
1153 /* Start the loop over CG steps.
1154 * Each successful step is counted, and we continue until
1155 * we either converge or reach the max number of steps.
1157 converged = FALSE;
1158 for (step = 0; (number_steps < 0 || step <= number_steps) && !converged; step++)
1161 /* start taking steps in a new direction
1162 * First time we enter the routine, beta=0, and the direction is
1163 * simply the negative gradient.
1166 /* Calculate the new direction in p, and the gradient in this direction, gpa */
1167 gmx::ArrayRef<gmx::RVec> pm = s_min->s.cg_p;
1168 gmx::ArrayRef<const gmx::RVec> sfm = s_min->f.view().force();
1169 double gpa = 0;
1170 int gf = 0;
1171 for (int i = 0; i < mdatoms->homenr; i++)
1173 if (mdatoms->cFREEZE)
1175 gf = mdatoms->cFREEZE[i];
1177 for (m = 0; m < DIM; m++)
1179 if (!inputrec->opts.nFreeze[gf][m])
1181 pm[i][m] = sfm[i][m] + beta * pm[i][m];
1182 gpa -= pm[i][m] * sfm[i][m];
1183 /* f is negative gradient, thus the sign */
1185 else
1187 pm[i][m] = 0;
1192 /* Sum the gradient along the line across CPUs */
1193 if (PAR(cr))
1195 gmx_sumd(1, &gpa, cr);
1198 /* Calculate the norm of the search vector */
1199 get_f_norm_max(cr, &(inputrec->opts), mdatoms, pm, &pnorm, nullptr, nullptr);
1201 /* Just in case stepsize reaches zero due to numerical precision... */
1202 if (stepsize <= 0)
1204 stepsize = inputrec->em_stepsize / pnorm;
1208 * Double check the value of the derivative in the search direction.
1209 * If it is positive it must be due to the old information in the
1210 * CG formula, so just remove that and start over with beta=0.
1211 * This corresponds to a steepest descent step.
1213 if (gpa > 0)
1215 beta = 0;
1216 step--; /* Don't count this step since we are restarting */
1217 continue; /* Go back to the beginning of the big for-loop */
1220 /* Calculate minimum allowed stepsize, before the average (norm)
1221 * relative change in coordinate is smaller than precision
1223 minstep = 0;
1224 auto s_min_x = makeArrayRef(s_min->s.x);
1225 for (int i = 0; i < mdatoms->homenr; i++)
1227 for (m = 0; m < DIM; m++)
1229 tmp = fabs(s_min_x[i][m]);
1230 if (tmp < 1.0)
1232 tmp = 1.0;
1234 tmp = pm[i][m] / tmp;
1235 minstep += tmp * tmp;
1238 /* Add up from all CPUs */
1239 if (PAR(cr))
1241 gmx_sumd(1, &minstep, cr);
1244 minstep = GMX_REAL_EPS / sqrt(minstep / (3 * top_global->natoms));
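/* Note: the test below, stepsize < minstep, is a machine-precision stop. The loop
 * above accumulated sum_i (p_i / max(|x_i|, 1))^2, so minstep is the step length at
 * which the RMS relative coordinate change per degree of freedom,
 * stepsize * sqrt(sum / (3 N)), would fall below GMX_REAL_EPS. */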
1246 if (stepsize < minstep)
1248 converged = TRUE;
1249 break;
1252 /* Write coordinates if necessary */
1253 do_x = do_per_step(step, inputrec->nstxout);
1254 do_f = do_per_step(step, inputrec->nstfout);
1256 write_em_traj(fplog, cr, outf, do_x, do_f, nullptr, top_global, inputrec, step, s_min,
1257 state_global, observablesHistory);
1259 /* Take a step downhill.
1260 * In theory, we should minimize the function along this direction.
1261 * That is quite possible, but it turns out to take 5-10 function evaluations
1262 * for each line. However, we don't really need to find the exact minimum -
1263 * it is much better to start a new CG step in a modified direction as soon
1264 * as we are close to it. This will save a lot of energy evaluations.
1266 * In practice, we just try to take a single step.
1267 * If it worked (i.e. lowered the energy), we increase the stepsize but
1268 * then continue straight to the next CG step without trying to find any minimum.
1269 * If it didn't work (higher energy), there must be a minimum somewhere between
1270 * the old position and the new one.
1272 * Due to the finite numerical accuracy, it turns out that it is a good idea
1273 * to even accept a SMALL increase in energy, if the derivative is still downhill.
1274 * This leads to lower final energies in the tests I've done. / Erik
1276 s_a->epot = s_min->epot;
1277 a = 0.0;
1278 c = a + stepsize; /* reference position along line is zero */
1280 if (DOMAINDECOMP(cr) && s_min->s.ddp_count < cr->dd->ddp_count)
1282 em_dd_partition_system(fplog, mdlog, step, cr, top_global, inputrec, imdSession,
1283 pull_work, s_min, &top, mdAtoms, fr, vsite, constr, nrnb, wcycle);
1286 /* Take a trial step (new coords in s_c) */
1287 do_em_step(cr, inputrec, mdatoms, s_min, c, s_min->s.cg_p.constArrayRefWithPadding(), s_c,
1288 constr, -1);
1290 neval++;
1291 /* Calculate energy for the trial step */
1292 energyEvaluator.run(s_c, mu_tot, vir, pres, -1, FALSE);
1294 /* Calc derivative along line */
1295 const rvec* pc = s_c->s.cg_p.rvec_array();
1296 gmx::ArrayRef<const gmx::RVec> sfc = s_c->f.view().force();
1297 double gpc = 0;
1298 for (int i = 0; i < mdatoms->homenr; i++)
1300 for (m = 0; m < DIM; m++)
1302 gpc -= pc[i][m] * sfc[i][m]; /* f is negative gradient, thus the sign */
1305 /* Sum the gradient along the line across CPUs */
1306 if (PAR(cr))
1308 gmx_sumd(1, &gpc, cr);
1311 /* This is the max amount of increase in energy we tolerate */
1312 tmp = std::sqrt(GMX_REAL_EPS) * fabs(s_a->epot);
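/* Note: sqrt(GMX_REAL_EPS) * |E| is roughly the smallest energy difference that can
 * be resolved reliably in the current floating-point precision, so an increase
 * below this threshold is treated as noise and the trial step can still be accepted
 * when the line derivative gpc remains negative (see the test just below). */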
1314 /* Accept the step if the energy is lower, or if it is not significantly higher
1315 * and the line derivative is still negative.
1317 if (s_c->epot < s_a->epot || (gpc < 0 && s_c->epot < (s_a->epot + tmp)))
1319 foundlower = TRUE;
1320 /* Great, we found a better energy. Increase step for next iteration
1321 * if we are still going down, decrease it otherwise
1323 if (gpc < 0)
1325 stepsize *= 1.618034; /* The golden section */
1327 else
1329 stepsize *= 0.618034; /* 1/golden section */
1332 else
1334 /* New energy is the same or higher. We will have to do some work
1335 * to find a smaller value in the interval. Take smaller step next time!
1337 foundlower = FALSE;
1338 stepsize *= 0.618034;
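/* Note: the factors 1.618034 and 0.618034 are the golden ratio and its inverse;
 * growing the step by phi while the line derivative stays negative and shrinking it
 * by 1/phi otherwise mirrors golden-section search, keeping successive trial points
 * in a fixed proportion along the search line. */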
1342 /* OK, if we didn't find a lower value we will have to locate one now - there must
1343 * be one in the interval [a=0,c].
1344 * The same thing is valid here, though: Don't spend dozens of iterations to find
1345 * the line minimum. We try to interpolate based on the derivative at the endpoints,
1346 * and only continue until we find a lower value. In most cases this means 1-2 iterations.
1348 * I also have a safeguard for potentially really pathological functions so we never
1349 * take more than 20 steps before we give up ...
1351 * If we already found a lower value we just skip this step and continue to the update.
1353 double gpb;
1354 if (!foundlower)
1356 nminstep = 0;
1360 /* Select a new trial point.
1361 * If the derivatives at points a & c have different sign we interpolate to zero,
1362 * otherwise just do a bisection.
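 *
 * When gpa < 0 < gpc the sign change brackets a minimum, and the expression
 * below, b = a + gpa*(a - c)/(gpc - gpa), is the zero of the linear (secant)
 * interpolation of the line derivative between the two endpoints; without a
 * bracketing sign change a plain bisection of [a, c] is used instead.
 */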
1364 if (gpa < 0 && gpc > 0)
1366 b = a + gpa * (a - c) / (gpc - gpa);
1368 else
1370 b = 0.5 * (a + c);
1373 /* safeguard if interpolation close to machine accuracy causes errors:
1374 * never go outside the interval
1376 if (b <= a || b >= c)
1378 b = 0.5 * (a + c);
1381 if (DOMAINDECOMP(cr) && s_min->s.ddp_count != cr->dd->ddp_count)
1383 /* Reload the old state */
1384 em_dd_partition_system(fplog, mdlog, -1, cr, top_global, inputrec, imdSession, pull_work,
1385 s_min, &top, mdAtoms, fr, vsite, constr, nrnb, wcycle);
1388 /* Take a trial step to this new point - new coords in s_b */
1389 do_em_step(cr, inputrec, mdatoms, s_min, b,
1390 s_min->s.cg_p.constArrayRefWithPadding(), s_b, constr, -1);
1392 neval++;
1393 /* Calculate energy for the trial step */
1394 energyEvaluator.run(s_b, mu_tot, vir, pres, -1, FALSE);
1396 /* p does not change within a step, but since the domain decomposition
1397 * might change, we have to use cg_p of s_b here.
1399 const rvec* pb = s_b->s.cg_p.rvec_array();
1400 gmx::ArrayRef<const gmx::RVec> sfb = s_b->f.view().force();
1401 gpb = 0;
1402 for (int i = 0; i < mdatoms->homenr; i++)
1404 for (m = 0; m < DIM; m++)
1406 gpb -= pb[i][m] * sfb[i][m]; /* f is negative gradient, thus the sign */
1409 /* Sum the gradient along the line across CPUs */
1410 if (PAR(cr))
1412 gmx_sumd(1, &gpb, cr);
1415 if (debug)
1417 fprintf(debug, "CGE: EpotA %f EpotB %f EpotC %f gpb %f\n", s_a->epot, s_b->epot,
1418 s_c->epot, gpb);
1421 epot_repl = s_b->epot;
1423 /* Keep one of the intervals based on the value of the derivative at the new point */
1424 if (gpb > 0)
1426 /* Replace c endpoint with b */
1427 swap_em_state(&s_b, &s_c);
1428 c = b;
1429 gpc = gpb;
1431 else
1433 /* Replace a endpoint with b */
1434 swap_em_state(&s_b, &s_a);
1435 a = b;
1436 gpa = gpb;
1440 * Stop search as soon as we find a value smaller than the endpoints.
1441 * Never run more than 20 steps, no matter what.
1443 nminstep++;
1444 } while ((epot_repl > s_a->epot || epot_repl > s_c->epot) && (nminstep < 20));
1446 if (std::fabs(epot_repl - s_min->epot) < fabs(s_min->epot) * GMX_REAL_EPS || nminstep >= 20)
1448 /* OK. We couldn't find a significantly lower energy.
1449 * If beta==0 this was steepest descent, and then we give up.
1450 * If not, set beta=0 and restart with steepest descent before quitting.
1452 if (beta == 0.0)
1454 /* Converged */
1455 converged = TRUE;
1456 break;
1458 else
1460 /* Reset memory before giving up */
1461 beta = 0.0;
1462 continue;
1466 /* Select min energy state of A & C, put the best in B.
1468 if (s_c->epot < s_a->epot)
1470 if (debug)
1472 fprintf(debug, "CGE: C (%f) is lower than A (%f), moving C to B\n", s_c->epot,
1473 s_a->epot);
1475 swap_em_state(&s_b, &s_c);
1476 gpb = gpc;
1478 else
1480 if (debug)
1482 fprintf(debug, "CGE: A (%f) is lower than C (%f), moving A to B\n", s_a->epot,
1483 s_c->epot);
1485 swap_em_state(&s_b, &s_a);
1486 gpb = gpa;
1489 else
1491 if (debug)
1493 fprintf(debug, "CGE: Found a lower energy %f, moving C to B\n", s_c->epot);
1495 swap_em_state(&s_b, &s_c);
1496 gpb = gpc;
1499 /* new search direction */
1500 /* beta = 0 means forget all memory and restart with steepest descents. */
1501 if (nstcg && ((step % nstcg) == 0))
1503 beta = 0.0;
1505 else
1507 /* s_min->fnorm cannot be zero, because then we would have converged and broken out. */
1508 * and broken out.
1511 /* Polak-Ribiere update.
1512 * Change to fnorm2/fnorm2_old for Fletcher-Reeves
1514 beta = pr_beta(cr, &inputrec->opts, mdatoms, top_global, s_min, s_b);
1516 /* Limit beta to prevent oscillations */
1517 if (fabs(beta) > 5.0)
1519 beta = 0.0;
1523 /* update positions */
1524 swap_em_state(&s_min, &s_b);
1525 gpa = gpb;
1527 /* Print it if necessary */
1528 if (MASTER(cr))
1530 if (mdrunOptions.verbose)
1532 double sqrtNumAtoms = sqrt(static_cast<double>(state_global->natoms));
1533 fprintf(stderr, "\rStep %d, Epot=%12.6e, Fnorm=%9.3e, Fmax=%9.3e (atom %d)\n", step,
1534 s_min->epot, s_min->fnorm / sqrtNumAtoms, s_min->fmax, s_min->a_fmax + 1);
1535 fflush(stderr);
1537 /* Store the new (lower) energies */
1538 matrix nullBox = {};
1539 energyOutput.addDataAtEnergyStep(false, false, static_cast<double>(step), mdatoms->tmass,
1540 enerd, nullptr, nullptr, nullBox, PTCouplingArrays(), 0,
1541 nullptr, nullptr, vir, pres, nullptr, mu_tot, constr);
1543 do_log = do_per_step(step, inputrec->nstlog);
1544 do_ene = do_per_step(step, inputrec->nstenergy);
1546 imdSession->fillEnergyRecord(step, TRUE);
1548 if (do_log)
1550 EnergyOutput::printHeader(fplog, step, step);
1552 energyOutput.printStepToEnergyFile(mdoutf_get_fp_ene(outf), do_ene, FALSE, FALSE,
1553 do_log ? fplog : nullptr, step, step,
1554 fr->fcdata.get(), nullptr);
1557 /* Send energies and positions to the IMD client if bIMD is TRUE. */
1558 if (MASTER(cr) && imdSession->run(step, TRUE, state_global->box, state_global->x.rvec_array(), 0))
1560 imdSession->sendPositionsAndEnergies();
1563 /* Stop when the maximum force lies below tolerance.
1564 * If we have reached machine precision, converged is already set to true.
1566 converged = converged || (s_min->fmax < inputrec->em_tol);
1568 } /* End of the loop */
1570 if (converged)
1572 step--; /* we never took that last step in this case */
1574 if (s_min->fmax > inputrec->em_tol)
1576 if (MASTER(cr))
1578 warn_step(fplog, inputrec->em_tol, s_min->fmax, step - 1 == number_steps, FALSE);
1580 converged = FALSE;
1583 if (MASTER(cr))
1585 /* If we printed energy and/or logfile last step (which was the last step)
1586 * we don't have to do it again, but otherwise print the final values.
1588 if (!do_log)
1590 /* Write final value to log since we didn't do anything the last step */
1591 EnergyOutput::printHeader(fplog, step, step);
1593 if (!do_ene || !do_log)
1595 /* Write final energy file entries */
1596 energyOutput.printStepToEnergyFile(mdoutf_get_fp_ene(outf), !do_ene, FALSE, FALSE,
1597 !do_log ? fplog : nullptr, step, step,
1598 fr->fcdata.get(), nullptr);
1602 /* Print some stuff... */
1603 if (MASTER(cr))
1605 fprintf(stderr, "\nwriting lowest energy coordinates.\n");
1608 /* IMPORTANT!
1609 * For accurate normal mode calculation it is imperative that we
1610 * store the last conformation into the full precision binary trajectory.
1612 * However, we should only do it if we did NOT already write this step
1613 * above (which we did if do_x or do_f was true).
1615 /* Note that with 0 < nstfout != nstxout we can end up with two frames
1616 * in the trajectory with the same step number.
1618 do_x = !do_per_step(step, inputrec->nstxout);
1619 do_f = (inputrec->nstfout > 0 && !do_per_step(step, inputrec->nstfout));
1621 write_em_traj(fplog, cr, outf, do_x, do_f, ftp2fn(efSTO, nfile, fnm), top_global, inputrec,
1622 step, s_min, state_global, observablesHistory);
1625 if (MASTER(cr))
1627 double sqrtNumAtoms = sqrt(static_cast<double>(state_global->natoms));
1628 print_converged(stderr, CG, inputrec->em_tol, step, converged, number_steps, s_min, sqrtNumAtoms);
1629 print_converged(fplog, CG, inputrec->em_tol, step, converged, number_steps, s_min, sqrtNumAtoms);
1631 fprintf(fplog, "\nPerformed %d energy evaluations in total.\n", neval);
1634 finish_em(cr, outf, walltime_accounting, wcycle);
1636 /* To print the actual number of steps we needed somewhere */
1637 walltime_accounting_set_nsteps_done(walltime_accounting, step);
1641 void LegacySimulator::do_lbfgs()
1643 static const char* LBFGS = "Low-Memory BFGS Minimizer";
1644 em_state_t ems;
1645 gmx_localtop_t top(top_global->ffparams);
1646 gmx_global_stat_t gstat;
1647 int ncorr, nmaxcorr, point, cp, neval, nminstep;
1648 double stepsize, step_taken, gpa, gpb, gpc, tmp, minstep;
1649 real * rho, *alpha, *p, *s, **dx, **dg;
1650 real a, b, c, maxdelta, delta;
1651 real diag, Epot0;
1652 real dgdx, dgdg, sq, yr, beta;
1653 gmx_bool converged;
1654 rvec mu_tot = { 0 };
1655 gmx_bool do_log, do_ene, do_x, do_f, foundlower, *frozen;
1656 tensor vir, pres;
1657 int start, end, number_steps;
1658 int i, k, m, n, gf, step;
1659 int mdof_flags;
1660 auto mdatoms = mdAtoms->mdatoms();
1662 GMX_LOG(mdlog.info)
1663 .asParagraph()
1664 .appendText(
1665 "Note that activating L-BFGS energy minimization via the "
1666 "integrator .mdp option and the command gmx mdrun may "
1667 "be available in a different form in a future version of GROMACS, "
1668 "e.g. gmx minimize and an .mdp option.");
1670 if (PAR(cr))
1672 gmx_fatal(FARGS, "L-BFGS minimization only supports a single rank");
1675 if (nullptr != constr)
1677 gmx_fatal(
1678 FARGS,
1679 "The combination of constraints and L-BFGS minimization is not implemented. Either "
1680 "do not use constraints, or use another minimizer (e.g. steepest descent).");
1683 n = 3 * state_global->natoms;
1684 nmaxcorr = inputrec->nbfgscorr;
1686 snew(frozen, n);
1688 snew(p, n);
1689 snew(rho, nmaxcorr);
1690 snew(alpha, nmaxcorr);
1692 snew(dx, nmaxcorr);
1693 for (i = 0; i < nmaxcorr; i++)
1695 snew(dx[i], n);
1698 snew(dg, nmaxcorr);
1699 for (i = 0; i < nmaxcorr; i++)
1701 snew(dg[i], n);
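/* Note (hedged, since the L-BFGS update itself lies beyond this excerpt): dx[k]
 * and dg[k] are sized to hold the position and gradient differences of the last
 * nmaxcorr (= inputrec->nbfgscorr) accepted steps, with rho[] and alpha[] as the
 * scalar workspace of the standard two-loop recursion that builds the new search
 * direction further down in this function. */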
1704 step = 0;
1705 neval = 0;
1707 /* Init em */
1708 init_em(fplog, mdlog, LBFGS, cr, inputrec, imdSession, pull_work, state_global, top_global,
1709 &ems, &top, nrnb, fr, mdAtoms, &gstat, vsite, constr, nullptr);
1710 const bool simulationsShareState = false;
1711 gmx_mdoutf* outf = init_mdoutf(fplog, nfile, fnm, mdrunOptions, cr, outputProvider,
1712 mdModulesNotifier, inputrec, top_global, nullptr, wcycle,
1713 StartingBehavior::NewSimulation, simulationsShareState, ms);
1714 gmx::EnergyOutput energyOutput(mdoutf_get_fp_ene(outf), top_global, inputrec, pull_work,
1715 nullptr, false, StartingBehavior::NewSimulation,
1716 simulationsShareState, mdModulesNotifier);
1718 start = 0;
1719 end = mdatoms->homenr;
1721 /* We need 4 working states */
1722 em_state_t s0{}, s1{}, s2{}, s3{};
1723 em_state_t* sa = &s0;
1724 em_state_t* sb = &s1;
1725 em_state_t* sc = &s2;
1726 em_state_t* last = &s3;
1727 /* Initialize by copying the state from ems (we could skip x and f here) */
1728 *sa = ems;
1729 *sb = ems;
1730 *sc = ems;
1732 /* Print to log file */
1733 print_em_start(fplog, cr, walltime_accounting, wcycle, LBFGS);
1735 do_log = do_ene = do_x = do_f = TRUE;
1737 /* Max number of steps */
1738 number_steps = inputrec->nsteps;
1740 /* Create a 3*natoms index to tell whether each degree of freedom is frozen */
1741 gf = 0;
1742 for (i = start; i < end; i++)
1744 if (mdatoms->cFREEZE)
1746 gf = mdatoms->cFREEZE[i];
1748 for (m = 0; m < DIM; m++)
1750 frozen[3 * i + m] = (inputrec->opts.nFreeze[gf][m] != 0);
1753 if (MASTER(cr))
1755 sp_header(stderr, LBFGS, inputrec->em_tol, number_steps);
1757 if (fplog)
1759 sp_header(fplog, LBFGS, inputrec->em_tol, number_steps);
1762 if (vsite)
1764 vsite->construct(state_global->x, 1, {}, state_global->box);
1767 /* Call the force routine and some auxiliary (neighboursearching etc.) */
1768 /* do_force always puts the charge groups in the box and shifts again
1769 * We do not unshift, so molecules are always whole
1771 neval++;
1772 EnergyEvaluator energyEvaluator{ fplog, mdlog, cr, ms, top_global, &top,
1773 inputrec, imdSession, pull_work, nrnb, wcycle, gstat,
1774 vsite, constr, mdAtoms, fr, runScheduleWork, enerd };
1775 energyEvaluator.run(&ems, mu_tot, vir, pres, -1, TRUE);
1777 if (MASTER(cr))
1779 /* Copy stuff to the energy bin for easy printing etc. */
1780 matrix nullBox = {};
1781 energyOutput.addDataAtEnergyStep(false, false, static_cast<double>(step), mdatoms->tmass,
1782 enerd, nullptr, nullptr, nullBox, PTCouplingArrays(), 0,
1783 nullptr, nullptr, vir, pres, nullptr, mu_tot, constr);
1785 EnergyOutput::printHeader(fplog, step, step);
1786 energyOutput.printStepToEnergyFile(mdoutf_get_fp_ene(outf), TRUE, FALSE, FALSE, fplog, step,
1787 step, fr->fcdata.get(), nullptr);
1790 /* Set the initial step.
1791 * since it will be multiplied by the non-normalized search direction
1792 * vector (force vector the first time), we scale it by the
1793 * norm of the force.
1796 if (MASTER(cr))
1798 double sqrtNumAtoms = sqrt(static_cast<double>(state_global->natoms));
1799 fprintf(stderr, "Using %d BFGS correction steps.\n\n", nmaxcorr);
1800 fprintf(stderr, " F-max = %12.5e on atom %d\n", ems.fmax, ems.a_fmax + 1);
1801 fprintf(stderr, " F-Norm = %12.5e\n", ems.fnorm / sqrtNumAtoms);
1802 fprintf(stderr, "\n");
1803 /* and copy to the log file too... */
1804 fprintf(fplog, "Using %d BFGS correction steps.\n\n", nmaxcorr);
1805 fprintf(fplog, " F-max = %12.5e on atom %d\n", ems.fmax, ems.a_fmax + 1);
1806 fprintf(fplog, " F-Norm = %12.5e\n", ems.fnorm / sqrtNumAtoms);
1807 fprintf(fplog, "\n");
1810 // Point is an index to the memory of search directions, where 0 is the first one.
1811 point = 0;
1813 // Set initial search direction to the force (-gradient), or 0 for frozen particles.
1814 real* fInit = static_cast<real*>(ems.f.view().force().data()[0]);
1815 for (i = 0; i < n; i++)
1817 if (!frozen[i])
1819 dx[point][i] = fInit[i]; /* Initial search direction */
1821 else
1823 dx[point][i] = 0;
1827 // Stepsize will be modified during the search, and actually it is not critical
1828 // (the main efficiency in the algorithm comes from changing directions), but
1829 // we still need an initial value, so estimate it as the inverse of the norm
1830 // so we take small steps where the potential fluctuates a lot.
1831 stepsize = 1.0 / ems.fnorm;
1833 /* Start the loop over BFGS steps.
1834 * Each successful step is counted, and we continue until
1835 * we either converge or reach the max number of steps.
1838 ncorr = 0;
1840 /* Set the gradient from the force */
1841 converged = FALSE;
1842 for (step = 0; (number_steps < 0 || step <= number_steps) && !converged; step++)
1845 /* Write coordinates if necessary */
1846 do_x = do_per_step(step, inputrec->nstxout);
1847 do_f = do_per_step(step, inputrec->nstfout);
1849 mdof_flags = 0;
1850 if (do_x)
1852 mdof_flags |= MDOF_X;
1855 if (do_f)
1857 mdof_flags |= MDOF_F;
1860 if (inputrec->bIMD)
1862 mdof_flags |= MDOF_IMD;
1865 gmx::WriteCheckpointDataHolder checkpointDataHolder;
1866 mdoutf_write_to_trajectory_files(fplog, cr, outf, mdof_flags, top_global->natoms, step,
1867 static_cast<real>(step), &ems.s, state_global, observablesHistory,
1868 ems.f.view().force(), &checkpointDataHolder);
1870 /* Do the linesearching in the direction dx[point][0..(n-1)] */
1872 /* make s a pointer to current search direction - point=0 first time we get here */
1873 s = dx[point];
1875 real* xx = static_cast<real*>(ems.s.x.rvec_array()[0]);
1876 real* ff = static_cast<real*>(ems.f.view().force().data()[0]);
1878 // calculate line gradient in position A
1879 for (gpa = 0, i = 0; i < n; i++)
1881 gpa -= s[i] * ff[i];
1884 /* Calculate minimum allowed stepsize along the line, before the average (norm)
1885 * relative change in coordinate is smaller than precision
1887 for (minstep = 0, i = 0; i < n; i++)
1889 tmp = fabs(xx[i]);
1890 if (tmp < 1.0)
1892 tmp = 1.0;
1894 tmp = s[i] / tmp;
1895 minstep += tmp * tmp;
1897 minstep = GMX_REAL_EPS / sqrt(minstep / n);
1899 if (stepsize < minstep)
1901 converged = TRUE;
1902 break;
1905 // Before taking any steps along the line, store the old position
1906 *last = ems;
1907 real* lastx = static_cast<real*>(last->s.x.data()[0]);
1908 real* lastf = static_cast<real*>(last->f.view().force().data()[0]);
1909 Epot0 = ems.epot;
1911 *sa = ems;
1913 /* Take a step downhill.
1914 * In theory, we should find the actual minimum of the function in this
1915 * direction, somewhere along the line.
1916 * That is quite possible, but it turns out to take 5-10 function evaluations
1917 * for each line. However, we don't really need to find the exact minimum -
1918 * it is much better to start a new BFGS step in a modified direction as soon
1919 * as we are close to it. This will save a lot of energy evaluations.
1921 * In practice, we just try to take a single step.
1922 * If it worked (i.e. lowered the energy), we increase the stepsize but
1923 * continue straight to the next BFGS step without trying to find any minimum,
1924 * i.e. we change the search direction too. If the line was smooth, it is
1925 * likely we are in a smooth region, and then it makes sense to take longer
1926 * steps in the modified search direction too.
1928 * If it didn't work (higher energy), there must be a minimum somewhere between
1929 * the old position and the new one. Then we need to start by finding a lower
1930 * value before we change search direction. Since the energy was apparently
1931 * quite rough, we need to decrease the step size.
1933 * Due to the finite numerical accuracy, it turns out that it is a good idea
1934 * to accept a SMALL increase in energy, if the derivative is still downhill.
1935 * This leads to lower final energies in the tests I've done. / Erik
1938 // State "A" is the first position along the line.
1939 // reference position along line is initially zero
1940 a = 0.0;
1942 // Check stepsize first. We do not allow displacements
1943 // larger than emstep.
1947 // Pick a new position C by adding stepsize to A.
1948 c = a + stepsize;
1950 // Calculate what the largest change in any individual coordinate
1951 // would be (translation along line * gradient along line)
1952 maxdelta = 0;
1953 for (i = 0; i < n; i++)
1955 delta = c * s[i];
1956 if (delta > maxdelta)
1958 maxdelta = delta;
1961 // If any displacement is larger than the stepsize limit, reduce the step
1962 if (maxdelta > inputrec->em_stepsize)
1964 stepsize *= 0.1;
1966 } while (maxdelta > inputrec->em_stepsize);
1968 // Take a trial step and move the coordinate array xc[] to position C
1969 real* xc = static_cast<real*>(sc->s.x.rvec_array()[0]);
1970 for (i = 0; i < n; i++)
1972 xc[i] = lastx[i] + c * s[i];
1975 neval++;
1976 // Calculate energy for the trial step in position C
1977 energyEvaluator.run(sc, mu_tot, vir, pres, step, FALSE);
1979 // Calc line gradient in position C
1980 real* fc = static_cast<real*>(sc->f.view().force()[0]);
1981 for (gpc = 0, i = 0; i < n; i++)
1983 gpc -= s[i] * fc[i]; /* f is negative gradient, thus the sign */
1985 /* Sum the gradient along the line across CPUs */
1986 if (PAR(cr))
1988 gmx_sumd(1, &gpc, cr);
1991 // This is the max amount of increase in energy we tolerate.
1992 // By allowing VERY small changes (close to numerical precision) we
1993 // frequently find even better (lower) final energies.
1994 tmp = std::sqrt(GMX_REAL_EPS) * fabs(sa->epot);
1996 // Accept the step if the energy is lower in the new position C (compared to A),
1997 // or if it is not significantly higher and the line derivative is still negative.
1998 foundlower = sc->epot < sa->epot || (gpc < 0 && sc->epot < (sa->epot + tmp));
1999 // If true, great, we found a better energy. We no longer try to alter the
2000 // stepsize, but simply accept this new better position. Then we select a new
2001 // search direction instead, which will be much more efficient than continuing
2002 // to take smaller steps along a line. Set fnorm based on the new C position,
2003 // which will be used to update the stepsize to 1/fnorm further down.
2005 // If false, the energy is NOT lower in point C, i.e. it will be the same
2006 // or higher than in point A. In this case it is pointless to move to point C,
2007 // so we will have to do more iterations along the same line to find a smaller
2008 // value in the interval [A=0.0,C].
2009 // Here, A is still 0.0, but that will change when we do a search in the interval
2010 // [0.0,C] below. That search we will do by interpolation or bisection rather
2011 // than with the stepsize, so no need to modify it. For the next search direction
2012 // it will be reset to 1/fnorm anyway.
2014 if (!foundlower)
2016 // OK, if we didn't find a lower value we will have to locate one now - there must
2017 // be one in the interval [a,c].
2018 // The same thing is valid here, though: Don't spend dozens of iterations to find
2019 // the line minimum. We try to interpolate based on the derivative at the endpoints,
2020 // and only continue until we find a lower value. In most cases this means 1-2 iterations.
2021 // I also have a safeguard for potentially really pathological functions so we never
2022 // take more than 20 steps before we give up.
2023 // If we already found a lower value we just skip this step and continue to the update.
2024 real fnorm = 0;
2025 nminstep = 0;
2028 // Select a new trial point B in the interval [A,C].
2029 // If the derivatives at points A & C have opposite signs, we interpolate to the zero of the derivative;
2030 // otherwise just do a bisection since there might be multiple minima/maxima
2031 // inside the interval.
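// Interpolation note: modelling the line derivative linearly between
// (a, gpa) and (c, gpc), its zero lies at b = a + gpa*(a - c)/(gpc - gpa),
// which is the expression used below.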
2032 if (gpa < 0 && gpc > 0)
2034 b = a + gpa * (a - c) / (gpc - gpa);
2036 else
2038 b = 0.5 * (a + c);
2041 /* safeguard if interpolation close to machine accuracy causes errors:
2042 * never go outside the interval
2044 if (b <= a || b >= c)
2046 b = 0.5 * (a + c);
2049 // Take a trial step to point B
2050 real* xb = static_cast<real*>(sb->s.x.rvec_array()[0]);
2051 for (i = 0; i < n; i++)
2053 xb[i] = lastx[i] + b * s[i];
2056 neval++;
2057 // Calculate energy for the trial step in point B
2058 energyEvaluator.run(sb, mu_tot, vir, pres, step, FALSE);
2059 fnorm = sb->fnorm;
2061 // Calculate gradient in point B
2062 real* fb = static_cast<real*>(sb->f.view().force()[0]);
2063 for (gpb = 0, i = 0; i < n; i++)
2065 gpb -= s[i] * fb[i]; /* f is negative gradient, thus the sign */
2067 /* Sum the gradient along the line across CPUs */
2068 if (PAR(cr))
2070 gmx_sumd(1, &gpb, cr);
2073 // Keep one of the intervals [A,B] or [B,C] based on the value of the derivative
2074 // at the new point B, and rename the endpoints of this new interval A and C.
2075 if (gpb > 0)
2077 /* Replace c endpoint with b */
2078 c = b;
2079 /* copy state b to c */
2080 *sc = *sb;
2082 else
2084 /* Replace a endpoint with b */
2085 a = b;
2086 /* copy state b to a */
2087 *sa = *sb;
2091 * Stop search as soon as we find a value smaller than the endpoints,
2092 * or if the tolerance is below machine precision.
2093 * Never run more than 20 steps, no matter what.
2095 nminstep++;
2096 } while ((sb->epot > sa->epot || sb->epot > sc->epot) && (nminstep < 20));
2098 if (std::fabs(sb->epot - Epot0) < GMX_REAL_EPS || nminstep >= 20)
2100 /* OK. We couldn't find a significantly lower energy.
2101 * If ncorr==0 this was steepest descent, and then we give up.
2102 * If not, reset the memory and restart as steepest descent instead of quitting.
2104 if (ncorr == 0)
2106 /* Converged */
2107 converged = TRUE;
2108 break;
2110 else
2112 /* Reset memory */
2113 ncorr = 0;
2114 /* Search in gradient direction */
2115 for (i = 0; i < n; i++)
2117 dx[point][i] = ff[i];
2119 /* Reset stepsize */
2120 stepsize = 1.0 / fnorm;
2121 continue;
2125 /* Select min energy state of A & C, put the best in xx/ff/Epot
2127 if (sc->epot < sa->epot)
2129 /* Use state C */
2130 ems = *sc;
2131 step_taken = c;
2133 else
2135 /* Use state A */
2136 ems = *sa;
2137 step_taken = a;
2140 else
2142 /* found lower */
2143 /* Use state C */
2144 ems = *sc;
2145 step_taken = c;
2148 /* Update the memory information and calculate a new
2149 * approximation of the inverse Hessian
2152 /* Have new data in Epot, xx, ff */
2153 if (ncorr < nmaxcorr)
2155 ncorr++;
2158 for (i = 0; i < n; i++)
2160 dg[point][i] = lastf[i] - ff[i];
2161 dx[point][i] *= step_taken;
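/* dg[point] = lastf - ff is the change in the gradient over this step
 * (f = -grad(E), so lastf - ff = grad(E)_new - grad(E)_old), and scaling
 * dx[point] by step_taken turns the search direction into the actual
 * position change. These are the "y" and "s" vectors of the L-BFGS update.
 */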
2164 dgdg = 0;
2165 dgdx = 0;
2166 for (i = 0; i < n; i++)
2168 dgdg += dg[point][i] * dg[point][i];
2169 dgdx += dg[point][i] * dx[point][i];
2172 diag = dgdx / dgdg;
2174 rho[point] = 1.0 / dgdx;
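/* With y = dg and s = dx as set above, diag = (s.y)/(y.y) is the usual
 * L-BFGS scaling of the initial inverse Hessian (H0 = diag * I), and
 * rho = 1/(y.s) is the curvature factor used in the two-loop recursion below.
 */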
2175 point++;
2177 if (point >= nmaxcorr)
2179 point = 0;
2182 /* Update */
2183 for (i = 0; i < n; i++)
2185 p[i] = ff[i];
2188 cp = point;
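/* Standard L-BFGS two-loop recursion: starting from p = f = -grad(E), the
 * backward loop computes alpha_i = rho_i*(s_i.p) and subtracts alpha_i*y_i,
 * p is then scaled by the initial inverse Hessian (diag), and the forward
 * loop adds (alpha_i - beta_i)*s_i with beta_i = rho_i*(y_i.p). The result
 * is the new search direction p = H*f, i.e. minus the preconditioned gradient.
 */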
2190 /* Recursive update. First go back over the memory points */
2191 for (k = 0; k < ncorr; k++)
2193 cp--;
2194 if (cp < 0)
2196 cp = ncorr - 1;
2199 sq = 0;
2200 for (i = 0; i < n; i++)
2202 sq += dx[cp][i] * p[i];
2205 alpha[cp] = rho[cp] * sq;
2207 for (i = 0; i < n; i++)
2209 p[i] -= alpha[cp] * dg[cp][i];
2213 for (i = 0; i < n; i++)
2215 p[i] *= diag;
2218 /* And then go forward again */
2219 for (k = 0; k < ncorr; k++)
2221 yr = 0;
2222 for (i = 0; i < n; i++)
2224 yr += p[i] * dg[cp][i];
2227 beta = rho[cp] * yr;
2228 beta = alpha[cp] - beta;
2230 for (i = 0; i < n; i++)
2232 p[i] += beta * dx[cp][i];
2235 cp++;
2236 if (cp >= ncorr)
2238 cp = 0;
2242 for (i = 0; i < n; i++)
2244 if (!frozen[i])
2246 dx[point][i] = p[i];
2248 else
2250 dx[point][i] = 0;
2254 /* Print it if necessary */
2255 if (MASTER(cr))
2257 if (mdrunOptions.verbose)
2259 double sqrtNumAtoms = sqrt(static_cast<double>(state_global->natoms));
2260 fprintf(stderr, "\rStep %d, Epot=%12.6e, Fnorm=%9.3e, Fmax=%9.3e (atom %d)\n", step,
2261 ems.epot, ems.fnorm / sqrtNumAtoms, ems.fmax, ems.a_fmax + 1);
2262 fflush(stderr);
2264 /* Store the new (lower) energies */
2265 matrix nullBox = {};
2266 energyOutput.addDataAtEnergyStep(false, false, static_cast<double>(step), mdatoms->tmass,
2267 enerd, nullptr, nullptr, nullBox, PTCouplingArrays(), 0,
2268 nullptr, nullptr, vir, pres, nullptr, mu_tot, constr);
2270 do_log = do_per_step(step, inputrec->nstlog);
2271 do_ene = do_per_step(step, inputrec->nstenergy);
2273 imdSession->fillEnergyRecord(step, TRUE);
2275 if (do_log)
2277 EnergyOutput::printHeader(fplog, step, step);
2279 energyOutput.printStepToEnergyFile(mdoutf_get_fp_ene(outf), do_ene, FALSE, FALSE,
2280 do_log ? fplog : nullptr, step, step,
2281 fr->fcdata.get(), nullptr);
2284 /* Send x and E to IMD client, if bIMD is TRUE. */
2285 if (imdSession->run(step, TRUE, state_global->box, state_global->x.rvec_array(), 0) && MASTER(cr))
2287 imdSession->sendPositionsAndEnergies();
2290 // Reset stepsize if we are doing more iterations
2291 stepsize = 1.0;
2293 /* Stop when the maximum force lies below tolerance.
2294 * If we have reached machine precision, converged is already set to true.
2296 converged = converged || (ems.fmax < inputrec->em_tol);
2298 } /* End of the loop */
2300 if (converged)
2302 step--; /* we never took that last step in this case */
2304 if (ems.fmax > inputrec->em_tol)
2306 if (MASTER(cr))
2308 warn_step(fplog, inputrec->em_tol, ems.fmax, step - 1 == number_steps, FALSE);
2310 converged = FALSE;
2313 /* If we already printed energy and/or log output on the final step,
2314 * we don't have to do it again, but otherwise print the final values.
2316 if (!do_log) /* Write final value to log since we didn't do anything last step */
2318 EnergyOutput::printHeader(fplog, step, step);
2320 if (!do_ene || !do_log) /* Write final energy file entries */
2322 energyOutput.printStepToEnergyFile(mdoutf_get_fp_ene(outf), !do_ene, FALSE, FALSE,
2323 !do_log ? fplog : nullptr, step, step, fr->fcdata.get(),
2324 nullptr);
2327 /* Print some stuff... */
2328 if (MASTER(cr))
2330 fprintf(stderr, "\nwriting lowest energy coordinates.\n");
2333 /* IMPORTANT!
2334 * For accurate normal mode calculation it is imperative that we
2335 * store the last conformation into the full precision binary trajectory.
2337 * However, we should only do it if we did NOT already write this step
2338 * above (which we did if do_x or do_f was true).
2340 do_x = !do_per_step(step, inputrec->nstxout);
2341 do_f = !do_per_step(step, inputrec->nstfout);
2342 write_em_traj(fplog, cr, outf, do_x, do_f, ftp2fn(efSTO, nfile, fnm), top_global, inputrec,
2343 step, &ems, state_global, observablesHistory);
2345 if (MASTER(cr))
2347 double sqrtNumAtoms = sqrt(static_cast<double>(state_global->natoms));
2348 print_converged(stderr, LBFGS, inputrec->em_tol, step, converged, number_steps, &ems, sqrtNumAtoms);
2349 print_converged(fplog, LBFGS, inputrec->em_tol, step, converged, number_steps, &ems, sqrtNumAtoms);
2351 fprintf(fplog, "\nPerformed %d energy evaluations in total.\n", neval);
2354 finish_em(cr, outf, walltime_accounting, wcycle);
2356 /* To print the actual number of steps we needed somewhere */
2357 walltime_accounting_set_nsteps_done(walltime_accounting, step);
2360 void LegacySimulator::do_steep()
2362 const char* SD = "Steepest Descents";
2363 gmx_localtop_t top(top_global->ffparams);
2364 gmx_global_stat_t gstat;
2365 real stepsize;
2366 real ustep;
2367 gmx_bool bDone, bAbort, do_x, do_f;
2368 tensor vir, pres;
2369 rvec mu_tot = { 0 };
2370 int nsteps;
2371 int count = 0;
2372 int steps_accepted = 0;
2373 auto mdatoms = mdAtoms->mdatoms();
2375 GMX_LOG(mdlog.info)
2376 .asParagraph()
2377 .appendText(
2378 "Note that activating steepest-descent energy minimization via the "
2379 "integrator .mdp option and the command gmx mdrun may "
2380 "be available in a different form in a future version of GROMACS, "
2381 "e.g. gmx minimize and an .mdp option.");
2383 /* Create 2 states on the stack and extract pointers that we will swap */
2384 em_state_t s0{}, s1{};
2385 em_state_t* s_min = &s0;
2386 em_state_t* s_try = &s1;
2388 /* Init em and store the local state in s_try */
2389 init_em(fplog, mdlog, SD, cr, inputrec, imdSession, pull_work, state_global, top_global, s_try,
2390 &top, nrnb, fr, mdAtoms, &gstat, vsite, constr, nullptr);
2391 const bool simulationsShareState = false;
2392 gmx_mdoutf* outf = init_mdoutf(fplog, nfile, fnm, mdrunOptions, cr, outputProvider,
2393 mdModulesNotifier, inputrec, top_global, nullptr, wcycle,
2394 StartingBehavior::NewSimulation, simulationsShareState, ms);
2395 gmx::EnergyOutput energyOutput(mdoutf_get_fp_ene(outf), top_global, inputrec, pull_work,
2396 nullptr, false, StartingBehavior::NewSimulation,
2397 simulationsShareState, mdModulesNotifier);
2399 /* Print to log file */
2400 print_em_start(fplog, cr, walltime_accounting, wcycle, SD);
2402 /* Set variables for stepsize (in nm). This is the largest
2403 * step that we are going to make in any direction.
2405 ustep = inputrec->em_stepsize;
2406 stepsize = 0;
2408 /* Max number of steps */
2409 nsteps = inputrec->nsteps;
2411 if (MASTER(cr))
2413 /* Print to the screen */
2414 sp_header(stderr, SD, inputrec->em_tol, nsteps);
2416 if (fplog)
2418 sp_header(fplog, SD, inputrec->em_tol, nsteps);
2420 EnergyEvaluator energyEvaluator{ fplog, mdlog, cr, ms, top_global, &top,
2421 inputrec, imdSession, pull_work, nrnb, wcycle, gstat,
2422 vsite, constr, mdAtoms, fr, runScheduleWork, enerd };
2424 /**** HERE STARTS THE LOOP ****
2425 * count is the counter for the number of steps
2426 * bDone will be TRUE when the minimization has converged
2427 * bAbort will be TRUE when nsteps steps have been performed or when
2428 * the stepsize becomes smaller than is reasonable for machine precision
2430 count = 0;
2431 bDone = FALSE;
2432 bAbort = FALSE;
2433 while (!bDone && !bAbort)
2435 bAbort = (nsteps >= 0) && (count == nsteps);
2437 /* set new coordinates, except for first step */
2438 bool validStep = true;
2439 if (count > 0)
2441 validStep = do_em_step(cr, inputrec, mdatoms, s_min, stepsize,
2442 s_min->f.view().forceWithPadding(), s_try, constr, count);
2445 if (validStep)
2447 energyEvaluator.run(s_try, mu_tot, vir, pres, count, count == 0);
2449 else
2451 // Signal constraint error during stepping with energy=inf
2452 s_try->epot = std::numeric_limits<real>::infinity();
2455 if (MASTER(cr))
2457 EnergyOutput::printHeader(fplog, count, count);
2460 if (count == 0)
2462 s_min->epot = s_try->epot;
2465 /* Print it if necessary */
2466 if (MASTER(cr))
2468 if (mdrunOptions.verbose)
2470 fprintf(stderr, "Step=%5d, Dmax= %6.1e nm, Epot= %12.5e Fmax= %11.5e, atom= %d%c",
2471 count, ustep, s_try->epot, s_try->fmax, s_try->a_fmax + 1,
2472 ((count == 0) || (s_try->epot < s_min->epot)) ? '\n' : '\r');
2473 fflush(stderr);
2476 if ((count == 0) || (s_try->epot < s_min->epot))
2478 /* Store the new (lower) energies */
2479 matrix nullBox = {};
2480 energyOutput.addDataAtEnergyStep(false, false, static_cast<double>(count),
2481 mdatoms->tmass, enerd, nullptr, nullptr, nullBox,
2482 PTCouplingArrays(), 0, nullptr, nullptr, vir, pres,
2483 nullptr, mu_tot, constr);
2485 imdSession->fillEnergyRecord(count, TRUE);
2487 const bool do_dr = do_per_step(steps_accepted, inputrec->nstdisreout);
2488 const bool do_or = do_per_step(steps_accepted, inputrec->nstorireout);
2489 energyOutput.printStepToEnergyFile(mdoutf_get_fp_ene(outf), TRUE, do_dr, do_or,
2490 fplog, count, count, fr->fcdata.get(), nullptr);
2491 fflush(fplog);
2495 /* Accept the new state if its energy is smaller than the previous one,
2496 * or if this is the first step.
2500 if ((count == 0) || (s_try->epot < s_min->epot))
2502 steps_accepted++;
2504 /* Test whether the convergence criterion is met... */
2505 bDone = (s_try->fmax < inputrec->em_tol);
2507 /* Copy the arrays for force, positions and energy */
2508 /* The s_min state always holds the coordinates and forces of the lowest
2509 sampled energy */
2510 swap_em_state(&s_min, &s_try);
2511 if (count > 0)
2513 ustep *= 1.2;
2516 /* Write to trn, if necessary */
2517 do_x = do_per_step(steps_accepted, inputrec->nstxout);
2518 do_f = do_per_step(steps_accepted, inputrec->nstfout);
2519 write_em_traj(fplog, cr, outf, do_x, do_f, nullptr, top_global, inputrec, count, s_min,
2520 state_global, observablesHistory);
2522 else
2524 /* If energy is not smaller make the step smaller... */
2525 ustep *= 0.5;
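/* Together with the factor 1.2 increase on accepted steps above, this
 * halving adapts the maximum displacement to the local roughness of the
 * potential energy surface.
 */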
2527 if (DOMAINDECOMP(cr) && s_min->s.ddp_count != cr->dd->ddp_count)
2529 /* Reload the old state */
2530 em_dd_partition_system(fplog, mdlog, count, cr, top_global, inputrec, imdSession,
2531 pull_work, s_min, &top, mdAtoms, fr, vsite, constr, nrnb, wcycle);
2535 // If the force is very small after finishing minimization,
2536 // we risk dividing by zero when calculating the step size.
2537 // So we check first if the minimization has stopped before
2538 // trying to obtain a new step size.
2539 if (!bDone)
2541 /* Determine new step */
2542 stepsize = ustep / s_min->fmax;
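// stepsize multiplies the force in do_em_step, so with this choice the atom
// feeling the largest force (fmax) is displaced by exactly ustep, and no
// atom moves further than the current maximum step length.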
2545 /* Check if stepsize is too small, with 1 nm as a characteristic length */
2546 #if GMX_DOUBLE
2547 if (count == nsteps || ustep < 1e-12)
2548 #else
2549 if (count == nsteps || ustep < 1e-6)
2550 #endif
2552 if (MASTER(cr))
2554 warn_step(fplog, inputrec->em_tol, s_min->fmax, count == nsteps, constr != nullptr);
2556 bAbort = TRUE;
2559 /* Send IMD energies and positions, if bIMD is TRUE. */
2560 if (imdSession->run(count, TRUE, MASTER(cr) ? state_global->box : nullptr,
2561 MASTER(cr) ? state_global->x.rvec_array() : nullptr, 0)
2562 && MASTER(cr))
2564 imdSession->sendPositionsAndEnergies();
2567 count++;
2568 } /* End of the loop */
2570 /* Print some data... */
2571 if (MASTER(cr))
2573 fprintf(stderr, "\nwriting lowest energy coordinates.\n");
2575 write_em_traj(fplog, cr, outf, TRUE, inputrec->nstfout != 0, ftp2fn(efSTO, nfile, fnm),
2576 top_global, inputrec, count, s_min, state_global, observablesHistory);
2578 if (MASTER(cr))
2580 double sqrtNumAtoms = sqrt(static_cast<double>(state_global->natoms));
2582 print_converged(stderr, SD, inputrec->em_tol, count, bDone, nsteps, s_min, sqrtNumAtoms);
2583 print_converged(fplog, SD, inputrec->em_tol, count, bDone, nsteps, s_min, sqrtNumAtoms);
2586 finish_em(cr, outf, walltime_accounting, wcycle);
2588 /* To print the actual number of steps we needed somewhere */
2589 inputrec->nsteps = count;
2591 walltime_accounting_set_nsteps_done(walltime_accounting, count);
2594 void LegacySimulator::do_nm()
2596 const char* NM = "Normal Mode Analysis";
2597 int nnodes;
2598 gmx_localtop_t top(top_global->ffparams);
2599 gmx_global_stat_t gstat;
2600 tensor vir, pres;
2601 rvec mu_tot = { 0 };
2602 rvec* dfdx;
2603 gmx_bool bSparse; /* use sparse matrix storage format */
2604 size_t sz;
2605 gmx_sparsematrix_t* sparse_matrix = nullptr;
2606 real* full_matrix = nullptr;
2608 /* variables added relative to a plain mdrun */
2609 int row, col;
2610 real der_range = 10.0 * std::sqrt(GMX_REAL_EPS);
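/* Displacement used for the finite-difference Hessian below; a small multiple
 * of sqrt(machine epsilon) is the usual compromise between truncation error
 * and round-off error in a central difference.
 */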
2611 real x_min;
2612 bool bIsMaster = MASTER(cr);
2613 auto mdatoms = mdAtoms->mdatoms();
2615 GMX_LOG(mdlog.info)
2616 .asParagraph()
2617 .appendText(
2618 "Note that activating normal-mode analysis via the integrator "
2619 ".mdp option and the command gmx mdrun may "
2620 "be available in a different form in a future version of GROMACS, "
2621 "e.g. gmx normal-modes.");
2623 if (constr != nullptr)
2625 gmx_fatal(
2626 FARGS,
2627 "Constraints present with Normal Mode Analysis, this combination is not supported");
2630 gmx_shellfc_t* shellfc;
2632 em_state_t state_work{};
2634 /* Init em and store the local state in state_work */
2635 init_em(fplog, mdlog, NM, cr, inputrec, imdSession, pull_work, state_global, top_global,
2636 &state_work, &top, nrnb, fr, mdAtoms, &gstat, vsite, constr, &shellfc);
2637 const bool simulationsShareState = false;
2638 gmx_mdoutf* outf = init_mdoutf(fplog, nfile, fnm, mdrunOptions, cr, outputProvider,
2639 mdModulesNotifier, inputrec, top_global, nullptr, wcycle,
2640 StartingBehavior::NewSimulation, simulationsShareState, ms);
2642 std::vector<int> atom_index = get_atom_index(top_global);
2643 std::vector<gmx::RVec> fneg(atom_index.size(), { 0, 0, 0 });
2644 snew(dfdx, atom_index.size());
2646 #if !GMX_DOUBLE
2647 if (bIsMaster)
2649 fprintf(stderr,
2650 "NOTE: This version of GROMACS has been compiled in single precision,\n"
2651 " which MIGHT not be accurate enough for normal mode analysis.\n"
2652 " GROMACS now uses sparse matrix storage, so the memory requirements\n"
2653 " are fairly modest even if you recompile in double precision.\n\n");
2655 #endif
2657 /* Check if we can/should use sparse storage format.
2659 * Sparse format is only useful when the Hessian itself is sparse, which it
2660 * will be when we use a cutoff.
2661 * For small systems (n<1000) it is easier to always use full matrix format, though.
2663 if (EEL_FULL(fr->ic->eeltype) || fr->rlist == 0.0)
2665 GMX_LOG(mdlog.warning)
2666 .appendText("Non-cutoff electrostatics used, forcing full Hessian format.");
2667 bSparse = FALSE;
2669 else if (atom_index.size() < 1000)
2671 GMX_LOG(mdlog.warning)
2672 .appendTextFormatted("Small system size (N=%zu), using full Hessian format.",
2673 atom_index.size());
2674 bSparse = FALSE;
2676 else
2678 GMX_LOG(mdlog.warning).appendText("Using compressed symmetric sparse Hessian format.");
2679 bSparse = TRUE;
2682 /* Number of dimensions, based on real atoms only, i.e. excluding vsites and shells */
2683 sz = DIM * atom_index.size();
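/* sz is the linear dimension of the Hessian, so the full-matrix branch below
 * allocates sz*sz reals, while the sparse branch stores only the non-zero
 * elements it is given.
 */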
2685 fprintf(stderr, "Allocating Hessian memory...\n\n");
2687 if (bSparse)
2689 sparse_matrix = gmx_sparsematrix_init(sz);
2690 sparse_matrix->compressed_symmetric = TRUE;
2692 else
2694 snew(full_matrix, sz * sz);
2697 /* Write start time and temperature */
2698 print_em_start(fplog, cr, walltime_accounting, wcycle, NM);
2700 /* fudge the number of steps to twice the number of atoms */
2701 inputrec->nsteps = atom_index.size() * 2;
2703 if (bIsMaster)
2705 fprintf(stderr, "starting normal mode calculation '%s'\n%" PRId64 " steps.\n\n",
2706 *(top_global->name), inputrec->nsteps);
2709 nnodes = cr->nnodes;
2711 /* Make evaluate_energy do a single node force calculation */
2712 cr->nnodes = 1;
2713 EnergyEvaluator energyEvaluator{ fplog, mdlog, cr, ms, top_global, &top,
2714 inputrec, imdSession, pull_work, nrnb, wcycle, gstat,
2715 vsite, constr, mdAtoms, fr, runScheduleWork, enerd };
2716 energyEvaluator.run(&state_work, mu_tot, vir, pres, -1, TRUE);
2717 cr->nnodes = nnodes;
2719 /* if forces are not small, warn user */
2720 get_state_f_norm_max(cr, &(inputrec->opts), mdatoms, &state_work);
2722 GMX_LOG(mdlog.warning).appendTextFormatted("Maximum force:%12.5e", state_work.fmax);
2723 if (state_work.fmax > 1.0e-3)
2725 GMX_LOG(mdlog.warning)
2726 .appendText(
2727 "The force is probably not small enough to "
2728 "ensure that you are at a minimum.\n"
2729 "Be aware that negative eigenvalues may occur\n"
2730 "when the resulting matrix is diagonalized.");
2733 /***********************************************************
2735 * Loop over all pairs in matrix
2737 * do_force called twice. Once with positive and
2738 * once with negative displacement
2740 ************************************************************/
2742 /* Steps are divided one by one over the nodes */
2743 bool bNS = true;
2744 auto state_work_x = makeArrayRef(state_work.s.x);
2745 auto state_work_f = state_work.f.view().force();
2746 for (index aid = cr->nodeid; aid < ssize(atom_index); aid += nnodes)
2748 size_t atom = atom_index[aid];
2749 for (size_t d = 0; d < DIM; d++)
2751 int64_t step = 0;
2752 int force_flags = GMX_FORCE_STATECHANGED | GMX_FORCE_ALLFORCES;
2753 double t = 0;
2755 x_min = state_work_x[atom][d];
2757 for (unsigned int dx = 0; (dx < 2); dx++)
2759 if (dx == 0)
2761 state_work_x[atom][d] = x_min - der_range;
2763 else
2765 state_work_x[atom][d] = x_min + der_range;
2768 /* Make evaluate_energy do a single node force calculation */
2769 cr->nnodes = 1;
2770 if (shellfc)
2772 /* Now is the time to relax the shells */
2773 relax_shell_flexcon(fplog, cr, ms, mdrunOptions.verbose, nullptr, step, inputrec,
2774 imdSession, pull_work, bNS, force_flags, &top, constr, enerd,
2775 state_work.s.natoms, state_work.s.x.arrayRefWithPadding(),
2776 state_work.s.v.arrayRefWithPadding(), state_work.s.box,
2777 state_work.s.lambda, &state_work.s.hist, &state_work.f.view(),
2778 vir, mdatoms, nrnb, wcycle, shellfc, fr, runScheduleWork, t,
2779 mu_tot, vsite, DDBalanceRegionHandler(nullptr));
2780 bNS = false;
2781 step++;
2783 else
2785 energyEvaluator.run(&state_work, mu_tot, vir, pres, aid * 2 + dx, FALSE);
2788 cr->nnodes = nnodes;
2790 if (dx == 0)
2792 std::copy(state_work_f.begin(), state_work_f.begin() + atom_index.size(),
2793 fneg.begin());
2797 /* x is restored to original */
2798 state_work_x[atom][d] = x_min;
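/* Build one row of the Hessian by central differences: since f = -dE/dx,
 * d2E/(dx_row dx_col) = -df_col/dx_row ~ -(f_col(x+h) - f_col(x-h)) / (2h)
 * with h = der_range, which is what dfdx holds below.
 */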
2800 for (size_t j = 0; j < atom_index.size(); j++)
2802 for (size_t k = 0; (k < DIM); k++)
2804 dfdx[j][k] = -(state_work_f[atom_index[j]][k] - fneg[j][k]) / (2 * der_range);
2808 if (!bIsMaster)
2810 #if GMX_MPI
2811 # define mpi_type GMX_MPI_REAL
2812 MPI_Send(dfdx[0], atom_index.size() * DIM, mpi_type, MASTER(cr), cr->nodeid,
2813 cr->mpi_comm_mygroup);
2814 #endif
2816 else
2818 for (index node = 0; (node < nnodes && aid + node < ssize(atom_index)); node++)
2820 if (node > 0)
2822 #if GMX_MPI
2823 MPI_Status stat;
2824 MPI_Recv(dfdx[0], atom_index.size() * DIM, mpi_type, node, node,
2825 cr->mpi_comm_mygroup, &stat);
2826 # undef mpi_type
2827 #endif
2830 row = (aid + node) * DIM + d;
2832 for (size_t j = 0; j < atom_index.size(); j++)
2834 for (size_t k = 0; k < DIM; k++)
2836 col = j * DIM + k;
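// The Hessian is symmetric, so in the compressed-symmetric sparse format only
// elements with col >= row (the upper triangle) are stored.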
2838 if (bSparse)
2840 if (col >= row && dfdx[j][k] != 0.0)
2842 gmx_sparsematrix_increment_value(sparse_matrix, row, col, dfdx[j][k]);
2845 else
2847 full_matrix[row * sz + col] = dfdx[j][k];
2854 if (mdrunOptions.verbose && fplog)
2856 fflush(fplog);
2859 /* write progress */
2860 if (bIsMaster && mdrunOptions.verbose)
2862 fprintf(stderr, "\rFinished step %d out of %td",
2863 std::min<int>(atom + nnodes, atom_index.size()), ssize(atom_index));
2864 fflush(stderr);
2868 if (bIsMaster)
2870 fprintf(stderr, "\n\nWriting Hessian...\n");
2871 gmx_mtxio_write(ftp2fn(efMTX, nfile, fnm), sz, sz, full_matrix, sparse_matrix);
2874 finish_em(cr, outf, walltime_accounting, wcycle);
2876 walltime_accounting_set_nsteps_done(walltime_accounting, atom_index.size() * 2);
2879 } // namespace gmx