/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief This file defines integrators for energy minimization
 *
 * \author Berk Hess <hess@kth.se>
 * \author Erik Lindahl <erik@kth.se>
 * \ingroup module_mdrun
 */
#include "gromacs/commandline/filenm.h"
#include "gromacs/domdec/collect.h"
#include "gromacs/domdec/dlbtiming.h"
#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/domdec/mdsetup.h"
#include "gromacs/domdec/partition.h"
#include "gromacs/ewald/pme.h"
#include "gromacs/fileio/confio.h"
#include "gromacs/fileio/mtxio.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/imd/imd.h"
#include "gromacs/linearalgebra/sparsematrix.h"
#include "gromacs/listed_forces/manage_threading.h"
#include "gromacs/math/functions.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdlib/constr.h"
#include "gromacs/mdlib/dispersioncorrection.h"
#include "gromacs/mdlib/ebin.h"
#include "gromacs/mdlib/enerdata_utils.h"
#include "gromacs/mdlib/energyoutput.h"
#include "gromacs/mdlib/force.h"
#include "gromacs/mdlib/forcerec.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdlib/md_support.h"
#include "gromacs/mdlib/mdatoms.h"
#include "gromacs/mdlib/stat.h"
#include "gromacs/mdlib/tgroup.h"
#include "gromacs/mdlib/trajectory_writing.h"
#include "gromacs/mdlib/update.h"
#include "gromacs/mdlib/vsite.h"
#include "gromacs/mdrunutility/handlerestart.h"
#include "gromacs/mdrunutility/printtime.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/mdtypes/mdrunoptions.h"
#include "gromacs/mdtypes/state.h"
#include "gromacs/pbcutil/mshift.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/timing/walltime_accounting.h"
#include "gromacs/topology/mtop_util.h"
#include "gromacs/topology/topology.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/smalloc.h"

#include "legacysimulator.h"
//! Utility structure for manipulating states during EM
struct em_state_t
{
    //! Copy of the global state
    t_state                     s;
    //! Force array
    PaddedHostVector<gmx::RVec> f;
    //! Potential energy
    real                        epot;
    //! Norm of the force
    real                        fnorm;
    //! Maximum force
    real                        fmax;
    //! Index of the atom with the maximum force
    int                         a_fmax;
};
//! Print the EM starting conditions
static void print_em_start(FILE*                     fplog,
                           const t_commrec*          cr,
                           gmx_walltime_accounting_t walltime_accounting,
                           gmx_wallcycle_t           wcycle,
                           const char*               name)
{
    walltime_accounting_start_time(walltime_accounting);
    wallcycle_start(wcycle, ewcRUN);
    print_start(fplog, cr, walltime_accounting, name);
}
//! Stop counting time for EM
static void em_time_end(gmx_walltime_accounting_t walltime_accounting,
                        gmx_wallcycle_t           wcycle)
{
    wallcycle_stop(wcycle, ewcRUN);

    walltime_accounting_end_time(walltime_accounting);
}
//! Print a log-file and console header
static void sp_header(FILE* out, const char* minimizer, real ftol, int nsteps)
{
    fprintf(out, "%s:\n", minimizer);
    fprintf(out, " Tolerance (Fmax) = %12.5e\n", ftol);
    fprintf(out, " Number of steps = %12d\n", nsteps);
}
//! Print a warning message when minimization has not converged
static void warn_step(FILE* fp, real ftol, real fmax, gmx_bool bLastStep, gmx_bool bConstrain)
{
    constexpr bool realIsDouble = GMX_DOUBLE;
    char           buffer[2048];

    if (!std::isfinite(fmax))
    {
        sprintf(buffer,
                "\nEnergy minimization has stopped because the force "
                "on at least one atom is not finite. This usually means "
                "atoms are overlapping. Modify the input coordinates to "
                "remove atom overlap or use soft-core potentials with "
                "the free energy code to avoid infinite forces.\n%s",
                !realIsDouble ?
                "You could also be lucky that switching to double precision "
                "is sufficient to obtain finite forces.\n" :
                "");
    }
    else if (bLastStep)
    {
        sprintf(buffer,
                "\nEnergy minimization reached the maximum number "
                "of steps before the forces reached the requested "
                "precision Fmax < %g.\n", ftol);
    }
    else
    {
        sprintf(buffer,
                "\nEnergy minimization has stopped, but the forces have "
                "not converged to the requested precision Fmax < %g (which "
                "may not be possible for your system). It stopped "
                "because the algorithm tried to make a new step whose size "
                "was too small, or there was no change in the energy since "
                "last step. Either way, we regard the minimization as "
                "converged to within the available machine precision, "
                "given your starting configuration and EM parameters.\n%s%s",
                ftol,
                !realIsDouble ?
                "\nDouble precision normally gives you higher accuracy, but "
                "this is often not needed for preparing to run molecular "
                "dynamics.\n" :
                "",
                bConstrain ?
                "You might need to increase your constraint accuracy, or turn\n"
                "off constraints altogether (set constraints = none in mdp file)\n" :
                "");
    }

    fputs(wrap_lines(buffer, 78, 0, FALSE), stderr);
    fputs(wrap_lines(buffer, 78, 0, FALSE), fp);
}
//! Print message about convergence of the EM
static void print_converged(FILE* fp, const char* alg, real ftol,
                            int64_t count, gmx_bool bDone, int64_t nsteps,
                            const em_state_t* ems, double sqrtNumAtoms)
{
    char buf[STEPSTRSIZE];

    if (bDone)
    {
        fprintf(fp, "\n%s converged to Fmax < %g in %s steps\n",
                alg, ftol, gmx_step_str(count, buf));
    }
    else if (count < nsteps)
    {
        fprintf(fp, "\n%s converged to machine precision in %s steps,\n"
                "but did not reach the requested Fmax < %g.\n",
                alg, gmx_step_str(count, buf), ftol);
    }
    else
    {
        fprintf(fp, "\n%s did not converge to Fmax < %g in %s steps.\n",
                alg, ftol, gmx_step_str(count, buf));
    }

    if (GMX_DOUBLE)
    {
        fprintf(fp, "Potential Energy = %21.14e\n", ems->epot);
        fprintf(fp, "Maximum force = %21.14e on atom %d\n", ems->fmax, ems->a_fmax + 1);
        fprintf(fp, "Norm of force = %21.14e\n", ems->fnorm/sqrtNumAtoms);
    }
    else
    {
        fprintf(fp, "Potential Energy = %14.7e\n", ems->epot);
        fprintf(fp, "Maximum force = %14.7e on atom %d\n", ems->fmax, ems->a_fmax + 1);
        fprintf(fp, "Norm of force = %14.7e\n", ems->fnorm/sqrtNumAtoms);
    }
}
//! Compute the norm and max of the force array in parallel
static void get_f_norm_max(const t_commrec* cr,
                           t_grpopts* opts, t_mdatoms* mdatoms, const rvec* f,
                           real* fnorm, real* fmax, int* a_fmax)
{
    int la_max, a_max, start, end, i, m, gf;

    /* This routine finds the largest force and returns it.
     * On parallel machines the global max is taken.
     */
    end = mdatoms->homenr;

    if (mdatoms->cFREEZE)
        for (i = start; i < end; i++)
            gf = mdatoms->cFREEZE[i];
            for (m = 0; m < DIM; m++)
                if (!opts->nFreeze[gf][m])
                    fam += gmx::square(f[i][m]);

    for (i = start; i < end; i++)

    if (la_max >= 0 && DOMAINDECOMP(cr))
        a_max = cr->dd->globalAtomIndices[la_max];

    snew(sum, 2*cr->nnodes+1);
    sum[2*cr->nodeid]   = fmax2;
    sum[2*cr->nodeid+1] = a_max;
    sum[2*cr->nnodes]   = fnorm2;
    gmx_sumd(2*cr->nnodes+1, sum, cr);
    fnorm2 = sum[2*cr->nnodes];
    /* Determine the global maximum */
    for (i = 0; i < cr->nnodes; i++)
        if (sum[2*i] > fmax2)
            a_max = gmx::roundToInt(sum[2*i+1]);

    *fnorm = sqrt(fnorm2);
}
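
/* Illustrative note (added for orientation; not part of the upstream comments):
 * in the parallel reduction above each rank writes its local squared maximum
 * force into sum[2*nodeid], the global index of the atom carrying it into
 * sum[2*nodeid+1], and its local squared-norm contribution into sum[2*nnodes].
 * After gmx_sumd() every rank holds the concatenated per-rank maxima plus the
 * total squared norm, so the final loop can pick the overall maximum and the
 * owning atom without a second communication step, and
 * *fnorm = sqrt(sum[2*nnodes]) gives the global force norm.
 */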
//! Compute the norm of the force
static void get_state_f_norm_max(const t_commrec* cr,
                                 t_grpopts* opts, t_mdatoms* mdatoms,
                                 em_state_t* ems)
{
    get_f_norm_max(cr, opts, mdatoms, ems->f.rvec_array(),
                   &ems->fnorm, &ems->fmax, &ems->a_fmax);
}
//! Initialize the energy minimization
static void init_em(FILE* fplog,
                    const gmx::MDLogger& mdlog,
                    const char* title,
                    const t_commrec* cr,
                    t_inputrec* ir,
                    gmx::ImdSession* imdSession,
                    pull_t* pull_work,
                    t_state* state_global, gmx_mtop_t* top_global,
                    em_state_t* ems, gmx_localtop_t* top,
                    t_nrnb* nrnb, t_forcerec* fr,
                    t_graph** graph, gmx::MDAtoms* mdAtoms, gmx_global_stat_t* gstat,
                    gmx_vsite_t* vsite, gmx::Constraints* constr, gmx_shellfc_t** shellfc)
{
    fprintf(fplog, "Initiating %s\n", title);

    state_global->ngtc = 0;

    initialize_lambdas(fplog, *ir, MASTER(cr), &(state_global->fep_state), state_global->lambda, nullptr);

    GMX_ASSERT(shellfc != nullptr, "With NM we always support shells");

    *shellfc = init_shell_flexcon(stdout,
                                  constr ? constr->numFlexibleConstraints() : 0,

    GMX_ASSERT(EI_ENERGY_MINIMIZATION(ir->eI),
               "This else currently only handles energy minimizers, consider if your algorithm needs shell/flexible-constraint support");

    /* With energy minimization, shells and flexible constraints are
     * automatically minimized when treated like normal DOFS.
     */
    if (shellfc != nullptr)

    auto mdatoms = mdAtoms->mdatoms();
    if (DOMAINDECOMP(cr))
        top->useInDomainDecomp_ = true;
        dd_init_local_top(*top_global, top);

        dd_init_local_state(cr->dd, state_global, &ems->s);

        /* Distribute the charge groups over the nodes from the master node */
        dd_partition_system(fplog, mdlog, ir->init_step, cr, TRUE, 1,
                            state_global, *top_global, ir, imdSession, pull_work,
                            &ems->s, &ems->f, mdAtoms, top,
                            nrnb, nullptr, FALSE);
        dd_store_state(cr->dd, &ems->s);

        state_change_natoms(state_global, state_global->natoms);
        /* Just copy the state */
        ems->s = *state_global;
        state_change_natoms(&ems->s, ems->s.natoms);
        ems->f.resizeWithPadding(ems->s.natoms);

        mdAlgorithmsSetupAtomData(cr, ir, *top_global, top, fr,
                                  constr, vsite, shellfc ? *shellfc : nullptr);

    set_vsite_top(vsite, top, mdatoms);

    update_mdatoms(mdAtoms->mdatoms(), ems->s.lambda[efptMASS]);

    // TODO how should this cross-module support dependency be managed?
    if (ir->eConstrAlg == econtSHAKE &&
        gmx_mtop_ftype_count(top_global, F_CONSTR) > 0)
        gmx_fatal(FARGS, "Can not do energy minimization with %s, use %s\n",
                  econstr_names[econtSHAKE], econstr_names[econtLINCS]);

    if (!ir->bContinuation)
        /* Constrain the starting coordinates */
        constr->apply(TRUE, TRUE,
                      ems->s.x.rvec_array(),
                      ems->s.x.rvec_array(),
                      ems->s.lambda[efptFEP], &dvdl_constr,
                      nullptr, nullptr, gmx::ConstraintVariable::Positions);

    *gstat = global_stat_init(ir);

    calc_shifts(ems->s.box, fr->shift_vec);
}
//! Finalize the minimization
static void finish_em(const t_commrec* cr, gmx_mdoutf_t outf,
                      gmx_walltime_accounting_t walltime_accounting,
                      gmx_wallcycle_t wcycle)
{
    if (!thisRankHasDuty(cr, DUTY_PME))
        /* Tell the PME only node to finish */
        gmx_pme_send_finish(cr);

    em_time_end(walltime_accounting, wcycle);
}
//! Swap two different EM states during minimization
static void swap_em_state(em_state_t** ems1, em_state_t** ems2)
{
    em_state_t* tmp = *ems1;

    *ems1 = *ems2;
    *ems2 = tmp;
}
//! Save the EM trajectory
static void write_em_traj(FILE* fplog, const t_commrec* cr,
                          gmx_mdoutf_t outf,
                          gmx_bool bX, gmx_bool bF, const char* confout,
                          gmx_mtop_t* top_global,
                          t_inputrec* ir, int64_t step,
                          em_state_t* state,
                          t_state* state_global,
                          ObservablesHistory* observablesHistory)
{
    int mdof_flags = 0;

    if (bX)
        mdof_flags |= MDOF_X;
    if (bF)
        mdof_flags |= MDOF_F;

    /* If we want IMD output, set appropriate MDOF flag */
    if (ir->bIMD)
        mdof_flags |= MDOF_IMD;

    mdoutf_write_to_trajectory_files(fplog, cr, outf, mdof_flags,
                                     top_global->natoms, step, static_cast<double>(step),
                                     &state->s, state_global, observablesHistory,

    if (confout != nullptr)
    {
        if (DOMAINDECOMP(cr))
        {
            /* If bX=true, x was collected to state_global in the call above */
            auto globalXRef = MASTER(cr) ? state_global->x : gmx::ArrayRef<gmx::RVec>();
            dd_collect_vec(cr->dd, &state->s, state->s.x, globalXRef);
        }
        else
        {
            /* Copy the local state pointer */
            state_global = &state->s;
        }

        if (ir->ePBC != epbcNONE && !ir->bPeriodicMols && DOMAINDECOMP(cr))
            /* Make molecules whole only for confout writing */
            do_pbc_mtop(ir->ePBC, state->s.box, top_global,
                        state_global->x.rvec_array());

        write_sto_conf_mtop(confout,
                            *top_global->name, top_global,
                            state_global->x.rvec_array(), nullptr, ir->ePBC, state->s.box);
    }
}
//! \brief Do one minimization step
//
// \returns true when the step succeeded, false when a constraint error occurred
static bool do_em_step(const t_commrec* cr,
                       t_inputrec* ir, t_mdatoms* md,
                       em_state_t* ems1, real a, const PaddedHostVector<gmx::RVec>* force,
                       em_state_t* ems2,
                       gmx::Constraints* constr,
                       int64_t count)
{
    t_state* s1 = &ems1->s;
    t_state* s2 = &ems2->s;
    int nthreads gmx_unused;

    bool validStep = true;

    if (DOMAINDECOMP(cr) && s1->ddp_count != cr->dd->ddp_count)
        gmx_incons("state mismatch in do_em_step");

    s2->flags = s1->flags;

    if (s2->natoms != s1->natoms)
        state_change_natoms(s2, s1->natoms);
        ems2->f.resizeWithPadding(s2->natoms);
    if (DOMAINDECOMP(cr) && s2->cg_gl.size() != s1->cg_gl.size())
        s2->cg_gl.resize(s1->cg_gl.size());

    copy_mat(s1->box, s2->box);
    /* Copy free energy state */
    s2->lambda = s1->lambda;
    copy_mat(s1->box, s2->box);

    nthreads = gmx_omp_nthreads_get(emntUpdate);
#pragma omp parallel num_threads(nthreads)
    {
        const rvec* x1 = s1->x.rvec_array();
        rvec*       x2 = s2->x.rvec_array();
        const rvec* f  = force->rvec_array();

#pragma omp for schedule(static) nowait
        for (int i = start; i < end; i++)
            for (int m = 0; m < DIM; m++)
                if (ir->opts.nFreeze[gf][m])
                    x2[i][m] = x1[i][m];
                else
                    x2[i][m] = x1[i][m] + a*f[i][m];
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;

        if (s2->flags & (1<<estCGP))
        {
            /* Copy the CG p vector */
            const rvec* p1 = s1->cg_p.rvec_array();
            rvec*       p2 = s2->cg_p.rvec_array();
#pragma omp for schedule(static) nowait
            for (int i = start; i < end; i++)
                // Trivial OpenMP block that does not throw
                copy_rvec(p1[i], p2[i]);
        }

        if (DOMAINDECOMP(cr))
        {
            s2->ddp_count = s1->ddp_count;

            /* OpenMP does not support unsigned loop variables */
#pragma omp for schedule(static) nowait
            for (gmx::index i = 0; i < gmx::ssize(s2->cg_gl); i++)
                s2->cg_gl[i] = s1->cg_gl[i];
            s2->ddp_count_cg_gl = s1->ddp_count_cg_gl;
        }
    }

    constr->apply(TRUE, TRUE,
                  s1->x.rvec_array(), s2->x.rvec_array(),
                  s2->lambda[efptBONDED], &dvdl_constr,
                  nullptr, nullptr, gmx::ConstraintVariable::Positions);

    /* This global reduction will affect performance at high
     * parallelization, but we can not really avoid it.
     * But usually EM is not run at high parallelization.
     */
    int reductionBuffer = static_cast<int>(!validStep);
    gmx_sumi(1, &reductionBuffer, cr);
    validStep = (reductionBuffer == 0);

    // We should move this check to the different minimizers
    if (!validStep && ir->eI != eiSteep)
        gmx_fatal(FARGS, "The coordinates could not be constrained. Minimizer '%s' can not handle constraint failures, use minimizer '%s' before using '%s'.",
                  EI(ir->eI), EI(eiSteep), EI(ir->eI));

    return validStep;
}
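
/* Illustrative note (added for orientation; not from the upstream comments): the
 * core update performed by do_em_step() is simply
 *
 *     x_new[i][m] = x_old[i][m] + a * p[i][m]
 *
 * where p is the search direction passed in through `force` (the force itself for
 * steepest descent, cg_p for conjugate gradients) and `a` is the step length chosen
 * by the caller's line search; frozen degrees of freedom keep their old
 * coordinates. Everything else in the function is bookkeeping: copying box, lambda
 * and domain-decomposition metadata into the output state and re-applying
 * constraints to the new coordinates, which is also where a too-large step can be
 * rejected (validStep == false).
 */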
//! Prepare EM for using domain decomposition parallelization
static void em_dd_partition_system(FILE* fplog,
                                   const gmx::MDLogger& mdlog,
                                   int step, const t_commrec* cr,
                                   gmx_mtop_t* top_global, t_inputrec* ir,
                                   gmx::ImdSession* imdSession,
                                   pull_t* pull_work,
                                   em_state_t* ems, gmx_localtop_t* top,
                                   gmx::MDAtoms* mdAtoms, t_forcerec* fr,
                                   gmx_vsite_t* vsite, gmx::Constraints* constr,
                                   t_nrnb* nrnb, gmx_wallcycle_t wcycle)
{
    /* Repartition the domain decomposition */
    dd_partition_system(fplog, mdlog, step, cr, FALSE, 1,
                        nullptr, *top_global, ir, imdSession, pull_work,
                        &ems->s, &ems->f,
                        mdAtoms, top, fr, vsite, constr,
                        nrnb, wcycle, FALSE);
    dd_store_state(cr->dd, &ems->s);
}
/*! \brief Class to handle the work of setting and doing an energy evaluation.
 *
 * This class is a mere aggregate of parameters to pass to evaluate an
 * energy, so that future changes to names and types of them consume
 * less time when refactoring other code.
 *
 * Aggregate initialization is used, for which the chief risk is that
 * if a member is added at the end and not all initializer lists are
 * updated, then the member will be value initialized, which will
 * typically mean initialization to zero.
 *
 * Use a braced initializer list to construct one of these. */
class EnergyEvaluator
{
public:
    /*! \brief Evaluates an energy on the state in \c ems.
     *
     * \todo In practice, the same objects mu_tot, vir, and pres
     * are always passed to this function, so we would rather have
     * them as data members. However, their C-array types are
     * unsuited for aggregate initialization. When the types
     * improve, the call signature of this method can be reduced.
     */
    void run(em_state_t* ems, rvec mu_tot,
             tensor vir, tensor pres,
             int64_t count, gmx_bool bFirst);
    //! Handles logging (deprecated).
    FILE* fplog;
    //! Handles logging.
    const gmx::MDLogger& mdlog;
    //! Handles communication.
    const t_commrec* cr;
    //! Coordinates multi-simulations.
    const gmx_multisim_t* ms;
    //! Holds the simulation topology.
    gmx_mtop_t* top_global;
    //! Holds the domain topology.
    gmx_localtop_t* top;
    //! User input options.
    t_inputrec* inputrec;
    //! The Interactive Molecular Dynamics session.
    gmx::ImdSession* imdSession;
    //! The pull work object.
    pull_t* pull_work;
    //! Manages flop accounting.
    t_nrnb* nrnb;
    //! Manages wall cycle accounting.
    gmx_wallcycle_t wcycle;
    //! Coordinates global reduction.
    gmx_global_stat_t gstat;
    //! Handles virtual sites.
    gmx_vsite_t* vsite;
    //! Handles constraints.
    gmx::Constraints* constr;
    //! Handles strange things.
    t_fcdata* fcd;
    //! Molecular graph for SHAKE.
    t_graph* graph;
    //! Per-atom data for this domain.
    gmx::MDAtoms* mdAtoms;
    //! Handles how to calculate the forces.
    t_forcerec* fr;
    //! Schedule of force-calculation work each step for this task.
    gmx::MdScheduleWorkload* mdScheduleWork;
    //! Stores the computed energies.
    gmx_enerdata_t* enerd;
};
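
// Illustrative note (added for orientation; the exact member list is an
// assumption reconstructed from the call sites later in this file): because the
// class above is an aggregate, callers construct it with a braced initializer
// list whose order must match the member declaration order, e.g.
//
//     EnergyEvaluator energyEvaluator {
//         fplog, mdlog, cr, ms,
//         top_global, &top,
//         inputrec, imdSession, pull_work, nrnb, wcycle, gstat,
//         vsite, constr, fcd, graph,
//         mdAtoms, fr, mdScheduleWork, enerd
//     };
//     energyEvaluator.run(s_min, mu_tot, vir, pres, -1, TRUE);
//
// Omitting a trailing member would silently value-initialize it (typically to
// nullptr/zero), which is the risk mentioned in the class documentation.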
void
EnergyEvaluator::run(em_state_t* ems, rvec mu_tot,
                     tensor vir, tensor pres,
                     int64_t count, gmx_bool bFirst)
{
    tensor force_vir, shake_vir, ekin;

    /* Set the time to the initial time, the time does not change during EM */
    t = inputrec->init_t;

    if (bFirst ||
        (DOMAINDECOMP(cr) && ems->s.ddp_count < cr->dd->ddp_count))
        /* This is the first state or an old state used before the last ns */

    if (inputrec->nstlist > 0)

    construct_vsites(vsite, ems->s.x.rvec_array(), 1, nullptr,
                     top->idef.iparams, top->idef.il,
                     fr->ePBC, fr->bMolPBC, cr, ems->s.box);

    if (DOMAINDECOMP(cr) && bNS)
        /* Repartition the domain decomposition */
        em_dd_partition_system(fplog, mdlog, count, cr, top_global, inputrec, imdSession,
                               pull_work,
                               ems, top, mdAtoms, fr, vsite, constr,
                               nrnb, wcycle);

    /* Calc force & energy on new trial position */
    /* do_force always puts the charge groups in the box and shifts again
     * We do not unshift, so molecules are always whole in congrad.c
     */
    do_force(fplog, cr, ms, inputrec, nullptr, nullptr, imdSession,
             pull_work,
             count, nrnb, wcycle, top,
             ems->s.box, ems->s.x.arrayRefWithPadding(), &ems->s.hist,
             ems->f.arrayRefWithPadding(), force_vir, mdAtoms->mdatoms(), enerd, fcd,
             ems->s.lambda, graph, fr, mdScheduleWork, vsite, mu_tot, t, nullptr,
             GMX_FORCE_STATECHANGED | GMX_FORCE_ALLFORCES |
             GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY |
             (bNS ? GMX_FORCE_NS : 0),
             DDBalanceRegionHandler(cr));

    /* Clear the unused shake virial and pressure */
    clear_mat(shake_vir);

    /* Communicate stuff when parallel */
    if (PAR(cr) && inputrec->eI != eiNM)
    {
        wallcycle_start(wcycle, ewcMoveE);

        global_stat(gstat, cr, enerd, force_vir, shake_vir, mu_tot,
                    inputrec, nullptr, nullptr, nullptr, 1, &terminate,

        wallcycle_stop(wcycle, ewcMoveE);
    }

    if (fr->dispersionCorrection)
    {
        /* Calculate long range corrections to pressure and energy */
        const DispersionCorrection::Correction correction =
            fr->dispersionCorrection->calculate(ems->s.box, ems->s.lambda[efptVDW]);

        enerd->term[F_DISPCORR] = correction.energy;
        enerd->term[F_EPOT] += correction.energy;
        enerd->term[F_PRES] += correction.pressure;
        enerd->term[F_DVDL] += correction.dvdl;
    }
    else
    {
        enerd->term[F_DISPCORR] = 0;
    }

    ems->epot = enerd->term[F_EPOT];

    if (constr)
    {
        /* Project out the constraint components of the force */
        rvec* f_rvec = ems->f.rvec_array();
        constr->apply(FALSE, FALSE,
                      ems->s.x.rvec_array(), f_rvec, f_rvec,
                      ems->s.lambda[efptBONDED], &dvdl_constr,
                      nullptr, &shake_vir, gmx::ConstraintVariable::ForceDispl);
        enerd->term[F_DVDL_CONSTR] += dvdl_constr;
        m_add(force_vir, shake_vir, vir);
    }
    else
    {
        copy_mat(force_vir, vir);
    }

    enerd->term[F_PRES] =
        calc_pres(fr->ePBC, inputrec->nwall, ems->s.box, ekin, vir, pres);

    sum_dhdl(enerd, ems->s.lambda, inputrec->fepvals);

    if (EI_ENERGY_MINIMIZATION(inputrec->eI))
        get_state_f_norm_max(cr, &(inputrec->opts), mdAtoms->mdatoms(), ems);
}
//! Parallel utility summing energies and forces
static double reorder_partsum(const t_commrec* cr, t_grpopts* opts,
                              gmx_mtop_t* top_global,
                              em_state_t* s_min, em_state_t* s_b)
{
    int ncg, *cg_gl, *index, c, cg, i, a0, a1, a, gf, m;

    if (debug)
        fprintf(debug, "Doing reorder_partsum\n");

    const rvec* fm = s_min->f.rvec_array();
    const rvec* fb = s_b->f.rvec_array();

    cgs_gl = dd_charge_groups_global(cr->dd);
    index  = cgs_gl->index;

    /* Collect fm in a global vector fmg.
     * This conflicts with the spirit of domain decomposition,
     * but to fully optimize this a much more complicated algorithm is required.
     */
    snew(fmg, top_global->natoms);

    ncg   = s_min->s.cg_gl.size();
    cg_gl = s_min->s.cg_gl.data();
    for (c = 0; c < ncg; c++)
        for (a = a0; a < a1; a++)
            copy_rvec(fm[i], fmg[a]);
    gmx_sum(top_global->natoms*3, fmg[0], cr);

    /* Now we will determine the part of the sum for the cgs in state s_b */
    ncg   = s_b->s.cg_gl.size();
    cg_gl = s_b->s.cg_gl.data();

    double partsum = 0;
    gmx::ArrayRef<unsigned char> grpnrFREEZE = top_global->groups.groupNumbers[SimulationAtomGroupType::Freeze];
    for (c = 0; c < ncg; c++)
        for (a = a0; a < a1; a++)
            if (!grpnrFREEZE.empty())
                gf = grpnrFREEZE[a];
            for (m = 0; m < DIM; m++)
                if (!opts->nFreeze[gf][m])
                    partsum += (fb[i][m] - fmg[a][m])*fb[i][m];

    return partsum;
}
//! Compute beta for the Polak-Ribiere conjugate-gradient update
static real pr_beta(const t_commrec* cr, t_grpopts* opts, t_mdatoms* mdatoms,
                    gmx_mtop_t* top_global,
                    em_state_t* s_min, em_state_t* s_b)
{
    double sum = 0;

    /* This is just the classical Polak-Ribiere calculation of beta;
     * it looks a bit complicated since we take freeze groups into account,
     * and might have to sum it in parallel runs.
     */

    if (!DOMAINDECOMP(cr) ||
        (s_min->s.ddp_count == cr->dd->ddp_count &&
         s_b->s.ddp_count == cr->dd->ddp_count))
    {
        const rvec* fm = s_min->f.rvec_array();
        const rvec* fb = s_b->f.rvec_array();
        int         gf = 0;

        /* This part of code can be incorrect with DD,
         * since the atom ordering in s_b and s_min might differ.
         */
        for (int i = 0; i < mdatoms->homenr; i++)
        {
            if (mdatoms->cFREEZE)
                gf = mdatoms->cFREEZE[i];
            for (int m = 0; m < DIM; m++)
                if (!opts->nFreeze[gf][m])
                    sum += (fb[i][m] - fm[i][m])*fb[i][m];
        }
    }
    else
    {
        /* We need to reorder cgs while summing */
        sum = reorder_partsum(cr, opts, top_global, s_min, s_b);
    }

    gmx_sumd(1, &sum, cr);

    return sum/gmx::square(s_min->fnorm);
}
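
/* Illustrative note (added for orientation; not from the upstream comments):
 * written out, the coefficient computed by pr_beta() above is
 *
 *     beta_PR = F_new . (F_new - F_old) / |F_old|^2
 *
 * where F_old is the force in s_min and F_new the force in s_b; frozen degrees
 * of freedom are skipped, and the dot product is summed across ranks when
 * running in parallel. The new search direction then becomes
 * p_new = F_new + beta_PR * p_old, which is what the direction loop in do_cg()
 * below evaluates. Setting beta to zero recovers a plain steepest-descent step.
 */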
void
LegacySimulator::do_cg()
{
    const char*       CG = "Polak-Ribiere Conjugate Gradients";

    gmx_global_stat_t gstat;
    double            tmp, minstep;
    real              a, b, c, beta = 0.0;
    gmx_bool          converged, foundlower;
    gmx_bool          do_log = FALSE, do_ene = FALSE, do_x, do_f;
    int               number_steps, neval = 0, nstcg = inputrec->nstcgsteep;
    int               m, step, nminstep;
    auto              mdatoms = mdAtoms->mdatoms();

    GMX_LOG(mdlog.info).asParagraph().
        appendText("Note that activating conjugate gradient energy minimization via the "
                   "integrator .mdp option and the command gmx mdrun may "
                   "be available in a different form in a future version of GROMACS, "
                   "e.g. gmx minimize and an .mdp option.");
    // In CG, the state is extended with a search direction
    state_global->flags |= (1<<estCGP);

    // Ensure the extra per-atom state array gets allocated
    state_change_natoms(state_global, state_global->natoms);

    // Initialize the search direction to zero
    for (RVec& cg_p : state_global->cg_p)
    {
        cg_p = { 0, 0, 0 };
    }

    /* Create 4 states on the stack and extract pointers that we will swap */
    em_state_t  s0 {}, s1 {}, s2 {}, s3 {};
    em_state_t* s_min = &s0;
    em_state_t* s_a   = &s1;
    em_state_t* s_b   = &s2;
    em_state_t* s_c   = &s3;
    /* Init em and store the local state in s_min */
    init_em(fplog, mdlog, CG, cr, inputrec, imdSession,
            pull_work,
            state_global, top_global, s_min, &top,
            nrnb, fr, &graph, mdAtoms, &gstat,
            vsite, constr, nullptr);
    gmx_mdoutf* outf = init_mdoutf(fplog, nfile, fnm, mdrunOptions, cr, outputProvider, mdModulesNotifier, inputrec, top_global, nullptr, wcycle,
                                   StartingBehavior::NewSimulation);
    gmx::EnergyOutput energyOutput(mdoutf_get_fp_ene(outf), top_global, inputrec, pull_work, nullptr, false, mdModulesNotifier);

    /* Print to log file */
    print_em_start(fplog, cr, walltime_accounting, wcycle, CG);

    /* Max number of steps */
    number_steps = inputrec->nsteps;

    sp_header(stderr, CG, inputrec->em_tol, number_steps);
    sp_header(fplog, CG, inputrec->em_tol, number_steps);

    EnergyEvaluator energyEvaluator {
        fplog, mdlog, cr, ms,
        top_global, &top,
        inputrec, imdSession, pull_work, nrnb, wcycle, gstat,
        vsite, constr, fcd, graph,
        mdAtoms, fr, mdScheduleWork, enerd
    };
    /* Call the force routine and some auxiliary (neighboursearching etc.) */
    /* do_force always puts the charge groups in the box and shifts again
     * We do not unshift, so molecules are always whole in congrad.c
     */
    energyEvaluator.run(s_min, mu_tot, vir, pres, -1, TRUE);

    /* Copy stuff to the energy bin for easy printing etc. */
    matrix nullBox = {};
    energyOutput.addDataAtEnergyStep(false, false, static_cast<double>(step),
                                     mdatoms->tmass, enerd, nullptr, nullptr, nullptr, nullBox,
                                     nullptr, nullptr, vir, pres, nullptr, mu_tot, constr);

    energyOutput.printHeader(fplog, step, step);
    energyOutput.printStepToEnergyFile(mdoutf_get_fp_ene(outf), TRUE, FALSE, FALSE,
    /* Estimate/guess the initial stepsize */
    stepsize = inputrec->em_stepsize/s_min->fnorm;

    double sqrtNumAtoms = sqrt(static_cast<double>(state_global->natoms));
    fprintf(stderr, " F-max = %12.5e on atom %d\n",
            s_min->fmax, s_min->a_fmax+1);
    fprintf(stderr, " F-Norm = %12.5e\n",
            s_min->fnorm/sqrtNumAtoms);
    fprintf(stderr, "\n");
    /* and copy to the log file too... */
    fprintf(fplog, " F-max = %12.5e on atom %d\n",
            s_min->fmax, s_min->a_fmax+1);
    fprintf(fplog, " F-Norm = %12.5e\n",
            s_min->fnorm/sqrtNumAtoms);
    fprintf(fplog, "\n");
    /* Start the loop over CG steps.
     * Each successful step is counted, and we continue until
     * we either converge or reach the max number of steps.
     */
    for (step = 0; (number_steps < 0 || step <= number_steps) && !converged; step++)
    {
        /* start taking steps in a new direction
         * First time we enter the routine, beta=0, and the direction is
         * simply the negative gradient.
         */

        /* Calculate the new direction in p, and the gradient in this direction, gpa */
        rvec*       pm  = s_min->s.cg_p.rvec_array();
        const rvec* sfm = s_min->f.rvec_array();
        for (int i = 0; i < mdatoms->homenr; i++)
            if (mdatoms->cFREEZE)
                gf = mdatoms->cFREEZE[i];
            for (m = 0; m < DIM; m++)
                if (!inputrec->opts.nFreeze[gf][m])
                    pm[i][m] = sfm[i][m] + beta*pm[i][m];
                    gpa     -= pm[i][m]*sfm[i][m];
                    /* f is negative gradient, thus the sign */

        /* Sum the gradient along the line across CPUs */
        gmx_sumd(1, &gpa, cr);

        /* Calculate the norm of the search vector */
        get_f_norm_max(cr, &(inputrec->opts), mdatoms, pm, &pnorm, nullptr, nullptr);

        /* Just in case stepsize reaches zero due to numerical precision... */
        stepsize = inputrec->em_stepsize/pnorm;

        /*
         * Double check the value of the derivative in the search direction.
         * If it is positive it must be due to the old information in the
         * CG formula, so just remove that and start over with beta=0.
         * This corresponds to a steepest descent step.
         */
            step--;   /* Don't count this step since we are restarting */
            continue; /* Go back to the beginning of the big for-loop */
        /* Calculate minimum allowed stepsize, before the average (norm)
         * relative change in coordinate is smaller than precision
         */
        minstep      = 0;
        auto s_min_x = makeArrayRef(s_min->s.x);
        for (int i = 0; i < mdatoms->homenr; i++)
            for (m = 0; m < DIM; m++)
                tmp = fabs(s_min_x[i][m]);
                if (tmp < 1.0)
                    tmp = 1.0;
                tmp      = pm[i][m]/tmp;
                minstep += tmp*tmp;
        /* Add up from all CPUs */
        gmx_sumd(1, &minstep, cr);

        minstep = GMX_REAL_EPS/sqrt(minstep/(3*top_global->natoms));
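
        /* Illustrative note (added for orientation; not from the upstream
         * comments): the loop above accumulates sum_i (p_i / max(|x_i|, 1))^2
         * over the local degrees of freedom, i.e. the squared relative
         * displacement per unit step length. Dividing by 3*natoms and taking
         * the square root gives the RMS relative coordinate change for a unit
         * step, so
         *
         *     minstep = GMX_REAL_EPS / rms_relative_change_per_unit_step
         *
         * is the smallest step length that still changes the coordinates by
         * more than machine precision; below it the line search cannot make
         * progress, which is what the check that follows detects.
         */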
        if (stepsize < minstep)
        {
            converged = TRUE;
            break;
        }

        /* Write coordinates if necessary */
        do_x = do_per_step(step, inputrec->nstxout);
        do_f = do_per_step(step, inputrec->nstfout);

        write_em_traj(fplog, cr, outf, do_x, do_f, nullptr,
                      top_global, inputrec, step,
                      s_min, state_global, observablesHistory);
        /* Take a step downhill.
         * In theory, we should minimize the function along this direction.
         * That is quite possible, but it turns out to take 5-10 function evaluations
         * for each line. However, we don't really need to find the exact minimum -
         * it is much better to start a new CG step in a modified direction as soon
         * as we are close to it. This will save a lot of energy evaluations.
         *
         * In practice, we just try to take a single step.
         * If it worked (i.e. lowered the energy), we increase the stepsize but
         * then continue straight to the next CG step without trying to find any minimum.
         * If it didn't work (higher energy), there must be a minimum somewhere between
         * the old position and the new one.
         *
         * Due to the finite numerical accuracy, it turns out that it is a good idea
         * to even accept a SMALL increase in energy, if the derivative is still downhill.
         * This leads to lower final energies in the tests I've done. / Erik
         */
        s_a->epot = s_min->epot;
        c         = a + stepsize; /* reference position along line is zero */

        if (DOMAINDECOMP(cr) && s_min->s.ddp_count < cr->dd->ddp_count)
            em_dd_partition_system(fplog, mdlog, step, cr, top_global, inputrec, imdSession,
                                   pull_work,
                                   s_min, &top, mdAtoms, fr, vsite, constr,
                                   nrnb, wcycle);

        /* Take a trial step (new coords in s_c) */
        do_em_step(cr, inputrec, mdatoms, s_min, c, &s_min->s.cg_p, s_c,
                   constr, -1);

        /* Calculate energy for the trial step */
        energyEvaluator.run(s_c, mu_tot, vir, pres, -1, FALSE);
1325 const rvec
*pc
= s_c
->s
.cg_p
.rvec_array();
1326 const rvec
*sfc
= s_c
->f
.rvec_array();
1328 for (int i
= 0; i
< mdatoms
->homenr
; i
++)
1330 for (m
= 0; m
< DIM
; m
++)
1332 gpc
-= pc
[i
][m
]*sfc
[i
][m
]; /* f is negative gradient, thus the sign */
1335 /* Sum the gradient along the line across CPUs */
1338 gmx_sumd(1, &gpc
, cr
);
1341 /* This is the max amount of increase in energy we tolerate */
1342 tmp
= std::sqrt(GMX_REAL_EPS
)*fabs(s_a
->epot
);
1344 /* Accept the step if the energy is lower, or if it is not significantly higher
1345 * and the line derivative is still negative.
        if (s_c->epot < s_a->epot || (gpc < 0 && s_c->epot < (s_a->epot + tmp)))
        {
            foundlower = TRUE;
            /* Great, we found a better energy. Increase step for next iteration
             * if we are still going down, decrease it otherwise
             */
            if (gpc < 0)
                stepsize *= 1.618034; /* The golden section */
            else
                stepsize *= 0.618034; /* 1/golden section */
        }
        else
        {
            /* New energy is the same or higher. We will have to do some work
             * to find a smaller value in the interval. Take smaller step next time!
             */
            foundlower = FALSE;
            stepsize  *= 0.618034;
        }
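
        /* Illustrative note (added for orientation; not from the upstream
         * comments): 1.618034 is the golden ratio phi and 0.618034 = 1/phi.
         * Scaling the step by phi after a successful downhill step and by 1/phi
         * otherwise grows or shrinks the trial step geometrically, the same
         * ratio used in golden-section line searches, so repeated successes
         * quickly lengthen the step while a failure immediately brackets a
         * smaller interval.
         */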
        /* OK, if we didn't find a lower value we will have to locate one now - there must
         * be one in the interval [a=0,c].
         * The same thing is valid here, though: Don't spend dozens of iterations to find
         * the line minimum. We try to interpolate based on the derivative at the endpoints,
         * and only continue until we find a lower value. In most cases this means 1-2 iterations.
         *
         * I also have a safeguard for potentially really pathological functions so we never
         * take more than 20 steps before we give up ...
         *
         * If we already found a lower value we just skip this step and continue to the update.
         */

            /* Select a new trial point.
             * If the derivatives at points a & c have different sign we interpolate to zero,
             * otherwise just do a bisection.
             */
            if (gpa < 0 && gpc > 0)
                b = a + gpa*(a-c)/(gpc-gpa);
            else
                b = 0.5*(a+c);
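
            /* Illustrative note (added for orientation; not from the upstream
             * comments): the directional derivative is gpa at position a and
             * gpc at position c. Modelling it as a straight line,
             * g(x) = gpa + (x - a)*(gpc - gpa)/(c - a), and solving g(b) = 0
             * gives
             *
             *     b = a - gpa*(c - a)/(gpc - gpa) = a + gpa*(a - c)/(gpc - gpa),
             *
             * i.e. the secant estimate of the line minimum, which is exactly
             * the interpolation used above when the two derivatives bracket a
             * sign change.
             */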
            /* safeguard if interpolation close to machine accuracy causes errors:
             * never go outside the interval
             */
            if (b <= a || b >= c)
                b = 0.5*(a+c);

            if (DOMAINDECOMP(cr) && s_min->s.ddp_count != cr->dd->ddp_count)
                /* Reload the old state */
                em_dd_partition_system(fplog, mdlog, -1, cr, top_global, inputrec, imdSession,
                                       pull_work,
                                       s_min, &top, mdAtoms, fr, vsite, constr,
                                       nrnb, wcycle);

            /* Take a trial step to this new point - new coords in s_b */
            do_em_step(cr, inputrec, mdatoms, s_min, b, &s_min->s.cg_p, s_b,
                       constr, -1);
            /* Calculate energy for the trial step */
            energyEvaluator.run(s_b, mu_tot, vir, pres, -1, FALSE);

            /* p does not change within a step, but since the domain decomposition
             * might change, we have to use cg_p of s_b here.
             */
            const rvec* pb  = s_b->s.cg_p.rvec_array();
            const rvec* sfb = s_b->f.rvec_array();
            for (int i = 0; i < mdatoms->homenr; i++)
                for (m = 0; m < DIM; m++)
                    gpb -= pb[i][m]*sfb[i][m]; /* f is negative gradient, thus the sign */
            /* Sum the gradient along the line across CPUs */
            gmx_sumd(1, &gpb, cr);

            if (debug)
                fprintf(debug, "CGE: EpotA %f EpotB %f EpotC %f gpb %f\n",
                        s_a->epot, s_b->epot, s_c->epot, gpb);

            epot_repl = s_b->epot;

            /* Keep one of the intervals based on the value of the derivative at the new point */
                /* Replace c endpoint with b */
                swap_em_state(&s_b, &s_c);
                /* Replace a endpoint with b */
                swap_em_state(&s_b, &s_a);
            /*
             * Stop search as soon as we find a value smaller than the endpoints.
             * Never run more than 20 steps, no matter what.
             */
        while ((epot_repl > s_a->epot || epot_repl > s_c->epot) &&
               (nminstep < 20));

        if (std::fabs(epot_repl - s_min->epot) < fabs(s_min->epot)*GMX_REAL_EPS ||
            nminstep >= 20)
            /* OK. We couldn't find a significantly lower energy.
             * If beta==0 this was steepest descent, and then we give up.
             * If not, set beta=0 and restart with steepest descent before quitting.
             */
            /* Reset memory before giving up */
        /* Select min energy state of A & C, put the best in B.
         */
        if (s_c->epot < s_a->epot)
        {
            if (debug)
                fprintf(debug, "CGE: C (%f) is lower than A (%f), moving C to B\n",
                        s_c->epot, s_a->epot);
            swap_em_state(&s_b, &s_c);
        }
        else
        {
            if (debug)
                fprintf(debug, "CGE: A (%f) is lower than C (%f), moving A to B\n",
                        s_a->epot, s_c->epot);
            swap_em_state(&s_b, &s_a);
        }

        if (debug)
            fprintf(debug, "CGE: Found a lower energy %f, moving C to B\n",
                    s_c->epot);
        swap_em_state(&s_b, &s_c);
        /* new search direction */
        /* beta = 0 means forget all memory and restart with steepest descents. */
        if (nstcg && ((step % nstcg) == 0))
            beta = 0;
        else
        {
            /* s_min->fnorm cannot be zero, because then we would have converged
             */

            /* Polak-Ribiere update.
             * Change to fnorm2/fnorm2_old for Fletcher-Reeves
             */
            beta = pr_beta(cr, &inputrec->opts, mdatoms, top_global, s_min, s_b);
        }
        /* Limit beta to prevent oscillations */
        if (fabs(beta) > 5.0)
            beta = 0;

        /* update positions */
        swap_em_state(&s_min, &s_b);
        /* Print it if necessary */
        if (mdrunOptions.verbose)
        {
            double sqrtNumAtoms = sqrt(static_cast<double>(state_global->natoms));
            fprintf(stderr, "\rStep %d, Epot=%12.6e, Fnorm=%9.3e, Fmax=%9.3e (atom %d)\n",
                    step, s_min->epot, s_min->fnorm/sqrtNumAtoms,
                    s_min->fmax, s_min->a_fmax+1);
        }

        /* Store the new (lower) energies */
        matrix nullBox = {};
        energyOutput.addDataAtEnergyStep(false, false, static_cast<double>(step),
                                         mdatoms->tmass, enerd, nullptr, nullptr, nullptr, nullBox,
                                         nullptr, nullptr, vir, pres, nullptr, mu_tot, constr);

        do_log = do_per_step(step, inputrec->nstlog);
        do_ene = do_per_step(step, inputrec->nstenergy);

        imdSession->fillEnergyRecord(step, TRUE);

        energyOutput.printHeader(fplog, step, step);
        energyOutput.printStepToEnergyFile(mdoutf_get_fp_ene(outf), do_ene, FALSE, FALSE,
                                           do_log ? fplog : nullptr, step, step,

        /* Send energies and positions to the IMD client if bIMD is TRUE. */
        if (MASTER(cr) && imdSession->run(step, TRUE, state_global->box, state_global->x.rvec_array(), 0))
            imdSession->sendPositionsAndEnergies();
        /* Stop when the maximum force lies below tolerance.
         * If we have reached machine precision, converged is already set to true.
         */
        converged = converged || (s_min->fmax < inputrec->em_tol);

    } /* End of the loop */

    if (converged)
        step--; /* we never took that last step in this case */

    if (s_min->fmax > inputrec->em_tol)
        warn_step(fplog, inputrec->em_tol, s_min->fmax,
                  step-1 == number_steps, FALSE);
    /* If we printed energy and/or logfile last step (which was the last step)
     * we don't have to do it again, but otherwise print the final values.
     */
    if (!do_log)
        /* Write final value to log since we didn't do anything the last step */
        energyOutput.printHeader(fplog, step, step);
    if (!do_ene || !do_log)
        /* Write final energy file entries */
        energyOutput.printStepToEnergyFile(mdoutf_get_fp_ene(outf), !do_ene, FALSE, FALSE,
                                           !do_log ? fplog : nullptr, step, step,

    /* Print some stuff... */
    fprintf(stderr, "\nwriting lowest energy coordinates.\n");

    /*
     * For accurate normal mode calculation it is imperative that we
     * store the last conformation into the full precision binary trajectory.
     *
     * However, we should only do it if we did NOT already write this step
     * above (which we did if do_x or do_f was true).
     */
    /* Note that with 0 < nstfout != nstxout we can end up with two frames
     * in the trajectory with the same step number.
     */
    do_x = !do_per_step(step, inputrec->nstxout);
    do_f = (inputrec->nstfout > 0 && !do_per_step(step, inputrec->nstfout));
    write_em_traj(fplog, cr, outf, do_x, do_f, ftp2fn(efSTO, nfile, fnm),
                  top_global, inputrec, step,
                  s_min, state_global, observablesHistory);

    double sqrtNumAtoms = sqrt(static_cast<double>(state_global->natoms));
    print_converged(stderr, CG, inputrec->em_tol, step, converged, number_steps,
                    s_min, sqrtNumAtoms);
    print_converged(fplog, CG, inputrec->em_tol, step, converged, number_steps,
                    s_min, sqrtNumAtoms);

    fprintf(fplog, "\nPerformed %d energy evaluations in total.\n", neval);

    finish_em(cr, outf, walltime_accounting, wcycle);

    /* To print the actual number of steps we needed somewhere */
    walltime_accounting_set_nsteps_done(walltime_accounting, step);
}
void
LegacySimulator::do_lbfgs()
{
    static const char* LBFGS = "Low-Memory BFGS Minimizer";

    gmx_global_stat_t gstat;
    int               ncorr, nmaxcorr, point, cp, neval, nminstep;
    double            stepsize, step_taken, gpa, gpb, gpc, tmp, minstep;
    real             *rho, *alpha, *p, *s, **dx, **dg;
    real              a, b, c, maxdelta, delta;
    real              dgdx, dgdg, sq, yr, beta;
    gmx_bool          do_log, do_ene, do_x, do_f, foundlower, *frozen;
    int               start, end, number_steps;
    int               i, k, m, n, gf, step;
    auto              mdatoms = mdAtoms->mdatoms();

    GMX_LOG(mdlog.info).asParagraph().
        appendText("Note that activating L-BFGS energy minimization via the "
                   "integrator .mdp option and the command gmx mdrun may "
                   "be available in a different form in a future version of GROMACS, "
                   "e.g. gmx minimize and an .mdp option.");

    if (PAR(cr))
        gmx_fatal(FARGS, "L-BFGS minimization only supports a single rank");

    if (nullptr != constr)
        gmx_fatal(FARGS, "The combination of constraints and L-BFGS minimization is not implemented. Either do not use constraints, or use another minimizer (e.g. steepest descent).");
    n        = 3*state_global->natoms;
    nmaxcorr = inputrec->nbfgscorr;

    snew(rho, nmaxcorr);
    snew(alpha, nmaxcorr);

    for (i = 0; i < nmaxcorr; i++)
        snew(dx[i], n);

    for (i = 0; i < nmaxcorr; i++)
        snew(dg[i], n);

    init_em(fplog, mdlog, LBFGS, cr, inputrec, imdSession,
            pull_work,
            state_global, top_global, &ems, &top,
            nrnb, fr, &graph, mdAtoms, &gstat,
            vsite, constr, nullptr);
    gmx_mdoutf* outf = init_mdoutf(fplog, nfile, fnm, mdrunOptions, cr, outputProvider, mdModulesNotifier, inputrec, top_global, nullptr, wcycle,
                                   StartingBehavior::NewSimulation);
    gmx::EnergyOutput energyOutput(mdoutf_get_fp_ene(outf), top_global, inputrec, pull_work, nullptr, false, mdModulesNotifier);
= mdatoms
->homenr
;
1762 /* We need 4 working states */
1763 em_state_t s0
{}, s1
{}, s2
{}, s3
{};
1764 em_state_t
*sa
= &s0
;
1765 em_state_t
*sb
= &s1
;
1766 em_state_t
*sc
= &s2
;
1767 em_state_t
*last
= &s3
;
1768 /* Initialize by copying the state from ems (we could skip x and f here) */
1773 /* Print to log file */
1774 print_em_start(fplog
, cr
, walltime_accounting
, wcycle
, LBFGS
);
1776 do_log
= do_ene
= do_x
= do_f
= TRUE
;
1778 /* Max number of steps */
1779 number_steps
= inputrec
->nsteps
;
1781 /* Create a 3*natoms index to tell whether each degree of freedom is frozen */
1783 for (i
= start
; i
< end
; i
++)
1785 if (mdatoms
->cFREEZE
)
1787 gf
= mdatoms
->cFREEZE
[i
];
1789 for (m
= 0; m
< DIM
; m
++)
1791 frozen
[3*i
+m
] = (inputrec
->opts
.nFreeze
[gf
][m
] != 0);
1796 sp_header(stderr
, LBFGS
, inputrec
->em_tol
, number_steps
);
1800 sp_header(fplog
, LBFGS
, inputrec
->em_tol
, number_steps
);
1805 construct_vsites(vsite
, state_global
->x
.rvec_array(), 1, nullptr,
1806 top
.idef
.iparams
, top
.idef
.il
,
1807 fr
->ePBC
, fr
->bMolPBC
, cr
, state_global
->box
);
1810 /* Call the force routine and some auxiliary (neighboursearching etc.) */
1811 /* do_force always puts the charge groups in the box and shifts again
1812 * We do not unshift, so molecules are always whole
1815 EnergyEvaluator energyEvaluator
{
1816 fplog
, mdlog
, cr
, ms
,
1818 inputrec
, imdSession
, pull_work
, nrnb
, wcycle
, gstat
,
1819 vsite
, constr
, fcd
, graph
,
1820 mdAtoms
, fr
, mdScheduleWork
, enerd
1822 energyEvaluator
.run(&ems
, mu_tot
, vir
, pres
, -1, TRUE
);
1826 /* Copy stuff to the energy bin for easy printing etc. */
1827 matrix nullBox
= {};
1828 energyOutput
.addDataAtEnergyStep(false, false, static_cast<double>(step
),
1829 mdatoms
->tmass
, enerd
, nullptr, nullptr, nullptr, nullBox
,
1830 nullptr, nullptr, vir
, pres
, nullptr, mu_tot
, constr
);
1832 energyOutput
.printHeader(fplog
, step
, step
);
1833 energyOutput
.printStepToEnergyFile(mdoutf_get_fp_ene(outf
), TRUE
, FALSE
, FALSE
,
1838 /* Set the initial step.
1839 * since it will be multiplied by the non-normalized search direction
1840 * vector (force vector the first time), we scale it by the
1841 * norm of the force.
1846 double sqrtNumAtoms
= sqrt(static_cast<double>(state_global
->natoms
));
1847 fprintf(stderr
, "Using %d BFGS correction steps.\n\n", nmaxcorr
);
1848 fprintf(stderr
, " F-max = %12.5e on atom %d\n", ems
.fmax
, ems
.a_fmax
+ 1);
1849 fprintf(stderr
, " F-Norm = %12.5e\n", ems
.fnorm
/sqrtNumAtoms
);
1850 fprintf(stderr
, "\n");
1851 /* and copy to the log file too... */
1852 fprintf(fplog
, "Using %d BFGS correction steps.\n\n", nmaxcorr
);
1853 fprintf(fplog
, " F-max = %12.5e on atom %d\n", ems
.fmax
, ems
.a_fmax
+ 1);
1854 fprintf(fplog
, " F-Norm = %12.5e\n", ems
.fnorm
/sqrtNumAtoms
);
1855 fprintf(fplog
, "\n");
    // Point is an index to the memory of search directions, where 0 is the first one.
    point = 0;

    // Set initial search direction to the force (-gradient), or 0 for frozen particles.
    real* fInit = static_cast<real*>(ems.f.rvec_array()[0]);
    for (i = 0; i < n; i++)
        if (!frozen[i])
            dx[point][i] = fInit[i]; /* Initial search direction */
        else
            dx[point][i] = 0;

    // Stepsize will be modified during the search, and actually it is not critical
    // (the main efficiency in the algorithm comes from changing directions), but
    // we still need an initial value, so estimate it as the inverse of the norm
    // so we take small steps where the potential fluctuates a lot.
    stepsize = 1.0/ems.fnorm;
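
    /* Illustrative note (added for orientation; not from the upstream
     * comments): the first search direction dx equals the force, so a
     * displacement of c*dx has length proportional to c*|F|. Starting from
     * stepsize = 1/|F| therefore normalizes away the magnitude of the force:
     * a rough landscape (large |F|) gives a small initial scale factor and a
     * smooth one a larger factor. The loop further below additionally caps the
     * largest single-coordinate move at inputrec->em_stepsize before the trial
     * position C is generated.
     */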
    /* Start the loop over BFGS steps.
     * Each successful step is counted, and we continue until
     * we either converge or reach the max number of steps.
     */

    /* Set the gradient from the force */
    for (step = 0; (number_steps < 0 || step <= number_steps) && !converged; step++)
    {
        /* Write coordinates if necessary */
        do_x = do_per_step(step, inputrec->nstxout);
        do_f = do_per_step(step, inputrec->nstfout);

        int mdof_flags = 0;
        if (do_x)
            mdof_flags |= MDOF_X;
        if (do_f)
            mdof_flags |= MDOF_F;
        if (inputrec->bIMD)
            mdof_flags |= MDOF_IMD;

        mdoutf_write_to_trajectory_files(fplog, cr, outf, mdof_flags,
                                         top_global->natoms, step, static_cast<real>(step), &ems.s,
                                         state_global, observablesHistory, ems.f);
        /* Do the linesearching in the direction dx[point][0..(n-1)] */

        /* make s a pointer to current search direction - point=0 first time we get here */
        s = dx[point];

        real* xx = static_cast<real*>(ems.s.x.rvec_array()[0]);
        real* ff = static_cast<real*>(ems.f.rvec_array()[0]);

        // calculate line gradient in position A
        for (gpa = 0, i = 0; i < n; i++)
            gpa -= s[i]*ff[i];

        /* Calculate minimum allowed stepsize along the line, before the average (norm)
         * relative change in coordinate is smaller than precision
         */
        for (minstep = 0, i = 0; i < n; i++)
            tmp = fabs(xx[i]);
            if (tmp < 1.0)
                tmp = 1.0;
            tmp      = s[i]/tmp;
            minstep += tmp*tmp;
        minstep = GMX_REAL_EPS/sqrt(minstep/n);

        if (stepsize < minstep)
        {
            converged = TRUE;
            break;
        }

        // Before taking any steps along the line, store the old position
        real* lastx = static_cast<real*>(last->s.x.data()[0]);
        real* lastf = static_cast<real*>(last->f.data()[0]);
        /* Take a step downhill.
         * In theory, we should find the actual minimum of the function in this
         * direction, somewhere along the line.
         * That is quite possible, but it turns out to take 5-10 function evaluations
         * for each line. However, we don't really need to find the exact minimum -
         * it is much better to start a new BFGS step in a modified direction as soon
         * as we are close to it. This will save a lot of energy evaluations.
         *
         * In practice, we just try to take a single step.
         * If it worked (i.e. lowered the energy), we increase the stepsize but
         * continue straight to the next BFGS step without trying to find any minimum,
         * i.e. we change the search direction too. If the line was smooth, it is
         * likely we are in a smooth region, and then it makes sense to take longer
         * steps in the modified search direction too.
         *
         * If it didn't work (higher energy), there must be a minimum somewhere between
         * the old position and the new one. Then we need to start by finding a lower
         * value before we change search direction. Since the energy was apparently
         * quite rough, we need to decrease the step size.
         *
         * Due to the finite numerical accuracy, it turns out that it is a good idea
         * to accept a SMALL increase in energy, if the derivative is still downhill.
         * This leads to lower final energies in the tests I've done. / Erik
         */

        // State "A" is the first position along the line.
        // reference position along line is initially zero
        a = 0.0;

        // Check stepsize first. We do not allow displacements
        // larger than emstep.
        do
        {
            // Pick a new position C by adding stepsize to A.
            c = a + stepsize;

            // Calculate what the largest change in any individual coordinate
            // would be (translation along line * gradient along line)
            maxdelta = 0;
            for (i = 0; i < n; i++)
            {
                delta = c*s[i];
                if (delta > maxdelta)
                    maxdelta = delta;
            }
            // If any displacement is larger than the stepsize limit, reduce the step
            if (maxdelta > inputrec->em_stepsize)
                stepsize *= 0.1;
        }
        while (maxdelta > inputrec->em_stepsize);

        // Take a trial step and move the coordinate array xc[] to position C
        real* xc = static_cast<real*>(sc->s.x.rvec_array()[0]);
        for (i = 0; i < n; i++)
            xc[i] = lastx[i] + c*s[i];
        // Calculate energy for the trial step in position C
        energyEvaluator.run(sc, mu_tot, vir, pres, step, FALSE);

        // Calc line gradient in position C
        real* fc = static_cast<real*>(sc->f.rvec_array()[0]);
        for (gpc = 0, i = 0; i < n; i++)
            gpc -= s[i]*fc[i]; /* f is negative gradient, thus the sign */
        /* Sum the gradient along the line across CPUs */
        gmx_sumd(1, &gpc, cr);

        // This is the max amount of increase in energy we tolerate.
        // By allowing VERY small changes (close to numerical precision) we
        // frequently find even better (lower) final energies.
        tmp = std::sqrt(GMX_REAL_EPS)*fabs(sa->epot);

        // Accept the step if the energy is lower in the new position C (compared to A),
        // or if it is not significantly higher and the line derivative is still negative.
        foundlower = sc->epot < sa->epot || (gpc < 0 && sc->epot < (sa->epot + tmp));
        // If true, great, we found a better energy. We no longer try to alter the
        // stepsize, but simply accept this new better position. Then we select a new
        // search direction instead, which will be much more efficient than continuing
        // to take smaller steps along a line. Set fnorm based on the new C position,
        // which will be used to update the stepsize to 1/fnorm further down.
        //
        // If false, the energy is NOT lower in point C, i.e. it will be the same
        // or higher than in point A. In this case it is pointless to move to point C,
        // so we will have to do more iterations along the same line to find a smaller
        // value in the interval [A=0.0,C].
        // Here, A is still 0.0, but that will change when we do a search in the interval
        // [0.0,C] below. That search we will do by interpolation or bisection rather
        // than with the stepsize, so no need to modify it. For the next search direction
        // it will be reset to 1/fnorm anyway.

            // OK, if we didn't find a lower value we will have to locate one now - there must
            // be one in the interval [a,c].
            // The same thing is valid here, though: Don't spend dozens of iterations to find
            // the line minimum. We try to interpolate based on the derivative at the endpoints,
            // and only continue until we find a lower value. In most cases this means 1-2 iterations.
            // I also have a safeguard for potentially really pathological functions so we never
            // take more than 20 steps before we give up.
            // If we already found a lower value we just skip this step and continue to the update.

            // Select a new trial point B in the interval [A,C].
            // If the derivatives at points a & c have different sign we interpolate to zero,
            // otherwise just do a bisection since there might be multiple minima/maxima
            // inside the interval.
            if (gpa < 0 && gpc > 0)
                b = a + gpa*(a-c)/(gpc-gpa);
            else
                b = 0.5*(a+c);
            /* safeguard if interpolation close to machine accuracy causes errors:
             * never go outside the interval
             */
            if (b <= a || b >= c)
                b = 0.5*(a+c);

            // Take a trial step to point B
            real* xb = static_cast<real*>(sb->s.x.rvec_array()[0]);
            for (i = 0; i < n; i++)
                xb[i] = lastx[i] + b*s[i];

            // Calculate energy for the trial step in point B
            energyEvaluator.run(sb, mu_tot, vir, pres, step, FALSE);

            // Calculate gradient in point B
            real* fb = static_cast<real*>(sb->f.rvec_array()[0]);
            for (gpb = 0, i = 0; i < n; i++)
                gpb -= s[i]*fb[i]; /* f is negative gradient, thus the sign */
            /* Sum the gradient along the line across CPUs */
            gmx_sumd(1, &gpb, cr);
            // Keep one of the intervals [A,B] or [B,C] based on the value of the derivative
            // at the new point B, and rename the endpoints of this new interval A and C.
                /* Replace c endpoint with b */
                /* copy state b to c */
                /* Replace a endpoint with b */
                /* copy state b to a */

            /*
             * Stop search as soon as we find a value smaller than the endpoints,
             * or if the tolerance is below machine precision.
             * Never run more than 20 steps, no matter what.
             */
        while ((sb->epot > sa->epot || sb->epot > sc->epot) && (nminstep < 20));

        if (std::fabs(sb->epot - Epot0) < GMX_REAL_EPS || nminstep >= 20)
            /* OK. We couldn't find a significantly lower energy.
             * If ncorr==0 this was steepest descent, and then we give up.
             * If not, reset memory to restart as steepest descent before quitting.
             */

            /* Search in gradient direction */
            for (i = 0; i < n; i++)
                dx[point][i] = ff[i];

            /* Reset stepsize */
            stepsize = 1.0/fnorm;
        /* Select min energy state of A & C, put the best in xx/ff/Epot
         */
        if (sc->epot < sa->epot)

        /* Update the memory information, and calculate a new
         * approximation of the inverse hessian
         */

        /* Have new data in Epot, xx, ff */
        if (ncorr < nmaxcorr)
            ncorr++;

        for (i = 0; i < n; i++)
        {
            dg[point][i]  = lastf[i]-ff[i];
            dx[point][i] *= step_taken;
        }

        dgdg = 0;
        dgdx = 0;
        for (i = 0; i < n; i++)
        {
            dgdg += dg[point][i]*dg[point][i];
            dgdx += dg[point][i]*dx[point][i];
        }

        rho[point] = 1.0/dgdx;
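
        /* Illustrative note (added for orientation; not from the upstream
         * comments): in L-BFGS terms dx[point] is the position difference s_k
         * (the step actually taken along the accepted search direction) and
         * dg[point] is the gradient difference y_k (stored here as lastf - ff,
         * because f is the negative gradient). rho[point] = 1/(y_k . s_k) is
         * the standard curvature factor; it is only well defined while
         * y_k . s_k > 0, which a successful downhill line search guarantees.
         * dgdg = y_k . y_k is also accumulated, typically so that the ratio
         * dgdx/dgdg can scale the initial inverse-Hessian guess (the usual
         * gamma_k choice).
         */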
        if (point >= nmaxcorr)
            point = 0;

        for (i = 0; i < n; i++)
            p[i] = ff[i];

        /* Recursive update. First go back over the memory points */
        for (k = 0; k < ncorr; k++)
        {
            sq = 0;
            for (i = 0; i < n; i++)
                sq += dx[cp][i]*p[i];

            alpha[cp] = rho[cp]*sq;

            for (i = 0; i < n; i++)
                p[i] -= alpha[cp]*dg[cp][i];
        }

        for (i = 0; i < n; i++)

        /* And then go forward again */
        for (k = 0; k < ncorr; k++)
        {
            yr = 0;
            for (i = 0; i < n; i++)
                yr += p[i]*dg[cp][i];

            beta = rho[cp]*yr;
            beta = alpha[cp]-beta;

            for (i = 0; i < n; i++)
                p[i] += beta*dx[cp][i];
        }

        for (i = 0; i < n; i++)
            dx[point][i] = p[i];
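
        /* Illustrative note (added for orientation; not from the upstream
         * comments): the pair of loops above is the classic L-BFGS two-loop
         * recursion for applying the implicit inverse Hessian to the current
         * gradient. In textbook form, with s_k = dx[k], y_k = dg[k],
         * rho_k = 1/(y_k . s_k) and q initialized to the gradient:
         *
         *     for k = newest .. oldest:        (backward pass)
         *         alpha_k = rho_k * (s_k . q)
         *         q       = q - alpha_k * y_k
         *     q = gamma * q                    (scale by initial Hessian guess)
         *     for k = oldest .. newest:        (forward pass)
         *         beta_k = rho_k * (y_k . q)
         *         q      = q + (alpha_k - beta_k) * s_k
         *
         * The resulting q is the new search direction, which the final loop
         * copies into dx[point]. The sign convention here differs slightly
         * because ff holds the force (negative gradient), so the result already
         * points downhill. The cp index (whose updates are not fully shown
         * above) walks through the stored correction pairs in the required
         * newest-to-oldest and oldest-to-newest order, wrapping around the
         * circular buffer of size nbfgscorr.
         */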
        /* Print it if necessary */
        if (mdrunOptions.verbose)
        {
            double sqrtNumAtoms = sqrt(static_cast<double>(state_global->natoms));
            fprintf(stderr, "\rStep %d, Epot=%12.6e, Fnorm=%9.3e, Fmax=%9.3e (atom %d)\n",
                    step, ems.epot, ems.fnorm/sqrtNumAtoms, ems.fmax, ems.a_fmax + 1);
        }
        /* Store the new (lower) energies */
        matrix nullBox = {};
        energyOutput.addDataAtEnergyStep(false, false, static_cast<double>(step),
                                         mdatoms->tmass, enerd, nullptr, nullptr, nullptr, nullBox,
                                         nullptr, nullptr, vir, pres, nullptr, mu_tot, constr);
        do_log = do_per_step(step, inputrec->nstlog);
        do_ene = do_per_step(step, inputrec->nstenergy);

        imdSession->fillEnergyRecord(step, TRUE);

        if (do_log)
        {
            energyOutput.printHeader(fplog, step, step);
        }
        energyOutput.printStepToEnergyFile(mdoutf_get_fp_ene(outf), do_ene, FALSE, FALSE,
                                           do_log ? fplog : nullptr, step, step,
                                           fcd, nullptr);
        /* Send x and E to IMD client, if bIMD is TRUE. */
        if (imdSession->run(step, TRUE, state_global->box, state_global->x.rvec_array(), 0) && MASTER(cr))
        {
            imdSession->sendPositionsAndEnergies();
        }
        // Reset stepsize if we are doing more iterations
        stepsize = 1.0/ems.fnorm;

        /* Stop when the maximum force lies below tolerance.
         * If we have reached machine precision, converged is already set to true.
         */
        converged = converged || (ems.fmax < inputrec->em_tol);
    } /* End of the loop */

    if (converged)
    {
        step--; /* we never took that last step in this case */
    }
    if (ems.fmax > inputrec->em_tol)
    {
        if (MASTER(cr))
        {
            warn_step(fplog, inputrec->em_tol, ems.fmax,
                      step-1 == number_steps, FALSE);
        }
        converged = FALSE;
    }
    /* If we printed energy and/or logfile last step (which was the last step)
     * we don't have to do it again, but otherwise print the final values.
     */
    if (!do_log) /* Write final value to log since we didn't do anything last step */
    {
        energyOutput.printHeader(fplog, step, step);
    }
    if (!do_ene || !do_log) /* Write final energy file entries */
    {
        energyOutput.printStepToEnergyFile(mdoutf_get_fp_ene(outf), !do_ene, FALSE, FALSE,
                                           !do_log ? fplog : nullptr, step, step,
                                           fcd, nullptr);
    }
    /* Print some stuff... */
    if (MASTER(cr))
    {
        fprintf(stderr, "\nwriting lowest energy coordinates.\n");
    }

    /* IMPORTANT!
     * For accurate normal mode calculation it is imperative that we
     * store the last conformation into the full precision binary trajectory.
     *
     * However, we should only do it if we did NOT already write this step
     * above (which we did if do_x or do_f was true).
     */
    do_x = !do_per_step(step, inputrec->nstxout);
    do_f = !do_per_step(step, inputrec->nstfout);
    write_em_traj(fplog, cr, outf, do_x, do_f, ftp2fn(efSTO, nfile, fnm),
                  top_global, inputrec, step,
                  &ems, state_global, observablesHistory);
    if (MASTER(cr))
    {
        double sqrtNumAtoms = sqrt(static_cast<double>(state_global->natoms));
        print_converged(stderr, LBFGS, inputrec->em_tol, step, converged,
                        number_steps, &ems, sqrtNumAtoms);
        print_converged(fplog, LBFGS, inputrec->em_tol, step, converged,
                        number_steps, &ems, sqrtNumAtoms);

        fprintf(fplog, "\nPerformed %d energy evaluations in total.\n", neval);
    }

    finish_em(cr, outf, walltime_accounting, wcycle);

    /* To print the actual number of steps we needed somewhere */
    walltime_accounting_set_nsteps_done(walltime_accounting, step);
}
void
LegacySimulator::do_steep()
{
    const char       *SD = "Steepest Descents";
    gmx_global_stat_t gstat;
    gmx_bool          bDone, bAbort, do_x, do_f;
    int               steps_accepted = 0;
    auto              mdatoms        = mdAtoms->mdatoms();

    GMX_LOG(mdlog.info).asParagraph().
        appendText("Note that activating steepest-descent energy minimization via the "
                   "integrator .mdp option and the command gmx mdrun may "
                   "be available in a different form in a future version of GROMACS, "
                   "e.g. gmx minimize and an .mdp option.");
    /* Create 2 states on the stack and extract pointers that we will swap */
    em_state_t  s0 {}, s1 {};
    em_state_t *s_min = &s0;
    em_state_t *s_try = &s1;

    /* Init em and store the local state in s_try */
    init_em(fplog, mdlog, SD, cr, inputrec, imdSession,
            pull_work,
            state_global, top_global, s_try, &top,
            nrnb, fr, &graph, mdAtoms, &gstat,
            vsite, constr, nullptr);
    gmx_mdoutf       *outf = init_mdoutf(fplog, nfile, fnm, mdrunOptions, cr, outputProvider,
                                         mdModulesNotifier, inputrec, top_global, nullptr, wcycle,
                                         StartingBehavior::NewSimulation);
    gmx::EnergyOutput energyOutput(mdoutf_get_fp_ene(outf), top_global, inputrec, pull_work,
                                   nullptr, false, mdModulesNotifier);

    /* Print to log file */
    print_em_start(fplog, cr, walltime_accounting, wcycle, SD);
    /* Set variables for stepsize (in nm). This is the largest
     * step that we are going to make in any direction.
     */
    ustep    = inputrec->em_stepsize;
    stepsize = 0;

    /* Max number of steps */
    nsteps = inputrec->nsteps;

    if (MASTER(cr))
    {
        /* Print to the screen */
        sp_header(stderr, SD, inputrec->em_tol, nsteps);
    }
    if (fplog)
    {
        sp_header(fplog, SD, inputrec->em_tol, nsteps);
    }
    EnergyEvaluator energyEvaluator {
        fplog, mdlog, cr, ms,
        top_global, &top,
        inputrec, imdSession, pull_work, nrnb, wcycle, gstat,
        vsite, constr, fcd, graph,
        mdAtoms, fr, mdScheduleWork, enerd
    };
    /**** HERE STARTS THE LOOP ****
     * count is the counter for the number of steps
     * bDone will be TRUE when the minimization has converged
     * bAbort will be TRUE when nsteps steps have been performed or when
     * the stepsize becomes smaller than is reasonable for machine precision
     ****/
    bDone  = FALSE;
    bAbort = FALSE;

    while (!bDone && !bAbort)
    {
        bAbort = (nsteps >= 0) && (count == nsteps);
        /* set new coordinates, except for first step */
        bool validStep = true;
        if (count > 0)
        {
            validStep =
                do_em_step(cr, inputrec, mdatoms,
                           s_min, stepsize, &s_min->f, s_try,
                           constr, count);
        }

        if (validStep)
        {
            energyEvaluator.run(s_try, mu_tot, vir, pres, count, count == 0);
        }
        else
        {
            // Signal constraint error during stepping with energy=inf
            s_try->epot = std::numeric_limits<real>::infinity();
        }
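        /* An infinite energy guarantees that the trial configuration is rejected below, so a
         * constraint failure simply leads to retrying from s_min with a smaller step instead
         * of aborting the minimization.
         */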
        if (MASTER(cr))
        {
            energyOutput.printHeader(fplog, count, count);
        }

        if (count == 0)
        {
            s_min->epot = s_try->epot;
        }
        /* Print it if necessary */
        if (MASTER(cr))
        {
            if (mdrunOptions.verbose)
            {
                fprintf(stderr, "Step=%5d, Dmax= %6.1e nm, Epot= %12.5e Fmax= %11.5e, atom= %d%c",
                        count, ustep, s_try->epot, s_try->fmax, s_try->a_fmax+1,
                        ( (count == 0) || (s_try->epot < s_min->epot) ) ? '\n' : '\r');
            }

            if ( (count == 0) || (s_try->epot < s_min->epot) )
            {
                /* Store the new (lower) energies */
                matrix nullBox = {};
                energyOutput.addDataAtEnergyStep(false, false, static_cast<double>(count),
                                                 mdatoms->tmass, enerd, nullptr, nullptr, nullptr, nullBox,
                                                 nullptr, nullptr, vir, pres, nullptr, mu_tot, constr);

                imdSession->fillEnergyRecord(count, TRUE);

                const bool do_dr = do_per_step(steps_accepted, inputrec->nstdisreout);
                const bool do_or = do_per_step(steps_accepted, inputrec->nstorireout);
                energyOutput.printStepToEnergyFile(mdoutf_get_fp_ene(outf), TRUE,
                                                   do_dr, do_or,
                                                   fplog, count, count,
                                                   fcd, nullptr);
            }
        }
        /* Now if the new energy is smaller than the previous...
         * or if this is the first step!
         * or if we did random steps!
         */
        if ( (count == 0) || (s_try->epot < s_min->epot) )
        {
            steps_accepted++;

            /* Test whether the convergence criterion is met... */
            bDone = (s_try->fmax < inputrec->em_tol);

            /* Copy the arrays for force, positions and energy */
            /* The 'Min' array always holds the coords and forces of the minimal
               sampled energy */
            swap_em_state(&s_min, &s_try);
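            /* Swapping the two state pointers makes the accepted trial state the new minimum
             * without copying coordinates or forces; the old minimum buffer is reused for the
             * next trial step.
             */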
            /* Write to trn, if necessary */
            do_x = do_per_step(steps_accepted, inputrec->nstxout);
            do_f = do_per_step(steps_accepted, inputrec->nstfout);
            write_em_traj(fplog, cr, outf, do_x, do_f, nullptr,
                          top_global, inputrec, count,
                          s_min, state_global, observablesHistory);
        }
        else
        {
            /* If energy is not smaller make the step smaller... */
            ustep *= 0.5;

            if (DOMAINDECOMP(cr) && s_min->s.ddp_count != cr->dd->ddp_count)
            {
                /* Reload the old state */
                em_dd_partition_system(fplog, mdlog, count, cr, top_global, inputrec, imdSession,
                                       pull_work,
                                       s_min, &top, mdAtoms, fr, vsite, constr,
                                       nrnb, wcycle);
            }
        }
        // If the force is very small after finishing minimization,
        // we risk dividing by zero when calculating the step size.
        // So we check first if the minimization has stopped before
        // trying to obtain a new step size.
        if (!bDone)
        {
            /* Determine new step */
            stepsize = ustep/s_min->fmax;
        }
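        /* With stepsize = ustep/fmax the atom feeling the largest force moves by at most
         * ustep (the current maximum step in nm); all other atoms move proportionally less.
         */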
        /* Check if stepsize is too small, with 1 nm as a characteristic length */
#if GMX_DOUBLE
        if (count == nsteps || ustep < 1e-12)
#else
        if (count == nsteps || ustep < 1e-6)
#endif
        {
            if (MASTER(cr))
            {
                warn_step(fplog, inputrec->em_tol, s_min->fmax,
                          count == nsteps, constr != nullptr);
            }
            bAbort = TRUE;
        }
        /* Send IMD energies and positions, if bIMD is TRUE. */
        if (imdSession->run(count, TRUE, state_global->box,
                            MASTER(cr) ? state_global->x.rvec_array() : nullptr,
                            0) &&
            MASTER(cr))
        {
            imdSession->sendPositionsAndEnergies();
        }

        count++;
    } /* End of the loop */
    /* Print some data... */
    if (MASTER(cr))
    {
        fprintf(stderr, "\nwriting lowest energy coordinates.\n");
    }
    write_em_traj(fplog, cr, outf, TRUE, inputrec->nstfout != 0, ftp2fn(efSTO, nfile, fnm),
                  top_global, inputrec, count,
                  s_min, state_global, observablesHistory);
    if (MASTER(cr))
    {
        double sqrtNumAtoms = sqrt(static_cast<double>(state_global->natoms));

        print_converged(stderr, SD, inputrec->em_tol, count, bDone, nsteps,
                        s_min, sqrtNumAtoms);
        print_converged(fplog, SD, inputrec->em_tol, count, bDone, nsteps,
                        s_min, sqrtNumAtoms);
    }
    finish_em(cr, outf, walltime_accounting, wcycle);

    /* To print the actual number of steps we needed somewhere */
    inputrec->nsteps = count;

    walltime_accounting_set_nsteps_done(walltime_accounting, count);
}
void
LegacySimulator::do_nm()
{
    const char         *NM = "Normal Mode Analysis";
    gmx_global_stat_t   gstat;
    gmx_bool            bSparse;        /* use sparse matrix storage format */
    gmx_sparsematrix_t *sparse_matrix = nullptr;
    real               *full_matrix   = nullptr;

    /* added with respect to mdrun */
    real                der_range = 10.0*std::sqrt(GMX_REAL_EPS);

    bool                bIsMaster = MASTER(cr);
    auto                mdatoms   = mdAtoms->mdatoms();

    GMX_LOG(mdlog.info).asParagraph().
        appendText("Note that activating normal-mode analysis via the integrator "
                   ".mdp option and the command gmx mdrun may "
                   "be available in a different form in a future version of GROMACS, "
                   "e.g. gmx normal-modes.");

    if (constr != nullptr)
    {
        gmx_fatal(FARGS, "Constraints present with Normal Mode Analysis, this combination is not supported");
    }
    gmx_shellfc_t *shellfc;

    em_state_t     state_work {};

    /* Init em and store the local state in state_minimum */
    init_em(fplog, mdlog, NM, cr, inputrec, imdSession,
            pull_work,
            state_global, top_global, &state_work, &top,
            nrnb, fr, &graph, mdAtoms, &gstat,
            vsite, constr, &shellfc);
    gmx_mdoutf            *outf = init_mdoutf(fplog, nfile, fnm, mdrunOptions, cr, outputProvider,
                                              mdModulesNotifier, inputrec, top_global, nullptr, wcycle,
                                              StartingBehavior::NewSimulation);

    std::vector<int>       atom_index = get_atom_index(top_global);
    std::vector<gmx::RVec> fneg(atom_index.size(), {0, 0, 0});
    snew(dfdx, atom_index.size());
2719 "NOTE: This version of GROMACS has been compiled in single precision,\n"
2720 " which MIGHT not be accurate enough for normal mode analysis.\n"
2721 " GROMACS now uses sparse matrix storage, so the memory requirements\n"
2722 " are fairly modest even if you recompile in double precision.\n\n");
2726 /* Check if we can/should use sparse storage format.
2728 * Sparse format is only useful when the Hessian itself is sparse, which it
2729 * will be when we use a cutoff.
2730 * For small systems (n<1000) it is easier to always use full matrix format, though.
2732 if (EEL_FULL(fr
->ic
->eeltype
) || fr
->rlist
== 0.0)
2734 GMX_LOG(mdlog
.warning
).appendText("Non-cutoff electrostatics used, forcing full Hessian format.");
2737 else if (atom_index
.size() < 1000)
2739 GMX_LOG(mdlog
.warning
).appendTextFormatted("Small system size (N=%zu), using full Hessian format.",
2745 GMX_LOG(mdlog
.warning
).appendText("Using compressed symmetric sparse Hessian format.");
    /* Number of dimensions, based on real atoms, that is not vsites or shell */
    sz = DIM*atom_index.size();

    fprintf(stderr, "Allocating Hessian memory...\n\n");

    if (bSparse)
    {
        sparse_matrix                       = gmx_sparsematrix_init(sz);
        sparse_matrix->compressed_symmetric = TRUE;
    }
    else
    {
        snew(full_matrix, sz*sz);
    }
    /* Write start time and temperature */
    print_em_start(fplog, cr, walltime_accounting, wcycle, NM);

    /* fudge nr of steps to nr of atoms */
    inputrec->nsteps = atom_index.size()*2;

    if (bIsMaster)
    {
        fprintf(stderr, "starting normal mode calculation '%s'\n%" PRId64 " steps.\n\n",
                *(top_global->name), inputrec->nsteps);
    }

    nnodes = cr->nnodes;
    /* Make evaluate_energy do a single node force calculation */
    cr->nnodes = 1;
    EnergyEvaluator energyEvaluator {
        fplog, mdlog, cr, ms,
        top_global, &top,
        inputrec, imdSession, pull_work, nrnb, wcycle, gstat,
        vsite, constr, fcd, graph,
        mdAtoms, fr, mdScheduleWork, enerd
    };
    energyEvaluator.run(&state_work, mu_tot, vir, pres, -1, TRUE);
    cr->nnodes = nnodes;
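    /* The displaced-atom force evaluations below are distributed atom by atom over the ranks
     * rather than domain-decomposed, so each force calculation runs in single-node mode and
     * cr->nnodes is restored afterwards.
     */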
    /* if forces are not small, warn user */
    get_state_f_norm_max(cr, &(inputrec->opts), mdatoms, &state_work);

    GMX_LOG(mdlog.warning).appendTextFormatted("Maximum force:%12.5e", state_work.fmax);
    if (state_work.fmax > 1.0e-3)
    {
        GMX_LOG(mdlog.warning).appendText(
                "The force is probably not small enough to "
                "ensure that you are at a minimum.\n"
                "Be aware that negative eigenvalues may occur\n"
                "when the resulting matrix is diagonalized.");
    }
    /***********************************************************
     *
     *      Loop over all pairs in matrix
     *
     *      do_force called twice. Once with positive and
     *      once with negative displacement
     *
     ************************************************************/
    /* Steps are divided one by one over the nodes */
    auto state_work_x = makeArrayRef(state_work.s.x);
    auto state_work_f = makeArrayRef(state_work.f);
    for (index aid = cr->nodeid; aid < ssize(atom_index); aid += nnodes)
    {
        size_t atom = atom_index[aid];
        for (size_t d = 0; d < DIM; d++)
        {
            int force_flags = GMX_FORCE_STATECHANGED | GMX_FORCE_ALLFORCES;

            x_min = state_work_x[atom][d];

            for (unsigned int dx = 0; (dx < 2); dx++)
            {
                if (dx == 0)
                {
                    state_work_x[atom][d] = x_min - der_range;
                }
                else
                {
                    state_work_x[atom][d] = x_min + der_range;
                }
                /* Make evaluate_energy do a single node force calculation */
                cr->nnodes = 1;
                if (shellfc)
                {
                    /* Now is the time to relax the shells */
                    relax_shell_flexcon(fplog,
                                        mdrunOptions.verbose,
                                        state_work.s.natoms,
                                        state_work.s.x.arrayRefWithPadding(),
                                        state_work.s.v.arrayRefWithPadding(),
                                        state_work.s.lambda,
                                        state_work.f.arrayRefWithPadding(),
                                        DDBalanceRegionHandler(nullptr));
                }
                else
                {
                    energyEvaluator.run(&state_work, mu_tot, vir, pres, aid*2+dx, FALSE);
                }

                cr->nnodes = nnodes;

                if (dx == 0)
                {
                    std::copy(state_work_f.begin(), state_work_f.begin()+atom_index.size(),
                              fneg.begin());
                }
            }
            /* x is restored to original */
            state_work_x[atom][d] = x_min;

            for (size_t j = 0; j < atom_index.size(); j++)
            {
                for (size_t k = 0; (k < DIM); k++)
                {
                    dfdx[j][k] =
                        -(state_work_f[atom_index[j]][k] - fneg[j][k])/(2*der_range);
                }
            }
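            /* Central finite difference: dfdx[j][k] = -(f_jk(x+h) - f_jk(x-h))/(2h) with
             * h = der_range. Since the force is the negative gradient of the energy, this is
             * one row of the Hessian d2E/dx dx evaluated at the displaced degree of freedom.
             */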
            if (!bIsMaster)
            {
#if GMX_MPI
#define mpi_type GMX_MPI_REAL
                MPI_Send(dfdx[0], atom_index.size()*DIM, mpi_type, MASTER(cr),
                         cr->nodeid, cr->mpi_comm_mygroup);
#endif
            }
            else
            {
                for (index node = 0; (node < nnodes && aid+node < ssize(atom_index)); node++)
                {
                    if (node > 0)
                    {
#if GMX_MPI
                        MPI_Status stat;
                        MPI_Recv(dfdx[0], atom_index.size()*DIM, mpi_type, node, node,
                                 cr->mpi_comm_mygroup, &stat);
#endif
                    }
                    row = (aid + node)*DIM + d;

                    for (size_t j = 0; j < atom_index.size(); j++)
                    {
                        for (size_t k = 0; k < DIM; k++)
                        {
                            col = j*DIM + k;

                            if (bSparse)
                            {
                                if (col >= row && dfdx[j][k] != 0.0)
                                {
                                    gmx_sparsematrix_increment_value(sparse_matrix,
                                                                     row, col, dfdx[j][k]);
                                }
                            }
                            else
                            {
                                full_matrix[row*sz + col] = dfdx[j][k];
                            }
                        }
                    }
                }
            }
            if (mdrunOptions.verbose && fplog)
            {
                fflush(fplog);
            }
        }
        /* write progress */
        if (bIsMaster && mdrunOptions.verbose)
        {
            fprintf(stderr, "\rFinished step %d out of %td",
                    std::min<int>(atom+nnodes, atom_index.size()),
                    ssize(atom_index));
            fflush(stderr);
        }
    }
    if (bIsMaster)
    {
        fprintf(stderr, "\n\nWriting Hessian...\n");
        gmx_mtxio_write(ftp2fn(efMTX, nfile, fnm), sz, sz, full_matrix, sparse_matrix);
    }
    finish_em(cr, outf, walltime_accounting, wcycle);

    walltime_accounting_set_nsteps_done(walltime_accounting, atom_index.size()*2);
}