/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2008, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "gromacs/domdec/dlbtiming.h"
#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/gmxlib/chargegroup.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/math/functions.h"
#include "gromacs/math/units.h"
#include "gromacs/math/vec.h"
#include "gromacs/math/vecdump.h"
#include "gromacs/mdlib/constr.h"
#include "gromacs/mdlib/force.h"
#include "gromacs/mdlib/mdrun.h"
#include "gromacs/mdlib/sim_util.h"
#include "gromacs/mdlib/vsite.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/mdtypes/state.h"
#include "gromacs/pbcutil/mshift.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/topology/mtop_lookup.h"
#include "gromacs/topology/mtop_util.h"
#include "gromacs/utility/arraysize.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/smalloc.h"
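/*! \brief Data for one shell particle and the (up to three) nuclei it
 * is bonded to.
 *
 * Note: the nnucl, xold, fold and step fields carried no comments in
 * the original; their descriptions below are inferred from how they
 * are used in shell_pos_sd() and make_local_shells(). */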
typedef struct {
    int     nnucl;               /* The number of connected nuclei         */
    int     shell;               /* The shell id                           */
    int     nucl1, nucl2, nucl3; /* The nuclei connected to the shell      */
    /* gmx_bool bInterCG; */     /* Coupled to nuclei outside cg?          */
    real    k;                   /* force constant                         */
    real    k_1;                 /* 1 over force constant                  */
    rvec    xold;                /* The old shell position                 */
    rvec    fold;                /* The old force on the shell             */
    rvec    step;                /* Step size per dimension                */
} t_shell;
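/*! \brief All the working data of the shell / flexible-constraint
 * relaxation code; created by init_shell_flexcon(). */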
struct gmx_shellfc_t {
    /* Shell counts, indices, parameters and working data */
    int               nshell_gl;              /* The number of shells in the system        */
    t_shell          *shell_gl;               /* All the shells (for DD only)              */
    int              *shell_index_gl;         /* Global shell index (for DD only)          */
    gmx_bool          bInterCG;               /* Are there inter charge-group shells?      */
    int               nshell;                 /* The number of local shells                */
    t_shell          *shell;                  /* The local shells                          */
    int               shell_nalloc;           /* The allocation size of shell              */
    gmx_bool          bPredict;               /* Predict shell positions                   */
    gmx_bool          bRequireInit;           /* Require initialization of shell positions */
    int               nflexcon;               /* The number of flexible constraints        */

    /* Temporary arrays, should be fixed size 2 when fully converted to C++ */
    PaddedRVecVector *x;                      /* Array for iterative minimization          */
    PaddedRVecVector *f;                      /* Array for iterative minimization          */

    /* Flexible constraint working data */
    rvec             *acc_dir;                /* Acceleration direction for flexcon        */
    rvec             *x_old;                  /* Old coordinates for flexcon               */
    int               flex_nalloc;            /* The allocation size of acc_dir and x_old  */
    rvec             *adir_xnold;             /* Work space for init_adir                  */
    rvec             *adir_xnew;              /* Work space for init_adir                  */
    int               adir_nalloc;            /* Work space for init_adir                  */
    std::int64_t      numForceEvaluations;    /* Total number of force evaluations         */
    int               numConvergedIterations; /* Total number of iterations that converged */
};
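/*! \brief Print a table of all shells, their force constants and the
 * connected nuclei to \p fplog. */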
static void pr_shell(FILE *fplog, int ns, t_shell s[])
{
    int i;

    fprintf(fplog, "SHELL DATA\n");
    fprintf(fplog, "%5s  %8s  %5s  %5s  %5s\n",
            "Shell", "Force k", "Nucl1", "Nucl2", "Nucl3");
    for (i = 0; (i < ns); i++)
    {
        fprintf(fplog, "%5d  %8.3f  %5d", s[i].shell, 1.0/s[i].k_1, s[i].nucl1);
        if (s[i].nnucl == 2)
        {
            fprintf(fplog, "  %5d\n", s[i].nucl2);
        }
        else if (s[i].nnucl == 3)
        {
            fprintf(fplog, "  %5d  %5d\n", s[i].nucl2, s[i].nucl3);
        }
        else
        {
            fprintf(fplog, "\n");
        }
    }
}
/* TODO The remaining call of this function passes non-NULL mass and NULL
 * mtop, so this routine can be simplified.
 *
 * The other code path supported doing prediction before the MD loop
 * started, but even when called, the prediction was always
 * over-written by a subsequent call in the MD loop, so has been
 * removed. */
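/*! \brief Predict the shell positions from the motion of their nuclei.
 *
 * Each shell follows the mass-weighted mean velocity of its 1-3 nuclei,
 * e.g. for two nuclei x_shell += (m1*v1 + m2*v2)*dt/(m1+m2); at
 * initialization (bInit) the nuclear positions are used instead of the
 * velocities. */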
static void predict_shells(FILE *fplog, rvec x[], rvec v[], real dt,
                           int ns, t_shell s[],
                           real mass[], gmx_mtop_t *mtop, gmx_bool bInit)
{
    int   i, m, s1, n1, n2, n3;
    real  dt_1, fudge, tm, m1, m2, m3;
    rvec *ptr;

    /* We introduce a fudge factor for performance reasons: with this choice
     * the initial force on the shells is about a factor of two lower than
     * without prediction.
     */
    fudge = 1.0;

    if (bInit)
    {
        if (fplog)
        {
            fprintf(fplog, "RELAX: Using prediction for initial shell placement\n");
        }
        ptr  = x;
        dt_1 = 1;
    }
    else
    {
        ptr  = v;
        dt_1 = fudge*dt;
    }

    int molb = 0;
    for (i = 0; (i < ns); i++)
    {
        s1 = s[i].shell;
        if (bInit)
        {
            clear_rvec(x[s1]);
        }
        switch (s[i].nnucl)
        {
            case 1:
                n1 = s[i].nucl1;
                for (m = 0; (m < DIM); m++)
                {
                    x[s1][m] += ptr[n1][m]*dt_1;
                }
                break;
            case 2:
                n1 = s[i].nucl1;
                n2 = s[i].nucl2;
                if (mass)
                {
                    m1 = mass[n1];
                    m2 = mass[n2];
                }
                else
                {
                    /* Not the correct masses with FE, but it is just a prediction... */
                    m1 = mtopGetAtomMass(mtop, n1, &molb);
                    m2 = mtopGetAtomMass(mtop, n2, &molb);
                }
                tm = dt_1/(m1+m2);
                for (m = 0; (m < DIM); m++)
                {
                    x[s1][m] += (m1*ptr[n1][m] + m2*ptr[n2][m])*tm;
                }
                break;
            case 3:
                n1 = s[i].nucl1;
                n2 = s[i].nucl2;
                n3 = s[i].nucl3;
                if (mass)
                {
                    m1 = mass[n1];
                    m2 = mass[n2];
                    m3 = mass[n3];
                }
                else
                {
                    /* Not the correct masses with FE, but it is just a prediction... */
                    m1 = mtopGetAtomMass(mtop, n1, &molb);
                    m2 = mtopGetAtomMass(mtop, n2, &molb);
                    m3 = mtopGetAtomMass(mtop, n3, &molb);
                }
                tm = dt_1/(m1+m2+m3);
                for (m = 0; (m < DIM); m++)
                {
                    x[s1][m] += (m1*ptr[n1][m] + m2*ptr[n2][m] + m3*ptr[n3][m])*tm;
                }
                break;
            default:
                gmx_fatal(FARGS, "Shell %d has %d nuclei!", i, s[i].nnucl);
        }
    }
}
/*! \brief Count the different particle types in a system
 *
 * Routine prints a warning to stderr in case an unknown particle type
 * is encountered.
 * \param[in] fplog Print what we have found if not NULL
 * \param[in] mtop  Molecular topology.
 * \returns Array holding the number of particles of a type
 */
static std::array<int, eptNR> countPtypes(FILE       *fplog,
                                          gmx_mtop_t *mtop)
{
    std::array<int, eptNR> nptype = { { 0 } };
    /* Count number of shells, and find their indices */
    for (int i = 0; (i < eptNR); i++)
    {
        nptype[i] = 0;
    }

    gmx_mtop_atomloop_block_t aloopb = gmx_mtop_atomloop_block_init(mtop);
    int                       nmol;
    t_atom                   *atom;
    while (gmx_mtop_atomloop_block_next(aloopb, &atom, &nmol))
    {
        switch (atom->ptype)
        {
            case eptAtom:
            case eptVSite:
            case eptShell:
                nptype[atom->ptype] += nmol;
                break;
            default:
                fprintf(stderr, "Warning: unsupported particle type %d in countPtypes",
                        static_cast<int>(atom->ptype));
        }
    }

    if (fplog)
    {
        /* Print the number of each particle type */
        int n = 0;
        for (const auto &i : nptype)
        {
            if (i != 0)
            {
                fprintf(fplog, "There are: %d %ss\n", i, ptype_str[n]);
            }
            n++;
        }
    }

    return nptype;
}
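/*! \brief Build the shell/flexible-constraint data for the whole system.
 *
 * Scans all bonded interaction types that can couple a shell to a
 * nucleus (bonds, polarization, water polarization), records the nuclei
 * of each shell and accumulates its force constant. Returns nullptr
 * when the system contains neither shells nor flexible constraints. */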
gmx_shellfc_t *init_shell_flexcon(FILE *fplog,
                                  gmx_mtop_t *mtop, int nflexcon,
                                  int nstcalcenergy,
                                  bool usingDomainDecomposition)
{
    gmx_shellfc_t            *shfc;
    t_shell                  *shell;
    int                      *shell_index = nullptr, *at2cg;
    t_atom                   *atom;

    int                       ns, nshell, nsi;
    int                       i, j, type, mb, a_offset, cg, mol, ftype, nra;
    real                      qS, alpha;
    int                       aS, aN = 0; /* Shell and nucleus */
    int                       bondtypes[] = { F_BONDS, F_HARMONIC, F_CUBICBONDS, F_POLARIZATION, F_ANHARM_POL, F_WATER_POL };
#define NBT asize(bondtypes)
    t_iatom                  *ia;
    gmx_mtop_atomloop_all_t   aloop;
    gmx_ffparams_t           *ffparams;
    gmx_molblock_t           *molb;
    gmx_moltype_t            *molt;
    t_block                  *cgs;

    std::array<int, eptNR>    n = countPtypes(fplog, mtop);
    nshell = n[eptShell];

    if (nshell == 0 && nflexcon == 0)
    {
        /* We're not doing shells or flexible constraints */
        return nullptr;
    }

    snew(shfc, 1);
    shfc->x        = new PaddedRVecVector[2] {};
    shfc->f        = new PaddedRVecVector[2] {};
    shfc->nflexcon = nflexcon;

    if (nshell == 0)
    {
        /* Only flexible constraints, no shells.
         * Note that make_local_shells() does not need to be called.
         */
        shfc->nshell   = 0;
        shfc->bPredict = FALSE;

        return shfc;
    }

    if (nstcalcenergy != 1)
    {
        gmx_fatal(FARGS, "You have nstcalcenergy set to a value (%d) that is different from 1.\nThis is not supported in combination with shell particles.\nPlease make a new tpr file.", nstcalcenergy);
    }
    if (usingDomainDecomposition)
    {
        gmx_fatal(FARGS, "Shell particles are not implemented with domain decomposition, use a single rank");
    }
    /* We have shells: fill the shell data structure */

    /* Global system sized array, this should be avoided */
    snew(shell_index, mtop->natoms);

    aloop  = gmx_mtop_atomloop_all_init(mtop);
    nshell = 0;
    while (gmx_mtop_atomloop_all_next(aloop, &i, &atom))
    {
        if (atom->ptype == eptShell)
        {
            shell_index[i] = nshell++;
        }
    }

    snew(shell, nshell);

    /* Initiate the shell structures */
    for (i = 0; (i < nshell); i++)
    {
        shell[i].shell = -1;
        shell[i].nnucl = 0;
        shell[i].nucl1 = -1;
        shell[i].nucl2 = -1;
        shell[i].nucl3 = -1;
        /* shell[i].bInterCG=FALSE; */
        shell[i].k_1   = 0;
        shell[i].k     = 0;
    }

    ffparams = &mtop->ffparams;

    /* Now fill the structures */
    shfc->bInterCG = FALSE;
    ns             = 0;
    a_offset       = 0;
    for (mb = 0; mb < mtop->nmolblock; mb++)
    {
        molb = &mtop->molblock[mb];
        molt = &mtop->moltype[molb->type];

        cgs = &molt->cgs;
        snew(at2cg, molt->atoms.nr);
        for (cg = 0; cg < cgs->nr; cg++)
        {
            for (i = cgs->index[cg]; i < cgs->index[cg+1]; i++)
            {
                at2cg[i] = cg;
            }
        }

        atom = molt->atoms.atom;
        for (mol = 0; mol < molb->nmol; mol++)
        {
            for (j = 0; (j < NBT); j++)
            {
                ia = molt->ilist[bondtypes[j]].iatoms;
                for (i = 0; (i < molt->ilist[bondtypes[j]].nr); )
                {
                    type  = ia[0];
                    ftype = ffparams->functype[type];
                    nra   = interaction_function[ftype].nratoms;

                    /* Check whether we have a bond with a shell */
                    aS = -1;

                    switch (bondtypes[j])
                    {
                        case F_BONDS:
                        case F_HARMONIC:
                        case F_CUBICBONDS:
                        case F_POLARIZATION:
                        case F_ANHARM_POL:
                            if (atom[ia[1]].ptype == eptShell)
                            {
                                aS = ia[1];
                                aN = ia[2];
                            }
                            else if (atom[ia[2]].ptype == eptShell)
                            {
                                aS = ia[2];
                                aN = ia[1];
                            }
                            break;
                        case F_WATER_POL:
                            aN = ia[4]; /* Dummy */
                            aS = ia[5]; /* Shell */
                            break;
                        default:
                            gmx_fatal(FARGS, "Death Horror: %s, %d", __FILE__, __LINE__);
                    }

                    if (aS != -1)
                    {
                        qS = atom[aS].q;

                        /* Check whether one of the particles is a shell... */
                        nsi = shell_index[a_offset+aS];
                        if ((nsi < 0) || (nsi >= nshell))
                        {
                            gmx_fatal(FARGS, "nsi is %d should be within 0 - %d. aS = %d",
                                      nsi, nshell, aS);
                        }
                        if (shell[nsi].shell == -1)
                        {
                            shell[nsi].shell = a_offset + aS;
                            ns++;
                        }
                        else if (shell[nsi].shell != a_offset+aS)
                        {
                            gmx_fatal(FARGS, "Weird stuff in %s, %d", __FILE__, __LINE__);
                        }

                        if (shell[nsi].nucl1 == -1)
                        {
                            shell[nsi].nucl1 = a_offset + aN;
                            shell[nsi].nnucl++;
                        }
                        else if (shell[nsi].nucl2 == -1)
                        {
                            shell[nsi].nucl2 = a_offset + aN;
                            shell[nsi].nnucl++;
                        }
                        else if (shell[nsi].nucl3 == -1)
                        {
                            shell[nsi].nucl3 = a_offset + aN;
                            shell[nsi].nnucl++;
                        }
                        else
                        {
                            if (fplog)
                            {
                                pr_shell(fplog, ns, shell);
                            }
                            gmx_fatal(FARGS, "Can not handle more than three bonds per shell\n");
                        }

                        if (at2cg[aS] != at2cg[aN])
                        {
                            /* shell[nsi].bInterCG = TRUE; */
                            shfc->bInterCG = TRUE;
                        }
                        switch (bondtypes[j])
                        {
                            case F_BONDS:
                            case F_HARMONIC:
                                shell[nsi].k += ffparams->iparams[type].harmonic.krA;
                                break;
                            case F_CUBICBONDS:
                                shell[nsi].k += ffparams->iparams[type].cubic.kb;
                                break;
                            case F_POLARIZATION:
                            case F_ANHARM_POL:
                                if (!gmx_within_tol(qS, atom[aS].qB, GMX_REAL_EPS*10))
                                {
                                    gmx_fatal(FARGS, "polarize can not be used with qA(%e) != qB(%e) for atom %d of molecule block %d", qS, atom[aS].qB, aS+1, mb+1);
                                }
                                shell[nsi].k += gmx::square(qS)*ONE_4PI_EPS0/
                                    ffparams->iparams[type].polarize.alpha;
                                break;
                            case F_WATER_POL:
                                if (!gmx_within_tol(qS, atom[aS].qB, GMX_REAL_EPS*10))
                                {
                                    gmx_fatal(FARGS, "water_pol can not be used with qA(%e) != qB(%e) for atom %d of molecule block %d", qS, atom[aS].qB, aS+1, mb+1);
                                }
                                alpha = (ffparams->iparams[type].wpol.al_x +
                                         ffparams->iparams[type].wpol.al_y +
                                         ffparams->iparams[type].wpol.al_z)/3.0;
                                shell[nsi].k += gmx::square(qS)*ONE_4PI_EPS0/alpha;
                                break;
                            default:
                                gmx_fatal(FARGS, "Death Horror: %s, %d", __FILE__, __LINE__);
                        }
                    }
                    ia += nra+1;
                    i  += nra+1;
                }
            }
            a_offset += molt->atoms.nr;
        }
        /* Done with this molecule type */
        sfree(at2cg);
    }

    /* Verify whether it's all correct */
    if (ns != nshell)
    {
        gmx_fatal(FARGS, "Something weird with shells. They may not be bonded to something");
    }

    for (i = 0; (i < ns); i++)
    {
        shell[i].k_1 = 1.0/shell[i].k;
    }

    if (debug)
    {
        pr_shell(debug, ns, shell);
    }
    shfc->nshell_gl      = ns;
    shfc->shell_gl       = shell;
    shfc->shell_index_gl = shell_index;

    shfc->bPredict     = (getenv("GMX_NOPREDICT") == nullptr);
    shfc->bRequireInit = FALSE;
    if (!shfc->bPredict)
    {
        if (fplog)
        {
            fprintf(fplog, "\nWill never predict shell positions\n");
        }
    }
    else
    {
        shfc->bRequireInit = (getenv("GMX_REQUIRE_SHELL_INIT") != nullptr);
        if (shfc->bRequireInit && fplog)
        {
            fprintf(fplog, "\nWill always initiate shell positions\n");
        }
    }

    if (shfc->bPredict)
    {
        if (shfc->bInterCG)
        {
            if (fplog)
            {
                fprintf(fplog, "\nNOTE: there are shells connected to particles outside their own charge group; shell positions will not be predicted during the run\n\n");
            }
            /* Prediction improves performance, so we should implement either:
             * 1. communication for the atoms needed for prediction
             * 2. prediction using the velocities of shells; currently the
             *    shell velocities are zeroed, it's a bit tricky to keep
             *    track of the shell displacements and thus the velocity.
             */
            shfc->bPredict = FALSE;
        }
    }

    return shfc;
}
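/*! \brief Set up the list of shells that are local to this rank.
 *
 * Without domain decomposition the global shell array is simply
 * reused; with DD the global shell and nucleus indices are translated
 * to local atom indices. */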
void make_local_shells(t_commrec *cr, t_mdatoms *md,
                       gmx_shellfc_t *shfc)
{
    t_shell      *shell;
    int           a0, a1, *ind, nshell, i;
    gmx_domdec_t *dd = nullptr;

    if (DOMAINDECOMP(cr))
    {
        dd = cr->dd;
        a0 = 0;
        a1 = dd->nat_home;
    }
    else
    {
        /* Single node: we need all shells, just copy the pointer */
        shfc->nshell = shfc->nshell_gl;
        shfc->shell  = shfc->shell_gl;

        return;
    }

    ind = shfc->shell_index_gl;

    nshell = 0;
    shell  = shfc->shell;
    for (i = a0; i < a1; i++)
    {
        if (md->ptype[i] == eptShell)
        {
            if (nshell+1 > shfc->shell_nalloc)
            {
                shfc->shell_nalloc = over_alloc_dd(nshell+1);
                srenew(shell, shfc->shell_nalloc);
            }
            if (dd)
            {
                shell[nshell] = shfc->shell_gl[ind[dd->gatindex[i]]];
            }
            else
            {
                shell[nshell] = shfc->shell_gl[ind[i]];
            }

            /* With inter-cg shells we can not do shell prediction,
             * so we do not need the nuclei numbers.
             */
            if (!shfc->bInterCG)
            {
                shell[nshell].nucl1 = i + shell[nshell].nucl1 - shell[nshell].shell;
                if (shell[nshell].nnucl > 1)
                {
                    shell[nshell].nucl2 = i + shell[nshell].nucl2 - shell[nshell].shell;
                }
                if (shell[nshell].nnucl > 2)
                {
                    shell[nshell].nucl3 = i + shell[nshell].nucl3 - shell[nshell].shell;
                }
            }
            shell[nshell].shell = i;
            nshell++;
        }
    }

    shfc->nshell = nshell;
    shfc->shell  = shell;
}
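/*! \brief Steepest-descent update of a single position with a scalar
 * step size: xnew = xold + f*step.
 *
 * The body below is a minimal sketch written to match the call in
 * directional_sd(); the original implementation was not preserved. */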
static void do_1pos(rvec xnew, const rvec xold, const rvec f, real step)
{
    /* Move one position along the force with a uniform step size */
    xnew[XX] = xold[XX] + f[XX]*step;
    xnew[YY] = xold[YY] + f[YY]*step;
    xnew[ZZ] = xold[ZZ] + f[ZZ]*step;
}
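/*! \brief As do_1pos(), but with an independent step size per
 * dimension, as used for the shells in shell_pos_sd() (minimal sketch,
 * reconstructed from its call sites). */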
static void do_1pos3(rvec xnew, const rvec xold, const rvec f, const rvec step)
{
    /* Move one position along the force with per-dimension step sizes */
    xnew[XX] = xold[XX] + f[XX]*step[XX];
    xnew[YY] = xold[YY] + f[YY]*step[YY];
    xnew[ZZ] = xold[ZZ] + f[ZZ]*step[ZZ];
}
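/*! \brief Move all home atoms one steepest-descent step along the
 * flexible-constraint acceleration directions. */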
static void directional_sd(const PaddedRVecVector *xold, PaddedRVecVector *xnew, const rvec acc_dir[],
                           int homenr, real step)
{
    const rvec *xo = as_rvec_array(xold->data());
    rvec       *xn = as_rvec_array(xnew->data());

    for (int i = 0; i < homenr; i++)
    {
        do_1pos(xn[i], xo[i], acc_dir[i], step);
    }
}
static void shell_pos_sd(const PaddedRVecVector * gmx_restrict xcur,
                         PaddedRVecVector * gmx_restrict xnew,
                         const PaddedRVecVector *f,
                         int ns, t_shell s[], int count)
{
    const real step_scale_min       = 0.8,
               step_scale_increment = 0.2,
               step_scale_max       = 1.2,
               step_scale_multiple  = (step_scale_max - step_scale_min) / step_scale_increment;
    int        i, shell, d;
    real       dx, df, k_est;
    const real zero = 0;
#ifdef PRINT_STEP
    real       step_min, step_max;

    step_min = 1e30;
    step_max = 0;
#endif
    for (i = 0; (i < ns); i++)
    {
        shell = s[i].shell;
        if (count == 1)
        {
            for (d = 0; d < DIM; d++)
            {
                s[i].step[d] = s[i].k_1;
#ifdef PRINT_STEP
                step_min = std::min(step_min, s[i].step[d]);
                step_max = std::max(step_max, s[i].step[d]);
#endif
            }
        }
        else
        {
            for (d = 0; d < DIM; d++)
            {
                dx = (*xcur)[shell][d] - s[i].xold[d];
                df = (*f)[shell][d]    - s[i].fold[d];
                /* -dx/df gets used to generate an interpolated value, but would
                 * cause a NaN if df were binary-equal to zero. Values close to
                 * zero won't cause problems (because of the min() and max()), so
                 * just testing for binary inequality is OK. */
                if (zero != df)
                {
                    k_est = -dx/df;
                    /* Scale the step size by a factor interpolated from
                     * step_scale_min to step_scale_max, as k_est goes from 0 to
                     * step_scale_multiple * s[i].step[d] */
                    s[i].step[d] =
                        step_scale_min * s[i].step[d] +
                        step_scale_increment * std::min(step_scale_multiple * s[i].step[d], std::max(k_est, zero));
                }
                else
                {
                    if (gmx_numzero(dx)) /* 0 == dx */
                    {
                        /* Likely this will never happen, but if it does just
                         * don't scale the step. */
                    }
                    else /* 0 != dx */
                    {
                        s[i].step[d] *= step_scale_max;
                    }
                }
#ifdef PRINT_STEP
                step_min = std::min(step_min, s[i].step[d]);
                step_max = std::max(step_max, s[i].step[d]);
#endif
            }
        }
        copy_rvec((*xcur)[shell], s[i].xold);
        copy_rvec((*f)[shell],    s[i].fold);

        do_1pos3((*xnew)[shell], (*xcur)[shell], (*f)[shell], s[i].step);

        if (gmx_debug_at)
        {
            fprintf(debug, "shell[%d] = %d\n", i, shell);
            pr_rvec(debug, 0, "fshell", (*f)[shell], DIM, TRUE);
            pr_rvec(debug, 0, "xold", (*xcur)[shell], DIM, TRUE);
            pr_rvec(debug, 0, "step", s[i].step, DIM, TRUE);
            pr_rvec(debug, 0, "xnew", (*xnew)[shell], DIM, TRUE);
        }
    }
#ifdef PRINT_STEP
    printf("step %.3e %.3e\n", step_min, step_max);
#endif
}
static void decrease_step_size(int nshell, t_shell s[])
{
    int i;

    for (i = 0; i < nshell; i++)
    {
        svmul(0.8, s[i].step, s[i].step);
    }
}
static void print_epot(FILE *fp, gmx_int64_t mdstep, int count, real epot, real df,
                       int ndir, real sf_dir)
{
    char buf[22];

    fprintf(fp, "MDStep=%5s/%2d EPot: %12.8e, rmsF: %6.2e",
            gmx_step_str(mdstep, buf), count, epot, df);
    if (ndir)
    {
        fprintf(fp, ", dir. rmsF: %6.2e\n", std::sqrt(sf_dir/ndir));
    }
    else
    {
        fprintf(fp, "\n");
    }
}
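/*! \brief Compute the root-mean-square force on the shells, including
 * the directional contribution sf_dir of the flexible constraints;
 * sums the contributions (and Epot) over the ranks when running in
 * parallel. */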
static real rms_force(t_commrec *cr, const PaddedRVecVector *force, int ns, t_shell s[],
                      int ndir, real *sf_dir, real *Epot)
{
    double      buf[4];
    const rvec *f = as_rvec_array(force->data());

    buf[0] = *sf_dir;
    for (int i = 0; i < ns; i++)
    {
        int shell = s[i].shell;
        buf[0]   += norm2(f[shell]);
    }
    int ntot = ns;

    if (PAR(cr))
    {
        buf[1] = ntot;
        buf[2] = *sf_dir;
        buf[3] = *Epot;
        gmx_sumd(4, buf, cr);
        ntot    = (int)(buf[1] + 0.5);
        *sf_dir = buf[2];
        *Epot   = buf[3];
    }
    ntot += ndir;

    return (ntot ? std::sqrt(buf[0]/ntot) : 0);
}
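/*! \brief Crude PBC sanity check: print the coordinates around a shell
 * when it is displaced more than 0.3 nm (in any dimension) from a
 * nearby atom, which usually indicates a PBC or relaxation problem. */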
static void check_pbc(FILE *fp, PaddedRVecVector x, int shell)
{
    int m, now;

    now = shell - 4;
    for (m = 0; (m < DIM); m++)
    {
        if (fabs(x[shell][m]-x[now][m]) > 0.3)
        {
            pr_rvecs(fp, 0, "SHELL-X", as_rvec_array(x.data())+now, 5);
            break;
        }
    }
}
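/*! \brief Dump all shells on which the force exceeds \p ftol, and run
 * check_pbc() on each shell (debugging output). */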
static void dump_shells(FILE *fp, PaddedRVecVector x, PaddedRVecVector f, real ftol, int ns, t_shell s[])
{
    int  i, shell;
    real ft2, ff2;

    ft2 = gmx::square(ftol);

    for (i = 0; (i < ns); i++)
    {
        shell = s[i].shell;
        ff2   = iprod(f[shell], f[shell]);
        if (ff2 > ft2)
        {
            fprintf(fp, "SHELL %5d, force %10.5f  %10.5f  %10.5f, |f| %10.5f\n",
                    shell, f[shell][XX], f[shell][YY], f[shell][ZZ], std::sqrt(ff2));
        }
        check_pbc(fp, x, shell);
    }
}
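/*! \brief Compute the acceleration directions for the flexible
 * constraints.
 *
 * Two trial coordinate sets, extrapolated backwards and forwards in
 * time, are constrained; the difference, corrected for the applied
 * force, gives the direction along which the flexible constraints
 * accelerate the atoms. */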
static void init_adir(FILE *log, gmx_shellfc_t *shfc,
                      gmx_constr_t constr, t_idef *idef, t_inputrec *ir,
                      t_commrec *cr, int dd_ac1,
                      gmx_int64_t step, t_mdatoms *md, int end,
                      rvec *x_old, rvec *x_init, rvec *x,
                      rvec *f, rvec *acc_dir,
                      gmx_bool bMolPBC, matrix box,
                      gmx::ArrayRef<const real> lambda, real *dvdlambda,
                      t_nrnb *nrnb)
{
    rvec           *xnold, *xnew;
    double          dt, w_dt;
    int             n, d;
    unsigned short *ptype;

    if (DOMAINDECOMP(cr))
    {
        n = dd_ac1;
    }
    else
    {
        n = end;
    }
    if (n > shfc->adir_nalloc)
    {
        shfc->adir_nalloc = over_alloc_dd(n);
        srenew(shfc->adir_xnold, shfc->adir_nalloc);
        srenew(shfc->adir_xnew, shfc->adir_nalloc);
    }
    xnold = shfc->adir_xnold;
    xnew  = shfc->adir_xnew;

    ptype = md->ptype;

    dt = ir->delta_t;

    /* Does NOT work with freeze or acceleration groups (yet) */
    for (n = 0; n < end; n++)
    {
        w_dt = md->invmass[n]*dt;

        for (d = 0; d < DIM; d++)
        {
            if ((ptype[n] != eptVSite) && (ptype[n] != eptShell))
            {
                xnold[n][d] = x[n][d] - (x_init[n][d] - x_old[n][d]);
                xnew[n][d]  = 2*x[n][d] - x_old[n][d] + f[n][d]*w_dt*dt;
            }
            else
            {
                xnold[n][d] = x[n][d];
                xnew[n][d]  = x[n][d];
            }
        }
    }
    constrain(log, FALSE, FALSE, constr, idef, ir, cr, step, 0, 1.0, md,
              x, xnold, nullptr, bMolPBC, box,
              lambda[efptBONDED], &(dvdlambda[efptBONDED]),
              nullptr, nullptr, nrnb, econqCoord);
    constrain(log, FALSE, FALSE, constr, idef, ir, cr, step, 0, 1.0, md,
              x, xnew, nullptr, bMolPBC, box,
              lambda[efptBONDED], &(dvdlambda[efptBONDED]),
              nullptr, nullptr, nrnb, econqCoord);

    for (n = 0; n < end; n++)
    {
        for (d = 0; d < DIM; d++)
        {
            xnew[n][d] =
                -(2*x[n][d]-xnold[n][d]-xnew[n][d])/gmx::square(dt)
                - f[n][d]*md->invmass[n];
        }
        clear_rvec(acc_dir[n]);
    }

    /* Project the acceleration on the old bond directions */
    constrain(log, FALSE, FALSE, constr, idef, ir, cr, step, 0, 1.0, md,
              x_old, xnew, acc_dir, bMolPBC, box,
              lambda[efptBONDED], &(dvdlambda[efptBONDED]),
              nullptr, nullptr, nrnb, econqDeriv_FlexCon);
}
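/*! \brief Iteratively minimize the forces on the shell particles and
 * flexible constraints for a single MD step.
 *
 * Calls do_force() repeatedly, moving the shells by steepest descent,
 * until the RMS force drops below inputrec->em_tol or inputrec->niter
 * iterations have been performed. */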
void relax_shell_flexcon(FILE *fplog, t_commrec *cr, gmx_bool bVerbose,
                         gmx_int64_t mdstep, t_inputrec *inputrec,
                         gmx_bool bDoNS, int force_flags,
                         gmx_localtop_t *top,
                         gmx_constr_t constr,
                         gmx_enerdata_t *enerd, t_fcdata *fcd,
                         t_state *state, PaddedRVecVector *f,
                         tensor force_vir,
                         t_mdatoms *md,
                         t_nrnb *nrnb, gmx_wallcycle_t wcycle,
                         t_graph *graph,
                         gmx_groups_t *groups,
                         struct gmx_shellfc_t *shfc,
                         t_forcerec *fr,
                         gmx_bool bBornRadii,
                         double t, rvec mu_tot,
                         gmx_vsite_t *vsite,
                         DdOpenBalanceRegionBeforeForceComputation ddOpenBalanceRegion,
                         DdCloseBalanceRegionAfterForceComputation ddCloseBalanceRegion)
{
    int        nshell;
    t_shell   *shell;
    t_idef    *idef;
    rvec      *acc_dir = nullptr, *x_old = nullptr;
    real       Epot[2], df[2];
    real       sf_dir, invdt;
    real       ftol, dum = 0;
    char       sbuf[22];
    gmx_bool   bCont, bInit, bConverged;
    int        nat, dd_ac0, dd_ac1 = 0, i;
    int        homenr = md->homenr, end = homenr, cg0, cg1;
    int        nflexcon, number_steps, d, Min = 0, count = 0;
#define  Try (1-Min)             /* At start Try = 1 */

    bCont        = (mdstep == inputrec->init_step) && inputrec->bContinuation;
    bInit        = (mdstep == inputrec->init_step) || shfc->bRequireInit;
    ftol         = inputrec->em_tol;
    number_steps = inputrec->niter;
    nshell       = shfc->nshell;
    shell        = shfc->shell;
    nflexcon     = shfc->nflexcon;

    idef = &top->idef;

    if (DOMAINDECOMP(cr))
    {
        nat = dd_natoms_vsite(cr->dd);
        if (nflexcon > 0)
        {
            dd_get_constraint_range(cr->dd, &dd_ac0, &dd_ac1);
            nat = std::max(nat, dd_ac1);
        }
    }
    else
    {
        nat = state->natoms;
    }

    for (i = 0; (i < 2); i++)
    {
        shfc->x[i].resize(gmx::paddedRVecVectorSize(nat));
        shfc->f[i].resize(gmx::paddedRVecVectorSize(nat));
    }
    /* Create pointers that we can swap */
    PaddedRVecVector *pos[2];
    PaddedRVecVector *force[2];
    for (i = 0; (i < 2); i++)
    {
        pos[i]   = &shfc->x[i];
        force[i] = &shfc->f[i];
    }

    if (bDoNS && inputrec->ePBC != epbcNONE && !DOMAINDECOMP(cr))
    {
        /* This is the only time where the coordinates are used
         * before do_force is called, which normally puts all
         * charge groups in the box.
         */
        if (inputrec->cutoff_scheme == ecutsVERLET)
        {
            put_atoms_in_box_omp(fr->ePBC, state->box, md->homenr, as_rvec_array(state->x.data()));
        }
        else
        {
            cg0 = 0;
            cg1 = top->cgs.nr;
            put_charge_groups_in_box(fplog, cg0, cg1, fr->ePBC, state->box,
                                     &(top->cgs), as_rvec_array(state->x.data()), fr->cg_cm);
        }

        if (graph)
        {
            mk_mshift(fplog, graph, fr->ePBC, state->box, as_rvec_array(state->x.data()));
        }
    }

    /* After this all coordinate arrays will contain whole charge groups */
    if (graph)
    {
        shift_self(graph, state->box, as_rvec_array(state->x.data()));
    }

    if (nflexcon)
    {
        if (nat > shfc->flex_nalloc)
        {
            shfc->flex_nalloc = over_alloc_dd(nat);
            srenew(shfc->acc_dir, shfc->flex_nalloc);
            srenew(shfc->x_old, shfc->flex_nalloc);
        }
        acc_dir = shfc->acc_dir;
        x_old   = shfc->x_old;
        for (i = 0; i < homenr; i++)
        {
            for (d = 0; d < DIM; d++)
            {
                shfc->x_old[i][d] =
                    state->x[i][d] - state->v[i][d]*inputrec->delta_t;
            }
        }
    }

    /* Do a prediction of the shell positions, when appropriate.
     * Without velocities (EM, NM, BD) we only do initial prediction.
     */
    if (shfc->bPredict && !bCont && (EI_STATE_VELOCITY(inputrec->eI) || bInit))
    {
        predict_shells(fplog, as_rvec_array(state->x.data()), as_rvec_array(state->v.data()), inputrec->delta_t, nshell, shell,
                       md->massT, nullptr, bInit);
    }

    /* do_force expected the charge groups to be in the box */
    if (graph)
    {
        unshift_self(graph, state->box, as_rvec_array(state->x.data()));
    }
    /* Calculate the forces first time around */
    if (gmx_debug_at)
    {
        pr_rvecs(debug, 0, "x b4 do_force", as_rvec_array(state->x.data()), homenr);
    }
    do_force(fplog, cr, inputrec, mdstep, nrnb, wcycle, top, groups,
             state->box, &state->x, &state->hist,
             force[Min], force_vir, md, enerd, fcd,
             state->lambda, graph,
             fr, vsite, mu_tot, t, nullptr, bBornRadii,
             (bDoNS ? GMX_FORCE_NS : 0) | force_flags,
             ddOpenBalanceRegion, ddCloseBalanceRegion);

    sf_dir = 0;
    if (nflexcon)
    {
        init_adir(fplog, shfc,
                  constr, idef, inputrec, cr, dd_ac1, mdstep, md, end,
                  shfc->x_old, as_rvec_array(state->x.data()), as_rvec_array(state->x.data()), as_rvec_array(force[Min]->data()),
                  shfc->acc_dir,
                  fr->bMolPBC, state->box, state->lambda, &dum, nrnb);

        for (i = 0; i < end; i++)
        {
            sf_dir += md->massT[i]*norm2(shfc->acc_dir[i]);
        }
    }

    Epot[Min] = enerd->term[F_EPOT];

    df[Min] = rms_force(cr, &shfc->f[Min], nshell, shell, nflexcon, &sf_dir, &Epot[Min]);
    df[Try] = 0;
    if (debug)
    {
        fprintf(debug, "df = %g  %g\n", df[Min], df[Try]);
    }

    if (gmx_debug_at)
    {
        pr_rvecs(debug, 0, "force0", as_rvec_array(force[Min]->data()), md->nr);
    }
    if (nshell+nflexcon > 0)
    {
        /* Copy x to pos[Min] & pos[Try]: during minimization only the
         * shell positions are updated, therefore the other particles must
         * be set here.
         */
        *pos[Min] = state->x;
        *pos[Try] = state->x;
    }

    if (bVerbose && MASTER(cr))
    {
        print_epot(stdout, mdstep, 0, Epot[Min], df[Min], nflexcon, sf_dir);
    }

    if (debug)
    {
        fprintf(debug, "%17s: %14.10e\n",
                interaction_function[F_EKIN].longname, enerd->term[F_EKIN]);
        fprintf(debug, "%17s: %14.10e\n",
                interaction_function[F_EPOT].longname, enerd->term[F_EPOT]);
        fprintf(debug, "%17s: %14.10e\n",
                interaction_function[F_ETOT].longname, enerd->term[F_ETOT]);
        fprintf(debug, "SHELLSTEP %s\n", gmx_step_str(mdstep, sbuf));
    }

    /* First check whether we should do shells, or whether the force is
     * low enough even without minimization.
     */
    bConverged = (df[Min] < ftol);

    for (count = 1; (!(bConverged) && (count < number_steps)); count++)
    {
        if (vsite)
        {
            construct_vsites(vsite, as_rvec_array(pos[Min]->data()),
                             inputrec->delta_t, as_rvec_array(state->v.data()),
                             idef->iparams, idef->il,
                             fr->ePBC, fr->bMolPBC, cr, state->box);
        }

        if (nflexcon)
        {
            init_adir(fplog, shfc,
                      constr, idef, inputrec, cr, dd_ac1, mdstep, md, end,
                      x_old, as_rvec_array(state->x.data()), as_rvec_array(pos[Min]->data()), as_rvec_array(force[Min]->data()), acc_dir,
                      fr->bMolPBC, state->box, state->lambda, &dum, nrnb);

            directional_sd(pos[Min], pos[Try], acc_dir, end, fr->fc_stepsize);
        }
        /* New positions, Steepest descent */
        shell_pos_sd(pos[Min], pos[Try], force[Min], nshell, shell, count);

        /* do_force expected the charge groups to be in the box */
        if (graph)
        {
            unshift_self(graph, state->box, as_rvec_array(pos[Try]->data()));
        }

        if (gmx_debug_at)
        {
            pr_rvecs(debug, 0, "RELAX: pos[Min]  ", as_rvec_array(pos[Min]->data()), homenr);
            pr_rvecs(debug, 0, "RELAX: pos[Try]  ", as_rvec_array(pos[Try]->data()), homenr);
        }

        /* Try the new positions */
        do_force(fplog, cr, inputrec, 1, nrnb, wcycle,
                 top, groups, state->box, pos[Try], &state->hist,
                 force[Try], force_vir,
                 md, enerd, fcd, state->lambda, graph,
                 fr, vsite, mu_tot, t, nullptr, bBornRadii,
                 force_flags,
                 ddOpenBalanceRegion, ddCloseBalanceRegion);

        if (gmx_debug_at)
        {
            pr_rvecs(debug, 0, "RELAX: force[Min]", as_rvec_array(force[Min]->data()), homenr);
            pr_rvecs(debug, 0, "RELAX: force[Try]", as_rvec_array(force[Try]->data()), homenr);
        }

        sf_dir = 0;
        if (nflexcon)
        {
            init_adir(fplog, shfc,
                      constr, idef, inputrec, cr, dd_ac1, mdstep, md, end,
                      x_old, as_rvec_array(state->x.data()), as_rvec_array(pos[Try]->data()), as_rvec_array(force[Try]->data()), acc_dir,
                      fr->bMolPBC, state->box, state->lambda, &dum, nrnb);

            for (i = 0; i < end; i++)
            {
                sf_dir += md->massT[i]*norm2(acc_dir[i]);
            }
        }

        Epot[Try] = enerd->term[F_EPOT];

        df[Try] = rms_force(cr, force[Try], nshell, shell, nflexcon, &sf_dir, &Epot[Try]);

        if (debug)
        {
            fprintf(debug, "df = %g  %g\n", df[Min], df[Try]);
        }

        if (gmx_debug_at)
        {
            pr_rvecs(debug, 0, "F na do_force", as_rvec_array(force[Try]->data()), homenr);
            fprintf(debug, "SHELL ITER %d\n", count);
            dump_shells(debug, *pos[Try], *force[Try], ftol, nshell, shell);
        }

        if (bVerbose && MASTER(cr))
        {
            print_epot(stdout, mdstep, count, Epot[Try], df[Try], nflexcon, sf_dir);
        }

        bConverged = (df[Try] < ftol);

        if ((df[Try] < df[Min]))
        {
            if (debug)
            {
                fprintf(debug, "Swapping Min and Try\n");
            }
            if (nflexcon)
            {
                /* Correct the velocities for the flexible constraints */
                invdt = 1/inputrec->delta_t;
                for (i = 0; i < end; i++)
                {
                    for (d = 0; d < DIM; d++)
                    {
                        state->v[i][d] += ((*pos[Try])[i][d] - (*pos[Min])[i][d])*invdt;
                    }
                }
            }
            Min = Try;
        }
        else
        {
            decrease_step_size(nshell, shell);
        }
    }
    shfc->numForceEvaluations += count;
    if (bConverged)
    {
        shfc->numConvergedIterations++;
    }
    if (MASTER(cr) && !(bConverged))
    {
        /* Note that the energies and virial are incorrect when not converged */
        if (fplog)
        {
            fprintf(fplog,
                    "step %s: EM did not converge in %d iterations, RMS force %.3f\n",
                    gmx_step_str(mdstep, sbuf), number_steps, df[Min]);
        }
        fprintf(stderr,
                "step %s: EM did not converge in %d iterations, RMS force %.3f\n",
                gmx_step_str(mdstep, sbuf), number_steps, df[Min]);
    }

    /* Copy back the coordinates and the forces */
    state->x = *pos[Min];
    *f       = *force[Min];
}
void done_shellfc(FILE *fplog, gmx_shellfc_t *shfc, gmx_int64_t numSteps)
{
    if (shfc && fplog && numSteps > 0)
    {
        double numStepsAsDouble = static_cast<double>(numSteps);
        fprintf(fplog, "Fraction of iterations that converged:           %.2f %%\n",
                (shfc->numConvergedIterations*100.0)/numStepsAsDouble);
        fprintf(fplog, "Average number of force evaluations per MD step: %.2f\n\n",
                shfc->numForceEvaluations/numStepsAsDouble);
    }

    // TODO Deallocate memory in shfc
}