/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2012,2013,2014,2015,2016 by the GROMACS development team.
 * Copyright (c) 2017,2018,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "gmxpre.h"

#include "mdatoms.h"

#include <cmath>

#include <memory>

#include "gromacs/ewald/pme.h"
#include "gromacs/gpu_utils/hostallocator.h"
#include "gromacs/math/functions.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdlib/qmmm.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/topology/mtop_lookup.h"
#include "gromacs/topology/mtop_util.h"
#include "gromacs/topology/topology.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/smalloc.h"
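
/*! \brief Inverse mass assigned to completely frozen atoms instead of 0,
 * to avoid division by zero in the constraint algorithms (see atoms2md()). */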
#define ALMOST_ZERO 1e-30

namespace gmx
{

MDAtoms::MDAtoms() : mdatoms_(nullptr) {}

MDAtoms::~MDAtoms()
{
    if (mdatoms_ == nullptr)
    {
        return;
    }
    sfree(mdatoms_->massA);
    sfree(mdatoms_->massB);
    sfree(mdatoms_->massT);
    gmx::AlignedAllocationPolicy::free(mdatoms_->invmass);
    sfree(mdatoms_->invMassPerDim);
    sfree(mdatoms_->typeA);
    sfree(mdatoms_->chargeB);
    sfree(mdatoms_->typeB);
    sfree(mdatoms_->sqrt_c6A);
    sfree(mdatoms_->sigmaA);
    sfree(mdatoms_->sigma3A);
    sfree(mdatoms_->sqrt_c6B);
    sfree(mdatoms_->sigmaB);
    sfree(mdatoms_->sigma3B);
    sfree(mdatoms_->ptype);
    sfree(mdatoms_->cTC);
    sfree(mdatoms_->cENER);
    sfree(mdatoms_->cACC);
    sfree(mdatoms_->cFREEZE);
    sfree(mdatoms_->cVCM);
    sfree(mdatoms_->cORF);
    sfree(mdatoms_->bPerturbed);
    sfree(mdatoms_->cU1);
    sfree(mdatoms_->cU2);
    sfree(mdatoms_->bQM);
}
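
/* resize() and reserve() manage the padded chargeA_ vector; both refresh the raw
 * chargeA pointer stored in the legacy t_mdatoms struct, because (re)allocation
 * can move the underlying storage. */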
void MDAtoms::resize(int newSize)
{
    chargeA_.resizeWithPadding(newSize);
    mdatoms_->chargeA = chargeA_.data();
}

void MDAtoms::reserve(int newCapacity)
{
    chargeA_.reserveWithPadding(newCapacity);
    mdatoms_->chargeA = chargeA_.data();
}

std::unique_ptr<MDAtoms> makeMDAtoms(FILE* fp, const gmx_mtop_t& mtop, const t_inputrec& ir,
                                     const bool rankHasPmeGpuTask)
{
    auto mdAtoms = std::make_unique<MDAtoms>();
    // GPU transfers may want to use a suitable pinning mode.
    if (rankHasPmeGpuTask)
    {
        changePinningPolicy(&mdAtoms->chargeA_, pme_get_pinning_policy());
    }
    t_mdatoms* md;
    snew(md, 1);
    mdAtoms->mdatoms_.reset(md);

    md->nenergrp = mtop.groups.groups[SimulationAtomGroupType::EnergyOutput].size();
    md->bVCMgrps = FALSE;
    for (int i = 0; i < mtop.natoms; i++)
    {
        if (getGroupType(mtop.groups, SimulationAtomGroupType::MassCenterVelocityRemoval, i) > 0)
        {
            md->bVCMgrps = TRUE;
        }
    }

    /* Determine the total system mass and perturbed atom counts */
    double totalMassA = 0.0;
    double totalMassB = 0.0;

    md->haveVsites                  = FALSE;
    gmx_mtop_atomloop_block_t aloop = gmx_mtop_atomloop_block_init(&mtop);
    const t_atom*             atom;
    int                       nmol;
    while (gmx_mtop_atomloop_block_next(aloop, &atom, &nmol))
    {
        totalMassA += nmol * atom->m;
        totalMassB += nmol * atom->mB;

        if (atom->ptype == eptVSite)
        {
            md->haveVsites = TRUE;
        }

        if (ir.efep != efepNO && PERTURBED(*atom))
        {
            md->nPerturbed += nmol;
            if (atom->mB != atom->m)
            {
                md->nMassPerturbed += nmol;
            }
            if (atom->qB != atom->q)
            {
                md->nChargePerturbed += nmol;
            }
            if (atom->typeB != atom->type)
            {
                md->nTypePerturbed += nmol;
            }
        }
    }

    md->tmassA = totalMassA;
    md->tmassB = totalMassB;

    if (ir.efep != efepNO && fp)
    {
        fprintf(fp, "There are %d atoms and %d charges for free energy perturbation\n",
                md->nPerturbed, md->nChargePerturbed);
    }

    md->havePartiallyFrozenAtoms = FALSE;
    for (int g = 0; g < ir.opts.ngfrz; g++)
    {
        for (int d = YY; d < DIM; d++)
        {
            if (ir.opts.nFreeze[g][d] != ir.opts.nFreeze[g][XX])
            {
                md->havePartiallyFrozenAtoms = TRUE;
            }
        }
    }

    md->bOrires = (gmx_mtop_ftype_count(&mtop, F_ORIRES) != 0);

    return mdAtoms;
}

}  // namespace gmx
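
/* Illustrative call order (a sketch only; the real call sites are in the mdrun
 * setup and domain-decomposition code, and fplog/inputrec/lambda below are
 * placeholder names):
 *
 *   auto mdAtoms = gmx::makeMDAtoms(fplog, mtop, *inputrec, rankHasPmeGpuTask);
 *   // No domain decomposition: nindex = -1 and index = nullptr, all atoms are local.
 *   atoms2md(&mtop, inputrec, -1, nullptr, mtop.natoms, mdAtoms.get());
 *   // For free-energy runs, refresh the masses whenever lambda changes.
 *   update_mdatoms(mdAtoms->mdatoms(), lambda);
 */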

void atoms2md(const gmx_mtop_t* mtop, const t_inputrec* ir, int nindex, const int* index,
              int homenr, gmx::MDAtoms* mdAtoms)
{
    gmx_bool         bLJPME;
    const t_grpopts* opts;
    int nthreads gmx_unused;

    bLJPME = EVDW_PME(ir->vdwtype);

    opts = &ir->opts;

    const SimulationGroups& groups = mtop->groups;

    auto md = mdAtoms->mdatoms();
    /* nindex>=0 indicates DD where we use an index */
    if (nindex >= 0)
    {
        md->nr = nindex;
    }
    else
    {
        md->nr = mtop->natoms;
    }

    if (md->nr > md->nalloc)
    {
        md->nalloc = over_alloc_dd(md->nr);

        if (md->nMassPerturbed)
        {
            srenew(md->massA, md->nalloc);
            srenew(md->massB, md->nalloc);
        }
        srenew(md->massT, md->nalloc);
        /* The SIMD version of the integrator needs this aligned and padded.
         * The padding needs to be with zeros, which we set later below.
         */
        gmx::AlignedAllocationPolicy::free(md->invmass);
        md->invmass = new (gmx::AlignedAllocationPolicy::malloc(
                (md->nalloc + GMX_REAL_MAX_SIMD_WIDTH) * sizeof(*md->invmass))) real;
        srenew(md->invMassPerDim, md->nalloc);
        // TODO eventually we will have vectors and just resize
        // everything, but for now the semantics of md->nalloc being
        // the capacity are preserved by keeping vectors within
        // mdAtoms having the same properties as the other arrays.
        mdAtoms->reserve(md->nalloc);
        mdAtoms->resize(md->nr);
        srenew(md->typeA, md->nalloc);
        if (md->nPerturbed)
        {
            srenew(md->chargeB, md->nalloc);
            srenew(md->typeB, md->nalloc);
        }
        if (bLJPME)
        {
            srenew(md->sqrt_c6A, md->nalloc);
            srenew(md->sigmaA, md->nalloc);
            srenew(md->sigma3A, md->nalloc);
            if (md->nPerturbed)
            {
                srenew(md->sqrt_c6B, md->nalloc);
                srenew(md->sigmaB, md->nalloc);
                srenew(md->sigma3B, md->nalloc);
            }
        }
        srenew(md->ptype, md->nalloc);
        if (opts->ngtc > 1)
        {
            srenew(md->cTC, md->nalloc);
            /* We always copy cTC with domain decomposition */
        }
        srenew(md->cENER, md->nalloc);
        if (opts->ngacc > 1)
        {
            srenew(md->cACC, md->nalloc);
        }
        if (opts->nFreeze
            && (opts->ngfrz > 1 || opts->nFreeze[0][XX] || opts->nFreeze[0][YY] || opts->nFreeze[0][ZZ]))
        {
            srenew(md->cFREEZE, md->nalloc);
        }
        if (md->bVCMgrps)
        {
            srenew(md->cVCM, md->nalloc);
        }
        if (md->bOrires)
        {
            srenew(md->cORF, md->nalloc);
        }
        if (md->nPerturbed)
        {
            srenew(md->bPerturbed, md->nalloc);
        }

        /* Note that these user t_mdatoms array pointers are NULL
         * when there is only one group present.
         * Therefore, when adding code, the user should use something like:
         * gprnrU1 = (md->cU1==NULL ? 0 : md->cU1[localatindex])
         */
        if (!mtop->groups.groupNumbers[SimulationAtomGroupType::User1].empty())
        {
            srenew(md->cU1, md->nalloc);
        }
        if (!mtop->groups.groupNumbers[SimulationAtomGroupType::User2].empty())
        {
            srenew(md->cU2, md->nalloc);
        }

        if (ir->bQMMM)
        {
            srenew(md->bQM, md->nalloc);
        }
    }
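
    /* The loop below fills every per-atom t_mdatoms entry for the local atoms.
     * It is parallelized over OpenMP threads; each thread keeps its own
     * molecule-block cursor (molb) for the topology parameter lookup, which is
     * why molb is firstprivate in the pragma below. */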
    int molb = 0;

    nthreads = gmx_omp_nthreads_get(emntDefault);
#pragma omp parallel for num_threads(nthreads) schedule(static) firstprivate(molb)
    for (int i = 0; i < md->nr; i++)
    {
        try
        {
            int  g, ag;
            real mA, mB, fac;
            real c6, c12;

            if (index == nullptr)
            {
                ag = i;
            }
            else
            {
                ag = index[i];
            }
            const t_atom& atom = mtopGetAtomParameters(mtop, ag, &molb);

            if (md->cFREEZE)
            {
                md->cFREEZE[i] = getGroupType(groups, SimulationAtomGroupType::Freeze, ag);
            }
            if (EI_ENERGY_MINIMIZATION(ir->eI))
            {
                /* Displacement is proportional to F, masses used for constraints */
                mA = 1.0;
                mB = 1.0;
            }
            else if (ir->eI == eiBD)
            {
                /* With BD the physical masses are irrelevant.
                 * To keep the code simple we use most of the normal MD code path
                 * for BD. Thus for constraining the masses should be proportional
                 * to the friction coefficient. We set the absolute value such that
                 * m/2<(dx/dt)^2> = m/2*2kT/fric*dt = kT/2 => m=fric*dt/2
                 * Then if we set the (meaningless) velocity to v=dx/dt, we get the
                 * correct kinetic energy and temperature using the usual code path.
                 * Thus with BD v*dt will give the displacement and the reported
                 * temperature can signal bad integration (too large time step).
                 */
                if (ir->bd_fric > 0)
                {
                    mA = 0.5 * ir->bd_fric * ir->delta_t;
                    mB = 0.5 * ir->bd_fric * ir->delta_t;
                }
                else
                {
                    /* The friction coefficient is mass/tau_t */
                    fac = ir->delta_t
                          / opts->tau_t[md->cTC ? groups.groupNumbers[SimulationAtomGroupType::TemperatureCoupling][ag] : 0];
                    mA = 0.5 * atom.m * fac;
                    mB = 0.5 * atom.mB * fac;
                }
            }
            else
            {
                mA = atom.m;
                mB = atom.mB;
            }

            if (md->nMassPerturbed)
            {
                md->massA[i] = mA;
                md->massB[i] = mB;
            }
            md->massT[i] = mA;

            if (mA == 0.0)
            {
                md->invmass[i]           = 0;
                md->invMassPerDim[i][XX] = 0;
                md->invMassPerDim[i][YY] = 0;
                md->invMassPerDim[i][ZZ] = 0;
            }
            else if (md->cFREEZE)
            {
                g = md->cFREEZE[i];
                GMX_ASSERT(opts->nFreeze != nullptr,
                           "Must have freeze groups to initialize masses");
                if (opts->nFreeze[g][XX] && opts->nFreeze[g][YY] && opts->nFreeze[g][ZZ])
                {
                    /* Set the mass of completely frozen particles to ALMOST_ZERO
                     * instead of 0 to avoid division by zero in LINCS or SHAKE.
                     */
                    md->invmass[i] = ALMOST_ZERO;
                }
                else
                {
                    /* Note: Partially frozen particles use the normal invmass.
                     * If such particles are constrained, the frozen dimensions
                     * should not be updated with the constrained coordinates.
                     */
                    md->invmass[i] = 1.0 / mA;
                }
                for (int d = 0; d < DIM; d++)
                {
                    md->invMassPerDim[i][d] = (opts->nFreeze[g][d] ? 0 : 1.0 / mA);
                }
            }
            else
            {
                md->invmass[i] = 1.0 / mA;
                for (int d = 0; d < DIM; d++)
                {
                    md->invMassPerDim[i][d] = 1.0 / mA;
                }
            }

            md->chargeA[i] = atom.q;
            md->typeA[i]   = atom.type;
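
            /* Per-atom LJ-PME parameters derived from the A-state atom type:
             * sqrt(C6), sigma (from sigma^6 = C12/C6), and 1/sigma^3. */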
            if (bLJPME)
            {
                c6  = mtop->ffparams.iparams[atom.type * (mtop->ffparams.atnr + 1)].lj.c6;
                c12 = mtop->ffparams.iparams[atom.type * (mtop->ffparams.atnr + 1)].lj.c12;
                md->sqrt_c6A[i] = sqrt(c6);
                if (c6 == 0.0 || c12 == 0)
                {
                    md->sigmaA[i] = 1.0;
                }
                else
                {
                    md->sigmaA[i] = gmx::sixthroot(c12 / c6);
                }
                md->sigma3A[i] = 1 / (md->sigmaA[i] * md->sigmaA[i] * md->sigmaA[i]);
            }

            if (md->nPerturbed)
            {
                md->bPerturbed[i] = PERTURBED(atom);
                md->chargeB[i]    = atom.qB;
                md->typeB[i]      = atom.typeB;
                if (bLJPME)
                {
                    c6  = mtop->ffparams.iparams[atom.typeB * (mtop->ffparams.atnr + 1)].lj.c6;
                    c12 = mtop->ffparams.iparams[atom.typeB * (mtop->ffparams.atnr + 1)].lj.c12;
                    md->sqrt_c6B[i] = sqrt(c6);
                    if (c6 == 0.0 || c12 == 0)
                    {
                        md->sigmaB[i] = 1.0;
                    }
                    else
                    {
                        md->sigmaB[i] = gmx::sixthroot(c12 / c6);
                    }
                    md->sigma3B[i] = 1 / (md->sigmaB[i] * md->sigmaB[i] * md->sigmaB[i]);
                }
            }
            md->ptype[i] = atom.ptype;

            if (md->cTC)
            {
                md->cTC[i] = groups.groupNumbers[SimulationAtomGroupType::TemperatureCoupling][ag];
            }
            md->cENER[i] = getGroupType(groups, SimulationAtomGroupType::EnergyOutput, ag);
            if (md->cACC)
            {
                md->cACC[i] = groups.groupNumbers[SimulationAtomGroupType::Acceleration][ag];
            }
            if (md->cVCM)
            {
                md->cVCM[i] = groups.groupNumbers[SimulationAtomGroupType::MassCenterVelocityRemoval][ag];
            }
            if (md->cORF)
            {
                md->cORF[i] = getGroupType(groups, SimulationAtomGroupType::OrientationRestraintsFit, ag);
            }

            if (md->cU1)
            {
                md->cU1[i] = groups.groupNumbers[SimulationAtomGroupType::User1][ag];
            }
            if (md->cU2)
            {
                md->cU2[i] = groups.groupNumbers[SimulationAtomGroupType::User2][ag];
            }

            if (ir->bQMMM)
            {
                if (groups.groupNumbers[SimulationAtomGroupType::QuantumMechanics].empty()
                    || groups.groupNumbers[SimulationAtomGroupType::QuantumMechanics][ag]
                               < groups.groups[SimulationAtomGroupType::QuantumMechanics].size() - 1)
                {
                    md->bQM[i] = TRUE;
                }
                else
                {
                    md->bQM[i] = FALSE;
                }
            }
        }
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
    }

    if (md->nr > 0)
    {
        /* Pad invmass with 0 so a SIMD MD update does not change v and x */
        for (int i = md->nr; i < md->nr + GMX_REAL_MAX_SIMD_WIDTH; i++)
        {
            md->invmass[i] = 0;
        }
    }

    md->homenr = homenr;
    /* We set mass, invmass, invMassPerDim and tmass for lambda=0.
     * For free-energy runs, these should be updated using update_mdatoms().
     */
    md->tmass  = md->tmassA;
    md->lambda = 0;
}
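
/* Interpolate the masses of perturbed atoms, and the total system mass, between
 * the A state (lambda = 0) and the B state (lambda = 1) when lambda has changed. */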
void update_mdatoms(t_mdatoms* md, real lambda)
{
    if (md->nMassPerturbed && lambda != md->lambda)
    {
        real L1 = 1 - lambda;

        /* Update masses of perturbed atoms for the change in lambda */
        int gmx_unused nthreads = gmx_omp_nthreads_get(emntDefault);
#pragma omp parallel for num_threads(nthreads) schedule(static)
        for (int i = 0; i < md->nr; i++)
        {
            if (md->bPerturbed[i])
            {
                md->massT[i] = L1 * md->massA[i] + lambda * md->massB[i];
                /* Atoms with invmass 0 or ALMOST_ZERO are massless or frozen
                 * and their invmass does not depend on lambda.
                 */
                if (md->invmass[i] > 1.1 * ALMOST_ZERO)
                {
                    md->invmass[i] = 1.0 / md->massT[i];
                    for (int d = 0; d < DIM; d++)
                    {
                        if (md->invMassPerDim[i][d] > 1.1 * ALMOST_ZERO)
                        {
                            md->invMassPerDim[i][d] = md->invmass[i];
                        }
                    }
                }
            }
        }

        /* Update the system mass for the change in lambda */
        md->tmass = L1 * md->tmassA + lambda * md->tmassB;
    }

    md->lambda = lambda;
}