/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
44 #include "gromacs/fileio/confio.h"
45 #include "gromacs/gmxlib/network.h"
46 #include "gromacs/math/functions.h"
47 #include "gromacs/math/utilities.h"
48 #include "gromacs/math/vec.h"
49 #include "gromacs/mdtypes/commrec.h"
50 #include "gromacs/mdtypes/inputrec.h"
51 #include "gromacs/mdtypes/md_enums.h"
52 #include "gromacs/mdtypes/mdatom.h"
53 #include "gromacs/mdtypes/state.h"
54 #include "gromacs/pbcutil/pbc.h"
55 #include "gromacs/pulling/pull.h"
56 #include "gromacs/utility/fatalerror.h"
57 #include "gromacs/utility/futil.h"
58 #include "gromacs/utility/gmxassert.h"
59 #include "gromacs/utility/real.h"
60 #include "gromacs/utility/smalloc.h"
62 #include "pull_internal.h"
#if GMX_MPI

// Helper function to deduce MPI datatype from the type of data
gmx_unused static MPI_Datatype mpiDatatype(const float gmx_unused *data)
{
    return MPI_FLOAT;
}

// Helper function to deduce MPI datatype from the type of data
gmx_unused static MPI_Datatype mpiDatatype(const double gmx_unused *data)
{
    return MPI_DOUBLE;
}

#endif // GMX_MPI
#if !GMX_DOUBLE
// Helper function; note that gmx_sum(d) should actually be templated
gmx_unused static void gmxAllReduce(int n, real *data, const t_commrec *cr)
{
    gmx_sum(n, data, cr);
}
#endif

// Helper function; note that gmx_sum(d) should actually be templated
gmx_unused static void gmxAllReduce(int n, double *data, const t_commrec *cr)
{
    gmx_sumd(n, data, cr);
}
// Reduce data of n elements over all ranks currently participating in pull
template <typename T>
static void pullAllReduce(const t_commrec *cr,
                          pull_comm_t     *comm,
                          int              n,
                          T               *data)
{
    if (cr != nullptr && PAR(cr))
    {
        if (comm->bParticipateAll)
        {
            /* Sum the contributions over all DD ranks */
            gmxAllReduce(n, data, cr);
        }
        else
        {
            /* Separate branch because gmx_sum uses cr->mpi_comm_mygroup */
#if GMX_MPI
#if MPI_IN_PLACE_EXISTS
            MPI_Allreduce(MPI_IN_PLACE, data, n, mpiDatatype(data), MPI_SUM,
                          comm->mpi_comm_com);
#else
            std::vector<T> buf(n);

            MPI_Allreduce(data, buf.data(), n, mpiDatatype(data), MPI_SUM,
                          comm->mpi_comm_com);

            /* Copy the result from the buffer to the input/output data */
            for (int i = 0; i < n; i++)
            {
                data[i] = buf[i];
            }
#endif
#else
            gmx_incons("comm->bParticipateAll=FALSE without GMX_MPI");
#endif
        }
    }
}
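
/* Illustrative usage sketch (names here are only examples): pullAllReduce
 * sums in place, so each rank passes its local contributions and afterwards
 * holds the global total:
 *
 *     double localSums[9] = {};  // this rank's contributions
 *     pullAllReduce(cr, &pull->comm, 9, localSums);
 *     // localSums now holds the sum over all participating ranks
 */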

/* Copies the coordinates of the PBC atom of pgrp to x_pbc.
 * When those coordinates are not available on this rank, clears x_pbc.
 */
static void setPbcAtomCoords(const pull_group_work_t &pgrp,
                             const rvec              *x,
                             rvec                     x_pbc)
{
    if (pgrp.pbcAtomSet != nullptr)
    {
        if (pgrp.pbcAtomSet->numAtomsLocal() > 0)
        {
            /* We have the atom locally, copy its coordinates */
            copy_rvec(x[pgrp.pbcAtomSet->localIndex()[0]], x_pbc);
        }
        else
        {
            /* Another rank has it, clear the coordinates for MPI_Allreduce */
            clear_rvec(x_pbc);
        }
    }
    else
    {
        copy_rvec(x[pgrp.params.pbcatom], x_pbc);
    }
}

static void pull_set_pbcatoms(const t_commrec *cr, struct pull_t *pull,
                              const rvec *x,
                              gmx::ArrayRef<gmx::RVec> x_pbc)
{
    int numPbcAtoms = 0;
    for (size_t g = 0; g < pull->group.size(); g++)
    {
        const pull_group_work_t &group = pull->group[g];
        if (group.needToCalcCom && (group.epgrppbc == epgrppbcREFAT || group.epgrppbc == epgrppbcPREVSTEPCOM))
        {
            setPbcAtomCoords(pull->group[g], x, x_pbc[g]);
            numPbcAtoms++;
        }
        else
        {
            clear_rvec(x_pbc[g]);
        }
    }

    if (cr && PAR(cr) && numPbcAtoms > 0)
    {
        /* Sum over participating ranks to get x_pbc from the home ranks.
         * This can be very expensive at high parallelization, so we only
         * do this after each DD repartitioning.
         */
        pullAllReduce(cr, &pull->comm, pull->group.size()*DIM,
                      static_cast<real *>(x_pbc[0]));
    }
}

static void make_cyl_refgrps(const t_commrec *cr,
                             pull_t          *pull,
                             const t_mdatoms *md,
                             t_pbc           *pbc,
                             double           t,
                             const rvec      *x)
{
    pull_comm_t *comm = &pull->comm;

    GMX_ASSERT(comm->cylinderBuffer.size() == pull->coord.size()*c_cylinderBufferStride,
               "cylinderBuffer should have the correct size");

    double inv_cyl_r2 = 1.0/gmx::square(pull->params.cylinder_r);

    /* loop over all groups to make a reference group for each */
    for (size_t c = 0; c < pull->coord.size(); c++)
    {
        pull_coord_work_t *pcrd;
        double             sum_a, wmass, wwmass;
        dvec               radf_fac0, radf_fac1;

        pcrd   = &pull->coord[c];

        sum_a  = 0;
        wmass  = 0;
        wwmass = 0;
        clear_dvec(radf_fac0);
        clear_dvec(radf_fac1);

        if (pcrd->params.eGeom == epullgCYL)
        {
            /* pref will be the same group for all pull coordinates */
            const pull_group_work_t &pref  = pull->group[pcrd->params.group[0]];
            const pull_group_work_t &pgrp  = pull->group[pcrd->params.group[1]];
            pull_group_work_t       &pdyna = pull->dyna[c];

            rvec direction;
            copy_dvec_to_rvec(pcrd->spatialData.vec, direction);

            /* Since we have not calculated the COM of the cylinder group yet,
             * we calculate distances with respect to the location of the pull
             * group minus the reference position along the vector.
             * Here we already have the COM of the pull group. This resolves
             * any PBC issues and we don't need to use a PBC atom here.
             */
            if (pcrd->params.rate != 0)
            {
                /* With rate != 0, value_ref is time dependent; with rate == 0
                 * it was set at initialization. */
                pcrd->value_ref = pcrd->params.init + pcrd->params.rate*t;
            }

            rvec reference;
            for (int m = 0; m < DIM; m++)
            {
                reference[m] = pgrp.x[m] - pcrd->spatialData.vec[m]*pcrd->value_ref;
            }

            auto localAtomIndices = pref.atomSet.localIndex();

            /* This actually only needs to be done at init or DD time,
             * but resizing with the same size does not cause much overhead.
             */
            pdyna.localWeights.resize(localAtomIndices.size());
            pdyna.mdw.resize(localAtomIndices.size());
            pdyna.dv.resize(localAtomIndices.size());

            /* loop over all atoms in the main ref group */
            for (gmx::index indexInSet = 0; indexInSet < localAtomIndices.ssize(); indexInSet++)
            {
                int  atomIndex = localAtomIndices[indexInSet];
                rvec dx;
                pbc_dx_aiuc(pbc, x[atomIndex], reference, dx);
                double axialLocation = iprod(direction, dx);
                dvec   radialLocation;
                double dr2 = 0;
                for (int m = 0; m < DIM; m++)
                {
                    /* Determine the radial components */
                    radialLocation[m] = dx[m] - axialLocation*direction[m];
                    dr2              += gmx::square(radialLocation[m]);
                }
                double dr2_rel = dr2*inv_cyl_r2;

                if (dr2_rel < 1)
                {
                    /* add atom to sum of COM and to weight array */

                    double mass = md->massT[atomIndex];
                    /* The radial weight function is 1-2x^2+x^4,
                     * where x=r/cylinder_r. Since this function depends
                     * on the radial component, we also get radial forces
                     * on both groups.
                     */
                    double weight    = 1 + (-2 + dr2_rel)*dr2_rel;
                    double dweight_r = (-4 + 4*dr2_rel)*inv_cyl_r2;
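                    /* Explanatory note on the two formulas above:
                     * with u = dr2_rel = (r/cylinder_r)^2 the weight is
                     *     w(u) = 1 - 2u + u^2 = (1 - u)^2,
                     * so w reaches zero with zero slope at r = cylinder_r.
                     * dweight_r is (1/r) dw/dr:
                     *     dw/dr = (dw/du)(du/dr) = (2u - 2)*(2r/R^2)
                     *     (1/r) dw/dr = (-4 + 4u)/R^2 = (-4 + 4*dr2_rel)*inv_cyl_r2.
                     * The factor 1/r is absorbed because the force acts along
                     * radialLocation, whose length is r.
                     */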
                    pdyna.localWeights[indexInSet] = weight;
                    sum_a                         += mass*weight*axialLocation;
                    wmass                         += mass*weight;
                    wwmass                        += mass*weight*weight;
                    dvec mdw;
                    dsvmul(mass*dweight_r, radialLocation, mdw);
                    copy_dvec(mdw, pdyna.mdw[indexInSet]);
                    /* Currently we only have the axial component of the
                     * offset from the cylinder COM up to an unknown offset.
                     * We add this offset after the reduction needed
                     * for determining the COM of the cylinder group.
                     */
                    pdyna.dv[indexInSet] = axialLocation;
                    for (int m = 0; m < DIM; m++)
                    {
                        radf_fac0[m] += mdw[m];
                        radf_fac1[m] += mdw[m]*axialLocation;
                    }
                }
                else
                {
                    pdyna.localWeights[indexInSet] = 0;
                }
            }
        }

        auto buffer = gmx::arrayRefFromArray(comm->cylinderBuffer.data() + c*c_cylinderBufferStride, c_cylinderBufferStride);

        buffer[0] = wmass;
        buffer[1] = wwmass;
        buffer[2] = sum_a;

        buffer[3] = radf_fac0[XX];
        buffer[4] = radf_fac0[YY];
        buffer[5] = radf_fac0[ZZ];

        buffer[6] = radf_fac1[XX];
        buffer[7] = radf_fac1[YY];
        buffer[8] = radf_fac1[ZZ];
    }
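
    /* Buffer layout note: each coordinate occupies one stride of
     * cylinderBuffer (c_cylinderBufferStride entries, 9 as stored above):
     *     [0] wmass, [1] wwmass, [2] sum_a,
     *     [3..5] radf_fac0 (XX,YY,ZZ), [6..8] radf_fac1 (XX,YY,ZZ).
     * The same order is used when reading the buffer back after the
     * reduction below.
     */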

    if (cr != nullptr && PAR(cr))
    {
        /* Sum the contributions over the ranks */
        pullAllReduce(cr, comm, pull->coord.size()*c_cylinderBufferStride,
                      comm->cylinderBuffer.data());
    }

    for (size_t c = 0; c < pull->coord.size(); c++)
    {
        pull_coord_work_t *pcrd;

        pcrd = &pull->coord[c];

        if (pcrd->params.eGeom == epullgCYL)
        {
            pull_group_work_t    *pdyna       = &pull->dyna[c];
            pull_group_work_t    *pgrp        = &pull->group[pcrd->params.group[1]];
            PullCoordSpatialData &spatialData = pcrd->spatialData;

            auto   buffer = gmx::constArrayRefFromArray(comm->cylinderBuffer.data() + c*c_cylinderBufferStride, c_cylinderBufferStride);
            double wmass  = buffer[0];
            double wwmass = buffer[1];
            pdyna->mwscale = 1.0/wmass;
            /* Cylinder pulling can't be used with constraints, but we set
             * wscale and invtm anyhow, in case someone would like to use them.
             */
            pdyna->wscale = wmass/wwmass;
            pdyna->invtm  = wwmass/(wmass*wmass);

            /* We store the deviation of the COM from the reference location
             * used above, since we need it when we apply the radial forces
             * to the atoms in the cylinder group.
             */
            spatialData.cyl_dev = 0;
            for (int m = 0; m < DIM; m++)
            {
                double reference     = pgrp->x[m] - spatialData.vec[m]*pcrd->value_ref;
                double dist          = -spatialData.vec[m]*buffer[2]*pdyna->mwscale;
                pdyna->x[m]          = reference - dist;
                spatialData.cyl_dev += dist;
            }

            /* Now we know the exact COM of the cylinder reference group,
             * we can determine the radial force factor (ffrad) that when
             * multiplied with the axial pull force will give the radial
             * force on the pulled (non-cylinder) group.
             */
            for (int m = 0; m < DIM; m++)
            {
                spatialData.ffrad[m] = (buffer[6 + m] +
                                        buffer[3 + m]*spatialData.cyl_dev)/wmass;
            }

            if (debug)
            {
                fprintf(debug, "Pull cylinder group %zu:%8.3f%8.3f%8.3f m:%8.3f\n",
                        c, pdyna->x[0], pdyna->x[1],
                        pdyna->x[2], 1.0/pdyna->invtm);
                fprintf(debug, "ffrad %8.3f %8.3f %8.3f\n",
                        spatialData.ffrad[XX], spatialData.ffrad[YY], spatialData.ffrad[ZZ]);
            }
        }
    }
}

/* As atan2, but returns the angle in the range [0, 2pi) instead of (-pi, pi] */
static double atan2_0_2pi(double y, double x)
{
    double a;

    a = atan2(y, x);
    if (a < 0)
    {
        a += 2.0*M_PI;
    }
    return a;
}
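
/* Example of the difference (explanatory note): atan2(-1e-6, 1) is a small
 * negative angle, whereas atan2_0_2pi(-1e-6, 1) is just below 2*pi. The
 * cosine-weighting code below divides the result by twopi_box, so positions
 * always land in [0, box_length) instead of going negative.
 */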

static void sum_com_part(const pull_group_work_t *pgrp,
                         int ind_start, int ind_end,
                         const rvec *x, const rvec *xp,
                         const real *mass,
                         const t_pbc *pbc,
                         const rvec x_pbc,
                         ComSums *sum_com)
{
    double sum_wm   = 0;
    double sum_wwm  = 0;
    dvec   sum_wmx  = { 0, 0, 0 };
    dvec   sum_wmxp = { 0, 0, 0 };

    auto localAtomIndices = pgrp->atomSet.localIndex();
    for (int i = ind_start; i < ind_end; i++)
    {
        int  ii = localAtomIndices[i];
        real wm;
        if (pgrp->localWeights.empty())
        {
            wm      = mass[ii];
            sum_wm += wm;
        }
        else
        {
            real w;

            w        = pgrp->localWeights[i];
            wm       = w*mass[ii];
            sum_wm  += wm;
            sum_wwm += wm*w;
        }
        if (pgrp->epgrppbc == epgrppbcNONE)
        {
            /* Plain COM: sum the coordinates */
            for (int d = 0; d < DIM; d++)
            {
                sum_wmx[d] += wm*x[ii][d];
            }
            if (xp)
            {
                for (int d = 0; d < DIM; d++)
                {
                    sum_wmxp[d] += wm*xp[ii][d];
                }
            }
        }
        else
        {
            rvec dx;

            /* Sum the difference with the reference atom */
            pbc_dx(pbc, x[ii], x_pbc, dx);
            for (int d = 0; d < DIM; d++)
            {
                sum_wmx[d] += wm*dx[d];
            }
            if (xp)
            {
                /* For xp add the difference between xp and x to dx,
                 * such that we use the same periodic image,
                 * also when xp has a large displacement.
                 */
                for (int d = 0; d < DIM; d++)
                {
                    sum_wmxp[d] += wm*(dx[d] + xp[ii][d] - x[ii][d]);
                }
            }
        }
    }

    sum_com->sum_wm  = sum_wm;
    sum_com->sum_wwm = sum_wwm;
    copy_dvec(sum_wmx, sum_com->sum_wmx);
    if (xp)
    {
        copy_dvec(sum_wmxp, sum_com->sum_wmxp);
    }
}
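
/* Explanatory note on the PBC branch above: with a reference point x_pbc,
 * the COM is accumulated as weighted displacements dx from x_pbc, so the
 * caller reconstructs it as
 *     com = x_pbc + sum_i(w_i m_i dx_i) / sum_i(w_i m_i),
 * which is only well defined when every atom lies within half a box of
 * x_pbc; pullCheckPbcWithinGroups() below verifies this margin.
 */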

static void sum_com_part_cosweight(const pull_group_work_t *pgrp,
                                   int ind_start, int ind_end,
                                   int cosdim, real twopi_box,
                                   const rvec *x, const rvec *xp,
                                   const real *mass,
                                   ComSums *sum_com)
{
    /* Cosine weighting geometry */
    double sum_cm  = 0;
    double sum_sm  = 0;
    double sum_ccm = 0;
    double sum_csm = 0;
    double sum_ssm = 0;
    double sum_cmp = 0;
    double sum_smp = 0;

    auto localAtomIndices = pgrp->atomSet.localIndex();

    for (int i = ind_start; i < ind_end; i++)
    {
        int  ii = localAtomIndices[i];
        real m  = mass[ii];
        /* Determine cos and sin sums */
        real cw = std::cos(x[ii][cosdim]*twopi_box);
        real sw = std::sin(x[ii][cosdim]*twopi_box);
        sum_cm  += static_cast<double>(cw*m);
        sum_sm  += static_cast<double>(sw*m);
        sum_ccm += static_cast<double>(cw*cw*m);
        sum_csm += static_cast<double>(cw*sw*m);
        sum_ssm += static_cast<double>(sw*sw*m);

        if (xp != nullptr)
        {
            real cw = std::cos(xp[ii][cosdim]*twopi_box);
            real sw = std::sin(xp[ii][cosdim]*twopi_box);
            sum_cmp += static_cast<double>(cw*m);
            sum_smp += static_cast<double>(sw*m);
        }
    }

    sum_com->sum_cm  = sum_cm;
    sum_com->sum_sm  = sum_sm;
    sum_com->sum_ccm = sum_ccm;
    sum_com->sum_csm = sum_csm;
    sum_com->sum_ssm = sum_ssm;
    sum_com->sum_cmp = sum_cmp;
    sum_com->sum_smp = sum_smp;
}

/* calculates center of mass of selection index from all coordinates x */
void pull_calc_coms(const t_commrec *cr,
                    pull_t          *pull,
                    const t_mdatoms *md,
                    t_pbc           *pbc,
                    double           t,
                    const rvec       x[],
                    rvec            *xp)
{
    real         twopi_box = 0;
    pull_comm_t *comm      = &pull->comm;

    GMX_ASSERT(comm->pbcAtomBuffer.size() == pull->group.size(),
               "pbcAtomBuffer should have size number of groups");
    GMX_ASSERT(comm->comBuffer.size() == pull->group.size()*c_comBufferStride,
               "comBuffer should have size #group*c_comBufferStride");

    if (pull->bRefAt && pull->bSetPBCatoms)
    {
        pull_set_pbcatoms(cr, pull, x, comm->pbcAtomBuffer);

        if (cr != nullptr && DOMAINDECOMP(cr))
        {
            /* We can keep these PBC reference coordinates fixed for nstlist
             * steps, since atoms won't jump over PBC.
             * This avoids a global reduction at the next nstlist-1 steps.
             * Note that the exact values of the pbc reference coordinates
             * are irrelevant, as long as all atoms in the group are within
             * half a box distance of the reference coordinate.
             */
            pull->bSetPBCatoms = FALSE;
        }
    }

    if (pull->cosdim >= 0)
    {
        assert(pull->npbcdim <= DIM);

        for (int m = pull->cosdim + 1; m < pull->npbcdim; m++)
        {
            if (pbc->box[m][pull->cosdim] != 0)
            {
                gmx_fatal(FARGS, "Can not do cosine weighting for triclinic dimensions");
            }
        }
        twopi_box = 2.0*M_PI/pbc->box[pull->cosdim][pull->cosdim];
    }

    for (size_t g = 0; g < pull->group.size(); g++)
    {
        pull_group_work_t *pgrp = &pull->group[g];

        /* Cosine-weighted COMs behave differently from all other weighted COMs
         * in the sense that the weights depend on instantaneous coordinates,
         * not on pre-set weights. Thus we resize the local weight buffer here.
         */
        if (pgrp->epgrppbc == epgrppbcCOS)
        {
            pgrp->localWeights.resize(pgrp->atomSet.localIndex().size());
        }

        auto comBuffer =
            gmx::arrayRefFromArray(comm->comBuffer.data() + g*c_comBufferStride, c_comBufferStride);

        if (pgrp->needToCalcCom)
        {
            if (pgrp->epgrppbc != epgrppbcCOS)
            {
                rvec x_pbc = { 0, 0, 0 };

                switch (pgrp->epgrppbc)
                {
                    case epgrppbcREFAT:
                        /* Set the pbc atom */
                        copy_rvec(comm->pbcAtomBuffer[g], x_pbc);
                        break;
                    case epgrppbcPREVSTEPCOM:
                        /* Set the pbc reference to the COM of the group of the last step */
                        copy_dvec_to_rvec(pgrp->x_prev_step, comm->pbcAtomBuffer[g]);
                        copy_dvec_to_rvec(pgrp->x_prev_step, x_pbc);
                        break;
                    default:
                        break;
                }

                /* The final sums should end up in comSums[0] */
                ComSums &comSumsTotal = pull->comSums[0];

                /* If we have a single-atom group the mass is irrelevant, so
                 * we can remove the mass factor to avoid division by zero.
                 * Note that with constraint pulling the mass does matter, but
                 * in that case a check group mass != 0 has been done before.
                 */
                if (pgrp->params.nat == 1 &&
                    pgrp->atomSet.numAtomsLocal() == 1 &&
                    md->massT[pgrp->atomSet.localIndex()[0]] == 0)
                {
                    GMX_ASSERT(xp == nullptr, "We should not have groups with zero mass with constraints, i.e. xp!=NULL");

                    /* Copy the single atom coordinate */
                    for (int d = 0; d < DIM; d++)
                    {
                        comSumsTotal.sum_wmx[d] = x[pgrp->atomSet.localIndex()[0]][d];
                    }
                    /* Set all mass factors to 1 to get the correct COM */
                    comSumsTotal.sum_wm  = 1;
                    comSumsTotal.sum_wwm = 1;
                }
                else if (pgrp->atomSet.numAtomsLocal() <= c_pullMaxNumLocalAtomsSingleThreaded)
                {
                    sum_com_part(pgrp, 0, pgrp->atomSet.numAtomsLocal(),
                                 x, xp, md->massT,
                                 pbc, x_pbc,
                                 &comSumsTotal);
                }
                else
                {
#pragma omp parallel for num_threads(pull->nthreads) schedule(static)
                    for (int t = 0; t < pull->nthreads; t++)
                    {
                        int ind_start = (pgrp->atomSet.numAtomsLocal()*(t + 0))/pull->nthreads;
                        int ind_end   = (pgrp->atomSet.numAtomsLocal()*(t + 1))/pull->nthreads;
                        sum_com_part(pgrp, ind_start, ind_end,
                                     x, xp, md->massT,
                                     pbc, x_pbc,
                                     &pull->comSums[t]);
                    }

                    /* Reduce the thread contributions to sum_com[0] */
                    for (int t = 1; t < pull->nthreads; t++)
                    {
                        comSumsTotal.sum_wm  += pull->comSums[t].sum_wm;
                        comSumsTotal.sum_wwm += pull->comSums[t].sum_wwm;
                        dvec_inc(comSumsTotal.sum_wmx, pull->comSums[t].sum_wmx);
                        dvec_inc(comSumsTotal.sum_wmxp, pull->comSums[t].sum_wmxp);
                    }
                }

                if (pgrp->localWeights.empty())
                {
                    comSumsTotal.sum_wwm = comSumsTotal.sum_wm;
                }

                /* Copy local sums to a buffer for global summing */
                copy_dvec(comSumsTotal.sum_wmx, comBuffer[0]);

                copy_dvec(comSumsTotal.sum_wmxp, comBuffer[1]);

                comBuffer[2][0] = comSumsTotal.sum_wm;
                comBuffer[2][1] = comSumsTotal.sum_wwm;
                comBuffer[2][2] = 0;
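
                /* Buffer layout note: each group's slice of comBuffer is
                 * c_comBufferStride dvec entries. With plain weighting, as
                 * stored above:
                 *     [0] = sum w*m*x, [1] = sum w*m*xp,
                 *     [2] = (sum w*m, sum w*w*m, 0).
                 * The cosine-weighting branch below reuses the same slots
                 * for its cos/sin sums.
                 */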
            }
            else
            {
                /* Cosine weighting geometry.
                 * This uses a slab of the system, thus we always have many
                 * atoms in the pull groups. Therefore, always use threads.
                 */
#pragma omp parallel for num_threads(pull->nthreads) schedule(static)
                for (int t = 0; t < pull->nthreads; t++)
                {
                    int ind_start = (pgrp->atomSet.numAtomsLocal()*(t + 0))/pull->nthreads;
                    int ind_end   = (pgrp->atomSet.numAtomsLocal()*(t + 1))/pull->nthreads;
                    sum_com_part_cosweight(pgrp, ind_start, ind_end,
                                           pull->cosdim, twopi_box,
                                           x, xp, md->massT,
                                           &pull->comSums[t]);
                }

                /* Reduce the thread contributions to comSums[0] */
                ComSums &comSumsTotal = pull->comSums[0];
                for (int t = 1; t < pull->nthreads; t++)
                {
                    comSumsTotal.sum_cm  += pull->comSums[t].sum_cm;
                    comSumsTotal.sum_sm  += pull->comSums[t].sum_sm;
                    comSumsTotal.sum_ccm += pull->comSums[t].sum_ccm;
                    comSumsTotal.sum_csm += pull->comSums[t].sum_csm;
                    comSumsTotal.sum_ssm += pull->comSums[t].sum_ssm;
                    comSumsTotal.sum_cmp += pull->comSums[t].sum_cmp;
                    comSumsTotal.sum_smp += pull->comSums[t].sum_smp;
                }

                /* Copy local sums to a buffer for global summing */
                comBuffer[0][0] = comSumsTotal.sum_cm;
                comBuffer[0][1] = comSumsTotal.sum_sm;
                comBuffer[0][2] = 0;
                comBuffer[1][0] = comSumsTotal.sum_ccm;
                comBuffer[1][1] = comSumsTotal.sum_csm;
                comBuffer[1][2] = comSumsTotal.sum_ssm;
                comBuffer[2][0] = comSumsTotal.sum_cmp;
                comBuffer[2][1] = comSumsTotal.sum_smp;
                comBuffer[2][2] = 0;
            }
        }
        else
        {
            clear_dvec(comBuffer[0]);
            clear_dvec(comBuffer[1]);
            clear_dvec(comBuffer[2]);
        }
    }

    pullAllReduce(cr, comm, pull->group.size()*c_comBufferStride*DIM,
                  static_cast<double *>(comm->comBuffer[0]));

    for (size_t g = 0; g < pull->group.size(); g++)
    {
        pull_group_work_t *pgrp;

        pgrp = &pull->group[g];
        if (pgrp->needToCalcCom)
        {
            GMX_ASSERT(pgrp->params.nat > 0, "Normal pull groups should have atoms, only group 0, which should have bCalcCom=FALSE has nat=0");

            const auto comBuffer = gmx::constArrayRefFromArray(comm->comBuffer.data() + g*c_comBufferStride, c_comBufferStride);

            if (pgrp->epgrppbc != epgrppbcCOS)
            {
                double wmass, wwmass;

                /* Determine the inverse mass */
                wmass         = comBuffer[2][0];
                wwmass        = comBuffer[2][1];
                pgrp->mwscale = 1.0/wmass;
                /* invtm==0 signals a frozen group, so then we should keep it zero */
                if (pgrp->invtm != 0)
                {
                    pgrp->wscale = wmass/wwmass;
                    pgrp->invtm  = wwmass/(wmass*wmass);
                }
                /* Divide by the total mass */
                for (int m = 0; m < DIM; m++)
                {
                    pgrp->x[m] = comBuffer[0][m]*pgrp->mwscale;
                    if (xp)
                    {
                        pgrp->xp[m] = comBuffer[1][m]*pgrp->mwscale;
                    }
                    if (pgrp->epgrppbc == epgrppbcREFAT || pgrp->epgrppbc == epgrppbcPREVSTEPCOM)
                    {
                        pgrp->x[m] += comm->pbcAtomBuffer[g][m];
                        if (xp)
                        {
                            pgrp->xp[m] += comm->pbcAtomBuffer[g][m];
                        }
                    }
                }
            }
            else
            {
                /* Cosine weighting geometry */
                double csw, snw, wmass, wwmass;

                /* Determine the optimal location of the cosine weight */
                csw                   = comBuffer[0][0];
                snw                   = comBuffer[0][1];
                pgrp->x[pull->cosdim] = atan2_0_2pi(snw, csw)/twopi_box;
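                /* Explanatory note: for a slab-like (cosine-weighted) group
                 * the COM-like position x0 maximizes
                 *     sum_i m_i cos(k*(x_i - x0)),  k = twopi_box.
                 * Setting the derivative to zero gives
                 *     tan(k*x0) = sum_i m_i sin(k*x_i) / sum_i m_i cos(k*x_i)
                 *               = snw/csw,
                 * which is the atan2 expression above; atan2_0_2pi keeps x0
                 * in [0, box_length).
                 */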
                /* Determine the effective masses from the cos/sin sums */
                wmass  = sqrt(csw*csw + snw*snw);
                wwmass = (comBuffer[1][0]*csw*csw +
                          comBuffer[1][1]*csw*snw +
                          comBuffer[1][2]*snw*snw)/(wmass*wmass);

                pgrp->mwscale = 1.0/wmass;
                pgrp->wscale  = wmass/wwmass;
                pgrp->invtm   = wwmass/(wmass*wmass);
                /* Set the weights for the local atoms */
                csw *= pgrp->invtm;
                snw *= pgrp->invtm;
                for (size_t i = 0; i < pgrp->atomSet.numAtomsLocal(); i++)
                {
                    int ii = pgrp->atomSet.localIndex()[i];
                    pgrp->localWeights[i] = csw*std::cos(twopi_box*x[ii][pull->cosdim]) +
                                            snw*std::sin(twopi_box*x[ii][pull->cosdim]);
                }
                if (xp)
                {
                    csw                    = comBuffer[2][0];
                    snw                    = comBuffer[2][1];
                    pgrp->xp[pull->cosdim] = atan2_0_2pi(snw, csw)/twopi_box;
                }
            }
            if (debug)
            {
                fprintf(debug, "Pull group %zu wmass %f invtm %f\n",
                        g, 1.0/pgrp->mwscale, pgrp->invtm);
            }
        }
    }

    if (pull->bCylinder)
    {
        /* Calculate the COMs for the cylinder reference groups */
        make_cyl_refgrps(cr, pull, md, pbc, t, x);
    }
}

using BoolVec = gmx::BasicVector<bool>;

/* Returns whether the pull group obeys the PBC restrictions */
static bool pullGroupObeysPbcRestrictions(const pull_group_work_t &group,
                                          const BoolVec           &dimUsed,
                                          const rvec              *x,
                                          const t_pbc             &pbc,
                                          const gmx::RVec         &x_pbc,
                                          const real               pbcMargin)
{
    /* Determine which dimensions are relevant for PBC */
    BoolVec dimUsesPbc       = { false, false, false };
    bool    pbcIsRectangular = true;
    for (int d = 0; d < pbc.ndim_ePBC; d++)
    {
        if (dimUsed[d])
        {
            dimUsesPbc[d] = true;
            /* All dimensions with a box-vector component along d are
             * involved in PBC along d as well */
            for (int d2 = d + 1; d2 < pbc.ndim_ePBC; d2++)
            {
                if (pbc.box[d2][d] != 0)
                {
                    dimUsesPbc[d2]   = true;
                    pbcIsRectangular = false;
                }
            }
        }
    }

    rvec marginPerDim    = {};
    real marginDistance2 = 0;
    if (pbcIsRectangular)
    {
        /* Use margins for dimensions independently */
        for (int d = 0; d < pbc.ndim_ePBC; d++)
        {
            marginPerDim[d] = pbcMargin*pbc.hbox_diag[d];
        }
    }
    else
    {
        /* Check the total distance along the relevant dimensions */
        for (int d = 0; d < pbc.ndim_ePBC; d++)
        {
            if (dimUsesPbc[d])
            {
                marginDistance2 += pbcMargin*gmx::square(0.5)*norm2(pbc.box[d]);
            }
        }
    }

    auto localAtomIndices = group.atomSet.localIndex();
    for (gmx::index indexInSet = 0; indexInSet < localAtomIndices.ssize(); indexInSet++)
    {
        rvec dx;
        pbc_dx(&pbc, x[localAtomIndices[indexInSet]], x_pbc, dx);

        bool atomIsTooFar = false;
        if (pbcIsRectangular)
        {
            for (int d = 0; d < pbc.ndim_ePBC; d++)
            {
                if (dimUsesPbc[d] && (dx[d] < -marginPerDim[d] ||
                                      dx[d] >  marginPerDim[d]))
                {
                    atomIsTooFar = true;
                }
            }
        }
        else
        {
            real pbcDistance2 = 0;
            for (int d = 0; d < pbc.ndim_ePBC; d++)
            {
                if (dimUsesPbc[d])
                {
                    pbcDistance2 += gmx::square(dx[d]);
                }
            }
            atomIsTooFar = (pbcDistance2 > marginDistance2);
        }
        if (atomIsTooFar)
        {
            return false;
        }
    }

    return true;
}

int pullCheckPbcWithinGroups(const pull_t &pull,
                             const rvec   *x,
                             const t_pbc  &pbc,
                             real          pbcMargin)
{
    if (pbc.ePBC == epbcNONE)
    {
        return -1;
    }

    /* Determine what dimensions are used for each group by pull coordinates */
    std::vector<BoolVec> dimUsed(pull.group.size(), { false, false, false });
    for (size_t c = 0; c < pull.coord.size(); c++)
    {
        const t_pull_coord &coordParams = pull.coord[c].params;
        for (int groupIndex = 0; groupIndex < coordParams.ngroup; groupIndex++)
        {
            for (int d = 0; d < DIM; d++)
            {
                if (coordParams.dim[d] &&
                    !(coordParams.eGeom == epullgCYL && groupIndex == 0))
                {
                    dimUsed[coordParams.group[groupIndex]][d] = true;
                }
            }
        }
    }

    /* Check PBC for every group that uses a PBC reference atom treatment */
    for (size_t g = 0; g < pull.group.size(); g++)
    {
        const pull_group_work_t &group = pull.group[g];
        if ((group.epgrppbc == epgrppbcREFAT || group.epgrppbc == epgrppbcPREVSTEPCOM) &&
            !pullGroupObeysPbcRestrictions(group, dimUsed[g], x, pbc, pull.comm.pbcAtomBuffer[g], pbcMargin))
        {
            return g;
        }
    }

    return -1;
}

bool pullCheckPbcWithinGroup(const pull_t                  &pull,
                             gmx::ArrayRef<const gmx::RVec> x,
                             const t_pbc                   &pbc,
                             int                            groupNr,
                             real                           pbcMargin)
{
    if (pbc.ePBC == epbcNONE)
    {
        return true;
    }
    GMX_ASSERT(groupNr < gmx::ssize(pull.group), "groupNr is out of range");

    /* Check PBC if the group uses a PBC reference atom treatment. */
    const pull_group_work_t &group = pull.group[groupNr];
    if (group.epgrppbc != epgrppbcREFAT && group.epgrppbc != epgrppbcPREVSTEPCOM)
    {
        return true;
    }

    /* Determine what dimensions are used for each group by pull coordinates */
    BoolVec dimUsed = { false, false, false };
    for (size_t c = 0; c < pull.coord.size(); c++)
    {
        const t_pull_coord &coordParams = pull.coord[c].params;
        for (int groupIndex = 0; groupIndex < coordParams.ngroup; groupIndex++)
        {
            if (coordParams.group[groupIndex] == groupNr)
            {
                for (int d = 0; d < DIM; d++)
                {
                    if (coordParams.dim[d] &&
                        !(coordParams.eGeom == epullgCYL && groupIndex == 0))
                    {
                        dimUsed[d] = true;
                    }
                }
            }
        }
    }

    return (pullGroupObeysPbcRestrictions(group, dimUsed, as_rvec_array(x.data()), pbc, pull.comm.pbcAtomBuffer[groupNr], pbcMargin));
}

void setPrevStepPullComFromState(struct pull_t *pull, const t_state *state)
{
    for (size_t g = 0; g < pull->group.size(); g++)
    {
        for (int j = 0; j < DIM; j++)
        {
            pull->group[g].x_prev_step[j] = state->pull_com_prev_step[g*DIM + j];
        }
    }
}

void updatePrevStepPullCom(struct pull_t *pull, t_state *state)
{
    for (size_t g = 0; g < pull->group.size(); g++)
    {
        if (pull->group[g].needToCalcCom)
        {
            for (int j = 0; j < DIM; j++)
            {
                pull->group[g].x_prev_step[j]        = pull->group[g].x[j];
                state->pull_com_prev_step[g*DIM + j] = pull->group[g].x[j];
            }
        }
    }
}

void allocStatePrevStepPullCom(t_state *state, const pull_t *pull)
{
    if (!pull)
    {
        state->pull_com_prev_step.clear();
        return;
    }
    size_t ngroup = pull->group.size();
    if (state->pull_com_prev_step.size()/DIM != ngroup)
    {
        state->pull_com_prev_step.resize(ngroup * DIM, NAN);
    }
}

void initPullComFromPrevStep(const t_commrec *cr,
                             pull_t          *pull,
                             const t_mdatoms *md,
                             t_pbc           *pbc,
                             const rvec       x[])
{
    pull_comm_t *comm   = &pull->comm;
    size_t       ngroup = pull->group.size();

    if (!comm->bParticipate)
    {
        return;
    }

    GMX_ASSERT(comm->pbcAtomBuffer.size() == pull->group.size(),
               "pbcAtomBuffer should have size number of groups");
    GMX_ASSERT(comm->comBuffer.size() == pull->group.size()*c_comBufferStride,
               "comBuffer should have size #group*c_comBufferStride");

    pull_set_pbcatoms(cr, pull, x, comm->pbcAtomBuffer);

    for (size_t g = 0; g < ngroup; g++)
    {
        pull_group_work_t *pgrp;

        pgrp = &pull->group[g];
        if (pgrp->needToCalcCom && pgrp->epgrppbc == epgrppbcPREVSTEPCOM)
        {
            GMX_ASSERT(pgrp->params.nat > 1, "Groups with no atoms, or only one atom, should not "
                       "use the COM from the previous step as reference.");

            rvec x_pbc = { 0, 0, 0 };
            copy_rvec(comm->pbcAtomBuffer[g], x_pbc);

            if (debug)
            {
                fprintf(debug, "Initialising prev step COM of pull group %zu. x_pbc =", g);
                for (int m = 0; m < DIM; m++)
                {
                    fprintf(debug, " %f", x_pbc[m]);
                }
                fprintf(debug, "\n");
            }

            /* The following is to a large extent similar to pull_calc_coms() */

            /* The final sums should end up in sum_com[0] */
            ComSums &comSumsTotal = pull->comSums[0];

            if (pgrp->atomSet.numAtomsLocal() <= c_pullMaxNumLocalAtomsSingleThreaded)
            {
                sum_com_part(pgrp, 0, pgrp->atomSet.numAtomsLocal(),
                             x, nullptr, md->massT,
                             pbc, x_pbc,
                             &comSumsTotal);
            }
            else
            {
#pragma omp parallel for num_threads(pull->nthreads) schedule(static)
                for (int t = 0; t < pull->nthreads; t++)
                {
                    int ind_start = (pgrp->atomSet.numAtomsLocal()*(t + 0))/pull->nthreads;
                    int ind_end   = (pgrp->atomSet.numAtomsLocal()*(t + 1))/pull->nthreads;
                    sum_com_part(pgrp, ind_start, ind_end,
                                 x, nullptr, md->massT,
                                 pbc, x_pbc,
                                 &pull->comSums[t]);
                }

                /* Reduce the thread contributions to sum_com[0] */
                for (int t = 1; t < pull->nthreads; t++)
                {
                    comSumsTotal.sum_wm  += pull->comSums[t].sum_wm;
                    comSumsTotal.sum_wwm += pull->comSums[t].sum_wwm;
                    dvec_inc(comSumsTotal.sum_wmx, pull->comSums[t].sum_wmx);
                    dvec_inc(comSumsTotal.sum_wmxp, pull->comSums[t].sum_wmxp);
                }
            }

            if (pgrp->localWeights.empty())
            {
                comSumsTotal.sum_wwm = comSumsTotal.sum_wm;
            }

            /* Copy local sums to a buffer for global summing */
            auto localSums =
                gmx::arrayRefFromArray(comm->comBuffer.data() + g*c_comBufferStride, c_comBufferStride);

            localSums[0]    = comSumsTotal.sum_wmx;
            localSums[1]    = comSumsTotal.sum_wmxp;
            localSums[2][0] = comSumsTotal.sum_wm;
            localSums[2][1] = comSumsTotal.sum_wwm;
            localSums[2][2] = 0;
        }
    }

    pullAllReduce(cr, comm, ngroup*c_comBufferStride*DIM, static_cast<double *>(comm->comBuffer[0]));

    for (size_t g = 0; g < ngroup; g++)
    {
        pull_group_work_t *pgrp;

        pgrp = &pull->group[g];
        if (pgrp->needToCalcCom)
        {
            if (pgrp->epgrppbc == epgrppbcPREVSTEPCOM)
            {
                auto localSums =
                    gmx::arrayRefFromArray(comm->comBuffer.data() + g*c_comBufferStride, c_comBufferStride);
                double wmass, wwmass;

                /* Determine the inverse mass */
                wmass         = localSums[2][0];
                wwmass        = localSums[2][1];
                pgrp->mwscale = 1.0/wmass;
                /* invtm==0 signals a frozen group, so then we should keep it zero */
                if (pgrp->invtm != 0)
                {
                    pgrp->wscale = wmass/wwmass;
                    pgrp->invtm  = wwmass/(wmass*wmass);
                }
                /* Divide by the total mass */
                for (int m = 0; m < DIM; m++)
                {
                    pgrp->x[m]  = localSums[0][m]*pgrp->mwscale;
                    pgrp->x[m] += comm->pbcAtomBuffer[g][m];
                }

                if (debug)
                {
                    fprintf(debug, "Pull group %zu wmass %f invtm %f\n",
                            g, 1.0/pgrp->mwscale, pgrp->invtm);
                    fprintf(debug, "Initialising prev step COM of pull group %zu to", g);
                    for (int m = 0; m < DIM; m++)
                    {
                        fprintf(debug, " %f", pgrp->x[m]);
                    }
                    fprintf(debug, "\n");
                }
                copy_dvec(pgrp->x, pgrp->x_prev_step);
            }
        }
    }
}