/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2006,2007,2008,2009,2010,2012,2013,2014,2015,2017,2018, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief This file implements functions for domdec to use
 * while managing inter-atomic constraints.
 *
 * \author Berk Hess <hess@kth.se>
 * \ingroup module_domdec
 */
#include "gmxpre.h"

#include "domdec_specatomcomm.h"

#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_network.h"
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/domdec/ga2la.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"

#include "hash.h"
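
/* These routines communicate "special" atoms, i.e. atoms that neighboring
 * domain-decomposition cells request because they are needed for constraints
 * or virtual sites: setup_specat_communication() builds the send/receive
 * index lists, dd_move_x_specat() distributes the coordinates of those atoms
 * outward, and dd_move_f_specat() accumulates the resulting forces back onto
 * their home ranks in the reverse order.
 */
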
void dd_move_f_specat(gmx_domdec_t *dd, gmx_domdec_specat_comm_t *spac,
                      rvec *f, rvec *fshift)
{
    gmx_specatsend_t *spas;
    rvec             *vbuf;
    int               n, n0, n1, dim, dir;
    ivec              vis;
    int               is;
    gmx_bool          bPBC, bScrew;

    n = spac->at_end;
    for (int d = dd->ndim - 1; d >= 0; d--)
    {
        dim = dd->dim[d];
        if (dd->nc[dim] > 2)
        {
            /* Pulse the grid forward and backward */
            spas = spac->spas[d];
            n0   = spas[0].nrecv;
            n1   = spas[1].nrecv;
            n   -= n1 + n0;
            vbuf = as_rvec_array(spac->vbuf.data());
            /* Send and receive the coordinates */
            dd_sendrecv2_rvec(dd, d,
                              f + n + n1, n0, vbuf, spas[0].a.size(),
                              f + n, n1, vbuf + spas[0].a.size(),
                              spas[1].a.size());
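            /* The forward- and backward-pulse contributions now sit
             * back-to-back in vbuf; the loop below consumes them in that
             * same order, advancing vbuf one entry per listed atom.
             */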
            for (dir = 0; dir < 2; dir++)
            {
                bPBC   = ((dir == 0 && dd->ci[dim] == 0) ||
                          (dir == 1 && dd->ci[dim] == dd->nc[dim] - 1));
                bScrew = (bPBC && dd->bScrewPBC && dim == XX);

                spas = &spac->spas[d][dir];
                /* Sum the buffer into the required forces */
                if (!bPBC || (!bScrew && fshift == nullptr))
                {
                    for (int a : spas->a)
                    {
                        rvec_inc(f[a], *vbuf);
                        vbuf++;
                    }
                }
                else
                {
                    clear_ivec(vis);
                    vis[dim] = (dir == 0 ? 1 : -1);
                    is       = IVEC2IS(vis);
                    if (!bScrew)
                    {
                        /* Sum and add to shift forces */
                        for (int a : spas->a)
                        {
                            rvec_inc(f[a], *vbuf);
                            rvec_inc(fshift[is], *vbuf);
                            vbuf++;
                        }
                    }
                    else
                    {
                        /* Rotate the forces */
                        for (int a : spas->a)
                        {
                            f[a][XX] += (*vbuf)[XX];
                            f[a][YY] -= (*vbuf)[YY];
                            f[a][ZZ] -= (*vbuf)[ZZ];
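                            /* With screw PBC along x, atoms communicated
                             * across the periodic x boundary are rotated by
                             * 180 degrees about the x axis, so the y and z
                             * force components come back with flipped signs,
                             * which is why they are subtracted above rather
                             * than added.
                             */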
                            if (fshift)
                            {
                                rvec_inc(fshift[is], *vbuf);
                            }
                            vbuf++;
                        }
                    }
                }
            }
        }
        else
        {
            /* Two cells, so we only need to communicate one way */
            spas = &spac->spas[d][0];
            n   -= spas->nrecv;
            /* Send and receive the coordinates */
            ddSendrecv(dd, d, dddirForward,
                       f + n, spas->nrecv,
                       as_rvec_array(spac->vbuf.data()), spas->a.size());
            /* Sum the buffer into the required forces */
            if (dd->bScrewPBC && dim == XX &&
                (dd->ci[dim] == 0 ||
                 dd->ci[dim] == dd->nc[dim] - 1))
            {
                int i = 0;
                for (int a : spas->a)
                {
                    /* Rotate the force */
                    f[a][XX] += spac->vbuf[i][XX];
                    f[a][YY] -= spac->vbuf[i][YY];
                    f[a][ZZ] -= spac->vbuf[i][ZZ];
                    i++;
                }
            }
            else
            {
                int i = 0;
                for (int a : spas->a)
                {
                    rvec_inc(f[a], spac->vbuf[i]);
                    i++;
                }
            }
        }
    }
}
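
/* dd_move_x_specat() is the forward counterpart of dd_move_f_specat(): it
 * sends the coordinates of the requested special atoms outward along each
 * decomposition dimension. x0 always holds coordinates; x1 is an optional
 * second vector that is only treated as coordinates (i.e. shifted for PBC)
 * when bX1IsCoord is set.
 */
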
void dd_move_x_specat(gmx_domdec_t *dd, gmx_domdec_specat_comm_t *spac,
                      const matrix box,
                      rvec *x0,
                      rvec *x1, gmx_bool bX1IsCoord)
{
    gmx_specatsend_t *spas;
    int               nvec, v, n, nn, ns0, ns1, nr0, nr1, nr, d, dim, dir, i;
    gmx_bool          bPBC, bScrew = FALSE;
    rvec              shift = {0, 0, 0};

    nvec = 1;
    if (x1 != nullptr)
    {
        nvec++;
    }

    n = spac->at_start;
    for (d = 0; d < dd->ndim; d++)
    {
        dim = dd->dim[d];
        if (dd->nc[dim] > 2)
        {
            /* Pulse the grid forward and backward */
            rvec *vbuf = as_rvec_array(spac->vbuf.data());
            for (dir = 0; dir < 2; dir++)
            {
                if (dir == 0 && dd->ci[dim] == 0)
                {
                    bPBC   = TRUE;
                    bScrew = (dd->bScrewPBC && dim == XX);
                    copy_rvec(box[dim], shift);
                }
                else if (dir == 1 && dd->ci[dim] == dd->nc[dim] - 1)
                {
                    bPBC   = TRUE;
                    bScrew = (dd->bScrewPBC && dim == XX);
                    for (i = 0; i < DIM; i++)
                    {
                        shift[i] = -box[dim][i];
                    }
                }
                else
                {
                    bPBC   = FALSE;
                    bScrew = FALSE;
                }
                spas = &spac->spas[d][dir];
                for (v = 0; v < nvec; v++)
                {
                    rvec *x = (v == 0 ? x0 : x1);
                    /* Copy the required coordinates to the send buffer */
                    if (!bPBC || (v == 1 && !bX1IsCoord))
                    {
                        /* Only copy */
                        for (int a : spas->a)
                        {
                            copy_rvec(x[a], *vbuf);
                            vbuf++;
                        }
                    }
                    else if (!bScrew)
                    {
                        /* Shift coordinates */
                        for (int a : spas->a)
                        {
                            rvec_add(x[a], shift, *vbuf);
                            vbuf++;
                        }
                    }
                    else
                    {
                        /* Shift and rotate coordinates */
                        for (int a : spas->a)
                        {
                            (*vbuf)[XX] =               x[a][XX] + shift[XX];
                            (*vbuf)[YY] = box[YY][YY] - x[a][YY] + shift[YY];
                            (*vbuf)[ZZ] = box[ZZ][ZZ] - x[a][ZZ] + shift[ZZ];
                            vbuf++;
                        }
                    }
                }
            }
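            /* With screw PBC along x the image across the x boundary is
             * rotated by 180 degrees about x, so the packed y and z values
             * are mirrored in the box (box[YY][YY] - y, box[ZZ][ZZ] - z);
             * dd_move_f_specat() applies the matching sign flips to the
             * forces it receives back.
             */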
            /* Send and receive the coordinates */
            spas = spac->spas[d];
            ns0  = spas[0].a.size();
            nr0  = spas[0].nrecv;
            ns1  = spas[1].a.size();
            nr1  = spas[1].nrecv;
            if (nvec == 1)
            {
                rvec *vbuf = as_rvec_array(spac->vbuf.data());
                dd_sendrecv2_rvec(dd, d,
                                  vbuf + ns0, ns1, x0 + n, nr1,
                                  vbuf, ns0, x0 + n + nr1, nr0);
            }
            else
            {
                rvec *vbuf = as_rvec_array(spac->vbuf.data());
                /* Communicate both vectors in one buffer */
                rvec *rbuf = as_rvec_array(spac->vbuf2.data());
                dd_sendrecv2_rvec(dd, d,
                                  vbuf + 2*ns0, 2*ns1, rbuf, 2*nr1,
                                  vbuf, 2*ns0, rbuf + 2*nr1, 2*nr0);
                /* Split the buffer into the two vectors */
                nn = n;
                for (dir = 1; dir >= 0; dir--)
                {
                    nr = spas[dir].nrecv;
                    for (v = 0; v < 2; v++)
                    {
                        rvec *x = (v == 0 ? x0 : x1);
                        for (i = 0; i < nr; i++)
                        {
                            copy_rvec(*rbuf, x[nn + i]);
                            rbuf++;
                        }
                    }
                    nn += nr;
                }
            }
            n += nr0 + nr1;
        }
        else
        {
            spas = &spac->spas[d][0];
            /* Copy the required coordinates to the send buffer */
            rvec *vbuf = as_rvec_array(spac->vbuf.data());
            for (v = 0; v < nvec; v++)
            {
                rvec *x = (v == 0 ? x0 : x1);
                if (dd->bScrewPBC && dim == XX &&
                    (dd->ci[XX] == 0 || dd->ci[XX] == dd->nc[XX] - 1))
                {
                    /* Here we only perform the rotation, the rest of the pbc
                     * is handled in the constraint or vsite routines.
                     */
                    for (int a : spas->a)
                    {
                        (*vbuf)[XX] =               x[a][XX];
                        (*vbuf)[YY] = box[YY][YY] - x[a][YY];
                        (*vbuf)[ZZ] = box[ZZ][ZZ] - x[a][ZZ];
                        vbuf++;
                    }
                }
                else
                {
                    for (int a : spas->a)
                    {
                        copy_rvec(x[a], *vbuf);
                        vbuf++;
                    }
                }
            }
            /* Send and receive the coordinates */
            if (nvec == 1)
            {
                rvec *vbuf = as_rvec_array(spac->vbuf.data());
                ddSendrecv(dd, d, dddirBackward,
                           vbuf, spas->a.size(), x0 + n, spas->nrecv);
            }
            else
            {
                rvec *vbuf = as_rvec_array(spac->vbuf.data());
                /* Communicate both vectors in one buffer */
                rvec *rbuf = as_rvec_array(spac->vbuf2.data());
                ddSendrecv(dd, d, dddirBackward,
                           vbuf, 2*spas->a.size(), rbuf, 2*spas->nrecv);
                /* Split the buffer into the two vectors */
                nr = spas->nrecv;
                for (v = 0; v < 2; v++)
                {
                    rvec *x = (v == 0 ? x0 : x1);
                    for (i = 0; i < nr; i++)
                    {
                        copy_rvec(*rbuf, x[n + i]);
                        rbuf++;
                    }
                }
            }
            n += spas->nrecv;
        }
    }
}
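
/* setup_specat_communication() builds the communication setup that the two
 * routines above rely on. It works in two passes over the decomposition
 * dimensions: first the locally requested global atom indices in ireq are
 * pulsed outward so every cell learns what its neighbors need; then, in the
 * reverse order, each cell looks up which of the requested atoms it has
 * (home atoms or atoms it already received) and sends those indices back.
 * The return value is at_start plus the number of special atoms received,
 * and spac->at_start/at_end record that range.
 */
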
int setup_specat_communication(gmx_domdec_t             *dd,
                               std::vector<int>         *ireq,
                               gmx_domdec_specat_comm_t *spac,
                               gmx_hash_t               *ga2la_specat,
                               int                       at_start,
                               int                       vbuf_fac,
                               const char               *specat_type,
                               const char               *add_err)
{
    int               nsend[2], nlast, nsend_zero[2] = {0, 0}, *nsend_ptr;
    int               dim, ndir, nr, ns, nrecv_local, n0, start, indr, ind, buf[2];
    int               nat_tot_specat, nat_tot_prev;
    gmx_bool          bPBC;
    gmx_specatsend_t *spas;

    if (debug)
    {
        fprintf(debug, "Begin setup_specat_communication for %s\n", specat_type);
    }

    /* nsend[0]: the number of atoms requested by this node only,
     *           we communicate this for more efficient checks
     * nsend[1]: the total number of requested atoms
     */
    const int numRequested = ireq->size();
    nsend[0]               = ireq->size();
    nsend[1]               = nsend[0];
    nlast                  = nsend[1];
    for (int d = dd->ndim - 1; d >= 0; d--)
    {
        /* Pulse the grid forward and backward */
        dim  = dd->dim[d];
        bPBC = (dim < dd->npbcdim);
        if (dd->nc[dim] == 2)
        {
            /* Only 2 cells, so we only need to communicate once */
            ndir = 1;
        }
        else
        {
            ndir = 2;
        }
        for (int dir = 0; dir < ndir; dir++)
        {
            if (!bPBC &&
                dd->nc[dim] > 2 &&
                ((dir == 0 && dd->ci[dim] == dd->nc[dim] - 1) ||
                 (dir == 1 && dd->ci[dim] == 0)))
            {
                /* No pbc: the first/last cell should not request atoms */
                nsend_ptr = nsend_zero;
            }
            else
            {
                nsend_ptr = nsend;
            }
            /* Communicate the number of indices */
            ddSendrecv(dd, d, dir == 0 ? dddirForward : dddirBackward,
                       nsend_ptr, 2, spac->nreq[d][dir], 2);
            nr = spac->nreq[d][dir][1];
            ireq->resize(nlast + nr);
            /* Communicate the indices */
            ddSendrecv(dd, d, dir == 0 ? dddirForward : dddirBackward,
                       ireq->data(), nsend_ptr[1], ireq->data() + nlast, nr);
            nlast += nr;
        }
        nsend[1] = nlast;
    }
    if (debug)
    {
        fprintf(debug, "Communicated the counts\n");
    }
    /* Search for the requested atoms and communicate the indices we have */
    nat_tot_specat = at_start;
    nrecv_local    = 0;
    for (int d = 0; d < dd->ndim; d++)
    {
        /* Pulse the grid forward and backward */
        if (dd->dim[d] >= dd->npbcdim || dd->nc[dd->dim[d]] > 2)
        {
            ndir = 2;
        }
        else
        {
            ndir = 1;
        }
        nat_tot_prev = nat_tot_specat;
        for (int dir = ndir - 1; dir >= 0; dir--)
        {
            /* To avoid cost of clearing by resize(), we only increase size */
            if (static_cast<size_t>(nat_tot_specat) > spac->sendAtom.size())
            {
                /* Note: resize initializes new elements to false, which is actually needed here */
                spac->sendAtom.resize(nat_tot_specat);
            }
            spas = &spac->spas[d][dir];
            n0   = spac->nreq[d][dir][0];
            nr   = spac->nreq[d][dir][1];
            if (debug)
            {
                fprintf(debug, "dim=%d, dir=%d, searching for %d atoms\n",
                        d, dir, nr);
            }
            start    = nlast - nr;
            spas->a.clear();
            spac->ibuf.clear();
            nsend[0] = 0;
            for (int i = 0; i < nr; i++)
            {
                indr = (*ireq)[start + i];
                ind  = -1;
                /* Check if this is a home atom and if so ind will be set */
                if (!ga2la_get_home(dd->ga2la, indr, &ind))
                {
                    /* Search in the communicated atoms */
                    ind = gmx_hash_get_minone(ga2la_specat, indr);
                }
                if (ind >= 0)
                {
                    if (i < n0 || !spac->sendAtom[ind])
                    {
                        /* Store the local index so we know which coordinates
                         * to send out later.
                         */
                        spas->a.push_back(ind);
                        spac->sendAtom[ind] = true;
                        /* Store the global index so we can send it now */
                        spac->ibuf.push_back(indr);
                        if (i < n0)
                        {
                            nsend[0]++;
                        }
                    }
                }
            }
            nlast = start;
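            /* sendAtom[] marks atoms already scheduled for this pulse so the
             * same atom is not packed twice when both the neighbor's own
             * request list (i < n0) and a forwarded request ask for it; the
             * flags are reset below so the next pulse starts clean.
             */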
            /* Clear the local flags */
            for (int a : spas->a)
            {
                spac->sendAtom[a] = false;
            }
            /* Send and receive the number of indices to communicate */
            nsend[1] = spas->a.size();
            ddSendrecv(dd, d, dir == 0 ? dddirBackward : dddirForward,
                       nsend, 2, buf, 2);
            if (debug)
            {
                fprintf(debug, "Send to rank %d, %d (%d) indices, "
                        "receive from rank %d, %d (%d) indices\n",
                        dd->neighbor[d][1 - dir], nsend[1], nsend[0],
                        dd->neighbor[d][dir], buf[1], buf[0]);
                if (gmx_debug_at)
                {
                    for (int i : spac->ibuf)
                    {
                        fprintf(debug, " %d", i + 1);
                    }
                    fprintf(debug, "\n");
                }
            }
            nrecv_local    += buf[0];
            spas->nrecv     = buf[1];
            dd->globalAtomIndices.resize(nat_tot_specat + spas->nrecv);
            /* Send and receive the indices */
            ddSendrecv(dd, d, dir == 0 ? dddirBackward : dddirForward,
                       spac->ibuf.data(), spac->ibuf.size(),
                       dd->globalAtomIndices.data() + nat_tot_specat, spas->nrecv);
            nat_tot_specat += spas->nrecv;
        }
        /* Increase the x/f communication buffer sizes, when necessary */
        ns = spac->spas[d][0].a.size();
        nr = spac->spas[d][0].nrecv;
        if (ndir == 2)
        {
            ns += spac->spas[d][1].a.size();
            nr += spac->spas[d][1].nrecv;
        }
        if (vbuf_fac*ns > gmx::index(spac->vbuf.size()))
        {
            spac->vbuf.resize(vbuf_fac*ns);
        }
        if (vbuf_fac == 2 && vbuf_fac*nr > gmx::index(spac->vbuf2.size()))
        {
            spac->vbuf2.resize(vbuf_fac*nr);
        }
        /* Make a global to local index for the communication atoms */
        for (int i = nat_tot_prev; i < nat_tot_specat; i++)
        {
            gmx_hash_change_or_set(ga2la_specat, dd->globalAtomIndices[i], i);
        }
    }
    /* Check that in the end we got the number of atoms we asked for */
    if (nrecv_local != numRequested)
    {
        if (debug)
        {
            fprintf(debug, "Requested %d, received %d (tot recv %d)\n",
                    numRequested, nrecv_local, nat_tot_specat - at_start);
            if (gmx_debug_at)
            {
                for (int i = 0; i < numRequested; i++)
                {
                    int ind = gmx_hash_get_minone(ga2la_specat, (*ireq)[i]);
                    fprintf(debug, " %s%d",
                            (ind >= 0) ? "" : "!",
                            (*ireq)[i] + 1);
                }
                fprintf(debug, "\n");
            }
        }
        fprintf(stderr, "\nDD cell %d %d %d: Neighboring cells do not have atoms:",
                dd->ci[XX], dd->ci[YY], dd->ci[ZZ]);
        for (int i = 0; i < numRequested; i++)
        {
            if (gmx_hash_get_minone(ga2la_specat, (*ireq)[i]) < 0)
            {
                fprintf(stderr, " %d", (*ireq)[i] + 1);
            }
        }
        fprintf(stderr, "\n");
        gmx_fatal(FARGS, "DD cell %d %d %d could only obtain %d of the %d atoms that are connected via %ss from the neighboring cells. This probably means your %s lengths are too long compared to the domain decomposition cell size. Decrease the number of domain decomposition grid cells%s%s.",
                  dd->ci[XX], dd->ci[YY], dd->ci[ZZ],
                  nrecv_local, numRequested, specat_type,
                  specat_type, add_err,
                  dd_dlb_is_on(dd) ? " or use the -rcon option of mdrun" : "");
    }
    spac->at_start = at_start;
    spac->at_end   = nat_tot_specat;

    if (debug)
    {
        fprintf(debug, "Done setup_specat_communication\n");
    }

    return nat_tot_specat;
}