/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2006,2007,2008,2009,2010,2012,2013,2014,2015, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief This file implements functions for domdec to use
 * while managing inter-atomic constraints.
 *
 * \author Berk Hess <hess@kth.se>
 *
 * \ingroup module_domdec
 */
#include "gmxpre.h"

#include "domdec_specatomcomm.h"

#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_network.h"
#include "gromacs/domdec/ga2la.h"
#include "gromacs/gmxlib/gmx_omp_nthreads.h"
#include "gromacs/legacyheaders/types/commrec.h"
#include "gromacs/math/vec.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/smalloc.h"
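
/* Communicates the forces that act on "special" atoms (constraint- and
 * vsite-connected atoms received from neighboring domains) back to their
 * home ranks. The dimensions are traversed in the reverse of the order
 * used in dd_move_x_specat() below, summing the returned contributions
 * onto the local force array.
 */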
void dd_move_f_specat(gmx_domdec_t *dd, gmx_domdec_specat_comm_t *spac,
                      rvec *f, rvec *fshift)
{
    gmx_specatsend_t *spas;
    rvec             *vbuf;
    int               n, n0, n1, d, dim, dir, i;
    ivec              vis;
    int               is;
    gmx_bool          bPBC, bScrew;

    n = spac->at_end;
    for (d = dd->ndim-1; d >= 0; d--)
    {
        dim = dd->dim[d];
        if (dd->nc[dim] > 2)
        {
            /* Pulse the grid forward and backward */
            spas = spac->spas[d];
            n0   = spas[0].nrecv;
            n1   = spas[1].nrecv;
            n   -= n1 + n0;
            vbuf = spac->vbuf;
            /* Send and receive the forces */
            dd_sendrecv2_rvec(dd, d,
                              f+n+n1, n0, vbuf, spas[0].nsend,
                              f+n, n1, vbuf+spas[0].nsend, spas[1].nsend);
            for (dir = 0; dir < 2; dir++)
            {
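                /* Did this pulse wrap around a periodic boundary? Only in
                 * that case do the received forces contribute to the shift
                 * (virial) forces or require the screw-PBC rotation below.
                 */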
                bPBC   = ((dir == 0 && dd->ci[dim] == 0) ||
                          (dir == 1 && dd->ci[dim] == dd->nc[dim]-1));
                bScrew = (bPBC && dd->bScrewPBC && dim == XX);

                spas = &spac->spas[d][dir];
                /* Sum the buffer into the required forces */
                if (!bPBC || (!bScrew && fshift == NULL))
                {
                    for (i = 0; i < spas->nsend; i++)
                    {
                        rvec_inc(f[spas->a[i]], *vbuf);
                        vbuf++;
                    }
                }
                else
                {
                    clear_ivec(vis);
                    vis[dim] = (dir == 0 ? 1 : -1);
                    is       = IVEC2IS(vis);
                    if (!bScrew)
                    {
                        /* Sum and add to shift forces */
                        for (i = 0; i < spas->nsend; i++)
                        {
                            rvec_inc(f[spas->a[i]], *vbuf);
                            rvec_inc(fshift[is], *vbuf);
                            vbuf++;
                        }
                    }
                    else
                    {
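                        /* Screw PBC: the periodic image is rotated by 180
                         * degrees around the x-axis, so the y- and z-
                         * components of the returned forces change sign.
                         */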
                        /* Rotate the forces */
                        for (i = 0; i < spas->nsend; i++)
                        {
                            f[spas->a[i]][XX] += (*vbuf)[XX];
                            f[spas->a[i]][YY] -= (*vbuf)[YY];
                            f[spas->a[i]][ZZ] -= (*vbuf)[ZZ];
                            if (fshift)
                            {
                                rvec_inc(fshift[is], *vbuf);
                            }
                            vbuf++;
                        }
                    }
                }
            }
        }
        else
        {
            /* Two cells, so we only need to communicate one way */
            spas = &spac->spas[d][0];
            n   -= spas->nrecv;
            /* Send and receive the forces */
            dd_sendrecv_rvec(dd, d, dddirForward,
                             f+n, spas->nrecv, spac->vbuf, spas->nsend);
            /* Sum the buffer into the required forces */
            if (dd->bScrewPBC && dim == XX &&
                (dd->ci[dim] == 0 ||
                 dd->ci[dim] == dd->nc[dim]-1))
            {
                for (i = 0; i < spas->nsend; i++)
                {
                    /* Rotate the force */
                    f[spas->a[i]][XX] += spac->vbuf[i][XX];
                    f[spas->a[i]][YY] -= spac->vbuf[i][YY];
                    f[spas->a[i]][ZZ] -= spac->vbuf[i][ZZ];
                }
            }
            else
            {
                for (i = 0; i < spas->nsend; i++)
                {
                    rvec_inc(f[spas->a[i]], spac->vbuf[i]);
                }
            }
        }
    }
}
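
/* Sketch of the intended pairing within an MD step (illustrative only;
 * the exact call sites live in the constraint/vsite code):
 *
 *   dd_move_x_specat(dd, spac, box, x, NULL, FALSE);
 *   ... compute forces f on the special atoms ...
 *   dd_move_f_specat(dd, spac, f, fshift);
 */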

void dd_move_x_specat(gmx_domdec_t *dd, gmx_domdec_specat_comm_t *spac,
                      matrix box,
                      rvec *x0,
                      rvec *x1, gmx_bool bX1IsCoord)
{
    gmx_specatsend_t *spas;
    rvec             *x, *vbuf, *rbuf;
    int               nvec, v, n, nn, ns0, ns1, nr0, nr1, nr, d, dim, dir, i;
    gmx_bool          bPBC, bScrew = FALSE;
    rvec              shift = {0, 0, 0};

    nvec = 1;
    if (x1 != NULL)
    {
        nvec++;
    }

    n = spac->at_start;
    for (d = 0; d < dd->ndim; d++)
    {
        dim = dd->dim[d];
        if (dd->nc[dim] > 2)
        {
            /* Pulse the grid forward and backward */
            vbuf = spac->vbuf;
            for (dir = 0; dir < 2; dir++)
            {
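                /* Determine whether this pulse crosses a periodic boundary
                 * and, if so, the shift to apply: +box[dim] for the first
                 * pulse at the lower edge, -box[dim] for the second pulse
                 * at the upper edge.
                 */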
                if (dir == 0 && dd->ci[dim] == 0)
                {
                    bPBC   = TRUE;
                    bScrew = (dd->bScrewPBC && dim == XX);
                    copy_rvec(box[dim], shift);
                }
                else if (dir == 1 && dd->ci[dim] == dd->nc[dim]-1)
                {
                    bPBC   = TRUE;
                    bScrew = (dd->bScrewPBC && dim == XX);
                    for (i = 0; i < DIM; i++)
                    {
                        shift[i] = -box[dim][i];
                    }
                }
                else
                {
                    bPBC   = FALSE;
                    bScrew = FALSE;
                }
                spas = &spac->spas[d][dir];
                for (v = 0; v < nvec; v++)
                {
                    x = (v == 0 ? x0 : x1);
                    /* Copy the required coordinates to the send buffer */
                    if (!bPBC || (v == 1 && !bX1IsCoord))
                    {
                        /* Only copy */
                        for (i = 0; i < spas->nsend; i++)
                        {
                            copy_rvec(x[spas->a[i]], *vbuf);
                            vbuf++;
                        }
                    }
                    else if (!bScrew)
                    {
                        /* Shift coordinates */
                        for (i = 0; i < spas->nsend; i++)
                        {
                            rvec_add(x[spas->a[i]], shift, *vbuf);
                            vbuf++;
                        }
                    }
                    else
                    {
                        /* Shift and rotate coordinates */
                        for (i = 0; i < spas->nsend; i++)
                        {
                            (*vbuf)[XX] =               x[spas->a[i]][XX] + shift[XX];
                            (*vbuf)[YY] = box[YY][YY] - x[spas->a[i]][YY] + shift[YY];
                            (*vbuf)[ZZ] = box[ZZ][ZZ] - x[spas->a[i]][ZZ] + shift[ZZ];
                            vbuf++;
                        }
                    }
                }
            }
            /* Send and receive the coordinates */
            spas = spac->spas[d];
            ns0  = spas[0].nsend;
            nr0  = spas[0].nrecv;
            ns1  = spas[1].nsend;
            nr1  = spas[1].nrecv;
            if (nvec == 1)
            {
                dd_sendrecv2_rvec(dd, d,
                                  spac->vbuf+ns0, ns1, x0+n, nr1,
                                  spac->vbuf, ns0, x0+n+nr1, nr0);
            }
            else
            {
                /* Communicate both vectors in one buffer */
                rbuf = spac->vbuf2;
                dd_sendrecv2_rvec(dd, d,
                                  spac->vbuf+2*ns0, 2*ns1, rbuf, 2*nr1,
                                  spac->vbuf, 2*ns0, rbuf+2*nr1, 2*nr0);
                /* Split the buffer into the two vectors */
                nn = n;
                for (dir = 1; dir >= 0; dir--)
                {
                    nr = spas[dir].nrecv;
                    for (v = 0; v < 2; v++)
                    {
                        x = (v == 0 ? x0 : x1);
                        for (i = 0; i < nr; i++)
                        {
                            copy_rvec(*rbuf, x[nn+i]);
                            rbuf++;
                        }
                    }
                    nn += nr;
                }
            }
            n += nr0 + nr1;
        }
        else
        {
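            /* Only two cells along this dimension: a single backward pulse
             * delivers all special atoms. Apart from the screw rotation
             * below, pbc is left to the constraint and vsite routines.
             */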
            spas = &spac->spas[d][0];
            /* Copy the required coordinates to the send buffer */
            vbuf = spac->vbuf;
            for (v = 0; v < nvec; v++)
            {
                x = (v == 0 ? x0 : x1);
                if (dd->bScrewPBC && dim == XX &&
                    (dd->ci[XX] == 0 || dd->ci[XX] == dd->nc[XX]-1))
                {
                    /* Here we only perform the rotation, the rest of the pbc
                     * is handled in the constraint or vsite routines.
                     */
                    for (i = 0; i < spas->nsend; i++)
                    {
                        (*vbuf)[XX] =               x[spas->a[i]][XX];
                        (*vbuf)[YY] = box[YY][YY] - x[spas->a[i]][YY];
                        (*vbuf)[ZZ] = box[ZZ][ZZ] - x[spas->a[i]][ZZ];
                        vbuf++;
                    }
                }
                else
                {
                    for (i = 0; i < spas->nsend; i++)
                    {
                        copy_rvec(x[spas->a[i]], *vbuf);
                        vbuf++;
                    }
                }
            }
            /* Send and receive the coordinates */
            if (nvec == 1)
            {
                dd_sendrecv_rvec(dd, d, dddirBackward,
                                 spac->vbuf, spas->nsend, x0+n, spas->nrecv);
            }
            else
            {
                /* Communicate both vectors in one buffer */
                rbuf = spac->vbuf2;
                dd_sendrecv_rvec(dd, d, dddirBackward,
                                 spac->vbuf, 2*spas->nsend, rbuf, 2*spas->nrecv);
                /* Split the buffer into the two vectors */
                nr = spas->nrecv;
                for (v = 0; v < 2; v++)
                {
                    x = (v == 0 ? x0 : x1);
                    for (i = 0; i < nr; i++)
                    {
                        copy_rvec(*rbuf, x[n+i]);
                        rbuf++;
                    }
                }
            }
            n += spas->nrecv;
        }
    }
}
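
/* Note that both routines above address the communicated atoms through the
 * index range [spac->at_start, spac->at_end) that is established by
 * setup_specat_communication() below.
 */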

int setup_specat_communication(gmx_domdec_t             *dd,
                               ind_req_t                *ireq,
                               gmx_domdec_specat_comm_t *spac,
                               gmx_hash_t                ga2la_specat,
                               int                       at_start,
                               int                       vbuf_fac,
                               const char               *specat_type,
                               const char               *add_err)
{
    int               nsend[2], nlast, nsend_zero[2] = {0, 0}, *nsend_ptr;
    int               d, dim, ndir, dir, nr, ns, i, nrecv_local, n0, start, indr, ind, buf[2];
    int               nat_tot_specat, nat_tot_prev, nalloc_old;
    gmx_bool          bPBC;
    gmx_specatsend_t *spas;

    if (debug)
    {
        fprintf(debug, "Begin setup_specat_communication for %s\n", specat_type);
    }

    /* nsend[0]: the number of atoms requested by this node only,
     *           we communicate this for more efficient checks
     * nsend[1]: the total number of requested atoms
     */
    nsend[0] = ireq->n;
    nsend[1] = nsend[0];
    nlast    = nsend[1];
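    /* First phase: propagate the requested global atom indices outward
     * over all decomposition dimensions. Each pulse appends the requests
     * received from a neighbor to ireq->ind, so afterwards ireq->ind holds
     * our own requests followed by those of increasingly distant cells.
     */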
    for (d = dd->ndim-1; d >= 0; d--)
    {
        /* Pulse the grid forward and backward */
        dim  = dd->dim[d];
        bPBC = (dim < dd->npbcdim);
        if (dd->nc[dim] == 2)
        {
            /* Only 2 cells, so we only need to communicate once */
            ndir = 1;
        }
        else
        {
            ndir = 2;
        }
        for (dir = 0; dir < ndir; dir++)
        {
            if (!bPBC &&
                dd->nc[dim] > 2 &&
                ((dir == 0 && dd->ci[dim] == dd->nc[dim] - 1) ||
                 (dir == 1 && dd->ci[dim] == 0)))
            {
                /* No pbc: the first/last cell should not request atoms */
                nsend_ptr = nsend_zero;
            }
            else
            {
                nsend_ptr = nsend;
            }
            /* Communicate the number of indices */
            dd_sendrecv_int(dd, d, dir == 0 ? dddirForward : dddirBackward,
                            nsend_ptr, 2, spac->nreq[d][dir], 2);
            nr = spac->nreq[d][dir][1];
            if (nlast+nr > ireq->nalloc)
            {
                ireq->nalloc = over_alloc_dd(nlast+nr);
                srenew(ireq->ind, ireq->nalloc);
            }
            /* Communicate the indices */
            dd_sendrecv_int(dd, d, dir == 0 ? dddirForward : dddirBackward,
                            ireq->ind, nsend_ptr[1], ireq->ind+nlast, nr);
            nlast += nr;
        }
        nsend[1] = nlast;
    }
    if (debug)
    {
        fprintf(debug, "Communicated the counts\n");
    }
    /* Search for the requested atoms and communicate the indices we have */
    nat_tot_specat = at_start;
    nrecv_local    = 0;
    for (d = 0; d < dd->ndim; d++)
    {
        /* Pulse the grid forward and backward */
        if (dd->dim[d] >= dd->npbcdim || dd->nc[dd->dim[d]] > 2)
        {
            ndir = 2;
        }
        else
        {
            ndir = 1;
        }
        nat_tot_prev = nat_tot_specat;
        for (dir = ndir-1; dir >= 0; dir--)
        {
            if (nat_tot_specat > spac->bSendAtom_nalloc)
            {
                nalloc_old             = spac->bSendAtom_nalloc;
                spac->bSendAtom_nalloc = over_alloc_dd(nat_tot_specat);
                srenew(spac->bSendAtom, spac->bSendAtom_nalloc);
                for (i = nalloc_old; i < spac->bSendAtom_nalloc; i++)
                {
                    spac->bSendAtom[i] = FALSE;
                }
            }
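            /* bSendAtom marks atoms already added to this pulse's send
             * list, so an atom requested by several cells is sent only
             * once.
             */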
            spas = &spac->spas[d][dir];
            n0   = spac->nreq[d][dir][0];
            nr   = spac->nreq[d][dir][1];
            if (debug)
            {
                fprintf(debug, "dim=%d, dir=%d, searching for %d atoms\n",
                        d, dir, nr);
            }
            start       = nlast - nr;
            spas->nsend = 0;
            nsend[0]    = 0;
            for (i = 0; i < nr; i++)
            {
                indr = ireq->ind[start+i];
                ind  = -1;
                /* Check if this is a home atom and if so ind will be set */
                if (!ga2la_get_home(dd->ga2la, indr, &ind))
                {
                    /* Search in the communicated atoms */
                    ind = gmx_hash_get_minone(ga2la_specat, indr);
                }
                if (ind >= 0)
                {
                    if (i < n0 || !spac->bSendAtom[ind])
                    {
                        if (spas->nsend+1 > spas->a_nalloc)
                        {
                            spas->a_nalloc = over_alloc_large(spas->nsend+1);
                            srenew(spas->a, spas->a_nalloc);
                        }
                        /* Store the local index so we know which coordinates
                         * to send out later.
                         */
                        spas->a[spas->nsend] = ind;
                        spac->bSendAtom[ind] = TRUE;
                        if (spas->nsend+1 > spac->ibuf_nalloc)
                        {
                            spac->ibuf_nalloc = over_alloc_large(spas->nsend+1);
                            srenew(spac->ibuf, spac->ibuf_nalloc);
                        }
                        /* Store the global index so we can send it now */
                        spac->ibuf[spas->nsend] = indr;
                        if (i < n0)
                        {
                            nsend[0]++;
                        }
                        spas->nsend++;
                    }
                }
            }
            nlast = start;
            /* Clear the local flags */
            for (i = 0; i < spas->nsend; i++)
            {
                spac->bSendAtom[spas->a[i]] = FALSE;
            }
            /* Send and receive the number of indices to communicate */
            nsend[1] = spas->nsend;
            dd_sendrecv_int(dd, d, dir == 0 ? dddirBackward : dddirForward,
                            nsend, 2, buf, 2);
            if (debug)
            {
                fprintf(debug, "Send to rank %d, %d (%d) indices, "
                        "receive from rank %d, %d (%d) indices\n",
                        dd->neighbor[d][1-dir], nsend[1], nsend[0],
                        dd->neighbor[d][dir], buf[1], buf[0]);
                if (gmx_debug_at)
                {
                    for (i = 0; i < spas->nsend; i++)
                    {
                        fprintf(debug, " %d", spac->ibuf[i]+1);
                    }
                    fprintf(debug, "\n");
                }
            }
            nrecv_local += buf[0];
            spas->nrecv  = buf[1];
            if (nat_tot_specat + spas->nrecv > dd->gatindex_nalloc)
            {
                dd->gatindex_nalloc =
                    over_alloc_dd(nat_tot_specat + spas->nrecv);
                srenew(dd->gatindex, dd->gatindex_nalloc);
            }
            /* Send and receive the indices */
            dd_sendrecv_int(dd, d, dir == 0 ? dddirBackward : dddirForward,
                            spac->ibuf, spas->nsend,
                            dd->gatindex+nat_tot_specat, spas->nrecv);
            nat_tot_specat += spas->nrecv;
        }
        /* Allocate the x/f communication buffers */
        ns = spac->spas[d][0].nsend;
        nr = spac->spas[d][0].nrecv;
        if (ndir == 2)
        {
            ns += spac->spas[d][1].nsend;
            nr += spac->spas[d][1].nrecv;
        }
        if (vbuf_fac*ns > spac->vbuf_nalloc)
        {
            spac->vbuf_nalloc = over_alloc_dd(vbuf_fac*ns);
            srenew(spac->vbuf, spac->vbuf_nalloc);
        }
        if (vbuf_fac == 2 && vbuf_fac*nr > spac->vbuf2_nalloc)
        {
            spac->vbuf2_nalloc = over_alloc_dd(vbuf_fac*nr);
            srenew(spac->vbuf2, spac->vbuf2_nalloc);
        }

        /* Make a global to local index for the communication atoms */
        for (i = nat_tot_prev; i < nat_tot_specat; i++)
        {
            gmx_hash_change_or_set(ga2la_specat, dd->gatindex[i], i);
        }
    }
    /* Check that in the end we got the number of atoms we asked for */
    if (nrecv_local != ireq->n)
    {
        if (debug)
        {
            fprintf(debug, "Requested %d, received %d (tot recv %d)\n",
                    ireq->n, nrecv_local, nat_tot_specat-at_start);
            if (gmx_debug_at)
            {
                for (i = 0; i < ireq->n; i++)
                {
                    ind = gmx_hash_get_minone(ga2la_specat, ireq->ind[i]);
                    fprintf(debug, " %s%d",
                            (ind >= 0) ? "" : "!",
                            ireq->ind[i]+1);
                }
                fprintf(debug, "\n");
            }
        }
        fprintf(stderr, "\nDD cell %d %d %d: Neighboring cells do not have atoms:",
                dd->ci[XX], dd->ci[YY], dd->ci[ZZ]);
        for (i = 0; i < ireq->n; i++)
        {
            if (gmx_hash_get_minone(ga2la_specat, ireq->ind[i]) < 0)
            {
                fprintf(stderr, " %d", ireq->ind[i]+1);
            }
        }
        fprintf(stderr, "\n");
        gmx_fatal(FARGS, "DD cell %d %d %d could only obtain %d of the %d atoms that are connected via %ss from the neighboring cells. This probably means your %s lengths are too long compared to the domain decomposition cell size. Decrease the number of domain decomposition grid cells%s%s.",
                  dd->ci[XX], dd->ci[YY], dd->ci[ZZ],
                  nrecv_local, ireq->n, specat_type,
                  specat_type, add_err,
                  dd_dlb_is_on(dd) ? " or use the -rcon option of mdrun" : "");
    }
    spac->at_start = at_start;
    spac->at_end   = nat_tot_specat;

    if (debug)
    {
        fprintf(debug, "Done setup_specat_communication\n");
    }

    return nat_tot_specat;
}

gmx_domdec_specat_comm_t *specat_comm_init(int nthread)
{
    gmx_domdec_specat_comm_t *spac;

    snew(spac, 1);
    spac->nthread = nthread;
    snew(spac->ireq, spac->nthread);

    return spac;
}
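
/* Minimal usage sketch (hypothetical caller, e.g. the constraint setup):
 * fill ireq with the global indices of the required non-local atoms, then
 *
 *   at_end = setup_specat_communication(dd, ireq, spac, ga2la_specat,
 *                                       at_start, 2, "constraint", "");
 *
 * Here vbuf_fac = 2 sizes the buffers for communicating two rvec vectors
 * per atom; afterwards dd_move_x_specat() fills the index range
 * [at_start, at_end) every step.
 */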