// clang-tidy modernize
// [gromacs.git] / src / gromacs / domdec / domdec_specatomcomm.cpp
// blob 8707dfd066ffcc8c4fa104d4dffdd487f4d3fee8
1 /*
2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 2006,2007,2008,2009,2010,2012,2013,2014,2015,2017,2018, by the GROMACS development team, led by
5 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6 * and including many others, as listed in the AUTHORS file in the
7 * top-level source directory and at http://www.gromacs.org.
9 * GROMACS is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public License
11 * as published by the Free Software Foundation; either version 2.1
12 * of the License, or (at your option) any later version.
14 * GROMACS is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with GROMACS; if not, see
21 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24 * If you want to redistribute modifications to GROMACS, please
25 * consider that scientific software is very special. Version
26 * control is crucial - bugs must be traceable. We will be happy to
27 * consider code for inclusion in the official distribution, but
28 * derived work must not be called official GROMACS. Details are found
29 * in the README & COPYING files - if they are missing, get the
30 * official version at http://www.gromacs.org.
32 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

/*! \internal \file
38 * \brief This file implements functions for domdec to use
39 * while managing inter-atomic constraints.
41 * \author Berk Hess <hess@kth.se>
 * \ingroup module_domdec
 */

#include "gmxpre.h"
47 #include "domdec_specatomcomm.h"
49 #include <cassert>
51 #include <algorithm>
53 #include "gromacs/domdec/domdec.h"
54 #include "gromacs/domdec/domdec_network.h"
55 #include "gromacs/domdec/domdec_struct.h"
56 #include "gromacs/domdec/ga2la.h"
57 #include "gromacs/math/vec.h"
58 #include "gromacs/mdtypes/commrec.h"
59 #include "gromacs/pbcutil/ishift.h"
60 #include "gromacs/utility/fatalerror.h"
61 #include "gromacs/utility/gmxassert.h"
63 #include "hash.h"
65 void dd_move_f_specat(gmx_domdec_t *dd, gmx_domdec_specat_comm_t *spac,
66 rvec *f, rvec *fshift)
68 gmx_specatsend_t *spas;
69 rvec *vbuf;
70 int n, n0, n1, dim, dir;
71 ivec vis;
72 int is;
73 gmx_bool bPBC, bScrew;
75 n = spac->at_end;
76 for (int d = dd->ndim - 1; d >= 0; d--)
78 dim = dd->dim[d];
79 if (dd->nc[dim] > 2)
81 /* Pulse the grid forward and backward */
82 spas = spac->spas[d];
83 n0 = spas[0].nrecv;
84 n1 = spas[1].nrecv;
85 n -= n1 + n0;
86 vbuf = as_rvec_array(spac->vbuf.data());
87 /* Send and receive the coordinates */
88 dd_sendrecv2_rvec(dd, d,
89 f + n + n1, n0, vbuf, spas[0].a.size(),
90 f + n, n1, vbuf + spas[0].a.size(),
91 spas[1].a.size());
92 for (dir = 0; dir < 2; dir++)
94 bPBC = ((dir == 0 && dd->ci[dim] == 0) ||
95 (dir == 1 && dd->ci[dim] == dd->nc[dim]-1));
96 bScrew = (bPBC && dd->bScrewPBC && dim == XX);
98 spas = &spac->spas[d][dir];
99 /* Sum the buffer into the required forces */
100 if (!bPBC || (!bScrew && fshift == nullptr))
102 for (int a : spas->a)
104 rvec_inc(f[a], *vbuf);
105 vbuf++;
108 else
110 clear_ivec(vis);
111 vis[dim] = (dir == 0 ? 1 : -1);
112 is = IVEC2IS(vis);
113 if (!bScrew)
115 /* Sum and add to shift forces */
116 for (int a : spas->a)
118 rvec_inc(f[a], *vbuf);
119 rvec_inc(fshift[is], *vbuf);
120 vbuf++;
123 else
125 /* Rotate the forces */
126 for (int a : spas->a)
128 f[a][XX] += (*vbuf)[XX];
129 f[a][YY] -= (*vbuf)[YY];
130 f[a][ZZ] -= (*vbuf)[ZZ];
131 if (fshift)
133 rvec_inc(fshift[is], *vbuf);
135 vbuf++;
141 else
143 /* Two cells, so we only need to communicate one way */
144 spas = &spac->spas[d][0];
145 n -= spas->nrecv;
146 /* Send and receive the coordinates */
147 ddSendrecv(dd, d, dddirForward,
148 f + n, spas->nrecv,
149 as_rvec_array(spac->vbuf.data()), spas->a.size());
150 /* Sum the buffer into the required forces */
151 if (dd->bScrewPBC && dim == XX &&
152 (dd->ci[dim] == 0 ||
153 dd->ci[dim] == dd->nc[dim]-1))
155 int i = 0;
156 for (int a : spas->a)
158 /* Rotate the force */
159 f[a][XX] += spac->vbuf[i][XX];
160 f[a][YY] -= spac->vbuf[i][YY];
161 f[a][ZZ] -= spac->vbuf[i][ZZ];
162 i++;
165 else
167 int i = 0;
168 for (int a : spas->a)
170 rvec_inc(f[a], spac->vbuf[i]);
171 i++;
178 void dd_move_x_specat(gmx_domdec_t *dd, gmx_domdec_specat_comm_t *spac,
179 const matrix box,
180 rvec *x0,
181 rvec *x1, gmx_bool bX1IsCoord)
183 gmx_specatsend_t *spas;
184 int nvec, v, n, nn, ns0, ns1, nr0, nr1, nr, d, dim, dir, i;
185 gmx_bool bPBC, bScrew = FALSE;
186 rvec shift = {0, 0, 0};
188 nvec = 1;
189 if (x1 != nullptr)
191 nvec++;
194 n = spac->at_start;
195 for (d = 0; d < dd->ndim; d++)
197 dim = dd->dim[d];
198 if (dd->nc[dim] > 2)
200 /* Pulse the grid forward and backward */
201 rvec *vbuf = as_rvec_array(spac->vbuf.data());
202 for (dir = 0; dir < 2; dir++)
204 if (dir == 0 && dd->ci[dim] == 0)
206 bPBC = TRUE;
207 bScrew = (dd->bScrewPBC && dim == XX);
208 copy_rvec(box[dim], shift);
210 else if (dir == 1 && dd->ci[dim] == dd->nc[dim]-1)
212 bPBC = TRUE;
213 bScrew = (dd->bScrewPBC && dim == XX);
214 for (i = 0; i < DIM; i++)
216 shift[i] = -box[dim][i];
219 else
221 bPBC = FALSE;
222 bScrew = FALSE;
224 spas = &spac->spas[d][dir];
225 for (v = 0; v < nvec; v++)
227 rvec *x = (v == 0 ? x0 : x1);
228 /* Copy the required coordinates to the send buffer */
229 if (!bPBC || (v == 1 && !bX1IsCoord))
231 /* Only copy */
232 for (int a : spas->a)
234 copy_rvec(x[a], *vbuf);
235 vbuf++;
238 else if (!bScrew)
240 /* Shift coordinates */
241 for (int a : spas->a)
243 rvec_add(x[a], shift, *vbuf);
244 vbuf++;
247 else
249 /* Shift and rotate coordinates */
250 for (int a : spas->a)
252 (*vbuf)[XX] = x[a][XX] + shift[XX];
253 (*vbuf)[YY] = box[YY][YY] - x[a][YY] + shift[YY];
254 (*vbuf)[ZZ] = box[ZZ][ZZ] - x[a][ZZ] + shift[ZZ];
255 vbuf++;
260 /* Send and receive the coordinates */
261 spas = spac->spas[d];
262 ns0 = spas[0].a.size();
263 nr0 = spas[0].nrecv;
264 ns1 = spas[1].a.size();
265 nr1 = spas[1].nrecv;
266 if (nvec == 1)
268 rvec *vbuf = as_rvec_array(spac->vbuf.data());
269 dd_sendrecv2_rvec(dd, d,
270 vbuf + ns0, ns1, x0 + n, nr1,
271 vbuf, ns0, x0 + n + nr1, nr0);
273 else
275 rvec *vbuf = as_rvec_array(spac->vbuf.data());
276 /* Communicate both vectors in one buffer */
277 rvec *rbuf = as_rvec_array(spac->vbuf2.data());
278 dd_sendrecv2_rvec(dd, d,
279 vbuf + 2*ns0, 2*ns1, rbuf, 2*nr1,
280 vbuf, 2*ns0, rbuf + 2*nr1, 2*nr0);
281 /* Split the buffer into the two vectors */
282 nn = n;
283 for (dir = 1; dir >= 0; dir--)
285 nr = spas[dir].nrecv;
286 for (v = 0; v < 2; v++)
288 rvec *x = (v == 0 ? x0 : x1);
289 for (i = 0; i < nr; i++)
291 copy_rvec(*rbuf, x[nn+i]);
292 rbuf++;
295 nn += nr;
298 n += nr0 + nr1;
300 else
302 spas = &spac->spas[d][0];
303 /* Copy the required coordinates to the send buffer */
304 rvec *vbuf = as_rvec_array(spac->vbuf.data());
305 for (v = 0; v < nvec; v++)
307 rvec *x = (v == 0 ? x0 : x1);
308 if (dd->bScrewPBC && dim == XX &&
309 (dd->ci[XX] == 0 || dd->ci[XX] == dd->nc[XX]-1))
311 /* Here we only perform the rotation, the rest of the pbc
312 * is handled in the constraint or viste routines.
314 for (int a : spas->a)
316 (*vbuf)[XX] = x[a][XX];
317 (*vbuf)[YY] = box[YY][YY] - x[a][YY];
318 (*vbuf)[ZZ] = box[ZZ][ZZ] - x[a][ZZ];
319 vbuf++;
322 else
324 for (int a : spas->a)
326 copy_rvec(x[a], *vbuf);
327 vbuf++;
331 /* Send and receive the coordinates */
332 if (nvec == 1)
334 rvec *vbuf = as_rvec_array(spac->vbuf.data());
335 ddSendrecv(dd, d, dddirBackward,
336 vbuf, spas->a.size(), x0 + n, spas->nrecv);
338 else
340 rvec *vbuf = as_rvec_array(spac->vbuf.data());
341 /* Communicate both vectors in one buffer */
342 rvec *rbuf = as_rvec_array(spac->vbuf2.data());
343 ddSendrecv(dd, d, dddirBackward,
344 vbuf, 2*spas->a.size(), rbuf, 2*spas->nrecv);
345 /* Split the buffer into the two vectors */
346 nr = spas[0].nrecv;
347 for (v = 0; v < 2; v++)
349 rvec *x = (v == 0 ? x0 : x1);
350 for (i = 0; i < nr; i++)
352 copy_rvec(*rbuf, x[n+i]);
353 rbuf++;
357 n += spas->nrecv;
362 int setup_specat_communication(gmx_domdec_t *dd,
363 std::vector<int> *ireq,
364 gmx_domdec_specat_comm_t *spac,
365 gmx_hash_t *ga2la_specat,
366 int at_start,
367 int vbuf_fac,
368 const char *specat_type,
369 const char *add_err)
371 int nsend[2], nlast, nsend_zero[2] = {0, 0}, *nsend_ptr;
372 int dim, ndir, nr, ns, nrecv_local, n0, start, indr, ind, buf[2];
373 int nat_tot_specat, nat_tot_prev;
374 gmx_bool bPBC;
375 gmx_specatsend_t *spas;
377 if (debug)
379 fprintf(debug, "Begin setup_specat_communication for %s\n", specat_type);
382 /* nsend[0]: the number of atoms requested by this node only,
383 * we communicate this for more efficients checks
384 * nsend[1]: the total number of requested atoms
386 const int numRequested = ireq->size();
387 nsend[0] = ireq->size();
388 nsend[1] = nsend[0];
389 nlast = nsend[1];
390 for (int d = dd->ndim-1; d >= 0; d--)
392 /* Pulse the grid forward and backward */
393 dim = dd->dim[d];
394 bPBC = (dim < dd->npbcdim);
395 if (dd->nc[dim] == 2)
397 /* Only 2 cells, so we only need to communicate once */
398 ndir = 1;
400 else
402 ndir = 2;
404 for (int dir = 0; dir < ndir; dir++)
406 if (!bPBC &&
407 dd->nc[dim] > 2 &&
408 ((dir == 0 && dd->ci[dim] == dd->nc[dim] - 1) ||
409 (dir == 1 && dd->ci[dim] == 0)))
411 /* No pbc: the fist/last cell should not request atoms */
412 nsend_ptr = nsend_zero;
414 else
416 nsend_ptr = nsend;
418 /* Communicate the number of indices */
419 ddSendrecv(dd, d, dir == 0 ? dddirForward : dddirBackward,
420 nsend_ptr, 2, spac->nreq[d][dir], 2);
421 nr = spac->nreq[d][dir][1];
422 ireq->resize(nlast + nr);
423 /* Communicate the indices */
424 ddSendrecv(dd, d, dir == 0 ? dddirForward : dddirBackward,
425 ireq->data(), nsend_ptr[1], ireq->data() + nlast, nr);
426 nlast += nr;
428 nsend[1] = nlast;
430 if (debug)
432 fprintf(debug, "Communicated the counts\n");
435 /* Search for the requested atoms and communicate the indices we have */
436 nat_tot_specat = at_start;
437 nrecv_local = 0;
438 for (int d = 0; d < dd->ndim; d++)
440 /* Pulse the grid forward and backward */
441 if (dd->dim[d] >= dd->npbcdim || dd->nc[dd->dim[d]] > 2)
443 ndir = 2;
445 else
447 ndir = 1;
449 nat_tot_prev = nat_tot_specat;
450 for (int dir = ndir - 1; dir >= 0; dir--)
452 /* To avoid cost of clearing by resize(), we only increase size */
453 if (static_cast<size_t>(nat_tot_specat) > spac->sendAtom.size())
455 /* Note: resize initializes new elements to false, which is actually needed here */
456 spac->sendAtom.resize(nat_tot_specat);
458 spas = &spac->spas[d][dir];
459 n0 = spac->nreq[d][dir][0];
460 nr = spac->nreq[d][dir][1];
461 if (debug)
463 fprintf(debug, "dim=%d, dir=%d, searching for %d atoms\n",
464 d, dir, nr);
466 start = nlast - nr;
467 spas->a.clear();
468 spac->ibuf.clear();
469 nsend[0] = 0;
470 for (int i = 0; i < nr; i++)
472 indr = (*ireq)[start + i];
473 ind = -1;
474 /* Check if this is a home atom and if so ind will be set */
475 if (!ga2la_get_home(dd->ga2la, indr, &ind))
477 /* Search in the communicated atoms */
478 ind = gmx_hash_get_minone(ga2la_specat, indr);
480 if (ind >= 0)
482 if (i < n0 || !spac->sendAtom[ind])
484 /* Store the local index so we know which coordinates
485 * to send out later.
487 spas->a.push_back(ind);
488 spac->sendAtom[ind] = true;
489 /* Store the global index so we can send it now */
490 spac->ibuf.push_back(indr);
491 if (i < n0)
493 nsend[0]++;
498 nlast = start;
499 /* Clear the local flags */
500 for (int a : spas->a)
502 spac->sendAtom[a] = false;
504 /* Send and receive the number of indices to communicate */
505 nsend[1] = spas->a.size();
506 ddSendrecv(dd, d, dir == 0 ? dddirBackward : dddirForward,
507 nsend, 2, buf, 2);
508 if (debug)
510 fprintf(debug, "Send to rank %d, %d (%d) indices, "
511 "receive from rank %d, %d (%d) indices\n",
512 dd->neighbor[d][1-dir], nsend[1], nsend[0],
513 dd->neighbor[d][dir], buf[1], buf[0]);
514 if (gmx_debug_at)
516 for (int i : spac->ibuf)
518 fprintf(debug, " %d", i + 1);
520 fprintf(debug, "\n");
523 nrecv_local += buf[0];
524 spas->nrecv = buf[1];
525 dd->globalAtomIndices.resize(nat_tot_specat + spas->nrecv);
526 /* Send and receive the indices */
527 ddSendrecv(dd, d, dir == 0 ? dddirBackward : dddirForward,
528 spac->ibuf.data(), spac->ibuf.size(),
529 dd->globalAtomIndices.data() + nat_tot_specat, spas->nrecv);
530 nat_tot_specat += spas->nrecv;
533 /* Increase the x/f communication buffer sizes, when necessary */
534 ns = spac->spas[d][0].a.size();
535 nr = spac->spas[d][0].nrecv;
536 if (ndir == 2)
538 ns += spac->spas[d][1].a.size();
539 nr += spac->spas[d][1].nrecv;
541 if (vbuf_fac*ns > gmx::index(spac->vbuf.size()))
543 spac->vbuf.resize(vbuf_fac*ns);
545 if (vbuf_fac == 2 && vbuf_fac*nr > gmx::index(spac->vbuf2.size()))
547 spac->vbuf2.resize(vbuf_fac*nr);
550 /* Make a global to local index for the communication atoms */
551 for (int i = nat_tot_prev; i < nat_tot_specat; i++)
553 gmx_hash_change_or_set(ga2la_specat, dd->globalAtomIndices[i], i);
557 /* Check that in the end we got the number of atoms we asked for */
558 if (nrecv_local != numRequested)
560 if (debug)
562 fprintf(debug, "Requested %d, received %d (tot recv %d)\n",
563 numRequested, nrecv_local, nat_tot_specat - at_start);
564 if (gmx_debug_at)
566 for (int i = 0; i < numRequested; i++)
568 int ind = gmx_hash_get_minone(ga2la_specat, (*ireq)[i]);
569 fprintf(debug, " %s%d",
570 (ind >= 0) ? "" : "!",
571 (*ireq)[i] + 1);
573 fprintf(debug, "\n");
576 fprintf(stderr, "\nDD cell %d %d %d: Neighboring cells do not have atoms:",
577 dd->ci[XX], dd->ci[YY], dd->ci[ZZ]);
578 for (int i = 0; i < numRequested; i++)
580 if (gmx_hash_get_minone(ga2la_specat, (*ireq)[i]) < 0)
582 fprintf(stderr, " %d", (*ireq)[i] + 1);
585 fprintf(stderr, "\n");
586 gmx_fatal(FARGS, "DD cell %d %d %d could only obtain %d of the %d atoms that are connected via %ss from the neighboring cells. This probably means your %s lengths are too long compared to the domain decomposition cell size. Decrease the number of domain decomposition grid cells%s%s.",
587 dd->ci[XX], dd->ci[YY], dd->ci[ZZ],
588 nrecv_local, numRequested, specat_type,
589 specat_type, add_err,
590 dd_dlb_is_on(dd) ? " or use the -rcon option of mdrun" : "");
593 spac->at_start = at_start;
594 spac->at_end = nat_tot_specat;
596 if (debug)
598 fprintf(debug, "Done setup_specat_communication\n");
601 return nat_tot_specat;