/* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
 *
 * This file is part of Gromacs        Copyright (c) 1991-2008
 * David van der Spoel, Erik Lindahl, Berk Hess, University of Groningen.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org
 *
 * Gnomes, ROck Monsters And Chili Sauce
 */
24 #include "domdec_network.h"

#define DDMASTERRANK(dd)  (dd->masterrank)

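/* Send n_s ints to one neighbor and receive n_r ints from the other
 * neighbor along domain-decomposition dimension ddimind; the direction
 * argument selects which neighbor is the send target.
 */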
void dd_sendrecv_int(const gmx_domdec_t *dd,
                     int ddimind, int direction,
                     int *buf_s, int n_s,
                     int *buf_r, int n_r)
{
    int        rank_s, rank_r;
    MPI_Status stat;

    rank_s = dd->neighbor[ddimind][direction == dddirForward ? 0 : 1];
    rank_r = dd->neighbor[ddimind][direction == dddirForward ? 1 : 0];

    if (n_s && n_r)
    {
        /* Send and receive in one combined call */
        MPI_Sendrecv(buf_s, n_s*sizeof(int), MPI_BYTE, rank_s, 0,
                     buf_r, n_r*sizeof(int), MPI_BYTE, rank_r, 0,
                     dd->mpi_comm_all, &stat);
    }
    else if (n_s)
    {
        /* Only data to send */
        MPI_Send(buf_s, n_s*sizeof(int), MPI_BYTE, rank_s, 0,
                 dd->mpi_comm_all);
    }
    else if (n_r)
    {
        /* Only data to receive */
        MPI_Recv(buf_r, n_r*sizeof(int), MPI_BYTE, rank_r, 0,
                 dd->mpi_comm_all, &stat);
    }
}

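/* As dd_sendrecv_int, but for buffers of real values */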
void dd_sendrecv_real(const gmx_domdec_t *dd,
                      int ddimind, int direction,
                      real *buf_s, int n_s,
                      real *buf_r, int n_r)
{
    int        rank_s, rank_r;
    MPI_Status stat;

    rank_s = dd->neighbor[ddimind][direction == dddirForward ? 0 : 1];
    rank_r = dd->neighbor[ddimind][direction == dddirForward ? 1 : 0];

    if (n_s && n_r)
    {
        /* Send and receive in one combined call */
        MPI_Sendrecv(buf_s, n_s*sizeof(real), MPI_BYTE, rank_s, 0,
                     buf_r, n_r*sizeof(real), MPI_BYTE, rank_r, 0,
                     dd->mpi_comm_all, &stat);
    }
    else if (n_s)
    {
        /* Only data to send */
        MPI_Send(buf_s, n_s*sizeof(real), MPI_BYTE, rank_s, 0,
                 dd->mpi_comm_all);
    }
    else if (n_r)
    {
        /* Only data to receive */
        MPI_Recv(buf_r, n_r*sizeof(real), MPI_BYTE, rank_r, 0,
                 dd->mpi_comm_all, &stat);
    }
}

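/* As dd_sendrecv_int, but for buffers of rvec (3-vector) elements */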
void dd_sendrecv_rvec(const gmx_domdec_t *dd,
                      int ddimind, int direction,
                      rvec *buf_s, int n_s,
                      rvec *buf_r, int n_r)
{
    int        rank_s, rank_r;
    MPI_Status stat;

    rank_s = dd->neighbor[ddimind][direction == dddirForward ? 0 : 1];
    rank_r = dd->neighbor[ddimind][direction == dddirForward ? 1 : 0];

    if (n_s && n_r)
    {
        /* Send and receive in one combined call */
        MPI_Sendrecv(buf_s[0], n_s*sizeof(rvec), MPI_BYTE, rank_s, 0,
                     buf_r[0], n_r*sizeof(rvec), MPI_BYTE, rank_r, 0,
                     dd->mpi_comm_all, &stat);
    }
    else if (n_s)
    {
        /* Only data to send */
        MPI_Send(buf_s[0], n_s*sizeof(rvec), MPI_BYTE, rank_s, 0,
                 dd->mpi_comm_all);
    }
    else if (n_r)
    {
        /* Only data to receive */
        MPI_Recv(buf_r[0], n_r*sizeof(rvec), MPI_BYTE, rank_r, 0,
                 dd->mpi_comm_all, &stat);
    }
}

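/* Exchange rvec buffers with both neighbors along dimension ddimind in one
 * call: a forward and a backward send/receive pair, done either with
 * non-blocking MPI calls or with two ordered MPI_Sendrecv phases.
 */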
void dd_sendrecv2_rvec(const gmx_domdec_t *dd,
                       int ddimind,
                       rvec *buf_s_fw, int n_s_fw,
                       rvec *buf_r_fw, int n_r_fw,
                       rvec *buf_s_bw, int n_s_bw,
                       rvec *buf_r_bw, int n_r_bw)
{
    int         rank_fw, rank_bw, nreq;
    MPI_Request req[4];
    MPI_Status  stat[4];

    rank_fw = dd->neighbor[ddimind][0];
    rank_bw = dd->neighbor[ddimind][1];

    /* Try to send and receive in two directions simultaneously.
     * Should be faster, especially on machines
     * with full 3D communication networks.
     * However, it could be that communication libraries are
     * optimized for MPI_Sendrecv and that non-blocking MPI calls
     * are slower.
     * SendRecv2 can be turned on with the env.var. GMX_DD_SENDRECV2
     */
    if (dd->bSendRecv2)
    {
        nreq = 0;
        MPI_Irecv(buf_r_fw[0], n_r_fw*sizeof(rvec), MPI_BYTE,
                  rank_bw, 0, dd->mpi_comm_all, &req[nreq++]);
        MPI_Irecv(buf_r_bw[0], n_r_bw*sizeof(rvec), MPI_BYTE,
                  rank_fw, 1, dd->mpi_comm_all, &req[nreq++]);
        MPI_Isend(buf_s_fw[0], n_s_fw*sizeof(rvec), MPI_BYTE,
                  rank_fw, 0, dd->mpi_comm_all, &req[nreq++]);
        MPI_Isend(buf_s_bw[0], n_s_bw*sizeof(rvec), MPI_BYTE,
                  rank_bw, 1, dd->mpi_comm_all, &req[nreq++]);
        MPI_Waitall(nreq, req, stat);
    }
    else
    {
        /* Communicate in two ordered phases.
         * This is slower, even on a dual-core Opteron cluster
         * with a single full-duplex network connection per machine.
         */
        /* Forward */
        MPI_Sendrecv(buf_s_fw[0], n_s_fw*sizeof(rvec), MPI_BYTE, rank_fw, 0,
                     buf_r_fw[0], n_r_fw*sizeof(rvec), MPI_BYTE, rank_bw, 0,
                     dd->mpi_comm_all, &stat[0]);
        /* Backward */
        MPI_Sendrecv(buf_s_bw[0], n_s_bw*sizeof(rvec), MPI_BYTE, rank_bw, 0,
                     buf_r_bw[0], n_r_bw*sizeof(rvec), MPI_BYTE, rank_fw, 0,
                     dd->mpi_comm_all, &stat[0]);
    }
}

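/* Broadcast nbytes of data from the DD master rank to all DD ranks */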
void dd_bcast(gmx_domdec_t *dd, int nbytes, void *data)
{
    MPI_Bcast(data, nbytes, MPI_BYTE,
              DDMASTERRANK(dd), dd->mpi_comm_all);
}

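/* Copy src to dest and broadcast dest from the DD master rank to all ranks */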
void dd_bcastc(gmx_domdec_t *dd, int nbytes, void *src, void *dest)
{
    /* Only the master rank needs to copy src into dest;
     * the broadcast then distributes dest to all ranks.
     */
    if (DDMASTER(dd))
    {
        memcpy(dest, src, nbytes);
    }
    MPI_Bcast(dest, nbytes, MPI_BYTE,
              DDMASTERRANK(dd), dd->mpi_comm_all);
}

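/* Scatter nbytes per rank from src on the DD master rank to dest on each rank */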
void dd_scatter(gmx_domdec_t *dd, int nbytes, void *src, void *dest)
{
    MPI_Scatter(src, nbytes, MPI_BYTE,
                dest, nbytes, MPI_BYTE,
                DDMASTERRANK(dd), dd->mpi_comm_all);
}

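/* Gather nbytes from src on each rank into dest on the DD master rank */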
void dd_gather(gmx_domdec_t *dd, int nbytes, void *src, void *dest)
{
    MPI_Gather(src, nbytes, MPI_BYTE,
               dest, nbytes, MPI_BYTE,
               DDMASTERRANK(dd), dd->mpi_comm_all);
}

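/* Scatter blocks of scounts[i] bytes at offsets disps[i] in sbuf on the
 * DD master rank into rbuf (rcount bytes) on each rank.
 */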
void dd_scatterv(gmx_domdec_t *dd,
                 int *scounts, int *disps, void *sbuf,
                 int rcount, void *rbuf)
{
    MPI_Scatterv(sbuf, scounts, disps, MPI_BYTE,
                 rbuf, rcount, MPI_BYTE,
                 DDMASTERRANK(dd), dd->mpi_comm_all);
}

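/* Gather scount bytes from sbuf on each rank into rbuf on the DD master
 * rank, placed according to rcounts and disps.
 */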
void dd_gatherv(gmx_domdec_t *dd,
                int scount, void *sbuf,
                int *rcounts, int *disps, void *rbuf)
{
    MPI_Gatherv(sbuf, scount, MPI_BYTE,
                rbuf, rcounts, disps, MPI_BYTE,
                DDMASTERRANK(dd), dd->mpi_comm_all);
}