From 99c978fecdf9c607edc3c9e5c220d641ac89e56b Mon Sep 17 00:00:00 2001
From: Berk Hess
Date: Wed, 19 Jun 2019 09:46:19 +0200
Subject: [PATCH] Remove unnecessary #if GMX_MPI in pme code

Change-Id: I1c645c84b229258527d4c40edb055a706189c2bf
---
 src/gromacs/ewald/pme.cpp        | 51 +++++++++++-----------------------------
 src/gromacs/ewald/pme_grid.cpp   | 14 ++++++++---
 src/gromacs/ewald/pme_grid.h     |  4 ----
 src/gromacs/ewald/pme_internal.h |  2 --
 4 files changed, 25 insertions(+), 46 deletions(-)

diff --git a/src/gromacs/ewald/pme.cpp b/src/gromacs/ewald/pme.cpp
index 83ad456172..4ba4204237 100644
--- a/src/gromacs/ewald/pme.cpp
+++ b/src/gromacs/ewald/pme.cpp
@@ -385,23 +385,17 @@ PmeAtomComm::PmeAtomComm(MPI_Comm PmeMpiCommunicator,
 static void
 init_overlap_comm(pme_overlap_t * ol,
                   int             norder,
-#if GMX_MPI
                   MPI_Comm        comm,
-#endif
                   int             nnodes,
                   int             nodeid,
                   int             ndata,
                   int             commplainsize)
 {
     gmx_bool   bCont;
-#if GMX_MPI
-    MPI_Status stat;
 
     ol->mpi_comm = comm;
-#endif
-
-    ol->nnodes = nnodes;
-    ol->nodeid = nodeid;
+    ol->nnodes   = nnodes;
+    ol->nodeid   = nodeid;
 
     /* Linear translation of the PME grid won't affect reciprocal space
      * calculations, so to optimize we only interpolate "upwards",
@@ -493,6 +487,7 @@ init_overlap_comm(pme_overlap_t * ol,
 
 #if GMX_MPI
     /* Communicate the buffer sizes to receive */
+    MPI_Status stat;
     for (size_t b = 0; b < ol->comm_data.size(); b++)
     {
         MPI_Sendrecv(&ol->send_size, 1, MPI_INT, ol->comm_data[b].send_id, b,
@@ -616,13 +611,14 @@ gmx_pme_t *gmx_pme_init(const t_commrec *cr,
     pme->nnodes_major = numPmeDomains.x;
     pme->nnodes_minor = numPmeDomains.y;
 
-#if GMX_MPI
     if (numPmeDomains.x*numPmeDomains.y > 1)
     {
         pme->mpi_comm = cr->mpi_comm_mygroup;
 
+#if GMX_MPI
         MPI_Comm_rank(pme->mpi_comm, &pme->nodeid);
         MPI_Comm_size(pme->mpi_comm, &pme->nnodes);
+#endif
         if (pme->nnodes != numPmeDomains.x*numPmeDomains.y)
         {
             gmx_incons("PME rank count mismatch");
@@ -632,40 +628,33 @@ gmx_pme_t *gmx_pme_init(const t_commrec *cr,
     {
         pme->mpi_comm = MPI_COMM_NULL;
     }
-#endif
 
     if (pme->nnodes == 1)
     {
-#if GMX_MPI
         pme->mpi_comm_d[0] = MPI_COMM_NULL;
         pme->mpi_comm_d[1] = MPI_COMM_NULL;
-#endif
-        pme->ndecompdim   = 0;
-        pme->nodeid_major = 0;
-        pme->nodeid_minor = 0;
+        pme->ndecompdim    = 0;
+        pme->nodeid_major  = 0;
+        pme->nodeid_minor  = 0;
     }
     else
     {
         if (numPmeDomains.y == 1)
         {
-#if GMX_MPI
             pme->mpi_comm_d[0] = pme->mpi_comm;
             pme->mpi_comm_d[1] = MPI_COMM_NULL;
-#endif
-            pme->ndecompdim   = 1;
-            pme->nodeid_major = pme->nodeid;
-            pme->nodeid_minor = 0;
+            pme->ndecompdim    = 1;
+            pme->nodeid_major  = pme->nodeid;
+            pme->nodeid_minor  = 0;
         }
         else if (numPmeDomains.x == 1)
        {
-#if GMX_MPI
             pme->mpi_comm_d[0] = MPI_COMM_NULL;
             pme->mpi_comm_d[1] = pme->mpi_comm;
-#endif
-            pme->ndecompdim   = 1;
-            pme->nodeid_major = 0;
-            pme->nodeid_minor = pme->nodeid;
+            pme->ndecompdim    = 1;
+            pme->nodeid_major  = 0;
+            pme->nodeid_minor  = pme->nodeid;
         }
         else
         {
@@ -788,9 +777,7 @@ gmx_pme_t *gmx_pme_init(const t_commrec *cr,
      * but we do need the overlap in x because of the communication order.
      */
     init_overlap_comm(&pme->overlap[0], pme->pme_order,
-#if GMX_MPI
                       pme->mpi_comm_d[0],
-#endif
                       pme->nnodes_major, pme->nodeid_major,
                       pme->nkx,
                       (div_round_up(pme->nky, pme->nnodes_minor)+pme->pme_order)*(pme->nkz+pme->pme_order-1));
@@ -800,9 +787,7 @@ gmx_pme_t *gmx_pme_init(const t_commrec *cr,
      * extra for the offset. That's what the (+1)*pme->nkz is for.
      */
     init_overlap_comm(&pme->overlap[1], pme->pme_order,
-#if GMX_MPI
                       pme->mpi_comm_d[1],
-#endif
                       pme->nnodes_minor, pme->nodeid_minor,
                       pme->nky,
                       (div_round_up(pme->nkx, pme->nnodes_major)+pme->pme_order+1)*pme->nkz);
@@ -1226,12 +1211,10 @@ int gmx_pme_do(struct gmx_pme_t *pme,
             wrap_periodic_pmegrid(pme, grid);
 
             /* sum contributions to local grid from other nodes */
-#if GMX_MPI
             if (pme->nnodes > 1)
             {
                 gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_FORWARD);
             }
-#endif
 
             copy_pmegrid_to_fftgrid(pme, grid, fftgrid, grid_index);
         }
@@ -1334,12 +1317,10 @@ int gmx_pme_do(struct gmx_pme_t *pme,
             if (bBackFFT)
             {
                 /* distribute local grid to all nodes */
-#if GMX_MPI
                 if (pme->nnodes > 1)
                 {
                     gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_BACKWARD);
                 }
-#endif
 
                 unwrap_periodic_pmegrid(pme, grid);
             }
@@ -1486,12 +1467,10 @@ int gmx_pme_do(struct gmx_pme_t *pme,
         {
             wrap_periodic_pmegrid(pme, grid);
             /* sum contributions to local grid from other nodes */
-#if GMX_MPI
             if (pme->nnodes > 1)
             {
                 gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_FORWARD);
             }
-#endif
             copy_pmegrid_to_fftgrid(pme, grid, fftgrid, grid_index);
         }
         wallcycle_stop(wcycle, ewcPME_SPREAD);
@@ -1604,12 +1583,10 @@ int gmx_pme_do(struct gmx_pme_t *pme,
         } /*#pragma omp parallel*/
 
         /* distribute local grid to all nodes */
-#if GMX_MPI
         if (pme->nnodes > 1)
         {
             gmx_sum_qgrid_dd(pme, grid, GMX_SUM_GRID_BACKWARD);
         }
-#endif
 
         unwrap_periodic_pmegrid(pme, grid);
 
diff --git a/src/gromacs/ewald/pme_grid.cpp b/src/gromacs/ewald/pme_grid.cpp
index 2f1b16d720..a3e42e9c63 100644
--- a/src/gromacs/ewald/pme_grid.cpp
+++ b/src/gromacs/ewald/pme_grid.cpp
@@ -65,9 +65,11 @@
  */
 #define GMX_CACHE_SEP 64
 
-#if GMX_MPI
-void gmx_sum_qgrid_dd(struct gmx_pme_t *pme, real *grid, int direction)
+void gmx_sum_qgrid_dd(gmx_pme_t *pme,
+                      real      *grid,
+                      const int  direction)
 {
+#if GMX_MPI
     pme_overlap_t *overlap;
     int            send_index0, send_nindex;
     int            recv_index0, recv_nindex;
@@ -231,8 +233,14 @@ void gmx_sum_qgrid_dd(struct gmx_pme_t *pme, real *grid, int direction)
             }
         }
     }
+#else  // GMX_MPI
+    GMX_UNUSED_VALUE(pme);
+    GMX_UNUSED_VALUE(grid);
+    GMX_UNUSED_VALUE(direction);
+
+    GMX_RELEASE_ASSERT(false, "gmx_sum_qgrid_dd() should not be called without MPI");
+#endif // GMX_MPI
 }
-#endif
 
 int copy_pmegrid_to_fftgrid(const gmx_pme_t *pme, const real *pmegrid,
                             real *fftgrid, int grid_index)
diff --git a/src/gromacs/ewald/pme_grid.h b/src/gromacs/ewald/pme_grid.h
index 52fd095ca4..d14f84cb30 100644
--- a/src/gromacs/ewald/pme_grid.h
+++ b/src/gromacs/ewald/pme_grid.h
@@ -36,8 +36,6 @@
 #ifndef GMX_EWALD_PME_GRID_H
 #define GMX_EWALD_PME_GRID_H
 
-#include "config.h"
-
 #include "gromacs/utility/basedefinitions.h"
 #include "gromacs/utility/real.h"
 
@@ -58,10 +56,8 @@ constexpr int c_pmeNeighborUnitcellCount = 2*c_pmeMaxUnitcellShift + 1;
 struct pmegrid_t;
 struct pmegrids_t;
 
-#if GMX_MPI
 void
 gmx_sum_qgrid_dd(gmx_pme_t *pme, real *grid, int direction);
-#endif
 
 int copy_pmegrid_to_fftgrid(const gmx_pme_t *pme, const real *pmegrid,
                             real *fftgrid, int grid_index);
diff --git a/src/gromacs/ewald/pme_internal.h b/src/gromacs/ewald/pme_internal.h
index 4f56d7607e..ee88e0ed28 100644
--- a/src/gromacs/ewald/pme_internal.h
+++ b/src/gromacs/ewald/pme_internal.h
@@ -131,9 +131,7 @@ struct pme_grid_comm_t
 /*! \brief Data structure for grid overlap communication in a single dimension */
 struct pme_overlap_t
 {
-#if GMX_MPI
     MPI_Comm             mpi_comm;       //!< MPI communcator
-#endif
     int                  nnodes;         //!< Number of ranks
     int                  nodeid;         //!< Unique rank identifcator
    std::vector<int>     s2g0;           //!< The local interpolation grid start
-- 
2.11.4.GIT
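
The sketch below is not part of the patch; it is a minimal, self-contained C++ illustration of the pattern the patch applies to gmx_sum_qgrid_dd(): the function is compiled in every configuration, the non-MPI branch marks its parameters as used and asserts that it is never reached, and callers guard on the run-time rank count (if (pme->nnodes > 1)) rather than on the preprocessor. The names HAVE_MPI_BUILD and sumGridDd are illustrative stand-ins, not GROMACS identifiers; the real code uses GMX_MPI, GMX_UNUSED_VALUE and GMX_RELEASE_ASSERT as shown in the diff above.

// Minimal sketch of the "always compiled, asserts without MPI" pattern.
// HAVE_MPI_BUILD and sumGridDd are illustrative stand-ins, not GROMACS code.
#include <cassert>
#include <cstdio>

#define HAVE_MPI_BUILD 0 // stand-in for GMX_MPI; set to 1 for an MPI build

// The reduction entry point exists in every build. Without MPI, the
// parameters are marked as used and the path asserts, mirroring the
// GMX_UNUSED_VALUE/GMX_RELEASE_ASSERT branch added to gmx_sum_qgrid_dd().
static void sumGridDd(float* grid, int direction)
{
#if HAVE_MPI_BUILD
    // A real MPI build would sum the overlapping PME grid regions here.
    std::printf("summing grid %p in direction %d\n", static_cast<void*>(grid), direction);
#else
    (void)grid;      // stand-in for GMX_UNUSED_VALUE(grid)
    (void)direction; // stand-in for GMX_UNUSED_VALUE(direction)
    assert(false && "sumGridDd() should not be called without MPI");
#endif
}

int main()
{
    // Callers test the run-time rank count instead of the preprocessor,
    // as gmx_pme_do() does with "if (pme->nnodes > 1)".
    const int nnodes  = 1;
    float     grid[8] = {};
    if (nnodes > 1)
    {
        sumGridDd(grid, +1);
    }
    return 0;
}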