/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \libinternal \file
 *
 * \brief This file contains function declarations necessary for
 * computing energies and forces for the PME long-ranged part (Coulomb
 * and LJ).
 *
 * \author Berk Hess <hess@kth.se>
 * \ingroup module_ewald
 */
#ifndef GMX_EWALD_PME_H
#define GMX_EWALD_PME_H

#include <string>

#include "gromacs/math/vectypes.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/timing/walltime_accounting.h"
#include "gromacs/utility/arrayref.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/real.h"
struct gmx_pme_t;
struct interaction_const_t;
struct t_commrec;
struct t_inputrec;
struct t_nrnb;
struct gmx_wallclock_gpu_pme_t;
struct gmx_device_info_t;

namespace gmx
{
class ForceWithVirial;
class MDLogger;
}
enum {
    GMX_SUM_GRID_FORWARD, GMX_SUM_GRID_BACKWARD
};
/*! \brief Possible PME codepaths on a rank.
 * \todo: make this enum class with gmx_pme_t C++ refactoring
 */
enum PmeRunMode
{
    None,    //!< No PME task is done
    CPU,     //!< Whole PME computation is done on CPU
    GPU,     //!< Whole PME computation is done on GPU
    Mixed,   //!< Mixed mode: only spread and gather run on GPU; FFT and solving are done on CPU.
};
//! PME gathering output forces treatment
enum class PmeForceOutputHandling
{
    Set,             /**< Gather simply writes into provided force buffer */
    ReduceWithInput, /**< Gather adds its output to the buffer.
                          On GPU, that means additional H2D copy before the kernel launch. */
};
/*! \brief Return the smallest allowed PME grid size for \p pmeOrder */
int minimalPmeGridSize(int pmeOrder);
/*! \brief Check restrictions on pme_order and the PME grid nkx,nky,nkz.
 *
 * With errorsAreFatal=true, an exception or fatal error is generated
 * on violation of restrictions.
 * With errorsAreFatal=false, false is returned on violation of restrictions.
 * When all restrictions are obeyed, true is returned.
 * Argument useThreads tells if any MPI rank doing PME uses more than 1 thread.
 * If useThreads is unknown at the time of calling, pass true for conservative checking.
 *
 * The PME GPU restrictions are checked separately during pme_gpu_init().
 */
bool gmx_pme_check_restrictions(int pme_order,
                                int nkx, int nky, int nkz,
                                int nnodes_major,
                                bool useThreads,
                                bool errorsAreFatal);
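/* Illustrative usage sketch (variable names are hypothetical, not part of this
 * header): a conservative, non-fatal check done before the thread setup is
 * known, so useThreads is passed as true and errorsAreFatal as false:
 *
 *     bool gridIsValid = gmx_pme_check_restrictions(pmeOrder, nkx, nky, nkz,
 *                                                   numDomainsMajor, true, false);
 */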
/*! \brief Construct PME data
 *
 * \throws   gmx::InconsistentInputError if input grid sizes/PME order are inconsistent.
 * \returns  Pointer to newly allocated and initialized PME data.
 */
gmx_pme_t *gmx_pme_init(const t_commrec *cr,
                        int nnodes_major, int nnodes_minor,
                        const t_inputrec *ir, int homenr,
                        gmx_bool bFreeEnergy_q, gmx_bool bFreeEnergy_lj,
                        gmx_bool bReproducible,
                        real ewaldcoeff_q, real ewaldcoeff_lj,
                        int nthread,
                        PmeRunMode runMode,
                        gmx_device_info_t *gpuInfo,
                        const gmx::MDLogger &mdlog);
/*! \brief Destroys the PME data structure.*/
void gmx_pme_destroy(gmx_pme_t *pme);
/*! \brief Flag values that control what gmx_pme_do() will calculate
 *
 * These can be combined with bitwise-OR if more than one thing is required.
 */
#define GMX_PME_SPREAD        (1<<0)
#define GMX_PME_SOLVE         (1<<1)
#define GMX_PME_CALC_F        (1<<2)
#define GMX_PME_CALC_ENER_VIR (1<<3)
/* This forces the grid to be backtransformed even without GMX_PME_CALC_F */
#define GMX_PME_CALC_POT      (1<<4)

#define GMX_PME_DO_ALL_F  (GMX_PME_SPREAD | GMX_PME_SOLVE | GMX_PME_CALC_F)
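/* Illustrative example (not part of the API itself): the flags are combined
 * with bitwise-OR before being passed to gmx_pme_do(), e.g. to spread, solve
 * and obtain energy/virial without computing forces:
 *
 *     int flags = GMX_PME_SPREAD | GMX_PME_SOLVE | GMX_PME_CALC_ENER_VIR;
 *
 * GMX_PME_DO_ALL_F above is simply such a combination covering the full force
 * calculation.
 */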
/*! \brief Do a PME calculation on a CPU for the long range electrostatics and/or LJ.
 *
 * The meaning of \p flags is defined above, and determines which
 * parts of the calculation are performed.
 *
 * \return 0 indicates all well, non-zero is an error code.
 */
int gmx_pme_do(struct gmx_pme_t *pme,
               int start,        int homenr,
               rvec x[],         rvec f[],
               real chargeA[],   real chargeB[],
               real c6A[],       real c6B[],
               real sigmaA[],    real sigmaB[],
               matrix box,       t_commrec *cr,
               int maxshift_x,   int maxshift_y,
               t_nrnb *nrnb,     gmx_wallcycle_t wcycle,
               matrix vir_q,     matrix vir_lj,
               real *energy_q,   real *energy_lj,
               real lambda_q,    real lambda_lj,
               real *dvdlambda_q, real *dvdlambda_lj,
               int flags);
/*! \brief Called on the nodes that do PME exclusively (as slaves) */
int gmx_pmeonly(struct gmx_pme_t *pme,
                struct t_commrec *cr, t_nrnb *mynrnb,
                gmx_wallcycle_t wcycle,
                gmx_walltime_accounting_t walltime_accounting,
                t_inputrec *ir, PmeRunMode runMode);
/*! \brief Calculate the PME grid energy V for n charges.
 *
 * The potential (found in \p pme) must have been found already with a
 * call to gmx_pme_do() with at least GMX_PME_SPREAD and GMX_PME_SOLVE
 * specified. Note that the charges are not spread on the grid in the
 * pme struct. Currently does not work in parallel or with free
 * energy.
 */
void gmx_pme_calc_energy(struct gmx_pme_t *pme, int n, rvec *x, real *q, real *V);
/*! \brief Send the charges and maxshift to our PME-only node. */
void gmx_pme_send_parameters(struct t_commrec *cr,
                             const interaction_const_t *ic,
                             gmx_bool bFreeEnergy_q, gmx_bool bFreeEnergy_lj,
                             real *chargeA, real *chargeB,
                             real *sqrt_c6A, real *sqrt_c6B,
                             real *sigmaA, real *sigmaB,
                             int maxshift_x, int maxshift_y);
/*! \brief Send the coordinates to our PME-only node and request a PME calculation */
void gmx_pme_send_coordinates(struct t_commrec *cr, matrix box, rvec *x,
                              real lambda_q, real lambda_lj,
                              gmx_bool bEnerVir,
                              gmx_int64_t step);
/*! \brief Tell our PME-only node to finish */
void gmx_pme_send_finish(struct t_commrec *cr);
/*! \brief Tell our PME-only node to reset all cycle and flop counters */
void gmx_pme_send_resetcounters(struct t_commrec *cr, gmx_int64_t step);
/*! \brief PP nodes receive the long range forces from the PME nodes */
void gmx_pme_receive_f(struct t_commrec *cr,
                       gmx::ForceWithVirial *forceWithVirial,
                       real *energy_q, real *energy_lj,
                       real *dvdlambda_q, real *dvdlambda_lj,
                       float *pme_cycles);
/*! \brief
 * This function updates the local atom data on GPU after DD (charges, coordinates, etc.).
 * TODO: it should update the PME CPU atom data as well.
 * (Currently the PME CPU codepath, gmx_pme_do(), gets passed the input pointers on each computation.)
 *
 * \param[in] pme            The PME structure.
 * \param[in] nAtoms         The number of particles.
 * \param[in] charges        The pointer to the array of particle charges.
 */
void gmx_pme_reinit_atoms(const gmx_pme_t *pme, const int nAtoms, const real *charges);
/* A block of PME GPU functions */
/*! \brief Checks whether the input system allows PME to run on a GPU.
 * TODO: this mostly duplicates an internal PME assert function
 * pme_gpu_check_restrictions(), except that it works with a
 * formed gmx_pme_t structure. Should that one go away/work with inputrec?
 *
 * \param[in]  ir     Input system.
 * \param[out] error  The error message if the input is not supported on GPU.
 *
 * \returns true if PME can run on GPU with this input, false otherwise.
 */
bool pme_gpu_supports_input(const t_inputrec *ir, std::string *error);
/*! \brief
 * Returns the active PME codepath (CPU, GPU, mixed).
 * \todo This is rather static data that should be managed by the higher level task scheduler.
 *
 * \param[in] pme            The PME data structure.
 * \returns                  The active PME codepath.
 */
PmeRunMode pme_run_mode(const gmx_pme_t *pme);
/*! \brief
 * Tells if PME is enabled to run on GPU (not necessarily active at the moment).
 * \todo This is rather static data that should be managed by the hardware assignment manager.
 * For now, it is synonymous with the active PME codepath (in the absence of dynamic switching).
 *
 * \param[in] pme            The PME data structure.
 * \returns true if PME can run on GPU, false otherwise.
 */
inline bool pme_gpu_task_enabled(const gmx_pme_t *pme)
{
    return (pme != nullptr) && (pme_run_mode(pme) != PmeRunMode::CPU);
}
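/* Illustrative use in caller code (hypothetical sketch, not prescribed by this
 * header): select the codepath based on the active run mode:
 *
 *     if (pme_gpu_task_enabled(pme))
 *     {
 *         // use the pme_gpu_* launch/wait functions declared below
 *     }
 *     else
 *     {
 *         // fall back to gmx_pme_do() on the CPU
 *     }
 */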
/*! \brief
 * Resets the PME GPU timings. To be called at the reset step.
 *
 * \param[in] pme            The PME structure.
 */
void pme_gpu_reset_timings(const gmx_pme_t *pme);
/*! \brief
 * Copies the PME GPU timings to the gmx_wallclock_gpu_pme_t structure (for log output). To be called at the run end.
 *
 * \param[in] pme               The PME structure.
 * \param[in] timings           The gmx_wallclock_gpu_pme_t structure.
 */
void pme_gpu_get_timings(const gmx_pme_t *pme,
                         gmx_wallclock_gpu_pme_t *timings);
/* The main PME GPU functions */
/*! \brief
 * Prepares PME on GPU computation (updating the box if needed).
 *
 * \param[in] pme               The PME data structure.
 * \param[in] needToUpdateBox   Tells if the stored unit cell parameters should be updated from \p box.
 * \param[in] box               The unit cell box.
 * \param[in] wcycle            The wallclock counter.
 * \param[in] flags             The combination of flags to affect this PME computation.
 *                              The flags are the GMX_PME_ flags from pme.h.
 */
void pme_gpu_prepare_computation(gmx_pme_t *pme,
                                 bool needToUpdateBox,
                                 const matrix box,
                                 gmx_wallcycle_t wcycle,
                                 int flags);
/*! \brief
 * Launches first stage of PME on GPU - H2D input transfers, spreading kernel, and D2H grid transfer if needed.
 *
 * \param[in] pme               The PME data structure.
 * \param[in] x                 The array of local atoms' coordinates.
 * \param[in] wcycle            The wallclock counter.
 */
void pme_gpu_launch_spread(gmx_pme_t *pme,
                           const rvec *x,
                           gmx_wallcycle_t wcycle);
/*! \brief
 * Launches middle stages of PME (FFT R2C, solving, FFT C2R) either on GPU or on CPU, depending on the run mode.
 *
 * \param[in] pme               The PME data structure.
 * \param[in] wcycle            The wallclock counter.
 */
void pme_gpu_launch_complex_transforms(gmx_pme_t *pme,
                                       gmx_wallcycle_t wcycle);
/*! \brief
 * Launches last stage of PME on GPU - force gathering and D2H force transfer.
 *
 * \param[in] pme               The PME data structure.
 * \param[in] wcycle            The wallclock counter.
 * \param[in] forceTreatment    Tells how data should be treated. The gathering kernel either stores
 *                              the output reciprocal forces into the host array, or copies its contents
 *                              to the GPU first and accumulates. The reduction is non-atomic.
 */
void pme_gpu_launch_gather(const gmx_pme_t *pme,
                           gmx_wallcycle_t wcycle,
                           PmeForceOutputHandling forceTreatment);
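/* Sketch of the intended per-step call sequence (illustrative only; variable
 * names are placeholders and the actual scheduling lives in the calling code,
 * not in this header):
 *
 *     pme_gpu_prepare_computation(pme, needToUpdateBox, box, wcycle, flags);
 *     pme_gpu_launch_spread(pme, x, wcycle);
 *     pme_gpu_launch_complex_transforms(pme, wcycle);
 *     pme_gpu_launch_gather(pme, wcycle, PmeForceOutputHandling::Set);
 *     // ... later, block and collect the outputs with pme_gpu_wait_for_gpu()
 */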
/*! \brief
 * Blocks until PME GPU tasks are completed, and gets the output forces and virial/energy
 * (if they were to be computed).
 *
 * \param[in]  pme            The PME data structure.
 * \param[out] wcycle         The wallclock counter.
 * \param[out] forces         The output forces.
 * \param[out] virial         The output virial matrix.
 * \param[out] energy         The output energy.
 */
void pme_gpu_wait_for_gpu(const gmx_pme_t *pme,
                          gmx_wallcycle_t wcycle,
                          gmx::ArrayRef<const gmx::RVec> *forces,