Link GPU coordinate producer and consumer tasks
[gromacs.git] / src/gromacs/ewald/pme_gpu.cpp (blob 8bbd184ac8a48d97e98e034c6c1415b7dcae504f)
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2016,2017,2018,2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 * \brief Implements high-level PME GPU functions which do not require GPU framework-specific code.
 *
 * \author Aleksei Iupinov <a.yupinov@gmail.com>
 * \ingroup module_ewald
 */

#include "gmxpre.h"

#include "config.h"

#include <list>

#include "gromacs/ewald/ewald_utils.h"
#include "gromacs/ewald/pme.h"
#include "gromacs/fft/parallel_3dfft.h"
#include "gromacs/math/invertmatrix.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdtypes/enerdata.h"
#include "gromacs/mdtypes/forceoutput.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/stringutil.h"

#include "pme_gpu_internal.h"
#include "pme_grid.h"
#include "pme_internal.h"
#include "pme_solve.h"

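// Resets the GPU timing counters, when this rank runs PME on a GPU.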
void pme_gpu_reset_timings(const gmx_pme_t *pme)
{
    if (pme_gpu_active(pme))
    {
        pme_gpu_reset_timings(pme->gpu);
    }
}

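// Copies the accumulated PME GPU timing data into timings, when this rank runs PME on a GPU.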
void pme_gpu_get_timings(const gmx_pme_t *pme, gmx_wallclock_gpu_pme_t *timings)
{
    if (pme_gpu_active(pme))
    {
        pme_gpu_get_timings(pme->gpu, timings);
    }
}

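// Returns the GPU atom-data alignment used for padding, or 0 when PME does not run on a GPU.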
int pme_gpu_get_padding_size(const gmx_pme_t *pme)
{
    if (!pme || !pme_gpu_active(pme))
    {
        return 0;
    }
    else
    {
        return pme_gpu_get_atom_data_alignment(pme->gpu);
    }
}

/*! \brief
 * A convenience wrapper for launching either the GPU or CPU FFT.
 *
 * \param[in] pme        The PME structure.
 * \param[in] gridIndex  The grid index - should currently always be 0.
 * \param[in] dir        The FFT direction enum.
 * \param[in] wcycle     The wallclock counter.
 */
void inline parallel_3dfft_execute_gpu_wrapper(gmx_pme_t              *pme,
                                               const int               gridIndex,
                                               enum gmx_fft_direction  dir,
                                               gmx_wallcycle_t         wcycle)
{
    GMX_ASSERT(gridIndex == 0, "Only single grid supported");
    if (pme_gpu_performs_FFT(pme->gpu))
    {
        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
        wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_PME);
        pme_gpu_3dfft(pme->gpu, dir, gridIndex);
        wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_PME);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU);
    }
    else
    {
        wallcycle_start(wcycle, ewcPME_FFT_MIXED_MODE);
#pragma omp parallel for num_threads(pme->nthread) schedule(static)
        for (int thread = 0; thread < pme->nthread; thread++)
        {
            gmx_parallel_3dfft_execute(pme->pfft_setup[gridIndex], dir, thread, wcycle);
        }
        wallcycle_stop(wcycle, ewcPME_FFT_MIXED_MODE);
    }
}

/* The PME computation code is split into a few separate functions. */

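// Updates the per-step flags and, when the box has changed, pushes it to the GPU;
// if solving runs on the CPU (mixed mode), also refreshes the reciprocal box and volume on the host.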
void pme_gpu_prepare_computation(gmx_pme_t     *pme,
                                 bool           needToUpdateBox,
                                 const matrix   box,
                                 gmx_wallcycle *wcycle,
                                 int            flags,
                                 bool           useGpuForceReduction)
{
    GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");
    GMX_ASSERT(pme->nnodes > 0, "");
    GMX_ASSERT(pme->nnodes == 1 || pme->ndecompdim > 0, "");

    PmeGpu *pmeGpu = pme->gpu;
    pmeGpu->settings.currentFlags = flags;
    // TODO these flags are only here to honor the CPU PME code, and probably should be removed
    pmeGpu->settings.useGpuForceReduction = useGpuForceReduction;

    bool shouldUpdateBox = false;
    for (int i = 0; i < DIM; ++i)
    {
        for (int j = 0; j <= i; ++j)
        {
            shouldUpdateBox                  |= (pmeGpu->common->previousBox[i][j] != box[i][j]);
            pmeGpu->common->previousBox[i][j] = box[i][j];
        }
    }

    if (needToUpdateBox || shouldUpdateBox) // || is to make the first computation always update
    {
        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
        wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_PME);
        pme_gpu_update_input_box(pmeGpu, box);
        wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_PME);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU);

        if (!pme_gpu_performs_solve(pmeGpu))
        {
            // TODO remove code duplication and add test coverage
            matrix scaledBox;
            pmeGpu->common->boxScaler->scaleBox(box, scaledBox);
            gmx::invertBoxMatrix(scaledBox, pme->recipbox);
            pme->boxVolume = scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ];
        }
    }
}

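// Launches the spline computation and charge spreading on the GPU;
// xReadyOnDevice marks when the coordinates are available on the device.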
void pme_gpu_launch_spread(gmx_pme_t            *pme,
                           GpuEventSynchronizer *xReadyOnDevice,
                           gmx_wallcycle        *wcycle)
{
    GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");

    PmeGpu            *pmeGpu    = pme->gpu;

    const unsigned int gridIndex = 0;
    real              *fftgrid   = pme->fftgrid[gridIndex];
    if (pmeGpu->settings.currentFlags & GMX_PME_SPREAD)
    {
        /* Spread the coefficients on a grid */
        const bool computeSplines = true;
        const bool spreadCharges  = true;
        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
        wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_PME);
        pme_gpu_spread(pmeGpu, xReadyOnDevice, gridIndex, fftgrid, computeSplines, spreadCharges);
        wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_PME);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU);
    }
}

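// Launches the 3D FFTs and the k-space solve, either on the GPU or (in mixed mode) on the CPU,
// according to the current flags.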
void pme_gpu_launch_complex_transforms(gmx_pme_t     *pme,
                                       gmx_wallcycle *wcycle)
{
    PmeGpu            *pmeGpu                 = pme->gpu;
    const bool         computeEnergyAndVirial = (pmeGpu->settings.currentFlags & GMX_PME_CALC_ENER_VIR) != 0;
    const bool         performBackFFT         = (pmeGpu->settings.currentFlags & (GMX_PME_CALC_F | GMX_PME_CALC_POT)) != 0;
    const unsigned int gridIndex              = 0;
    t_complex         *cfftgrid               = pme->cfftgrid[gridIndex];

    if (pmeGpu->settings.currentFlags & GMX_PME_SPREAD)
    {
        if (!pme_gpu_performs_FFT(pmeGpu))
        {
            wallcycle_start(wcycle, ewcWAIT_GPU_PME_SPREAD);
            pme_gpu_sync_spread_grid(pme->gpu);
            wallcycle_stop(wcycle, ewcWAIT_GPU_PME_SPREAD);
        }
    }

    try
    {
        if (pmeGpu->settings.currentFlags & GMX_PME_SOLVE)
        {
            /* do R2C 3D-FFT */
            parallel_3dfft_execute_gpu_wrapper(pme, gridIndex, GMX_FFT_REAL_TO_COMPLEX, wcycle);

            /* solve in k-space for our local cells */
            if (pme_gpu_performs_solve(pmeGpu))
            {
                const auto gridOrdering = pme_gpu_uses_dd(pmeGpu) ? GridOrdering::YZX : GridOrdering::XYZ;
                wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
                wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_PME);
                pme_gpu_solve(pmeGpu, cfftgrid, gridOrdering, computeEnergyAndVirial);
                wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_PME);
                wallcycle_stop(wcycle, ewcLAUNCH_GPU);
            }
            else
            {
                wallcycle_start(wcycle, ewcPME_SOLVE_MIXED_MODE);
#pragma omp parallel for num_threads(pme->nthread) schedule(static)
                for (int thread = 0; thread < pme->nthread; thread++)
                {
                    solve_pme_yzx(pme, cfftgrid, pme->boxVolume,
                                  computeEnergyAndVirial, pme->nthread, thread);
                }
                wallcycle_stop(wcycle, ewcPME_SOLVE_MIXED_MODE);
            }
        }

        if (performBackFFT)
        {
            parallel_3dfft_execute_gpu_wrapper(pme, gridIndex, GMX_FFT_COMPLEX_TO_REAL, wcycle);
        }
    } GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
}

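// Launches the GPU force gather kernel; returns early when gathering is done on the CPU in mixed mode.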
void pme_gpu_launch_gather(const gmx_pme_t          *pme,
                           gmx_wallcycle gmx_unused *wcycle,
                           PmeForceOutputHandling    forceTreatment)
{
    GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");

    if (!pme_gpu_performs_gather(pme->gpu))
    {
        return;
    }

    wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
    wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_PME);
    const unsigned int gridIndex = 0;
    real              *fftgrid   = pme->fftgrid[gridIndex];
    pme_gpu_gather(pme->gpu, forceTreatment, reinterpret_cast<float *>(fftgrid));
    wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_PME);
    wallcycle_stop(wcycle, ewcLAUNCH_GPU);
}

//! Accumulate the \c forceToAdd to \c f, using the available threads.
static void sum_forces(gmx::ArrayRef<gmx::RVec>       f,
                       gmx::ArrayRef<const gmx::RVec> forceToAdd)
{
    const int      end = forceToAdd.size();

    int gmx_unused nt  = gmx_omp_nthreads_get(emntPME);
#pragma omp parallel for num_threads(nt) schedule(static)
    for (int i = 0; i < end; i++)
    {
        f[i] += forceToAdd[i];
    }
}

//! Reduce quantities from \c output to \c forceWithVirial and \c enerd.
static void pme_gpu_reduce_outputs(const int             flags,
                                   const PmeOutput      &output,
                                   gmx_wallcycle        *wcycle,
                                   gmx::ForceWithVirial *forceWithVirial,
                                   gmx_enerdata_t       *enerd)
{
    wallcycle_start(wcycle, ewcPME_GPU_F_REDUCTION);
    GMX_ASSERT(forceWithVirial, "Invalid force pointer");

    const bool haveComputedEnergyAndVirial = (flags & GMX_PME_CALC_ENER_VIR) != 0;
    if (haveComputedEnergyAndVirial)
    {
        GMX_ASSERT(enerd, "Invalid energy output manager");
        forceWithVirial->addVirialContribution(output.coulombVirial_);
        enerd->term[F_COUL_RECIP] += output.coulombEnergy_;
    }
    if (output.haveForceOutput_)
    {
        sum_forces(forceWithVirial->force_, output.forces_);
    }
    wallcycle_stop(wcycle, ewcPME_GPU_F_REDUCTION);
}

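// Attempts to finish the PME GPU work: with GpuTaskCompletion::Check it returns false if the stream
// is still busy; otherwise it synchronizes, then reduces the outputs on the CPU.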
bool pme_gpu_try_finish_task(gmx_pme_t            *pme,
                             const int             flags,
                             gmx_wallcycle        *wcycle,
                             gmx::ForceWithVirial *forceWithVirial,
                             gmx_enerdata_t       *enerd,
                             GpuTaskCompletion     completionKind)
{
    GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");
    GMX_ASSERT(!pme->gpu->settings.useGpuForceReduction, "GPU force reduction should not be active on the pme_gpu_try_finish_task() path");

    // First, if possible, check whether all tasks on the stream have
    // completed, and return fast if not. Accumulate to wcycle the
    // time needed for that checking, but do not yet record that the
    // gather has occurred.
    bool           needToSynchronize      = true;
    constexpr bool c_streamQuerySupported = (GMX_GPU == GMX_GPU_CUDA);
    // TODO: implement c_streamQuerySupported with an additional GpuEventSynchronizer per stream (#2521)
    if ((completionKind == GpuTaskCompletion::Check) && c_streamQuerySupported)
    {
        wallcycle_start_nocount(wcycle, ewcWAIT_GPU_PME_GATHER);
        // Query the PME stream for completion of all tasks enqueued and
        // if we're not done, stop the timer before early return.
        const bool pmeGpuDone = pme_gpu_stream_query(pme->gpu);
        wallcycle_stop(wcycle, ewcWAIT_GPU_PME_GATHER);

        if (!pmeGpuDone)
        {
            return false;
        }
        needToSynchronize = false;
    }

    wallcycle_start(wcycle, ewcWAIT_GPU_PME_GATHER);
    // If the above check passed, then there is no need to make an
    // explicit synchronization call.
    if (needToSynchronize)
    {
        // Synchronize the whole PME stream at once, including D2H result transfers.
        pme_gpu_synchronize(pme->gpu);
    }
    pme_gpu_update_timings(pme->gpu);
    PmeOutput output = pme_gpu_getOutput(*pme, flags);
    wallcycle_stop(wcycle, ewcWAIT_GPU_PME_GATHER);

    GMX_ASSERT(pme->gpu->settings.useGpuForceReduction == !output.haveForceOutput_, "When forces are reduced on the CPU, there needs to be force output");
    pme_gpu_reduce_outputs(flags, output, wcycle, forceWithVirial, enerd);

    return true;
}

// This is used by PME-only ranks
PmeOutput pme_gpu_wait_finish_task(gmx_pme_t     *pme,
                                   const int      flags,
                                   gmx_wallcycle *wcycle)
{
    GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");

    wallcycle_start(wcycle, ewcWAIT_GPU_PME_GATHER);

    // Synchronize the whole PME stream at once, including D2H result transfers
    // if there are outputs we need to wait for at this step; we still call getOutput
    // for uniformity and because it sets the PmeOutput.haveForceOutput_.
    const bool haveComputedEnergyAndVirial = (flags & GMX_PME_CALC_ENER_VIR) != 0;
    if (!pme->gpu->settings.useGpuForceReduction || haveComputedEnergyAndVirial)
    {
        pme_gpu_synchronize(pme->gpu);
    }

    PmeOutput output = pme_gpu_getOutput(*pme, flags);
    wallcycle_stop(wcycle, ewcWAIT_GPU_PME_GATHER);
    return output;
}

// This is used when not using the alternate-waiting reduction
void pme_gpu_wait_and_reduce(gmx_pme_t            *pme,
                             const int             flags,
                             gmx_wallcycle        *wcycle,
                             gmx::ForceWithVirial *forceWithVirial,
                             gmx_enerdata_t       *enerd)
{
    PmeOutput output = pme_gpu_wait_finish_task(pme, flags, wcycle);
    GMX_ASSERT(pme->gpu->settings.useGpuForceReduction == !output.haveForceOutput_, "When forces are reduced on the CPU, there needs to be force output");
    pme_gpu_reduce_outputs(flags, output, wcycle, forceWithVirial, enerd);
}

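// Launches the clearing of the grids and of the energy/virial buffers on the GPU, preparing for the next step.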
void pme_gpu_reinit_computation(const gmx_pme_t *pme,
                                gmx_wallcycle   *wcycle)
{
    GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");

    wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
    wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_PME);

    pme_gpu_update_timings(pme->gpu);

    pme_gpu_clear_grids(pme->gpu);
    pme_gpu_clear_energy_virial(pme->gpu);

    wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_PME);
    wallcycle_stop(wcycle, ewcLAUNCH_GPU);
}

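// Returns the device coordinate buffer; requires PME to be active on a GPU.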
DeviceBuffer<float> pme_gpu_get_device_x(const gmx_pme_t *pme)
{
    GMX_ASSERT((pme && pme_gpu_active(pme)), "PME GPU coordinates buffer was requested from uninitialized PME module");
    return pme_gpu_get_kernelparam_coordinates(pme->gpu);
}

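// Returns the device force buffer, or nullptr when PME does not run on a GPU.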
void *pme_gpu_get_device_f(const gmx_pme_t *pme)
{
    if (!pme || !pme_gpu_active(pme))
    {
        return nullptr;
    }
    return pme_gpu_get_kernelparam_forces(pme->gpu);
}

void pme_gpu_set_device_x(const gmx_pme_t     *pme,
                          DeviceBuffer<float>  d_x)
{
    GMX_ASSERT(pme != nullptr, "Null pointer is passed as a PME to the set coordinates function.");
    GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");

    pme_gpu_set_kernelparam_coordinates(pme->gpu, d_x);
}

void *pme_gpu_get_device_stream(const gmx_pme_t *pme)
{
    if (!pme || !pme_gpu_active(pme))
    {
        return nullptr;
    }
    return pme_gpu_get_stream(pme->gpu);
}

void *pme_gpu_get_device_context(const gmx_pme_t *pme)
{
    if (!pme || !pme_gpu_active(pme))
    {
        return nullptr;
    }
    return pme_gpu_get_context(pme->gpu);
}

GpuEventSynchronizer *pme_gpu_get_f_ready_synchronizer(const gmx_pme_t *pme)
{
    if (!pme || !pme_gpu_active(pme))
    {
        return nullptr;
    }

    return pme_gpu_get_forces_ready_synchronizer(pme->gpu);
}