/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2016,2017,2018,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 * \brief
 * Implements common routines for PME tests.
 *
 * \author Aleksei Iupinov <a.yupinov@gmail.com>
 * \ingroup module_ewald
 */
#include "gmxpre.h"

#include "pmetestcommon.h"

#include <cstring>

#include <algorithm>

#include "gromacs/domdec/domdec.h"
#include "gromacs/ewald/pme_gather.h"
#include "gromacs/ewald/pme_gpu_internal.h"
#include "gromacs/ewald/pme_gpu_staging.h"
#include "gromacs/ewald/pme_grid.h"
#include "gromacs/ewald/pme_internal.h"
#include "gromacs/ewald/pme_redistribute.h"
#include "gromacs/ewald/pme_solve.h"
#include "gromacs/ewald/pme_spread.h"
#include "gromacs/fft/parallel_3dfft.h"
#include "gromacs/gpu_utils/gpu_utils.h"
#include "gromacs/math/invertmatrix.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/topology/topology.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/stringutil.h"

#include "testutils/testasserts.h"
namespace gmx
{
namespace test
{
bool pmeSupportsInputForMode(const gmx_hw_info_t& hwinfo, const t_inputrec* inputRec, CodePath mode)
{
    bool       implemented;
    gmx_mtop_t mtop;
    switch (mode)
    {
        case CodePath::CPU: implemented = true; break;

        case CodePath::GPU:
            implemented = (pme_gpu_supports_build(nullptr) && pme_gpu_supports_hardware(hwinfo, nullptr)
                           && pme_gpu_supports_input(*inputRec, mtop, nullptr));
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
    return implemented;
}
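
/* A hypothetical usage sketch (not a call pattern fixed by this file): a test
 * would typically query this helper before constructing PME data, and skip
 * the GPU code path when the build or hardware cannot support it, e.g.
 *
 *   if (!pmeSupportsInputForMode(hwinfo, &inputRec, CodePath::GPU))
 *   {
 *       return; // or otherwise mark the test as skipped
 *   }
 */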

uint64_t getSplineModuliDoublePrecisionUlps(int splineOrder)
{
    /* Arbitrary ulp tolerance for sine/cosine implementation. It's
     * hard to know what to pick without testing lots of
     * implementations. */
    const uint64_t sineUlps = 10;
    return 4 * (splineOrder - 2) + 2 * sineUlps * splineOrder;
}
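
/* Worked example of the tolerance formula above (an illustration, not a
 * normative value): for the common PME interpolation order of 4, the bound is
 * 4 * (4 - 2) + 2 * 10 * 4 = 8 + 80 = 88 ulps. */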

//! PME initialization
PmeSafePointer pmeInitWrapper(const t_inputrec*        inputRec,
                              const CodePath           mode,
                              const DeviceInformation* deviceInfo,
                              const PmeGpuProgram*     pmeGpuProgram,
                              const Matrix3x3&         box,
                              const real               ewaldCoeff_q,
                              const real               ewaldCoeff_lj)
{
    const MDLogger dummyLogger;
    const auto     runMode       = (mode == CodePath::CPU) ? PmeRunMode::CPU : PmeRunMode::Mixed;
    t_commrec      dummyCommrec  = { 0 };
    NumPmeDomains  numPmeDomains = { 1, 1 };
    gmx_pme_t*     pmeDataRaw =
            gmx_pme_init(&dummyCommrec, numPmeDomains, inputRec, false, false, true, ewaldCoeff_q,
                         ewaldCoeff_lj, 1, runMode, nullptr, deviceInfo, pmeGpuProgram, dummyLogger);
    PmeSafePointer pme(pmeDataRaw); // taking ownership

    // TODO get rid of this with proper matrix type
    matrix boxTemp;
    for (int i = 0; i < DIM; i++)
    {
        for (int j = 0; j < DIM; j++)
        {
            boxTemp[i][j] = box[i * DIM + j];
        }
    }
    const char* boxError = check_box(PbcType::Unset, boxTemp);
    GMX_RELEASE_ASSERT(boxError == nullptr, boxError);

    switch (mode)
    {
        case CodePath::CPU: invertBoxMatrix(boxTemp, pme->recipbox); break;

        case CodePath::GPU:
            pme_gpu_set_testing(pme->gpu, true);
            pme_gpu_update_input_box(pme->gpu, boxTemp);
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }

    return pme;
}
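
/* A minimal usage sketch, assuming a prepared t_inputrec and a 3x3 box in
 * row-major Matrix3x3 form (variable names here are illustrative):
 *
 *   PmeSafePointer pme = pmeInitWrapper(&inputRec, CodePath::CPU, nullptr, nullptr,
 *                                       box, ewaldCoeff_q, ewaldCoeff_lj);
 *
 * The returned smart pointer owns the gmx_pme_t and releases it on scope exit. */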

//! Simple PME initialization based on input, no atom data
PmeSafePointer pmeInitEmpty(const t_inputrec*        inputRec,
                            const CodePath           mode,
                            const DeviceInformation* deviceInfo,
                            const PmeGpuProgram*     pmeGpuProgram,
                            const Matrix3x3&         box,
                            real                     ewaldCoeff_q,
                            real                     ewaldCoeff_lj)
{
    return pmeInitWrapper(inputRec, mode, deviceInfo, pmeGpuProgram, box, ewaldCoeff_q, ewaldCoeff_lj);
    // hiding the fact that PME actually needs to know the number of atoms in advance
}

//! Make a GPU state-propagator manager
std::unique_ptr<StatePropagatorDataGpu> makeStatePropagatorDataGpu(const gmx_pme_t& pme)
{
    // TODO: Pin the host buffer and use async memory copies
    // TODO: Special constructor for PME-only rank / PME-tests is used here. There should be a mechanism to
    //       restrict one from using other constructor here.
    return std::make_unique<StatePropagatorDataGpu>(
            pme_gpu_get_device_stream(&pme), pme_gpu_get_device_context(&pme),
            GpuApiCallBehavior::Sync, pme_gpu_get_padding_size(&pme), nullptr);
}

//! PME initialization with atom data
void pmeInitAtoms(gmx_pme_t*               pme,
                  StatePropagatorDataGpu*  stateGpu,
                  const CodePath           mode,
                  const CoordinatesVector& coordinates,
                  const ChargesVector&     charges)
{
    const index atomCount = coordinates.size();
    GMX_RELEASE_ASSERT(atomCount == charges.ssize(), "Mismatch in atom data");
    PmeAtomComm* atc = nullptr;

    switch (mode)
    {
        case CodePath::CPU:
            atc              = &(pme->atc[0]);
            atc->x           = coordinates;
            atc->coefficient = charges;
            gmx_pme_reinit_atoms(pme, atomCount, charges.data());
            /* With decomposition there would be more boilerplate atc code here, e.g. do_redist_pos_coeffs */
            break;

        case CodePath::GPU:
            // TODO: Avoid use of atc in the GPU code path
            atc = &(pme->atc[0]);
            // We need to set atc->n for passing the size in the tests
            atc->setNumAtoms(atomCount);
            gmx_pme_reinit_atoms(pme, atomCount, charges.data());

            stateGpu->reinit(atomCount, atomCount);
            stateGpu->copyCoordinatesToGpu(arrayRefFromArray(coordinates.data(), coordinates.size()),
                                           gmx::AtomLocality::All);
            pme_gpu_set_kernelparam_coordinates(pme->gpu, stateGpu->getCoordinates());

            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}
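
/* A sketch of the expected call order for the GPU path, assuming the helpers
 * above (illustrative only; the CPU path passes nullptr for stateGpu):
 *
 *   PmeSafePointer pme      = pmeInitWrapper(&inputRec, CodePath::GPU, deviceInfo,
 *                                            pmeGpuProgram, box, ewaldCoeff_q, ewaldCoeff_lj);
 *   auto           stateGpu = makeStatePropagatorDataGpu(*pme);
 *   pmeInitAtoms(pme.get(), stateGpu.get(), CodePath::GPU, coordinates, charges);
 */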

//! Getting local PME real grid pointer for test I/O
static real* pmeGetRealGridInternal(const gmx_pme_t* pme)
{
    const size_t gridIndex = 0;
    return pme->fftgrid[gridIndex];
}

//! Getting local PME real grid dimensions
static void pmeGetRealGridSizesInternal(const gmx_pme_t* pme,
                                        CodePath         mode,
                                        IVec&            gridSize,       //NOLINT(google-runtime-references)
                                        IVec&            paddedGridSize) //NOLINT(google-runtime-references)
{
    const size_t gridIndex = 0;
    IVec         gridOffsetUnused;
    switch (mode)
    {
        case CodePath::CPU:
            gmx_parallel_3dfft_real_limits(pme->pfft_setup[gridIndex], gridSize, gridOffsetUnused,
                                           paddedGridSize);
            break;

        case CodePath::GPU:
            pme_gpu_get_real_grid_sizes(pme->gpu, &gridSize, &paddedGridSize);
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}

//! Getting local PME complex grid pointer for test I/O
static t_complex* pmeGetComplexGridInternal(const gmx_pme_t* pme)
{
    const size_t gridIndex = 0;
    return pme->cfftgrid[gridIndex];
}

//! Getting local PME complex grid dimensions
static void pmeGetComplexGridSizesInternal(const gmx_pme_t* pme,
                                           IVec&            gridSize,       //NOLINT(google-runtime-references)
                                           IVec&            paddedGridSize) //NOLINT(google-runtime-references)
{
    const size_t gridIndex = 0;
    IVec         gridOffsetUnused, complexOrderUnused;
    gmx_parallel_3dfft_complex_limits(pme->pfft_setup[gridIndex], complexOrderUnused, gridSize,
                                      gridOffsetUnused, paddedGridSize); // TODO: what about YZX ordering?
}

//! Getting the PME grid memory buffer and its sizes - template definition
template<typename ValueType>
static void pmeGetGridAndSizesInternal(const gmx_pme_t* /*unused*/,
                                       CodePath /*unused*/,
                                       ValueType*& /*unused*/, //NOLINT(google-runtime-references)
                                       IVec& /*unused*/,       //NOLINT(google-runtime-references)
                                       IVec& /*unused*/)       //NOLINT(google-runtime-references)
{
    GMX_THROW(InternalError("Deleted function call"));
    // explicitly deleting general template does not compile in clang/icc, see https://llvm.org/bugs/show_bug.cgi?id=17537
}

//! Getting the PME real grid memory buffer and its sizes
template<>
void pmeGetGridAndSizesInternal<real>(const gmx_pme_t* pme, CodePath mode, real*& grid, IVec& gridSize, IVec& paddedGridSize)
{
    grid = pmeGetRealGridInternal(pme);
    pmeGetRealGridSizesInternal(pme, mode, gridSize, paddedGridSize);
}

//! Getting the PME complex grid memory buffer and its sizes
template<>
void pmeGetGridAndSizesInternal<t_complex>(const gmx_pme_t* pme,
                                           CodePath /*unused*/,
                                           t_complex*& grid,
                                           IVec&       gridSize,
                                           IVec&       paddedGridSize)
{
    grid = pmeGetComplexGridInternal(pme);
    pmeGetComplexGridSizesInternal(pme, gridSize, paddedGridSize);
}

//! PME spline calculation and charge spreading
void pmePerformSplineAndSpread(gmx_pme_t* pme,
                               CodePath   mode, // TODO const qualifiers elsewhere
                               bool       computeSplines,
                               bool       spreadCharges)
{
    GMX_RELEASE_ASSERT(pme != nullptr, "PME data is not initialized");
    PmeAtomComm* atc                          = &(pme->atc[0]);
    const size_t gridIndex                    = 0;
    const bool   computeSplinesForZeroCharges = true;
    real*        fftgrid                      = spreadCharges ? pme->fftgrid[gridIndex] : nullptr;
    real*        pmegrid                      = pme->pmegrid[gridIndex].grid.grid;

    switch (mode)
    {
        case CodePath::CPU:
            spread_on_grid(pme, atc, &pme->pmegrid[gridIndex], computeSplines, spreadCharges,
                           fftgrid, computeSplinesForZeroCharges, gridIndex);
            if (spreadCharges && !pme->bUseThreads)
            {
                wrap_periodic_pmegrid(pme, pmegrid);
                copy_pmegrid_to_fftgrid(pme, pmegrid, fftgrid, gridIndex);
            }
            break;

        case CodePath::GPU:
        {
            // no synchronization needed as x is transferred in the PME stream
            GpuEventSynchronizer* xReadyOnDevice = nullptr;
            pme_gpu_spread(pme->gpu, xReadyOnDevice, gridIndex, fftgrid, computeSplines, spreadCharges);
        }
        break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}

//! Getting the internal spline data buffer pointer
static real* pmeGetSplineDataInternal(const gmx_pme_t* pme, PmeSplineDataType type, int dimIndex)
{
    GMX_ASSERT((0 <= dimIndex) && (dimIndex < DIM), "Invalid dimension index");
    const PmeAtomComm* atc          = &(pme->atc[0]);
    const size_t       threadIndex  = 0;
    real*              splineBuffer = nullptr;
    switch (type)
    {
        case PmeSplineDataType::Values:
            splineBuffer = atc->spline[threadIndex].theta.coefficients[dimIndex];
            break;

        case PmeSplineDataType::Derivatives:
            splineBuffer = atc->spline[threadIndex].dtheta.coefficients[dimIndex];
            break;

        default: GMX_THROW(InternalError("Unknown spline data type"));
    }
    return splineBuffer;
}

//! PME solving
void pmePerformSolve(const gmx_pme_t*  pme,
                     CodePath          mode,
                     PmeSolveAlgorithm method,
                     real              cellVolume,
                     GridOrdering      gridOrdering,
                     bool              computeEnergyAndVirial)
{
    t_complex*   h_grid              = pmeGetComplexGridInternal(pme);
    const bool   useLorentzBerthelot = false;
    const size_t threadIndex         = 0;
    switch (mode)
    {
        case CodePath::CPU:
            if (gridOrdering != GridOrdering::YZX)
            {
                GMX_THROW(InternalError("Test not implemented for this mode"));
            }
            switch (method)
            {
                case PmeSolveAlgorithm::Coulomb:
                    solve_pme_yzx(pme, h_grid, cellVolume, computeEnergyAndVirial, pme->nthread, threadIndex);
                    break;

                case PmeSolveAlgorithm::LennardJones:
                    solve_pme_lj_yzx(pme, &h_grid, useLorentzBerthelot, cellVolume,
                                     computeEnergyAndVirial, pme->nthread, threadIndex);
                    break;

                default: GMX_THROW(InternalError("Test not implemented for this mode"));
            }
            break;

        case CodePath::GPU:
            switch (method)
            {
                case PmeSolveAlgorithm::Coulomb:
                    pme_gpu_solve(pme->gpu, h_grid, gridOrdering, computeEnergyAndVirial);
                    break;

                default: GMX_THROW(InternalError("Test not implemented for this mode"));
            }
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}
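
/* Note (an inference from the solver names above, not a guarantee documented
 * here): the CPU solvers operate on the YZX-transposed complex grid layout
 * produced by the parallel 3D FFT, which is why the CPU path rejects any
 * other grid ordering. */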

//! PME force gathering
void pmePerformGather(gmx_pme_t* pme, CodePath mode, PmeForceOutputHandling inputTreatment, ForcesVector& forces)
{
    PmeAtomComm* atc       = &(pme->atc[0]);
    const index  atomCount = atc->numAtoms();
    GMX_RELEASE_ASSERT(forces.ssize() == atomCount, "Invalid force buffer size");
    const bool forceReductionWithInput = (inputTreatment == PmeForceOutputHandling::ReduceWithInput);
    const real   scale       = 1.0;
    const size_t threadIndex = 0;
    const size_t gridIndex   = 0;
    real*        pmegrid     = pme->pmegrid[gridIndex].grid.grid;
    real*        fftgrid     = pme->fftgrid[gridIndex];

    switch (mode)
    {
        case CodePath::CPU:
            atc->f = forces;
            if (atc->nthread == 1)
            {
                // something which is normally done in serial spline computation (make_thread_local_ind())
                atc->spline[threadIndex].n = atomCount;
            }
            copy_fftgrid_to_pmegrid(pme, fftgrid, pmegrid, gridIndex, pme->nthread, threadIndex);
            unwrap_periodic_pmegrid(pme, pmegrid);
            gather_f_bsplines(pme, pmegrid, !forceReductionWithInput, atc, &atc->spline[threadIndex], scale);
            break;

        case CodePath::GPU:
        {
            // Variable initialization needs a non-switch scope
            PmeOutput output = pme_gpu_getOutput(*pme, GMX_PME_CALC_F);
            GMX_ASSERT(forces.size() == output.forces_.size(),
                       "Size of force buffers did not match");
            if (forceReductionWithInput)
            {
                std::copy(std::begin(forces), std::end(forces), std::begin(output.forces_));
            }
            pme_gpu_gather(pme->gpu, inputTreatment, reinterpret_cast<float*>(fftgrid));
            std::copy(std::begin(output.forces_), std::end(output.forces_), std::begin(forces));
        }
        break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}

//! PME test finalization before fetching the outputs
void pmeFinalizeTest(const gmx_pme_t* pme, CodePath mode)
{
    switch (mode)
    {
        case CodePath::CPU: break;

        case CodePath::GPU: pme_gpu_synchronize(pme->gpu); break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}
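
/* A rough sketch of how the helpers above are meant to chain together
 * (illustrative; actual tests typically exercise one stage at a time,
 * feeding inputs in via the pmeSetRealGrid()/pmeSetComplexGrid()/
 * pmeSetSplineData() helpers defined below):
 *
 *   PmeSafePointer pme = pmeInitWrapper(&inputRec, mode, deviceInfo, pmeGpuProgram,
 *                                       box, ewaldCoeff_q, ewaldCoeff_lj);
 *   pmeInitAtoms(pme.get(), stateGpu, mode, coordinates, charges);
 *   pmePerformSplineAndSpread(pme.get(), mode, true, true);
 *   pmePerformGather(pme.get(), mode, PmeForceOutputHandling::Set, forces);
 *   pmeFinalizeTest(pme.get(), mode);
 */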

//! Setting atom spline values/derivatives to be used in spread/gather
void pmeSetSplineData(const gmx_pme_t*             pme,
                      CodePath                     mode,
                      const SplineParamsDimVector& splineValues,
                      PmeSplineDataType            type,
                      int                          dimIndex)
{
    const PmeAtomComm* atc       = &(pme->atc[0]);
    const index        atomCount = atc->numAtoms();
    const index        pmeOrder  = pme->pme_order;
    const index        dimSize   = pmeOrder * atomCount;
    GMX_RELEASE_ASSERT(dimSize == splineValues.ssize(), "Mismatch in spline data");
    real* splineBuffer = pmeGetSplineDataInternal(pme, type, dimIndex);

    switch (mode)
    {
        case CodePath::CPU:
            std::copy(splineValues.begin(), splineValues.end(), splineBuffer);
            break;

        case CodePath::GPU:
            std::copy(splineValues.begin(), splineValues.end(), splineBuffer);
            pme_gpu_transform_spline_atom_data(pme->gpu, atc, type, dimIndex, PmeLayoutTransform::HostToGpu);
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}

//! Setting gridline indices to be used in spread/gather
void pmeSetGridLineIndices(gmx_pme_t* pme, CodePath mode, const GridLineIndicesVector& gridLineIndices)
{
    PmeAtomComm* atc       = &(pme->atc[0]);
    const index  atomCount = atc->numAtoms();
    GMX_RELEASE_ASSERT(atomCount == gridLineIndices.ssize(), "Mismatch in gridline indices size");

    IVec paddedGridSizeUnused, gridSize(0, 0, 0);
    pmeGetRealGridSizesInternal(pme, mode, gridSize, paddedGridSizeUnused);

    for (const auto& index : gridLineIndices)
    {
        for (int i = 0; i < DIM; i++)
        {
            GMX_RELEASE_ASSERT((0 <= index[i]) && (index[i] < gridSize[i]),
                               "Invalid gridline index");
        }
    }

    switch (mode)
    {
        case CodePath::GPU:
            memcpy(pme_gpu_staging(pme->gpu).h_gridlineIndices, gridLineIndices.data(),
                   atomCount * sizeof(gridLineIndices[0]));
            break;

        case CodePath::CPU:
            atc->idx.resize(gridLineIndices.size());
            std::copy(gridLineIndices.begin(), gridLineIndices.end(), atc->idx.begin());
            break;
        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}

//! Getting plain index into the complex 3d grid
inline size_t pmeGetGridPlainIndexInternal(const IVec& index, const IVec& paddedGridSize, GridOrdering gridOrdering)
{
    size_t result;
    switch (gridOrdering)
    {
        case GridOrdering::YZX:
            result = (index[YY] * paddedGridSize[ZZ] + index[ZZ]) * paddedGridSize[XX] + index[XX];
            break;

        case GridOrdering::XYZ:
            result = (index[XX] * paddedGridSize[YY] + index[YY]) * paddedGridSize[ZZ] + index[ZZ];
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
    return result;
}
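
/* Worked example of the flattening above (illustrative numbers): for
 * paddedGridSize = (10, 12, 14) and index = (1, 2, 3),
 *   XYZ: (1 * 12 + 2) * 14 + 3 = 199, i.e. X is the slowest-varying dimension;
 *   YZX: (2 * 14 + 3) * 10 + 1 = 311, i.e. Y is the slowest-varying dimension. */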

//! Setting real or complex grid
template<typename ValueType>
static void pmeSetGridInternal(const gmx_pme_t*                        pme,
                               CodePath                                mode,
                               GridOrdering                            gridOrdering,
                               const SparseGridValuesInput<ValueType>& gridValues)
{
    IVec       gridSize(0, 0, 0), paddedGridSize(0, 0, 0);
    ValueType* grid;
    pmeGetGridAndSizesInternal<ValueType>(pme, mode, grid, gridSize, paddedGridSize);

    switch (mode)
    {
        case CodePath::GPU: // intentional absence of break, the grid will be copied from the host buffer in testing mode
        case CodePath::CPU:
            std::memset(grid, 0,
                        paddedGridSize[XX] * paddedGridSize[YY] * paddedGridSize[ZZ] * sizeof(ValueType));
            for (const auto& gridValue : gridValues)
            {
                for (int i = 0; i < DIM; i++)
                {
                    GMX_RELEASE_ASSERT((0 <= gridValue.first[i]) && (gridValue.first[i] < gridSize[i]),
                                       "Invalid grid value index");
                }
                const size_t gridValueIndex =
                        pmeGetGridPlainIndexInternal(gridValue.first, paddedGridSize, gridOrdering);
                grid[gridValueIndex] = gridValue.second;
            }
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}

//! Setting real grid to be used in gather
void pmeSetRealGrid(const gmx_pme_t* pme, CodePath mode, const SparseRealGridValuesInput& gridValues)
{
    pmeSetGridInternal<real>(pme, mode, GridOrdering::XYZ, gridValues);
}

//! Setting complex grid to be used in solve
void pmeSetComplexGrid(const gmx_pme_t*                    pme,
                       CodePath                            mode,
                       GridOrdering                        gridOrdering,
                       const SparseComplexGridValuesInput& gridValues)
{
    pmeSetGridInternal<t_complex>(pme, mode, gridOrdering, gridValues);
}

//! Getting the single dimension's spline values or derivatives
SplineParamsDimVector pmeGetSplineData(const gmx_pme_t* pme, CodePath mode, PmeSplineDataType type, int dimIndex)
{
    GMX_RELEASE_ASSERT(pme != nullptr, "PME data is not initialized");
    const PmeAtomComm* atc       = &(pme->atc[0]);
    const size_t       atomCount = atc->numAtoms();
    const size_t       pmeOrder  = pme->pme_order;
    const size_t       dimSize   = pmeOrder * atomCount;

    real*                 sourceBuffer = pmeGetSplineDataInternal(pme, type, dimIndex);
    SplineParamsDimVector result;
    switch (mode)
    {
        case CodePath::GPU:
            pme_gpu_transform_spline_atom_data(pme->gpu, atc, type, dimIndex, PmeLayoutTransform::GpuToHost);
            result = arrayRefFromArray(sourceBuffer, dimSize);
            break;

        case CodePath::CPU: result = arrayRefFromArray(sourceBuffer, dimSize); break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
    return result;
}

//! Getting the gridline indices
GridLineIndicesVector pmeGetGridlineIndices(const gmx_pme_t* pme, CodePath mode)
{
    GMX_RELEASE_ASSERT(pme != nullptr, "PME data is not initialized");
    const PmeAtomComm* atc       = &(pme->atc[0]);
    const size_t       atomCount = atc->numAtoms();

    GridLineIndicesVector gridLineIndices;
    switch (mode)
    {
        case CodePath::GPU:
            gridLineIndices = arrayRefFromArray(
                    reinterpret_cast<IVec*>(pme_gpu_staging(pme->gpu).h_gridlineIndices), atomCount);
            break;

        case CodePath::CPU: gridLineIndices = atc->idx; break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
    return gridLineIndices;
}

//! Getting real or complex grid - only non zero values
template<typename ValueType>
static SparseGridValuesOutput<ValueType> pmeGetGridInternal(const gmx_pme_t* pme,
                                                            CodePath         mode,
                                                            GridOrdering     gridOrdering)
{
    IVec       gridSize(0, 0, 0), paddedGridSize(0, 0, 0);
    ValueType* grid;
    pmeGetGridAndSizesInternal<ValueType>(pme, mode, grid, gridSize, paddedGridSize);
    SparseGridValuesOutput<ValueType> gridValues;
    switch (mode)
    {
        case CodePath::GPU: // intentional absence of break
        case CodePath::CPU:
            gridValues.clear();
            for (int ix = 0; ix < gridSize[XX]; ix++)
            {
                for (int iy = 0; iy < gridSize[YY]; iy++)
                {
                    for (int iz = 0; iz < gridSize[ZZ]; iz++)
                    {
                        IVec         temp(ix, iy, iz);
                        const size_t gridValueIndex =
                                pmeGetGridPlainIndexInternal(temp, paddedGridSize, gridOrdering);
                        const ValueType value = grid[gridValueIndex];
                        if (value != ValueType{})
                        {
                            auto key        = formatString("Cell %d %d %d", ix, iy, iz);
                            gridValues[key] = value;
                        }
                    }
                }
            }
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
    return gridValues;
}

//! Getting the real grid (spreading output of pmePerformSplineAndSpread())
SparseRealGridValuesOutput pmeGetRealGrid(const gmx_pme_t* pme, CodePath mode)
{
    return pmeGetGridInternal<real>(pme, mode, GridOrdering::XYZ);
}

//! Getting the complex grid output of pmePerformSolve()
SparseComplexGridValuesOutput pmeGetComplexGrid(const gmx_pme_t* pme, CodePath mode, GridOrdering gridOrdering)
{
    return pmeGetGridInternal<t_complex>(pme, mode, gridOrdering);
}

//! Getting the reciprocal energy and virial
PmeOutput pmeGetReciprocalEnergyAndVirial(const gmx_pme_t* pme, CodePath mode, PmeSolveAlgorithm method)
{
    PmeOutput output;
    switch (mode)
    {
        case CodePath::CPU:
            switch (method)
            {
                case PmeSolveAlgorithm::Coulomb:
                    get_pme_ener_vir_q(pme->solve_work, pme->nthread, &output);
                    break;

                case PmeSolveAlgorithm::LennardJones:
                    get_pme_ener_vir_lj(pme->solve_work, pme->nthread, &output);
                    break;

                default: GMX_THROW(InternalError("Test not implemented for this mode"));
            }
            break;
        case CodePath::GPU:
            switch (method)
            {
                case PmeSolveAlgorithm::Coulomb: pme_gpu_getEnergyAndVirial(*pme, &output); break;

                default: GMX_THROW(InternalError("Test not implemented for this mode"));
            }
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
    return output;
}

} // namespace test
} // namespace gmx