Reduce hwloc & cpuid test requirements
[gromacs.git] / src / programs / mdrun / tests / moduletest.cpp
blob cf1c3ada4ba36f10ee4f030eba1192600862ecbb
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2013,2014,2015,2016, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 * \brief
 * Implements classes in moduletest.h.
 *
 * \author Mark Abraham <mark.j.abraham@gmail.com>
 * \ingroup module_mdrun_integration_tests
 */
#include "gmxpre.h"

#include "moduletest.h"

#include "config.h"

#include <cstdio>

#include "gromacs/gmxpreprocess/grompp.h"
#include "gromacs/hardware/detecthardware.h"
#include "gromacs/options/basicoptions.h"
#include "gromacs/options/ioptionscontainer.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/basenetwork.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/textwriter.h"
#include "programs/mdrun/mdrun_main.h"

#include "testutils/cmdlinetest.h"
#include "testutils/integrationtests.h"
#include "testutils/testoptions.h"
namespace gmx
{
namespace test
{

/********************************************************************
 * MdrunTestFixture
 */

namespace
{

#if GMX_THREAD_MPI || defined(DOXYGEN)
//! Number of tMPI threads for child mdrun call.
int g_numThreads = 1;
#endif
#if GMX_OPENMP || defined(DOXYGEN)
//! Number of OpenMP threads for child mdrun call.
int g_numOpenMPThreads = 1;
#endif
//! \cond
GMX_TEST_OPTIONS(MdrunTestOptions, options)
{
    GMX_UNUSED_VALUE(options);
#if GMX_THREAD_MPI
    options->addOption(IntegerOption("nt").store(&g_numThreads)
                           .description("Number of thread-MPI threads/ranks for child mdrun calls"));
#endif
#if GMX_OPENMP
    options->addOption(IntegerOption("nt_omp").store(&g_numOpenMPThreads)
                           .description("Number of OpenMP threads for child mdrun calls"));
#endif
}
//! \endcond

}       // namespace
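
/* Note for test authors: the options registered above are parsed from the
 * test binary's own command line, so a child mdrun can be given more
 * resources with an invocation along the lines of
 *   mdrun-test -nt 2 -nt_omp 2
 * (the binary name and availability of each option depend on the build;
 * this is only an illustration). */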
SimulationRunner::SimulationRunner(IntegrationTestFixture *fixture) :
    fixture_(fixture),
    topFileName_(),
    groFileName_(),
    fullPrecisionTrajectoryFileName_(),
    ndxFileName_(),
    mdpInputFileName_(fixture_->fileManager_.getTemporaryFilePath("input.mdp")),
    mdpOutputFileName_(fixture_->fileManager_.getTemporaryFilePath("output.mdp")),
    tprFileName_(fixture_->fileManager_.getTemporaryFilePath(".tpr")),
    logFileName_(fixture_->fileManager_.getTemporaryFilePath(".log")),
    edrFileName_(fixture_->fileManager_.getTemporaryFilePath(".edr")),
    // -2 is a sentinel meaning "not set"; callMdrun() only passes -nsteps
    // when a test has assigned a value greater than -2.
    nsteps_(-2)
{
#if GMX_LIB_MPI
    GMX_RELEASE_ASSERT(gmx_mpi_initialized(), "MPI system not initialized for mdrun tests");
#endif
}
// TODO The combination of defaulting to Verlet cut-off scheme, NVE,
// and verlet-buffer-tolerance = -1 gives a grompp error. If we keep
// things that way, this function should be renamed. For now,
// force the use of the group scheme.
void
SimulationRunner::useEmptyMdpFile()
{
    // TODO When removing the group scheme, update actual and potential users of useEmptyMdpFile
    useStringAsMdpFile("cutoff-scheme = Group\n");
}

void
SimulationRunner::useStringAsMdpFile(const char *mdpString)
{
    useStringAsMdpFile(std::string(mdpString));
}

void
SimulationRunner::useStringAsMdpFile(const std::string &mdpString)
{
    gmx::TextWriter::writeFileFromString(mdpInputFileName_, mdpString);
}

void
SimulationRunner::useStringAsNdxFile(const char *ndxString)
{
    gmx::TextWriter::writeFileFromString(ndxFileName_, ndxString);
}

void
SimulationRunner::useTopGroAndNdxFromDatabase(const char *name)
{
    topFileName_ = fixture_->fileManager_.getInputFilePath((std::string(name) + ".top").c_str());
    groFileName_ = fixture_->fileManager_.getInputFilePath((std::string(name) + ".gro").c_str());
    ndxFileName_ = fixture_->fileManager_.getInputFilePath((std::string(name) + ".ndx").c_str());
}

void
SimulationRunner::useGroFromDatabase(const char *name)
{
    groFileName_ = fixture_->fileManager_.getInputFilePath((std::string(name) + ".gro").c_str());
}
int
SimulationRunner::callGromppOnThisRank(const CommandLine &callerRef)
{
    CommandLine caller;
    caller.append("grompp");
    caller.merge(callerRef);
    caller.addOption("-f", mdpInputFileName_);
    caller.addOption("-n", ndxFileName_);
    caller.addOption("-p", topFileName_);
    caller.addOption("-c", groFileName_);

    caller.addOption("-po", mdpOutputFileName_);
    caller.addOption("-o", tprFileName_);

    return gmx_grompp(caller.argc(), caller.argv());
}

int
SimulationRunner::callGromppOnThisRank()
{
    return callGromppOnThisRank(CommandLine());
}
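
/* Note: with a real-MPI build, tests should call callGrompp() from all
 * ranks rather than callGromppOnThisRank(), because callGrompp() runs
 * grompp only on rank zero and then uses a barrier so that no rank tries
 * to read the .tpr file before it has been written. */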
int
SimulationRunner::callGrompp(const CommandLine &callerRef)
{
    int returnValue = 0;
#if GMX_LIB_MPI
    // When compiled with external MPI, we're trying to run mdrun with
    // MPI, but we need to make sure that we only do grompp on one
    // rank
    if (0 == gmx_node_rank())
#endif
    {
        returnValue = callGromppOnThisRank(callerRef);
    }
#if GMX_LIB_MPI
    // Make sure rank zero has written the .tpr file before other
    // ranks try to read it. Thread-MPI and serial do this just fine
    // on their own.
    MPI_Barrier(MPI_COMM_WORLD);
#endif
    return returnValue;
}

int
SimulationRunner::callGrompp()
{
    return callGrompp(CommandLine());
}
int
SimulationRunner::callMdrun(const CommandLine &callerRef)
{
    /* Conforming to style guide by not passing a non-const reference
       to this function. Passing a non-const reference might make it
       easier to write code that incorrectly re-uses callerRef after
       the call to this function. */

    CommandLine caller;
    caller.append("mdrun");
    caller.merge(callerRef);
    caller.addOption("-s", tprFileName_);

    caller.addOption("-g", logFileName_);
    caller.addOption("-e", edrFileName_);
    caller.addOption("-o", fullPrecisionTrajectoryFileName_);
    caller.addOption("-x", reducedPrecisionTrajectoryFileName_);

    caller.addOption("-deffnm", fixture_->fileManager_.getTemporaryFilePath("state"));

    if (nsteps_ > -2)
    {
        caller.addOption("-nsteps", nsteps_);
    }
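
    /* With a GPU build and MPI, pass an explicit GPU id mapping: a string
     * with one '0' character per rank assigns every rank to GPU 0, which
     * matches the single-node, single-GPU assumption made further below. */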
#if GMX_MPI
#  if GMX_GPU != GMX_GPU_NONE
#    if GMX_THREAD_MPI
    int         numGpusNeeded = g_numThreads;
#    else   /* Must be real MPI */
    int         numGpusNeeded = gmx_node_num();
#    endif
    std::string gpuIdString(numGpusNeeded, '0');
    caller.addOption("-gpu_id", gpuIdString.c_str());
#  endif
#endif

#if GMX_THREAD_MPI
    caller.addOption("-ntmpi", g_numThreads);
#endif

#if GMX_OPENMP
    caller.addOption("-ntomp", g_numOpenMPThreads);
#endif
#if GMX_GPU != GMX_GPU_NONE
    /* TODO Ideally, with real MPI, we could call
     * gmx_collect_hardware_mpi() here and find out how many nodes
     * mdrun will run on. For now, we assume that we're running on one
     * node regardless of the number of ranks, because that's true in
     * Jenkins and for most developers running the tests. */
    int numberOfNodes = 1;
#if GMX_THREAD_MPI
    /* Can't use gmx_node_num() because it is only valid after spawn of thread-MPI threads */
    int numberOfRanks = g_numThreads;
#elif GMX_LIB_MPI
    int numberOfRanks = gmx_node_num();
#else
    int numberOfRanks = 1;
#endif
    if (numberOfRanks > numberOfNodes && !gmx_multiple_gpu_per_node_supported())
    {
        if (gmx_node_rank() == 0)
        {
            fprintf(stderr, "GROMACS in this build configuration cannot run on more than one GPU per node,\n so with %d ranks and %d nodes, this test will disable GPU support", numberOfRanks, numberOfNodes);
        }
        caller.addOption("-nb", "cpu");
    }
#endif
    return gmx_mdrun(caller.argc(), caller.argv());
}

int
SimulationRunner::callMdrun()
{
    return callMdrun(CommandLine());
}
// ====

MdrunTestFixtureBase::MdrunTestFixtureBase()
{
#if GMX_LIB_MPI
    GMX_RELEASE_ASSERT(gmx_mpi_initialized(), "MPI system not initialized for mdrun tests");
#endif
}

MdrunTestFixtureBase::~MdrunTestFixtureBase()
{
}

// ====

MdrunTestFixture::MdrunTestFixture() : runner_(this)
{
}

MdrunTestFixture::~MdrunTestFixture()
{
}
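
/* Illustrative use from a test built on this fixture (a sketch only; the
 * system name, mdp contents and expected results are hypothetical and would
 * need to match files actually present in the test database):
 *
 *   TEST_F(MdrunTestFixture, RunsShortMdSimulation)
 *   {
 *       runner_.useTopGroAndNdxFromDatabase("somesystem");
 *       runner_.useStringAsMdpFile("cutoff-scheme = Group\nnsteps = 4\n");
 *       EXPECT_EQ(0, runner_.callGrompp());
 *       EXPECT_EQ(0, runner_.callMdrun());
 *   }
 */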

} // namespace test
} // namespace gmx