/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2015,2016,2017,2018,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 * \brief Defines utility functionality for dividing resources and
 * checking for consistency and usefulness.
 *
 * \author Mark Abraham <mark.j.abraham@gmail.com>
 * \ingroup module_taskassignment
 */
#include "gmxpre.h"

#include "resourcedivision.h"

#include "config.h"

#include <cstdlib>
#include <cstring>

#include <algorithm>
#include <array>

#include "gromacs/ewald/pme.h"
#include "gromacs/hardware/cpuinfo.h"
#include "gromacs/hardware/detecthardware.h"
#include "gromacs/hardware/hardwaretopology.h"
#include "gromacs/hardware/hw_info.h"
#include "gromacs/math/functions.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdrunutility/multisim.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/topology/mtop_util.h"
#include "gromacs/topology/topology.h"
#include "gromacs/utility/baseversion.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/physicalnodecommunicator.h"
#include "gromacs/utility/stringutil.h"
/* DISCLAIMER: All the atom count and thread numbers below are heuristic.
 * The real switching points will depend on the simulated system,
 * the algorithms used and the hardware it's running on, as well as
 * whether other jobs are running on the same machine. We try to take
 * into account factors that have a large influence, such as recent Intel
 * CPUs being much better at wide multi-threading. The remaining factors
 * should (hopefully) have a small influence, such that the performance
 * just before and after a switch point doesn't change too much.
 */
/*! \brief The minimum number of atoms per thread-MPI thread when GPUs
 * are present. With fewer atoms than this, the number of thread-MPI
 * ranks will get lowered.
 */
static constexpr int min_atoms_per_mpi_thread = 90;
/*! \brief The minimum number of atoms per GPU with thread-MPI
 * active. With fewer atoms than this, the number of thread-MPI ranks
 * will get lowered.
 */
static constexpr int min_atoms_per_gpu = 900;
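
/* Illustrative example (not from the original source): with thread-MPI,
 * 4 GPUs and a 2000-atom system, one rank per GPU would leave only 500
 * atoms per GPU, below the 900-atom threshold above, so get_nthreads_mpi()
 * below would lower the rank count to 2000/900 = 2 and leave two GPUs idle.
 */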
/**@{*/
/*! \brief Constants for implementing default divisions of threads */

/* TODO choose nthreads_omp based on hardware topology
   when we have a hardware topology detection library */
/* First we consider the case of no MPI (1 MPI rank).
 * In general, when running up to 8 threads, OpenMP should be faster.
 * Note: on AMD Bulldozer we should avoid running OpenMP over two dies.
 * On Intel >= Nehalem running OpenMP on a single CPU is always faster,
 * even on two CPUs it's usually faster (but with many OpenMP threads
 * it could be faster not to use HT, currently we always use HT).
 * On Nehalem/Westmere we want to avoid running 16 threads over
 * two CPUs with HT, so we need a limit < 16; thus we use 12.
 * A reasonable limit for Intel Sandy and Ivy Bridge,
 * not knowing the topology, is 16 threads.
 * Below we check for Intel and AVX, which for now includes
 * Sandy/Ivy Bridge, Haswell/Broadwell. By checking for AVX instead of
 * model numbers we ensure also future Intel CPUs are covered.
 */
constexpr int nthreads_omp_faster_default   = 8;
constexpr int nthreads_omp_faster_Nehalem   = 12;
constexpr int nthreads_omp_faster_Intel_AVX = 16;
constexpr int nthreads_omp_faster_AMD_Ryzen = 16;
/* For CPU-only runs the fastest options are usually MPI or OpenMP only.
 * With one GPU, using MPI only is almost never optimal, so we need to
 * compare running pure OpenMP with combined MPI+OpenMP. This means higher
 * OpenMP thread counts can still be ok. Multiplying the numbers above
 * by a factor of 2 seems to be a good estimate.
 */
constexpr int nthreads_omp_faster_gpu_fac = 2;
/* This is the case with MPI (2 or more MPI PP ranks).
 * By default we will terminate with a fatal error when more than 8
 * OpenMP threads are (indirectly) requested, since using fewer threads
 * nearly always results in better performance.
 * With thread-MPI and multiple GPUs or one GPU and too many threads
 * we first try 6 OpenMP threads and then fewer until the number of MPI
 * ranks is divisible by the number of GPUs.
 */
constexpr int nthreads_omp_mpi_ok_max     = 8;
constexpr int nthreads_omp_mpi_ok_min_cpu = 1;
constexpr int nthreads_omp_mpi_ok_min_gpu = 2;
constexpr int nthreads_omp_mpi_target_max = 6;

/**@}*/
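
/* Illustrative example (not from the original source): a run with 2 PP
 * ranks and 16 OpenMP threads per rank exceeds nthreads_omp_mpi_ok_max = 8,
 * so check_resource_division_efficiency() below either prints a note (when
 * -ntomp was set explicitly) or aborts with a fatal error suggesting
 * between nthreads_omp_mpi_ok_min_{cpu,gpu} and nthreads_omp_mpi_target_max
 * threads per rank.
 */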
/*! \brief Returns the maximum OpenMP thread count for which using a single MPI rank
 * should be faster than using multiple ranks with the same total thread count.
 */
static int nthreads_omp_faster(const gmx::CpuInfo& cpuInfo, gmx_bool bUseGPU)
{
    int nth;

    if (cpuInfo.vendor() == gmx::CpuInfo::Vendor::Intel && cpuInfo.feature(gmx::CpuInfo::Feature::X86_Avx))
    {
        nth = nthreads_omp_faster_Intel_AVX;
    }
    else if (gmx::cpuIsX86Nehalem(cpuInfo))
    {
        // Intel Nehalem
        nth = nthreads_omp_faster_Nehalem;
    }
    else if ((cpuInfo.vendor() == gmx::CpuInfo::Vendor::Amd && cpuInfo.family() >= 23)
             || cpuInfo.vendor() == gmx::CpuInfo::Vendor::Hygon)
    {
        // AMD Ryzen || Hygon Dhyana
        nth = nthreads_omp_faster_AMD_Ryzen;
    }
    else
    {
        nth = nthreads_omp_faster_default;
    }

    if (bUseGPU)
    {
        nth *= nthreads_omp_faster_gpu_fac;
    }

    nth = std::min(nth, GMX_OPENMP_MAX_THREADS);

    return nth;
}
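
/* Illustrative example (not from the original source): on an Intel CPU with
 * AVX support and a GPU in use, this returns 16 * 2 = 32, unless
 * GMX_OPENMP_MAX_THREADS caps it lower.
 */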
/*! \brief Returns the maximum OpenMP thread count that passes the efficiency check */
gmx_unused static int nthreads_omp_efficient_max(int gmx_unused nrank, const gmx::CpuInfo& cpuInfo, gmx_bool bUseGPU)
{
    if (GMX_OPENMP && GMX_MPI && (nrank > 1))
    {
        return nthreads_omp_mpi_ok_max;
    }
    else
    {
        return nthreads_omp_faster(cpuInfo, bUseGPU);
    }
}
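
/* Illustrative example (not from the original source): with 4 MPI ranks
 * this returns nthreads_omp_mpi_ok_max = 8 (in MPI + OpenMP builds), while
 * a single rank falls through to nthreads_omp_faster(), e.g. 16 on an
 * Intel AVX CPU without a GPU.
 */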
/*! \brief Return the number of thread-MPI ranks to use.
 * This is chosen such that we can always obey our own efficiency checks.
 */
gmx_unused static int get_tmpi_omp_thread_division(const gmx_hw_info_t* hwinfo,
                                                   const gmx_hw_opt_t&  hw_opt,
                                                   int                  nthreads_tot,
                                                   int                  ngpu)
{
    int                 nrank;
    const gmx::CpuInfo& cpuInfo = *hwinfo->cpuInfo;

    GMX_RELEASE_ASSERT(nthreads_tot > 0, "There must be at least one thread per rank");

    /* There are no separate PME nodes here, as we ensured in
     * check_and_update_hw_opt that nthreads_tmpi>0 with PME nodes
     * and a conditional ensures we would not have ended up here.
     * Note that separate PME nodes might be switched on later.
     */
    if (ngpu > 0)
    {
        if (hw_opt.nthreads_omp > 0)
        {
            /* In this case it is unclear if we should use 1 rank per GPU
             * or more or fewer, so we require also setting the number of ranks.
             */
            gmx_fatal(FARGS,
                      "When using GPUs, setting the number of OpenMP threads without specifying "
                      "the number of ranks can lead to conflicting demands. Please specify the "
                      "number of thread-MPI ranks as well (option -ntmpi).");
        }

        nrank = ngpu;

        /* When the user sets nthreads_omp, we can end up oversubscribing CPU cores
         * if we simply start as many ranks as GPUs. To avoid this, we start as few
         * tMPI ranks as necessary to avoid oversubscription and instead leave GPUs idle.
         * If the user does not set the number of OpenMP threads, nthreads_omp==0 and
         * this code has no effect.
         */
        GMX_RELEASE_ASSERT(hw_opt.nthreads_omp >= 0,
                           "nthreads_omp is negative, but previous checks should "
                           "have prevented this");
        while (nrank * hw_opt.nthreads_omp > hwinfo->nthreads_hw_avail && nrank > 1)
        {
            nrank--;
        }

        if (nthreads_tot < nrank)
        {
            /* #thread < #gpu is very unlikely, but if so: waste gpu(s) */
            nrank = nthreads_tot;
        }
        else if (nthreads_tot > nthreads_omp_faster(cpuInfo, ngpu > 0)
                 || (ngpu > 1 && nthreads_tot / ngpu > nthreads_omp_mpi_target_max))
        {
            /* The high OpenMP thread count will likely result in sub-optimal
             * performance. Increase the rank count to reduce the thread count
             * per rank. This will lead to GPU sharing by MPI ranks/threads.
             */
            int nshare;

            /* Increase the rank count as long as we have more than 6 OpenMP
             * threads per rank or the number of hardware threads is not
             * divisible by the rank count. Don't go below 2 OpenMP threads.
             */
            nshare = 1;
            do
            {
                nshare++;
                nrank = ngpu * nshare;
            } while (nthreads_tot / nrank > nthreads_omp_mpi_target_max
                     || (nthreads_tot / (ngpu * (nshare + 1)) >= nthreads_omp_mpi_ok_min_gpu
                         && nthreads_tot % nrank != 0));
        }
    }
    else if (hw_opt.nthreads_omp > 0)
    {
        /* Here we could oversubscribe, when we do, we issue a warning later */
        nrank = std::max(1, nthreads_tot / hw_opt.nthreads_omp);
    }
    else
    {
        if (nthreads_tot <= nthreads_omp_faster(cpuInfo, ngpu > 0))
        {
            /* Use pure OpenMP parallelization */
            nrank = 1;
        }
        else
        {
            /* Don't use OpenMP parallelization */
            nrank = nthreads_tot;
        }
    }

    return nrank;
}
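
/* Illustrative walk-through (not from the original source): with
 * nthreads_tot = 32 and ngpu = 2, 32/2 = 16 threads per rank exceeds
 * nthreads_omp_mpi_target_max = 6, so the loop above tries nshare = 2
 * (8 threads/rank, still too many), nshare = 3 (32 is not divisible by 6
 * ranks) and settles on nshare = 4, i.e. 8 ranks sharing 2 GPUs with
 * 4 OpenMP threads each.
 */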
//! Return whether hyper-threading is enabled.
static bool gmxSmtIsEnabled(const gmx::HardwareTopology& hwTop)
{
    return (hwTop.supportLevel() >= gmx::HardwareTopology::SupportLevel::Basic
            && hwTop.machine().sockets[0].cores[0].hwThreads.size() > 1);
}
namespace
{

//! Handles checks for algorithms that must use a single rank.
class SingleRankChecker
{
public:
    SingleRankChecker() : value_(false) {}
    /*! \brief Call this function for each possible condition
        under which a single rank is required, along with a string
        describing the constraint when it is applied. */
    void applyConstraint(bool condition, const char* description)
    {
        if (condition)
        {
            value_ = true;
            reasons_.push_back(gmx::formatString("%s only supports a single rank.", description));
        }
    }
    //! After applying any conditions, is a single rank required?
    bool mustUseOneRank() const { return value_; }
    /*! \brief Return a formatted string to use when writing a
        message when a single rank is required (or empty if no
        constraint exists). */
    std::string getMessage() const
    {
        return formatAndJoin(reasons_, "\n", gmx::IdentityFormatter());
    }

private:
    bool                     value_;
    std::vector<std::string> reasons_;
};

} // namespace
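
/* Minimal usage sketch (hypothetical, for illustration only; the variable
 * usesPlainEwald is made up):
 *
 *   SingleRankChecker checker;
 *   checker.applyConstraint(usesPlainEwald, "Plain Ewald electrostatics");
 *   if (checker.mustUseOneRank())
 *   {
 *       // report checker.getMessage() and fall back to a single rank
 *   }
 */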
/* Get the number of MPI ranks to use for thread-MPI based on how many
 * were requested, which algorithms we're using,
 * and how many particles there are.
 * At this point we have already called check_and_update_hw_opt.
 * Thus all options should be internally consistent and consistent
 * with the hardware, except that ntmpi could be larger than #GPU.
 */
int get_nthreads_mpi(const gmx_hw_info_t* hwinfo,
                     gmx_hw_opt_t*        hw_opt,
                     const int            numDevicesToUse,
                     bool                 nonbondedOnGpu,
                     bool                 pmeOnGpu,
                     const t_inputrec*    inputrec,
                     const gmx_mtop_t*    mtop,
                     const gmx::MDLogger& mdlog,
                     bool                 doMembed)
{
    int nthreads_hw, nthreads_tot_max, nrank, ngpu;
    int min_atoms_per_mpi_rank;

    const gmx::CpuInfo&          cpuInfo = *hwinfo->cpuInfo;
    const gmx::HardwareTopology& hwTop   = *hwinfo->hardwareTopology;

    if (pmeOnGpu)
    {
        GMX_RELEASE_ASSERT((EEL_PME(inputrec->coulombtype) || EVDW_PME(inputrec->vdwtype))
                                   && pme_gpu_supports_build(nullptr)
                                   && pme_gpu_supports_hardware(*hwinfo, nullptr)
                                   && pme_gpu_supports_input(*inputrec, nullptr),
                           "PME can't be on GPUs unless we are using PME");

        // PME on GPUs supports a single PME rank with PP running on the same or a few other ranks.
        // For now, let's treat a separate PME GPU rank as opt-in.
        if (hw_opt->nthreads_tmpi < 1)
        {
            return 1;
        }
    }

    {
        /* Check if an algorithm does not support parallel simulation. */
        // TODO This might work better if e.g. implemented algorithms
        // had to define a function that returns such requirements,
        // and a description string.
        SingleRankChecker checker;
        checker.applyConstraint(inputrec->eI == eiLBFGS, "L-BFGS minimization");
        checker.applyConstraint(inputrec->coulombtype == eelEWALD, "Plain Ewald electrostatics");
        checker.applyConstraint(doMembed, "Membrane embedding");
        bool useOrientationRestraints = (gmx_mtop_ftype_count(mtop, F_ORIRES) > 0);
        checker.applyConstraint(useOrientationRestraints, "Orientation restraints");
        if (checker.mustUseOneRank())
        {
            std::string message = checker.getMessage();
            if (hw_opt->nthreads_tmpi > 1)
            {
                gmx_fatal(FARGS,
                          "%s However, you asked for more than 1 thread-MPI rank, so mdrun cannot "
                          "continue. Choose a single rank, or a different algorithm.",
                          message.c_str());
            }
            GMX_LOG(mdlog.warning)
                    .asParagraph()
                    .appendTextFormatted("%s Choosing to use only a single thread-MPI rank.",
                                         message.c_str());
            return 1;
        }
    }

    if (hw_opt->nthreads_tmpi > 0)
    {
        /* Trivial, return the user's choice right away */
        return hw_opt->nthreads_tmpi;
    }

    // Now implement automatic selection of number of thread-MPI ranks
    nthreads_hw = hwinfo->nthreads_hw_avail;

    if (nthreads_hw <= 0)
    {
        /* This should normally not happen, but if it does, we handle it */
        gmx_fatal(FARGS,
                  "The number of available hardware threads cannot be detected, please specify "
                  "the number of MPI ranks and the number of OpenMP threads (if supported) "
                  "manually with options -ntmpi and -ntomp, respectively");
    }

    /* How many total (#tMPI*#OpenMP) threads can we start? */
    if (hw_opt->nthreads_tot > 0)
    {
        nthreads_tot_max = hw_opt->nthreads_tot;
    }
    else
    {
        nthreads_tot_max = nthreads_hw;
    }

    /* nonbondedOnGpu might be false e.g. because this simulation
     * is a rerun with energy groups. */
    ngpu = (nonbondedOnGpu ? numDevicesToUse : 0);

    nrank = get_tmpi_omp_thread_division(hwinfo, *hw_opt, nthreads_tot_max, ngpu);

    if (inputrec->eI == eiNM || EI_TPI(inputrec->eI))
    {
        /* Dims/steps are divided over the nodes instead of splitting the atoms.
         * With NM we can't have more ranks than #atoms*#dim. With TPI it's
         * unlikely we have fewer atoms than ranks, and if so, communication
         * would become a bottleneck, so we set the limit to 1 atom/rank.
         */
        min_atoms_per_mpi_rank = 1;
    }
    else
    {
        if (ngpu >= 1)
        {
            min_atoms_per_mpi_rank = min_atoms_per_gpu;
        }
        else
        {
            min_atoms_per_mpi_rank = min_atoms_per_mpi_thread;
        }
    }

    if (mtop->natoms / nrank < min_atoms_per_mpi_rank)
    {
        int nrank_new;

        /* the rank number was chosen automatically, but there are too few
           atoms per rank, so we need to reduce the rank count */
        nrank_new = std::max(1, mtop->natoms / min_atoms_per_mpi_rank);

        /* Avoid partial use of Hyper-Threading */
        if (gmxSmtIsEnabled(hwTop) && nrank_new > nthreads_hw / 2 && nrank_new < nthreads_hw)
        {
            nrank_new = nthreads_hw / 2;
        }

        /* If the user specified the total thread count, ensure this is
         * divisible by the number of ranks.
         * It is quite likely that we have too many total threads compared
         * to the size of the system, but if the user asked for this many
         * threads we should respect that.
         */
        while (hw_opt->nthreads_tot > 0 && hw_opt->nthreads_tot % nrank_new != 0)
        {
            nrank_new--;
        }

        /* Avoid large prime numbers in the rank count */
        if (nrank_new >= 6)
        {
            /* Use only 6,8,10 with additional factors of 2 */
            int fac;

            fac = 2;
            while (3 * fac * 2 <= nrank_new)
            {
                fac *= 2;
            }

            nrank_new = (nrank_new / fac) * fac;
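
            /* Illustrative example (not from the original source): for
             * nrank_new = 7 this rounds down to 6 (fac = 2), and for
             * nrank_new = 14 fac grows to 4 and the count rounds down to 12,
             * so the rank count keeps small prime factors that domain
             * decomposition can use.
             */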
        }
        else
        {
            /* Avoid 5, since a small system won't fit 5 domains along
             * a dimension. This might lead to wasting some cores, but this
             * will have a small impact in this regime of very small systems.
             */
            if (nrank_new == 5)
            {
                nrank_new = 4;
            }
        }

        nrank = nrank_new;

        /* We reduced the number of tMPI ranks, which means we might violate
         * our own efficiency checks if we simply use all hardware threads.
         */
        if (GMX_OPENMP && hw_opt->nthreads_omp <= 0 && hw_opt->nthreads_tot <= 0)
        {
            /* The user set neither the total nor the OpenMP thread count,
             * we should use all hardware threads, unless we will violate
             * our own efficiency limitation on the thread count.
             */
            int nt_omp_max;

            nt_omp_max = nthreads_omp_efficient_max(nrank, cpuInfo, ngpu >= 1);

            if (nrank * nt_omp_max < hwinfo->nthreads_hw_avail)
            {
                /* Limit the number of OpenMP threads to start */
                hw_opt->nthreads_omp = nt_omp_max;
            }
        }

        fprintf(stderr, "\n");
        fprintf(stderr, "NOTE: Parallelization is limited by the small number of atoms,\n");
        fprintf(stderr, "      only starting %d thread-MPI ranks.\n", nrank);
        fprintf(stderr,
                "      You can use the -nt and/or -ntmpi option to optimize the number of "
                "threads.\n\n");
    }

    return nrank;
}
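
/* Illustrative example (not from the original source): a CPU-only run of a
 * 1500-atom system on 32 hardware threads would otherwise start 32
 * single-threaded ranks (only ~46 atoms each); the code above reduces this
 * to 1500/90 = 16 ranks and prints the NOTE about parallelization being
 * limited by the small number of atoms.
 */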
void check_resource_division_efficiency(const gmx_hw_info_t* hwinfo,
                                        bool                 willUsePhysicalGpu,
                                        gmx_bool             bNtOmpOptionSet,
                                        t_commrec*           cr,
                                        const gmx::MDLogger& mdlog)
{
#if GMX_OPENMP && GMX_MPI
    GMX_UNUSED_VALUE(hwinfo);

    int         nth_omp_max;
    char        buf[1000];
    const char* mpi_option = GMX_THREAD_MPI ? " (option -ntmpi)" : "";

    /* This function should be called after thread-MPI (when configured) and
     * OpenMP have been initialized. Check that here.
     */
    if (GMX_THREAD_MPI)
    {
        GMX_RELEASE_ASSERT(nthreads_omp_faster_default >= nthreads_omp_mpi_ok_max,
                           "Inconsistent OpenMP thread count default values");
    }
    GMX_RELEASE_ASSERT(gmx_omp_nthreads_get(emntDefault) >= 1,
                       "Must have at least one OpenMP thread");

    nth_omp_max = gmx_omp_nthreads_get(emntDefault);

    bool anyRankIsUsingGpus = willUsePhysicalGpu;
    /* Thread-MPI seems to have a bug with reduce on 1 node, so use a condition. */
    if (cr->nnodes > 1)
    {
        std::array<int, 2> count, count_max;

        count[0] = nth_omp_max;
        count[1] = int(willUsePhysicalGpu);

        MPI_Allreduce(count.data(), count_max.data(), count.size(), MPI_INT, MPI_MAX, cr->mpi_comm_mysim);

        /* In case of an inhomogeneous run setup we use the maximum counts */
        nth_omp_max        = count_max[0];
        anyRankIsUsingGpus = count_max[1] > 0;
    }

    int nthreads_omp_mpi_ok_min;

    if (!anyRankIsUsingGpus)
    {
        nthreads_omp_mpi_ok_min = nthreads_omp_mpi_ok_min_cpu;
    }
    else
    {
        /* With GPUs we set the minimum number of OpenMP threads to 2 to catch
         * cases where the user specifies #ranks == #cores.
         */
        nthreads_omp_mpi_ok_min = nthreads_omp_mpi_ok_min_gpu;
    }

    if (DOMAINDECOMP(cr))
    {
        if (nth_omp_max < nthreads_omp_mpi_ok_min || nth_omp_max > nthreads_omp_mpi_ok_max)
        {
            /* Note that we print target_max here, not ok_max */
            sprintf(buf,
                    "Your choice of number of MPI ranks and amount of resources results in using "
                    "%d OpenMP threads per rank, which is most likely inefficient. The optimum "
                    "is usually between %d and %d threads per rank.",
                    nth_omp_max, nthreads_omp_mpi_ok_min, nthreads_omp_mpi_target_max);

            if (bNtOmpOptionSet)
            {
                GMX_LOG(mdlog.warning).asParagraph().appendTextFormatted("NOTE: %s", buf);
            }
            else
            {
                /* This fatal error, and the one below, is nasty, but it's
                 * probably the only way to ensure that all users don't waste
                 * a lot of resources, since many users don't read logs/stderr.
                 */
                gmx_fatal(FARGS,
                          "%s If you want to run with this setup, specify the -ntomp option. But "
                          "we suggest to change the number of MPI ranks%s.",
                          buf, mpi_option);
            }
        }
    }
#else  // !GMX_OPENMP || !GMX_MPI
    GMX_UNUSED_VALUE(bNtOmpOptionSet);
    GMX_UNUSED_VALUE(willUsePhysicalGpu);
    GMX_UNUSED_VALUE(cr);
    GMX_UNUSED_VALUE(nthreads_omp_mpi_ok_max);
    GMX_UNUSED_VALUE(nthreads_omp_mpi_ok_min_cpu);
    /* Check if we have more than 1 physical core, if detected,
     * or more than 1 hardware thread if physical cores were not detected.
     */
    if (!GMX_OPENMP && !GMX_MPI && hwinfo->hardwareTopology->numberOfCores() > 1)
    {
        GMX_LOG(mdlog.warning)
                .asParagraph()
                .appendText(
                        "NOTE: GROMACS was compiled without OpenMP and (thread-)MPI support, can "
                        "only use a single CPU core");
    }
#endif // end GMX_OPENMP && GMX_MPI
}
//! Dump a \c hw_opt to \c fp.
static void print_hw_opt(FILE* fp, const gmx_hw_opt_t* hw_opt)
{
    fprintf(fp, "hw_opt: nt %d ntmpi %d ntomp %d ntomp_pme %d gpu_id '%s' gputasks '%s'\n",
            hw_opt->nthreads_tot, hw_opt->nthreads_tmpi, hw_opt->nthreads_omp, hw_opt->nthreads_omp_pme,
            hw_opt->gpuIdsAvailable.c_str(), hw_opt->userGpuTaskAssignment.c_str());
}
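
/* Example of the debug output produced above (hypothetical values):
 *   hw_opt: nt 16 ntmpi 4 ntomp 4 ntomp_pme 4 gpu_id '' gputasks ''
 */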
void checkAndUpdateHardwareOptions(const gmx::MDLogger& mdlog,
                                   gmx_hw_opt_t*        hw_opt,
                                   const bool           isSimulationMasterRank,
                                   const int            nPmeRanks,
                                   const t_inputrec*    inputrec)
{
    /* Currently hw_opt only contains default settings or settings supplied
     * by the user on the command line.
     */
    if (hw_opt->nthreads_omp < 0)
    {
        gmx_fatal(FARGS,
                  "The number of OpenMP threads supplied on the command line is %d, which is "
                  "negative and not allowed",
                  hw_opt->nthreads_omp);
    }

    /* Check for OpenMP settings stored in environment variables, which can
     * potentially be different on different MPI ranks.
     */
    gmx_omp_nthreads_read_env(mdlog, &hw_opt->nthreads_omp);

    /* Check restrictions on the user supplied options before modifying them.
     * TODO: Put the user values in a const struct and preserve them.
     */
    if (!GMX_THREAD_MPI)
    {
        if (hw_opt->nthreads_tot > 0)
        {
            gmx_fatal(FARGS,
                      "Setting the total number of threads is only supported with thread-MPI and "
                      "GROMACS was compiled without thread-MPI");
        }
        if (hw_opt->nthreads_tmpi > 0)
        {
            gmx_fatal(FARGS,
                      "Setting the number of thread-MPI ranks is only supported with thread-MPI "
                      "and GROMACS was compiled without thread-MPI");
        }
    }

    /* With thread-MPI we need to handle TPI and #OpenMP-threads=auto early,
     * so we can parallelize using MPI only. The general check is done later.
     */
    if (GMX_THREAD_MPI && isSimulationMasterRank)
    {
        GMX_RELEASE_ASSERT(inputrec, "Expect a valid inputrec");
        if (EI_TPI(inputrec->eI) && hw_opt->nthreads_omp == 0)
        {
            hw_opt->nthreads_omp = 1;
        }
    }

    /* With thread-MPI the master thread sets hw_opt->totNumThreadsIsAuto.
     * The other threads receive a partially processed hw_opt from the master
     * thread and should not set hw_opt->totNumThreadsIsAuto again.
     */
    if (!GMX_THREAD_MPI || isSimulationMasterRank)
    {
        /* Check if mdrun is free to choose the total number of threads */
        hw_opt->totNumThreadsIsAuto = (hw_opt->nthreads_omp == 0 && hw_opt->nthreads_omp_pme == 0
                                       && hw_opt->nthreads_tot == 0);
    }

    if (GMX_OPENMP)
    {
        /* Check restrictions on PME thread related options set by the user */

        if (hw_opt->nthreads_omp_pme > 0 && hw_opt->nthreads_omp <= 0)
        {
            gmx_fatal(FARGS, "You need to specify -ntomp in addition to -ntomp_pme");
        }

        if (hw_opt->nthreads_omp_pme >= 1 && hw_opt->nthreads_omp_pme != hw_opt->nthreads_omp
            && nPmeRanks <= 0)
        {
            /* This can result in a fatal error on many MPI ranks,
             * but since the thread count can differ per rank,
             * we can't easily avoid this.
             */
            gmx_fatal(FARGS,
                      "You need to explicitly specify the number of PME ranks (-npme) when using "
                      "different numbers of OpenMP threads for PP and PME ranks");
        }
    }
    else
    {
        /* GROMACS was configured without OpenMP support */

        if (hw_opt->nthreads_omp > 1 || hw_opt->nthreads_omp_pme > 1)
        {
            gmx_fatal(FARGS,
                      "More than 1 OpenMP thread requested, but GROMACS was compiled without "
                      "OpenMP support");
        }
        hw_opt->nthreads_omp     = 1;
        hw_opt->nthreads_omp_pme = 1;
    }

    if (hw_opt->nthreads_tot > 0 && hw_opt->nthreads_omp_pme <= 0)
    {
        /* We have the same number of OpenMP threads for PP and PME ranks,
         * thus we can perform several consistency checks.
         */
        if (hw_opt->nthreads_tmpi > 0 && hw_opt->nthreads_omp > 0
            && hw_opt->nthreads_tot != hw_opt->nthreads_tmpi * hw_opt->nthreads_omp)
        {
            gmx_fatal(FARGS,
                      "The total number of threads requested (%d) does not match the thread-MPI "
                      "ranks (%d) times the OpenMP threads (%d) requested",
                      hw_opt->nthreads_tot, hw_opt->nthreads_tmpi, hw_opt->nthreads_omp);
        }

        if (hw_opt->nthreads_tmpi > 0 && hw_opt->nthreads_tot % hw_opt->nthreads_tmpi != 0)
        {
            gmx_fatal(FARGS,
                      "The total number of threads requested (%d) is not divisible by the number "
                      "of thread-MPI ranks requested (%d)",
                      hw_opt->nthreads_tot, hw_opt->nthreads_tmpi);
        }

        if (hw_opt->nthreads_omp > 0 && hw_opt->nthreads_tot % hw_opt->nthreads_omp != 0)
        {
            gmx_fatal(FARGS,
                      "The total number of threads requested (%d) is not divisible by the number "
                      "of OpenMP threads requested (%d)",
                      hw_opt->nthreads_tot, hw_opt->nthreads_omp);
        }
    }

    if (hw_opt->nthreads_tot > 0)
    {
        if (hw_opt->nthreads_omp > hw_opt->nthreads_tot)
        {
            gmx_fatal(FARGS,
                      "You requested %d OpenMP threads with %d total threads. Choose a total "
                      "number of threads that is a multiple of the number of OpenMP threads.",
                      hw_opt->nthreads_omp, hw_opt->nthreads_tot);
        }

        if (hw_opt->nthreads_tmpi > hw_opt->nthreads_tot)
        {
            gmx_fatal(FARGS,
                      "You requested %d thread-MPI ranks with %d total threads. Choose a total "
                      "number of threads that is a multiple of the number of thread-MPI ranks.",
                      hw_opt->nthreads_tmpi, hw_opt->nthreads_tot);
        }
    }

    if (GMX_THREAD_MPI && nPmeRanks > 0 && hw_opt->nthreads_tmpi <= 0)
    {
        gmx_fatal(FARGS,
                  "You need to explicitly specify the number of MPI threads (-ntmpi) when using "
                  "separate PME ranks");
    }

    if (debug)
    {
        print_hw_opt(debug, hw_opt);
    }

    /* Asserting this simplifies the hardware resource division later
     * on. */
    GMX_RELEASE_ASSERT(
            !(hw_opt->nthreads_omp_pme >= 1 && hw_opt->nthreads_omp <= 0),
            "PME thread count should only be set when the normal thread count is also set");
}
void checkAndUpdateRequestedNumOpenmpThreads(gmx_hw_opt_t*         hw_opt,
                                             const gmx_hw_info_t&  hwinfo,
                                             const t_commrec*      cr,
                                             const gmx_multisim_t* ms,
                                             int                   numRanksOnThisNode,
                                             PmeRunMode            pmeRunMode,
                                             const gmx_mtop_t&     mtop,
                                             const t_inputrec&     inputrec)
{
    if (EI_TPI(inputrec.eI))
    {
        if (hw_opt->nthreads_omp > 1)
        {
            gmx_fatal(FARGS,
                      "You requested OpenMP parallelization, which is not supported with TPI.");
        }
        hw_opt->nthreads_omp = 1;
    }

    if (GMX_THREAD_MPI)
    {
        GMX_RELEASE_ASSERT(hw_opt->nthreads_tmpi >= 1, "Must have at least one thread-MPI rank");

        /* If the user set the total number of threads on the command line
         * and did not specify the number of OpenMP threads, set the latter here.
         */
        if (hw_opt->nthreads_tot > 0 && hw_opt->nthreads_omp <= 0)
        {
            hw_opt->nthreads_omp = hw_opt->nthreads_tot / hw_opt->nthreads_tmpi;

            if (!GMX_OPENMP && hw_opt->nthreads_omp > 1)
            {
                gmx_fatal(FARGS,
                          "You (indirectly) asked for OpenMP threads by setting -nt > -ntmpi, "
                          "but GROMACS was compiled without OpenMP support");
            }
        }
    }

    /* With both non-bonded and PME on GPU, the work left on the CPU is often
     * (much) slower with SMT than without SMT. This is mostly the case with
     * few atoms per core. Thus, if the number of threads is set to auto,
     * we turn off SMT in that case. Note that PME on GPU implies that the
     * non-bonded interactions are also computed on the GPU.
     * We only need to do this when the number of hardware threads is larger
     * than the number of cores. Note that a queuing system could limit
     * the number of hardware threads available, but we are not trying to be
     * too smart here in that case.
     */
    /* The thread reduction and synchronization costs go up roughly quadratically
     * with the thread count, so we apply a threshold quadratic in #cores.
     * Also more cores per GPU usually means the CPU gets faster than the GPU.
     * The number 1000 atoms per core^2 is a reasonable threshold
     * for Intel x86 and AMD Threadripper.
     */
    constexpr int c_numAtomsPerCoreSquaredSmtThreshold = 1000;
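
    /* Illustrative example (not from the original source): on a 16-core
     * machine running a single rank, the threshold is 1000 * 16^2 = 256000
     * atoms; any smaller system gets one OpenMP thread per physical core
     * instead of one per hardware thread.
     */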
    /* Prepare conditions for deciding if we should disable SMT.
     * We currently only limit SMT for simulations using a single rank.
     * TODO: Consider limiting also for multi-rank simulations.
     */
    bool canChooseNumOpenmpThreads = (GMX_OPENMP && hw_opt->nthreads_omp <= 0);
    bool haveSmtSupport =
            (hwinfo.hardwareTopology->supportLevel() >= gmx::HardwareTopology::SupportLevel::Basic
             && hwinfo.hardwareTopology->machine().logicalProcessorCount
                        > hwinfo.hardwareTopology->numberOfCores());
    bool simRunsSingleRankNBAndPmeOnGpu = (cr->nnodes == 1 && pmeRunMode == PmeRunMode::GPU);

    if (canChooseNumOpenmpThreads && haveSmtSupport && simRunsSingleRankNBAndPmeOnGpu)
    {
        /* Note that the queuing system might have limited us from using
         * all detected ncore_tot physical cores. We are currently not
         * checking for that here.
         */
        int numRanksTot     = cr->nnodes * (isMultiSim(ms) ? ms->numSimulations_ : 1);
        int numAtomsPerRank = mtop.natoms / cr->nnodes;
        int numCoresPerRank = hwinfo.ncore_tot / numRanksTot;
        if (numAtomsPerRank < c_numAtomsPerCoreSquaredSmtThreshold * gmx::square(numCoresPerRank))
        {
            /* Choose one OpenMP thread per physical core */
            hw_opt->nthreads_omp =
                    std::max(1, hwinfo.hardwareTopology->numberOfCores() / numRanksOnThisNode);
        }
    }

    GMX_RELEASE_ASSERT(GMX_OPENMP || hw_opt->nthreads_omp == 1,
                       "Without OpenMP support, only one thread per rank can be used");

    /* We are done with updating nthreads_omp, we can set nthreads_omp_pme */
    if (hw_opt->nthreads_omp_pme <= 0 && hw_opt->nthreads_omp > 0)
    {
        hw_opt->nthreads_omp_pme = hw_opt->nthreads_omp;
    }

    if (debug)
    {
        print_hw_opt(debug, hw_opt);
    }
}
namespace gmx
{

void checkHardwareOversubscription(int                             numThreadsOnThisRank,
                                   int                             rank,
                                   const HardwareTopology&         hwTop,
                                   const PhysicalNodeCommunicator& comm,
                                   const MDLogger&                 mdlog)
{
    if (hwTop.supportLevel() < HardwareTopology::SupportLevel::LogicalProcessorCount)
    {
        /* There is nothing we can check */
        return;
    }

    int numRanksOnThisNode   = comm.size_;
    int numThreadsOnThisNode = numThreadsOnThisRank;
    /* Avoid MPI calls with uninitialized thread-MPI communicators */
    if (comm.size_ > 1)
    {
#if GMX_MPI
        /* Count the threads within this physical node */
        MPI_Allreduce(&numThreadsOnThisRank, &numThreadsOnThisNode, 1, MPI_INT, MPI_SUM, comm.comm_);
#endif
    }

    if (numThreadsOnThisNode > hwTop.machine().logicalProcessorCount)
    {
        std::string mesg = "WARNING: ";
        if (GMX_LIB_MPI)
        {
            mesg += formatString("On rank %d: o", rank);
        }
        else
        {
            mesg += "O";
        }
        mesg += formatString("versubscribing the available %d logical CPU cores",
                             hwTop.machine().logicalProcessorCount);
        if (GMX_LIB_MPI)
        {
            mesg += " per node";
        }
        mesg += formatString(" with %d ", numThreadsOnThisNode);
        if (numRanksOnThisNode == numThreadsOnThisNode)
        {
            if (GMX_THREAD_MPI)
            {
                mesg += "thread-MPI threads.";
            }
            else
            {
                mesg += "MPI processes.";
            }
        }
        else
        {
            mesg += "threads.";
        }
        mesg += "\n This will cause considerable performance loss.";
        /* Note that only the master rank logs to stderr and only ranks
         * with an open log file write to log.
         * TODO: When we have a proper parallel logging framework,
         * the framework should add the rank and node numbers.
         */
        GMX_LOG(mdlog.warning).asParagraph().appendTextFormatted("%s", mesg.c_str());
    }
}

} // namespace gmx