/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2015,2016,2017,2018,2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 * \brief Defines utility functionality for dividing resources and
 * checking for consistency and usefulness.
 *
 * \author Mark Abraham <mark.j.abraham@gmail.com>
 * \ingroup module_taskassignment
 */
#include "gmxpre.h"

#include "resourcedivision.h"

#include "config.h"

#include <cstdio>
#include <cstdlib>
#include <cstring>

#include <algorithm>
#include <string>
#include <vector>

#include "gromacs/ewald/pme.h"
#include "gromacs/hardware/cpuinfo.h"
#include "gromacs/hardware/detecthardware.h"
#include "gromacs/hardware/hardwaretopology.h"
#include "gromacs/hardware/hw_info.h"
#include "gromacs/math/functions.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/topology/mtop_util.h"
#include "gromacs/topology/topology.h"
#include "gromacs/utility/baseversion.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/physicalnodecommunicator.h"
#include "gromacs/utility/stringutil.h"
/* DISCLAIMER: All the atom count and thread numbers below are heuristic.
 * The real switching points will depend on the simulated system,
 * the algorithms used and the hardware it's running on, as well as on
 * whether other jobs are running on the same machine. We try to take into
 * account factors that have a large influence, such as recent Intel CPUs
 * being much better at wide multi-threading. The remaining factors should
 * (hopefully) have a small influence, such that the performance just before
 * and after a switch point doesn't change too much.
 */
/*! \brief The minimum number of atoms per thread-MPI thread when GPUs
 * are present. With fewer atoms than this, the number of thread-MPI
 * ranks will get lowered.
 */
static constexpr int min_atoms_per_mpi_thread = 90;
/*! \brief The minimum number of atoms per GPU with thread-MPI
 * active. With fewer atoms than this, the number of thread-MPI ranks
 * will get lowered.
 */
static constexpr int min_atoms_per_gpu = 900;
/*! \brief Constants for implementing default divisions of threads */

/* TODO choose nthreads_omp based on hardware topology
   when we have a hardware topology detection library */
/* First we consider the case of no MPI (1 MPI rank).
 * In general, when running up to 8 threads, OpenMP should be faster.
 * Note: on AMD Bulldozer we should avoid running OpenMP over two dies.
 * On Intel>=Nehalem running OpenMP on a single CPU is always faster,
 * even on two CPUs it's usually faster (but with many OpenMP threads
 * it could be faster not to use HT, currently we always use HT).
 * On Nehalem/Westmere we want to avoid running 16 threads over
 * two CPUs with HT, so we need a limit<16; thus we use 12.
 * A reasonable limit for Intel Sandy and Ivy Bridge,
 * not knowing the topology, is 16 threads.
 * Below we check for Intel and AVX, which for now includes
 * Sandy/Ivy Bridge and Haswell/Broadwell. By checking for AVX instead of
 * model numbers we ensure also future Intel CPUs are covered.
 */
constexpr int nthreads_omp_faster_default   =  8;
constexpr int nthreads_omp_faster_Nehalem   = 12;
constexpr int nthreads_omp_faster_Intel_AVX = 16;
constexpr int nthreads_omp_faster_AMD_Ryzen = 16;
/* For CPU-only runs the fastest options are usually MPI or OpenMP only.
 * With one GPU, using MPI only is almost never optimal, so we need to
 * compare running pure OpenMP with combined MPI+OpenMP. This means higher
 * OpenMP thread counts can still be ok. Multiplying the numbers above
 * by a factor of 2 seems to be a good estimate.
 */
constexpr int nthreads_omp_faster_gpu_fac = 2;
/* This is the case with MPI (2 or more MPI PP ranks).
 * By default we will terminate with a fatal error when more than 8
 * OpenMP threads are (indirectly) requested, since using fewer threads
 * nearly always results in better performance.
 * With thread-MPI and multiple GPUs or one GPU and too many threads
 * we first try 6 OpenMP threads and then fewer until the number of MPI
 * ranks is divisible by the number of GPUs.
 */
constexpr int nthreads_omp_mpi_ok_max     = 8;
constexpr int nthreads_omp_mpi_ok_min_cpu = 1;
constexpr int nthreads_omp_mpi_ok_min_gpu = 2;
constexpr int nthreads_omp_mpi_target_max = 6;
/*! \brief Returns the maximum OpenMP thread count for which using a single MPI rank
 * should be faster than using multiple ranks with the same total thread count.
 */
static int nthreads_omp_faster(const gmx::CpuInfo &cpuInfo, gmx_bool bUseGPU)
{
    int nth;

    if (cpuInfo.vendor() == gmx::CpuInfo::Vendor::Intel &&
        cpuInfo.feature(gmx::CpuInfo::Feature::X86_Avx))
    {
        nth = nthreads_omp_faster_Intel_AVX;
    }
    else if (gmx::cpuIsX86Nehalem(cpuInfo))
    {
        nth = nthreads_omp_faster_Nehalem;
    }
    else if (cpuInfo.vendor() == gmx::CpuInfo::Vendor::Amd && cpuInfo.family() >= 23)
    {
        /* AMD Ryzen and later (family >= 23) */
        nth = nthreads_omp_faster_AMD_Ryzen;
    }
    else
    {
        nth = nthreads_omp_faster_default;
    }

    if (bUseGPU)
    {
        nth *= nthreads_omp_faster_gpu_fac;
    }

    nth = std::min(nth, GMX_OPENMP_MAX_THREADS);

    return nth;
}
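/* Worked example (illustrative, not from the original source): on an Intel
 * CPU with AVX and a GPU in use, nth starts at
 * nthreads_omp_faster_Intel_AVX = 16, is doubled by
 * nthreads_omp_faster_gpu_fac to 32, and is then capped at
 * GMX_OPENMP_MAX_THREADS, so a single rank is preferred up to 32 OpenMP
 * threads on such hardware.
 */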
/*! \brief Returns the maximum OpenMP thread count that passes the efficiency check */
gmx_unused static int nthreads_omp_efficient_max(int gmx_unused      nrank,
                                                 const gmx::CpuInfo &cpuInfo,
                                                 gmx_bool            bUseGPU)
{
    if (GMX_OPENMP && GMX_MPI && (nrank > 1))
    {
        return nthreads_omp_mpi_ok_max;
    }
    else
    {
        return nthreads_omp_faster(cpuInfo, bUseGPU);
    }
}
/*! \brief Return the number of thread-MPI ranks to use.
 * This is chosen such that we can always obey our own efficiency checks.
 */
gmx_unused static int get_tmpi_omp_thread_division(const gmx_hw_info_t *hwinfo,
                                                   const gmx_hw_opt_t  &hw_opt,
                                                   int                  nthreads_tot,
                                                   int                  ngpu)
{
    int                 nrank;
    const gmx::CpuInfo &cpuInfo = *hwinfo->cpuInfo;

    GMX_RELEASE_ASSERT(nthreads_tot > 0, "There must be at least one thread per rank");

    /* There are no separate PME nodes here, as we ensured in
     * check_and_update_hw_opt that nthreads_tmpi>0 with PME nodes
     * and a conditional ensures we would not have ended up here.
     * Note that separate PME nodes might be switched on later.
     */
    if (ngpu > 0)
    {
        if (hw_opt.nthreads_omp > 0)
        {
            /* In this case it is unclear if we should use 1 rank per GPU
             * or more or less, so we require also setting the number of ranks.
             */
            gmx_fatal(FARGS,
                      "When using GPUs, setting the number of OpenMP threads without specifying the number "
                      "of ranks can lead to conflicting demands. Please specify the number of thread-MPI ranks "
                      "as well (option -ntmpi).");
        }

        nrank = ngpu;

        /* When the user sets nthreads_omp, we can end up oversubscribing CPU cores
         * if we simply start as many ranks as GPUs. To avoid this, we start as few
         * tMPI ranks as necessary to avoid oversubscription and instead leave GPUs idle.
         * If the user does not set the number of OpenMP threads, nthreads_omp==0 and
         * this code has no effect.
         */
        GMX_RELEASE_ASSERT(hw_opt.nthreads_omp >= 0, "nthreads_omp is negative, but previous checks should "
                           "have prevented this");
        while (nrank*hw_opt.nthreads_omp > hwinfo->nthreads_hw_avail && nrank > 1)
        {
            nrank--;
        }

        if (nthreads_tot < nrank)
        {
            /* #thread < #gpu is very unlikely, but if so: waste gpu(s) */
            nrank = nthreads_tot;
        }
        else if (nthreads_tot > nthreads_omp_faster(cpuInfo, ngpu > 0) ||
                 (ngpu > 1 && nthreads_tot/ngpu > nthreads_omp_mpi_target_max))
        {
            /* The high OpenMP thread count will likely result in sub-optimal
             * performance. Increase the rank count to reduce the thread count
             * per rank. This will lead to GPU sharing by MPI ranks/threads.
             */
            int nshare;

            /* Increase the rank count as long as we have more than 6 OpenMP
             * threads per rank or the number of hardware threads is not
             * divisible by the rank count. Don't go below 2 OpenMP threads.
             */
            nshare = 1;
            do
            {
                nshare++;
                nrank = ngpu*nshare;
            }
            while (nthreads_tot/nrank > nthreads_omp_mpi_target_max ||
                   (nthreads_tot/(ngpu*(nshare + 1)) >= nthreads_omp_mpi_ok_min_gpu && nthreads_tot % nrank != 0));
        }
    }
    else if (hw_opt.nthreads_omp > 0)
    {
        /* Here we could oversubscribe, when we do, we issue a warning later */
        nrank = std::max(1, nthreads_tot/hw_opt.nthreads_omp);
    }
    else
    {
        if (nthreads_tot <= nthreads_omp_faster(cpuInfo, ngpu > 0))
        {
            /* Use pure OpenMP parallelization */
            nrank = 1;
        }
        else
        {
            /* Don't use OpenMP parallelization */
            nrank = nthreads_tot;
        }
    }

    return nrank;
}
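/* Worked example (illustrative, not from the original source): with
 * nthreads_tot = 24 and ngpu = 2, the second GPU-sharing test above fires
 * (24/2 = 12 > nthreads_omp_mpi_target_max = 6). The do/while loop then
 * tries nshare = 2, giving nrank = 4: 24/4 = 6 threads per rank is within
 * the target and 24 % 4 == 0, so the loop stops with 4 ranks of 6 OpenMP
 * threads each and 2 ranks sharing each GPU.
 */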
//! Return whether hyper-threading is enabled.
static bool gmxSmtIsEnabled(const gmx::HardwareTopology &hwTop)
{
    return (hwTop.supportLevel() >= gmx::HardwareTopology::SupportLevel::Basic &&
            hwTop.machine().sockets[0].cores[0].hwThreads.size() > 1);
}
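/* Note (added for clarity): this inspects only the first core of the first
 * socket, i.e. it assumes SMT is enabled or disabled uniformly across the
 * machine, which is typically the case on the hardware GROMACS runs on.
 */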
namespace
{

//! Handles checks for algorithms that must use a single rank.
class SingleRankChecker
{
    public:
        SingleRankChecker() : value_(false) {}
        /*! \brief Call this function for each possible condition
            under which a single rank is required, along with a string
            describing the constraint when it is applied. */
        void applyConstraint(bool condition, const char *description)
        {
            if (condition)
            {
                value_ = true;
                reasons_.push_back(gmx::formatString("%s only supports a single rank.", description));
            }
        }
        //! After applying any conditions, is a single rank required?
        bool mustUseOneRank() const
        {
            return value_;
        }
        /*! \brief Return a formatted string to use when writing a
            message when a single rank is required (or empty if no
            constraint exists). */
        std::string getMessage() const
        {
            return formatAndJoin(reasons_, "\n", gmx::IdentityFormatter());
        }
    private:
        bool                     value_;
        std::vector<std::string> reasons_;
};

}   // namespace
/* Get the number of MPI ranks to use for thread-MPI based on how many
 * were requested, which algorithms we're using,
 * and how many particles there are.
 * At this point we have already called check_and_update_hw_opt.
 * Thus all options should be internally consistent and consistent
 * with the hardware, except that ntmpi could be larger than #GPU.
 */
int get_nthreads_mpi(const gmx_hw_info_t    *hwinfo,
                     gmx_hw_opt_t           *hw_opt,
                     const std::vector<int> &gpuIdsToUse,
                     bool                    nonbondedOnGpu,
                     bool                    pmeOnGpu,
                     const t_inputrec       *inputrec,
                     const gmx_mtop_t       *mtop,
                     const gmx::MDLogger    &mdlog,
                     bool                    doMembed)
{
    int nthreads_hw, nthreads_tot_max, nrank, ngpu;
    int min_atoms_per_mpi_rank;
    const gmx::CpuInfo          &cpuInfo = *hwinfo->cpuInfo;
    const gmx::HardwareTopology &hwTop   = *hwinfo->hardwareTopology;

    if (pmeOnGpu)
    {
        GMX_RELEASE_ASSERT((EEL_PME(inputrec->coulombtype) || EVDW_PME(inputrec->vdwtype)) &&
                           pme_gpu_supports_build(nullptr) &&
                           pme_gpu_supports_hardware(*hwinfo, nullptr) &&
                           pme_gpu_supports_input(*inputrec, *mtop, nullptr),
                           "PME can't be on GPUs unless we are using PME");

        // PME on GPUs supports a single PME rank with PP running on the same or a few other ranks.
        // For now, let's treat separate PME GPU rank as opt-in.
        if (hw_opt->nthreads_tmpi < 1)
        {
            return 1;
        }
    }
    {
        /* Check if an algorithm does not support parallel simulation. */
        // TODO This might work better if e.g. implemented algorithms
        // had to define a function that returns such requirements,
        // and a description string.
        SingleRankChecker checker;
        checker.applyConstraint(inputrec->eI == eiLBFGS, "L-BFGS minimization");
        checker.applyConstraint(inputrec->coulombtype == eelEWALD, "Plain Ewald electrostatics");
        checker.applyConstraint(doMembed, "Membrane embedding");
        bool useOrientationRestraints = (gmx_mtop_ftype_count(mtop, F_ORIRES) > 0);
        checker.applyConstraint(useOrientationRestraints, "Orientation restraints");
        if (checker.mustUseOneRank())
        {
            std::string message = checker.getMessage();
            if (hw_opt->nthreads_tmpi > 1)
            {
                gmx_fatal(FARGS, "%s However, you asked for more than 1 thread-MPI rank, so mdrun cannot continue. "
                          "Choose a single rank, or a different algorithm.", message.c_str());
            }
            GMX_LOG(mdlog.warning).asParagraph().appendTextFormatted("%s Choosing to use only a single thread-MPI rank.", message.c_str());
            return 1;
        }
    }
    if (hw_opt->nthreads_tmpi > 0)
    {
        /* Trivial, return the user's choice right away */
        return hw_opt->nthreads_tmpi;
    }

    // Now implement automatic selection of the number of thread-MPI ranks
    nthreads_hw = hwinfo->nthreads_hw_avail;

    if (nthreads_hw <= 0)
    {
        /* This should normally not happen, but if it does, we handle it */
        gmx_fatal(FARGS, "The number of available hardware threads can not be detected, please specify the number of "
                  "MPI ranks and the number of OpenMP threads (if supported) manually with options "
                  "-ntmpi and -ntomp, respectively");
    }

    /* How many total (#tMPI*#OpenMP) threads can we start? */
    if (hw_opt->nthreads_tot > 0)
    {
        nthreads_tot_max = hw_opt->nthreads_tot;
    }
    else
    {
        nthreads_tot_max = nthreads_hw;
    }
    /* nonbondedOnGpu might be false e.g. because this simulation uses
     * the group scheme, or is a rerun with energy groups. */
    ngpu = (nonbondedOnGpu ? static_cast<int>(gpuIdsToUse.size()) : 0);

    if (inputrec->cutoff_scheme == ecutsGROUP)
    {
        /* We checked this before, but it doesn't hurt to do it once more */
        GMX_RELEASE_ASSERT(hw_opt->nthreads_omp == 1, "The group scheme only supports one OpenMP thread per rank");
    }

    nrank = get_tmpi_omp_thread_division(hwinfo, *hw_opt, nthreads_tot_max, ngpu);
    if (inputrec->eI == eiNM || EI_TPI(inputrec->eI))
    {
        /* Dims/steps are divided over the nodes instead of splitting the atoms.
         * With NM we can't have more ranks than #atoms*#dim. With TPI it's
         * unlikely we have fewer atoms than ranks, and if so, communication
         * would become a bottleneck, so we set the limit to 1 atom/rank.
         */
        min_atoms_per_mpi_rank = 1;
    }
    else
    {
        if (ngpu >= 1)
        {
            min_atoms_per_mpi_rank = min_atoms_per_gpu;
        }
        else
        {
            min_atoms_per_mpi_rank = min_atoms_per_mpi_thread;
        }
    }
    if (mtop->natoms/nrank < min_atoms_per_mpi_rank)
    {
        int nrank_new;

        /* the rank number was chosen automatically, but there are too few
           atoms per rank, so we need to reduce the rank count */
        nrank_new = std::max(1, mtop->natoms/min_atoms_per_mpi_rank);

        /* Avoid partial use of Hyper-Threading */
        if (gmxSmtIsEnabled(hwTop) &&
            nrank_new > nthreads_hw/2 && nrank_new < nthreads_hw)
        {
            nrank_new = nthreads_hw/2;
        }

        /* If the user specified the total thread count, ensure this is
         * divisible by the number of ranks.
         * It is quite likely that we have too many total threads compared
         * to the size of the system, but if the user asked for this many
         * threads we should respect that.
         */
        while (hw_opt->nthreads_tot > 0 &&
               hw_opt->nthreads_tot % nrank_new != 0)
        {
            nrank_new--;
        }

        /* Avoid large prime numbers in the rank count */
        if (nrank_new >= 6)
        {
            /* Use only 6,8,10 with additional factors of 2 */
            int fac;

            fac = 2;
            while (3*fac*2 <= nrank_new)
            {
                fac *= 2;
            }

            nrank_new = (nrank_new/fac)*fac;
        }
        else
        {
            /* Avoid 5, since a small system won't fit 5 domains along
             * a dimension. This might lead to wasting some cores, but this
             * will have a small impact in this regime of very small systems.
             */
            nrank_new = std::min(4, nrank_new);
        }

        nrank = nrank_new;

        /* We reduced the number of tMPI ranks, which means we might violate
         * our own efficiency checks if we simply use all hardware threads.
         */
        if (GMX_OPENMP && hw_opt->nthreads_omp <= 0 && hw_opt->nthreads_tot <= 0)
        {
            /* The user set neither the total nor the OpenMP thread count,
             * we should use all hardware threads, unless we will violate
             * our own efficiency limitation on the thread count.
             */
            int nt_omp_max;

            nt_omp_max = nthreads_omp_efficient_max(nrank, cpuInfo, ngpu >= 1);

            if (nrank*nt_omp_max < hwinfo->nthreads_hw_avail)
            {
                /* Limit the number of OpenMP threads to start */
                hw_opt->nthreads_omp = nt_omp_max;
            }
        }

        fprintf(stderr, "\n");
        fprintf(stderr, "NOTE: Parallelization is limited by the small number of atoms,\n");
        fprintf(stderr, "      only starting %d thread-MPI ranks.\n", nrank);
        fprintf(stderr, "      You can use the -nt and/or -ntmpi option to optimize the number of threads.\n\n");
    }

    return nrank;
}
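/* Worked example of the prime-avoidance rounding above (illustrative, not
 * from the original source): for nrank_new = 11, fac stays 2 (3*2*2 = 12 > 11),
 * giving (11/2)*2 = 10 ranks; for nrank_new = 17, fac grows to 4
 * (3*2*2 = 12 <= 17, 3*4*2 = 24 > 17), giving (17/4)*4 = 16 ranks. Either way
 * the result is 3, 4 or 5 times a power of two, so the rank count can always
 * be factorized into a compact 3D domain decomposition grid, which a large
 * prime such as 11 or 17 could not.
 */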
void check_resource_division_efficiency(const gmx_hw_info_t *hwinfo,
                                        bool                 willUsePhysicalGpu,
                                        gmx_bool             bNtOmpOptionSet,
                                        t_commrec           *cr,
                                        const gmx::MDLogger &mdlog)
{
#if GMX_OPENMP && GMX_MPI
    GMX_UNUSED_VALUE(hwinfo);

    int         nth_omp_min, nth_omp_max;
    char        buf[1000];
    const char *mpi_option = GMX_THREAD_MPI ? " (option -ntmpi)" : "";

    /* This function should be called after thread-MPI (when configured) and
     * OpenMP have been initialized. Check that here.
     */
    if (GMX_THREAD_MPI)
    {
        GMX_RELEASE_ASSERT(nthreads_omp_faster_default >= nthreads_omp_mpi_ok_max,
                           "Inconsistent OpenMP thread count default values");
    }
    GMX_RELEASE_ASSERT(gmx_omp_nthreads_get(emntDefault) >= 1, "Must have at least one OpenMP thread");

    nth_omp_min = gmx_omp_nthreads_get(emntDefault);
    nth_omp_max = gmx_omp_nthreads_get(emntDefault);

    bool anyRankIsUsingGpus = willUsePhysicalGpu;
    /* Thread-MPI seems to have a bug with reduce on 1 node, so use a cond. */
    if (cr->nnodes + cr->npmenodes > 1)
    {
        int count[3], count_max[3];

        count[0] = -nth_omp_min;
        count[1] =  nth_omp_max;
        count[2] =  int(willUsePhysicalGpu);

        MPI_Allreduce(count, count_max, 3, MPI_INT, MPI_MAX, cr->mpi_comm_mysim);

        /* In case of an inhomogeneous run setup we use the maximum counts */
        nth_omp_min        = -count_max[0];
        nth_omp_max        =  count_max[1];
        anyRankIsUsingGpus = count_max[2] > 0;
    }
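    /* Note (added for clarity): negating nth_omp_min before the reduction
     * lets a single MPI_MAX compute both extrema at once: the maximum of
     * -nth_omp_min over all ranks is minus the minimum, which the sign flip
     * on count_max[0] above undoes.
     */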
    int nthreads_omp_mpi_ok_min;

    if (!anyRankIsUsingGpus)
    {
        nthreads_omp_mpi_ok_min = nthreads_omp_mpi_ok_min_cpu;
    }
    else
    {
        /* With GPUs we set the minimum number of OpenMP threads to 2 to catch
         * cases where the user specifies #ranks == #cores.
         */
        nthreads_omp_mpi_ok_min = nthreads_omp_mpi_ok_min_gpu;
    }

    if (DOMAINDECOMP(cr))
    {
        if (nth_omp_max < nthreads_omp_mpi_ok_min ||
            nth_omp_max > nthreads_omp_mpi_ok_max)
        {
            /* Note that we print target_max here, not ok_max */
            sprintf(buf, "Your choice of number of MPI ranks and amount of resources results in using %d OpenMP "
                    "threads per rank, which is most likely inefficient. The optimum is usually between %d and"
                    " %d threads per rank.",
                    nth_omp_max,
                    nthreads_omp_mpi_ok_min,
                    nthreads_omp_mpi_target_max);

            if (bNtOmpOptionSet)
            {
                GMX_LOG(mdlog.warning).asParagraph().appendTextFormatted("NOTE: %s", buf);
            }
            else
            {
                /* This fatal error, and the one below, is nasty, but it's
                 * probably the only way to ensure that all users don't waste
                 * a lot of resources, since many users don't read logs/stderr.
                 */
                gmx_fatal(FARGS, "%s If you want to run with this setup, specify the -ntomp option. But we suggest to "
                          "change the number of MPI ranks%s.",
                          buf, mpi_option);
            }
        }
    }
#else  // !GMX_OPENMP || !GMX_MPI
    GMX_UNUSED_VALUE(bNtOmpOptionSet);
    GMX_UNUSED_VALUE(willUsePhysicalGpu);
    GMX_UNUSED_VALUE(cr);
    GMX_UNUSED_VALUE(nthreads_omp_mpi_ok_max);
    GMX_UNUSED_VALUE(nthreads_omp_mpi_ok_min_cpu);
    /* Check if we have more than 1 physical core, if detected,
     * or more than 1 hardware thread if physical cores were not detected.
     */
    if (!GMX_OPENMP && !GMX_MPI && hwinfo->hardwareTopology->numberOfCores() > 1)
    {
        GMX_LOG(mdlog.warning).asParagraph().appendText(
                "NOTE: GROMACS was compiled without OpenMP and (thread-)MPI support, can only use a single CPU core");
    }
#endif  // end GMX_OPENMP && GMX_MPI
}
//! Dump a \c hw_opt to \c fp.
static void print_hw_opt(FILE *fp, const gmx_hw_opt_t *hw_opt)
{
    fprintf(fp, "hw_opt: nt %d ntmpi %d ntomp %d ntomp_pme %d gpu_id '%s' gputasks '%s'\n",
            hw_opt->nthreads_tot,
            hw_opt->nthreads_tmpi,
            hw_opt->nthreads_omp,
            hw_opt->nthreads_omp_pme,
            hw_opt->gpuIdsAvailable.c_str(),
            hw_opt->userGpuTaskAssignment.c_str());
}
void check_and_update_hw_opt_1(const gmx::MDLogger &mdlog,
                               gmx_hw_opt_t        *hw_opt,
                               const t_commrec     *cr,
                               int                  nPmeRanks)
{
    /* Currently hw_opt only contains default settings or settings supplied
     * by the user on the command line.
     */
    if (hw_opt->nthreads_omp < 0)
    {
        gmx_fatal(FARGS, "The number of OpenMP threads supplied on the command line is %d, which is negative "
                  "and not allowed", hw_opt->nthreads_omp);
    }

    /* Check for OpenMP settings stored in environment variables, which can
     * potentially be different on different MPI ranks.
     */
    gmx_omp_nthreads_read_env(mdlog, &hw_opt->nthreads_omp);

    /* Check restrictions on the user supplied options before modifying them.
     * TODO: Put the user values in a const struct and preserve them.
     */
    if (!GMX_THREAD_MPI)
    {
        if (hw_opt->nthreads_tot > 0)
        {
            gmx_fatal(FARGS, "Setting the total number of threads is only supported with thread-MPI and GROMACS was "
                      "compiled without thread-MPI");
        }
        if (hw_opt->nthreads_tmpi > 0)
        {
            gmx_fatal(FARGS, "Setting the number of thread-MPI ranks is only supported with thread-MPI and GROMACS was "
                      "compiled without thread-MPI");
        }
    }

    /* With thread-MPI the master thread sets hw_opt->totNumThreadsIsAuto.
     * The other threads receive a partially processed hw_opt from the master
     * thread and should not set hw_opt->totNumThreadsIsAuto again.
     */
    if (!GMX_THREAD_MPI || SIMMASTER(cr))
    {
        /* Check if mdrun is free to choose the total number of threads */
        hw_opt->totNumThreadsIsAuto = (hw_opt->nthreads_omp == 0 && hw_opt->nthreads_omp_pme == 0 && hw_opt->nthreads_tot == 0);
    }

    if (GMX_OPENMP)
    {
        /* Check restrictions on PME thread related options set by the user */

        if (hw_opt->nthreads_omp_pme > 0 && hw_opt->nthreads_omp <= 0)
        {
            gmx_fatal(FARGS, "You need to specify -ntomp in addition to -ntomp_pme");
        }

        if (hw_opt->nthreads_omp_pme >= 1 &&
            hw_opt->nthreads_omp_pme != hw_opt->nthreads_omp &&
            nPmeRanks <= 0)
        {
            /* This can result in a fatal error on many MPI ranks,
             * but since the thread count can differ per rank,
             * we can't easily avoid this.
             */
            gmx_fatal(FARGS, "You need to explicitly specify the number of PME ranks (-npme) when using "
                      "different numbers of OpenMP threads for PP and PME ranks");
        }
    }
    else
    {
        /* GROMACS was configured without OpenMP support */

        if (hw_opt->nthreads_omp > 1 || hw_opt->nthreads_omp_pme > 1)
        {
            gmx_fatal(FARGS, "More than 1 OpenMP thread requested, but GROMACS was compiled without OpenMP support");
        }
        hw_opt->nthreads_omp     = 1;
        hw_opt->nthreads_omp_pme = 1;
    }

    if (hw_opt->nthreads_tot > 0 && hw_opt->nthreads_omp_pme <= 0)
    {
        /* We have the same number of OpenMP threads for PP and PME ranks,
         * thus we can perform several consistency checks.
         */
        if (hw_opt->nthreads_tmpi > 0 &&
            hw_opt->nthreads_omp > 0 &&
            hw_opt->nthreads_tot != hw_opt->nthreads_tmpi*hw_opt->nthreads_omp)
        {
            gmx_fatal(FARGS, "The total number of threads requested (%d) does not match the thread-MPI ranks (%d) "
                      "times the OpenMP threads (%d) requested",
                      hw_opt->nthreads_tot, hw_opt->nthreads_tmpi, hw_opt->nthreads_omp);
        }

        if (hw_opt->nthreads_tmpi > 0 &&
            hw_opt->nthreads_tot % hw_opt->nthreads_tmpi != 0)
        {
            gmx_fatal(FARGS, "The total number of threads requested (%d) is not divisible by the number of thread-MPI "
                      "ranks requested (%d)",
                      hw_opt->nthreads_tot, hw_opt->nthreads_tmpi);
        }

        if (hw_opt->nthreads_omp > 0 &&
            hw_opt->nthreads_tot % hw_opt->nthreads_omp != 0)
        {
            gmx_fatal(FARGS, "The total number of threads requested (%d) is not divisible by the number of OpenMP "
                      "threads requested (%d)",
                      hw_opt->nthreads_tot, hw_opt->nthreads_omp);
        }
    }

    if (hw_opt->nthreads_tot > 0)
    {
        if (hw_opt->nthreads_omp > hw_opt->nthreads_tot)
        {
            gmx_fatal(FARGS, "You requested %d OpenMP threads with %d total threads. Choose a total number of threads "
                      "that is a multiple of the number of OpenMP threads.",
                      hw_opt->nthreads_omp, hw_opt->nthreads_tot);
        }

        if (hw_opt->nthreads_tmpi > hw_opt->nthreads_tot)
        {
            gmx_fatal(FARGS, "You requested %d thread-MPI ranks with %d total threads. Choose a total number of "
                      "threads that is a multiple of the number of thread-MPI ranks.",
                      hw_opt->nthreads_tmpi, hw_opt->nthreads_tot);
        }
    }

    if (debug)
    {
        print_hw_opt(debug, hw_opt);
    }

    /* Asserting this simplifies the hardware resource division later
     * on. */
    GMX_RELEASE_ASSERT(!(hw_opt->nthreads_omp_pme >= 1 && hw_opt->nthreads_omp <= 0),
                       "PME thread count should only be set when the normal thread count is also set");
}
void check_and_update_hw_opt_2(gmx_hw_opt_t *hw_opt,
                               int           cutoff_scheme)
{
    if (cutoff_scheme == ecutsGROUP)
    {
        /* We only have OpenMP support for PME-only nodes */
        if (hw_opt->nthreads_omp > 1)
        {
            gmx_fatal(FARGS, "OpenMP threads have been requested with cut-off scheme %s, but these are only supported "
                      "with cut-off scheme %s",
                      ecutscheme_names[cutoff_scheme],
                      ecutscheme_names[ecutsVERLET]);
        }
        hw_opt->nthreads_omp = 1;
    }
}
void checkAndUpdateRequestedNumOpenmpThreads(gmx_hw_opt_t         *hw_opt,
                                             const gmx_hw_info_t  &hwinfo,
                                             const t_commrec      *cr,
                                             const gmx_multisim_t *ms,
                                             int                   numRanksOnThisNode,
                                             PmeRunMode            pmeRunMode,
                                             const gmx_mtop_t     &mtop)
{
    if (GMX_THREAD_MPI)
    {
        GMX_RELEASE_ASSERT(hw_opt->nthreads_tmpi >= 1, "Must have at least one thread-MPI rank");

        /* If the user set the total number of threads on the command line
         * and did not specify the number of OpenMP threads, set the latter here.
         */
        if (hw_opt->nthreads_tot > 0 && hw_opt->nthreads_omp <= 0)
        {
            hw_opt->nthreads_omp = hw_opt->nthreads_tot/hw_opt->nthreads_tmpi;

            if (!GMX_OPENMP && hw_opt->nthreads_omp > 1)
            {
                gmx_fatal(FARGS, "You (indirectly) asked for OpenMP threads by setting -nt > -ntmpi, but GROMACS was "
                          "compiled without OpenMP support");
            }
        }
    }
    /* With both non-bonded and PME on GPU, the work left on the CPU is often
     * (much) slower with SMT than without SMT. This is mostly the case with
     * few atoms per core. Thus, if the number of threads is set to auto,
     * we turn off SMT in that case. Note that PME on GPU implies that also
     * the non-bonded are computed on the GPU.
     * We only need to do this when the number of hardware threads is larger
     * than the number of cores. Note that a queuing system could limit
     * the number of hardware threads available, but we are not trying to be
     * too smart here in that case.
     */
    /* The thread reduction and synchronization costs go up roughly quadratically
     * with the thread count, so we apply a threshold quadratic in #cores.
     * Also more cores per GPU usually means the CPU gets faster than the GPU.
     * The number 1000 atoms per core^2 is a reasonable threshold
     * for Intel x86 and AMD Threadripper.
     */
    constexpr int c_numAtomsPerCoreSquaredSmtThreshold = 1000;
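    /* Worked example (illustrative, not from the original source): a
     * single-rank, single-simulation run on a 16-core CPU gives
     * numCoresPerRank = 16 below, so the threshold is
     * 1000 * 16^2 = 256000 atoms; any smaller system disables SMT by
     * starting one OpenMP thread per physical core instead of one per
     * hardware thread.
     */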
    /* Prepare conditions for deciding if we should disable SMT.
     * We currently only limit SMT for simulations using a single rank.
     * TODO: Consider limiting also for multi-rank simulations.
     */
    bool canChooseNumOpenmpThreads      = (GMX_OPENMP && hw_opt->nthreads_omp <= 0);
    bool haveSmtSupport                 = (hwinfo.hardwareTopology->supportLevel() >= gmx::HardwareTopology::SupportLevel::Basic &&
                                           hwinfo.hardwareTopology->machine().logicalProcessorCount > hwinfo.hardwareTopology->numberOfCores());
    bool simRunsSingleRankNBAndPmeOnGpu = (cr->nnodes == 1 && pmeRunMode == PmeRunMode::GPU);

    if (canChooseNumOpenmpThreads && haveSmtSupport &&
        simRunsSingleRankNBAndPmeOnGpu)
    {
        /* Note that the queuing system might have limited us from using
         * all detected ncore_tot physical cores. We are currently not
         * checking for that here.
         */
        int numRanksTot     = cr->nnodes*(isMultiSim(ms) ? ms->nsim : 1);
        int numAtomsPerRank = mtop.natoms/cr->nnodes;
        int numCoresPerRank = hwinfo.ncore_tot/numRanksTot;
        if (numAtomsPerRank < c_numAtomsPerCoreSquaredSmtThreshold*gmx::square(numCoresPerRank))
        {
            /* Choose one OpenMP thread per physical core */
            hw_opt->nthreads_omp = std::max(1, hwinfo.hardwareTopology->numberOfCores()/numRanksOnThisNode);
        }
    }

    GMX_RELEASE_ASSERT(GMX_OPENMP || hw_opt->nthreads_omp == 1, "Without OpenMP support, only one thread per rank can be used");

    /* We are done with updating nthreads_omp, we can set nthreads_omp_pme */
    if (hw_opt->nthreads_omp_pme <= 0 && hw_opt->nthreads_omp > 0)
    {
        hw_opt->nthreads_omp_pme = hw_opt->nthreads_omp;
    }

    if (debug)
    {
        print_hw_opt(debug, hw_opt);
    }
}
namespace gmx
{

void checkHardwareOversubscription(int                             numThreadsOnThisRank,
                                   int                             rank,
                                   const HardwareTopology         &hwTop,
                                   const PhysicalNodeCommunicator &comm,
                                   const MDLogger                 &mdlog)
{
    if (hwTop.supportLevel() < HardwareTopology::SupportLevel::LogicalProcessorCount)
    {
        /* There is nothing we can check */
        return;
    }

    int numRanksOnThisNode   = comm.size_;
    int numThreadsOnThisNode = numThreadsOnThisRank;
#if GMX_MPI
    /* Avoid MPI calls with uninitialized thread-MPI communicators */
    if (comm.size_ > 1)
    {
        /* Count the threads within this physical node */
        MPI_Allreduce(&numThreadsOnThisRank, &numThreadsOnThisNode, 1, MPI_INT, MPI_SUM, comm.comm_);
    }
#endif
    if (numThreadsOnThisNode > hwTop.machine().logicalProcessorCount)
    {
        std::string mesg = "WARNING: ";
        if (GMX_LIB_MPI)
        {
            mesg += formatString("On rank %d: o", rank);
        }
        else
        {
            mesg += "O";
        }
        mesg += formatString("versubscribing the available %d logical CPU cores", hwTop.machine().logicalProcessorCount);
        if (GMX_LIB_MPI)
        {
            mesg += " per node";
        }
        mesg += formatString(" with %d ", numThreadsOnThisNode);
        if (numRanksOnThisNode == numThreadsOnThisNode)
        {
            if (GMX_THREAD_MPI)
            {
                mesg += "thread-MPI threads.";
            }
            else
            {
                mesg += "MPI processes.";
            }
        }
        else
        {
            mesg += "threads.";
        }
        mesg += "\n This will cause considerable performance loss.";
        /* Note that only the master rank logs to stderr and only ranks
         * with an open log file write to the log.
         * TODO: When we have a proper parallel logging framework,
         * the framework should add the rank and node numbers.
         */
        GMX_LOG(mdlog.warning).asParagraph().appendTextFormatted("%s", mesg.c_str());
    }
}

}  // namespace gmx