/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2015,2016,2017, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "gmxpre.h"

#include "resource-division.h"

#include "config.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <algorithm>
#include "gromacs/hardware/cpuinfo.h"
#include "gromacs/hardware/detecthardware.h"
#include "gromacs/hardware/gpu_hw_info.h"
#include "gromacs/hardware/hardwaretopology.h"
#include "gromacs/hardware/hw_info.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/topology/topology.h"
#include "gromacs/utility/baseversion.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/programcontext.h"
#include "gromacs/utility/stringutil.h"

/* DISCLAIMER: All the atom count and thread numbers below are heuristic.
 * The real switching points will depend on the simulated system,
 * the algorithms used and the hardware it's running on, as well as on
 * whether other jobs are running on the same machine. We try to take into
 * account factors that have a large influence, such as recent Intel CPUs
 * being much better at wide multi-threading. The remaining factors should
 * (hopefully) have a small influence, such that the performance just before
 * and after a switch point doesn't change too much.
 */
//! Constant used to help minimize preprocessed code
static const bool bHasOmpSupport = GMX_OPENMP;

//! Constant used to help minimize preprocessed code
static const bool bGPUBinary     = GMX_GPU != GMX_GPU_NONE;
#if GMX_THREAD_MPI
/* The minimum number of atoms per tMPI thread. With fewer atoms than this,
 * the number of threads will get lowered.
 */
static const int min_atoms_per_mpi_thread =  90;
static const int min_atoms_per_gpu        = 900;
#endif /* GMX_THREAD_MPI */
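/* Illustration of the limits above: a 1800-atom system would be limited
 * to at most 1800/90 = 20 tMPI threads on CPUs, or 1800/900 = 2 ranks
 * when each rank drives a GPU. */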
/* TODO choose nthreads_omp based on hardware topology
   when we have a hardware topology detection library */
/* First we consider the case of no MPI (1 MPI rank).
 * In general, when running up to 8 threads, OpenMP should be faster.
 * Note: on AMD Bulldozer we should avoid running OpenMP over two dies.
 * On Intel >= Nehalem running OpenMP on a single CPU is always faster,
 * even on two CPUs it's usually faster (but with many OpenMP threads
 * it could be faster not to use HT, currently we always use HT).
 * On Nehalem/Westmere we want to avoid running 16 threads over
 * two CPUs with HT, so we need a limit < 16; thus we use 12.
 * A reasonable limit for Intel Sandy and Ivy Bridge,
 * not knowing the topology, is 16 threads.
 * Below we check for Intel and AVX, which for now includes
 * Sandy/Ivy Bridge, Haswell and Broadwell. By checking for AVX instead of
 * model numbers we ensure also future Intel CPUs are covered.
 */
const int nthreads_omp_faster_default   =  8;
const int nthreads_omp_faster_Nehalem   = 12;
const int nthreads_omp_faster_Intel_AVX = 16;
const int nthreads_omp_faster_AMD_Ryzen = 16;

/* For CPU-only runs the fastest options are usually MPI or OpenMP only.
 * With one GPU, using MPI only is almost never optimal, so we need to
 * compare running pure OpenMP with combined MPI+OpenMP. This means higher
 * OpenMP thread counts can still be ok. Multiplying the numbers above
 * by a factor of 2 seems to be a good estimate.
 */
const int nthreads_omp_faster_gpu_fac = 2;
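/* For example, on an Intel CPU with AVX plus a GPU, a single rank is
 * expected to stay competitive up to 16*2 = 32 OpenMP threads. */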
/* This is the case with MPI (2 or more MPI PP ranks).
 * By default we will terminate with a fatal error when more than 8
 * OpenMP threads are (indirectly) requested, since using fewer threads
 * nearly always results in better performance.
 * With thread-MPI and multiple GPUs or one GPU and too many threads
 * we first try 6 OpenMP threads and then fewer until the number of MPI
 * ranks is divisible by the number of GPUs.
 */
#if GMX_OPENMP && GMX_MPI
const int nthreads_omp_mpi_ok_max     = 8;
const int nthreads_omp_mpi_ok_min_cpu = 1;
#endif
const int nthreads_omp_mpi_ok_min_gpu = 2;
const int nthreads_omp_mpi_target_max = 6;
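/* For example, 2 MPI ranks on a 24-thread node give 12 OpenMP threads per
 * rank; that exceeds nthreads_omp_mpi_ok_max (8), so the efficiency check
 * below will complain unless the user explicitly set the thread count. */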
/* Returns the maximum OpenMP thread count for which using a single MPI rank
 * should be faster than using multiple ranks with the same total thread count.
 */
static int nthreads_omp_faster(const gmx::CpuInfo &cpuInfo, gmx_bool bUseGPU)
{
    int nth;

    if (cpuInfo.vendor() == gmx::CpuInfo::Vendor::Intel &&
        cpuInfo.feature(gmx::CpuInfo::Feature::X86_Avx))
    {
        nth = nthreads_omp_faster_Intel_AVX;
    }
    else if (gmx::cpuIsX86Nehalem(cpuInfo))
    {
        nth = nthreads_omp_faster_Nehalem;
    }
    else if (cpuInfo.vendor() == gmx::CpuInfo::Vendor::Amd && cpuInfo.family() >= 23)
    {
        /* Family 23 (0x17) is AMD Ryzen */
        nth = nthreads_omp_faster_AMD_Ryzen;
    }
    else
    {
        nth = nthreads_omp_faster_default;
    }

    if (bUseGPU)
    {
        nth *= nthreads_omp_faster_gpu_fac;
    }

    nth = std::min(nth, GMX_OPENMP_MAX_THREADS);

    return nth;
}
/* Returns the maximum OpenMP thread count that passes the efficiency check */
static int nthreads_omp_efficient_max(int gmx_unused      nrank,
                                      const gmx::CpuInfo &cpuInfo,
                                      gmx_bool            bUseGPU)
{
#if GMX_OPENMP && GMX_MPI
    if (nrank > 1)
    {
        return nthreads_omp_mpi_ok_max;
    }
    else
#endif
    {
        return nthreads_omp_faster(cpuInfo, bUseGPU);
    }
}
/* Return the number of thread-MPI ranks to use.
 * This is chosen such that we can always obey our own efficiency checks.
 */
static int get_tmpi_omp_thread_division(const gmx_hw_info_t *hwinfo,
                                        const gmx_hw_opt_t  *hw_opt,
                                        int                  nthreads_tot,
                                        int                  ngpu)
{
    int                 nrank;
    const gmx::CpuInfo &cpuInfo = *hwinfo->cpuInfo;

    GMX_RELEASE_ASSERT(nthreads_tot > 0, "There must be at least one thread per rank");

    /* There are no separate PME nodes here, as we ensured in
     * check_and_update_hw_opt that nthreads_tmpi>0 with PME nodes
     * and a conditional ensures we would not have ended up here.
     * Note that separate PME nodes might be switched on later.
     */
    if (ngpu > 0)
    {
        nrank = ngpu;

        /* When the user sets nthreads_omp, we can end up oversubscribing CPU cores
         * if we simply start as many ranks as GPUs. To avoid this, we start as few
         * tMPI ranks as necessary to avoid oversubscription and instead leave GPUs idle.
         * If the user does not set the number of OpenMP threads, nthreads_omp==0 and
         * this code has no effect.
         */
        GMX_RELEASE_ASSERT(hw_opt->nthreads_omp >= 0, "nthreads_omp is negative, but previous checks should have prevented this");
        while (nrank*hw_opt->nthreads_omp > hwinfo->nthreads_hw_avail && nrank > 1)
        {
            nrank--;
        }

        if (nthreads_tot < nrank)
        {
            /* #thread < #gpu is very unlikely, but if so: waste gpu(s) */
            nrank = nthreads_tot;
        }
        else if (gmx_gpu_sharing_supported() &&
                 (nthreads_tot > nthreads_omp_faster(cpuInfo, ngpu > 0) ||
                  (ngpu > 1 && nthreads_tot/ngpu > nthreads_omp_mpi_target_max)))
        {
            /* The high OpenMP thread count will likely result in sub-optimal
             * performance. Increase the rank count to reduce the thread count
             * per rank. This will lead to GPU sharing by MPI ranks/threads.
             */
            int nshare;

            /* Increase the rank count as long as we have more than 6 OpenMP
             * threads per rank or the number of hardware threads is not
             * divisible by the rank count. Don't go below 2 OpenMP threads.
             */
            nshare = 1;
            do
            {
                nshare++;
                nrank = ngpu*nshare;
            }
            while (nthreads_tot/nrank > nthreads_omp_mpi_target_max ||
                   (nthreads_tot/(ngpu*(nshare + 1)) >= nthreads_omp_mpi_ok_min_gpu && nthreads_tot % nrank != 0));
        }
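        /* Worked example of the loop above: with nthreads_tot = 32 and
         * ngpu = 2 the iterations give nrank = 4 (8 threads/rank, > 6),
         * then nrank = 6 (32 % 6 != 0), then nrank = 8, which is accepted:
         * 4 OpenMP threads per rank and 4 ranks sharing each GPU. */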
    }
    else if (hw_opt->nthreads_omp > 0)
    {
        /* Here we could oversubscribe; when we do, we issue a warning later */
        nrank = std::max(1, nthreads_tot/hw_opt->nthreads_omp);
    }
    else
    {
        if (nthreads_tot <= nthreads_omp_faster(cpuInfo, ngpu > 0))
        {
            /* Use pure OpenMP parallelization */
            nrank = 1;
        }
        else
        {
            /* Don't use OpenMP parallelization */
            nrank = nthreads_tot;
        }
    }

    return nrank;
}
#if GMX_THREAD_MPI

static int getMaxGpuUsable(const gmx::MDLogger &mdlog, const gmx_hw_info_t *hwinfo,
                           int cutoff_scheme, gmx_bool bUseGpu)
{
    /* This code relies on the fact that GPUs are not detected when GPU
     * acceleration was disabled at run time by the user.
     */
    if (cutoff_scheme == ecutsVERLET &&
        bUseGpu &&
        hwinfo->gpu_info.n_dev_compatible > 0)
    {
        if (gmx_multiple_gpu_per_node_supported())
        {
            return hwinfo->gpu_info.n_dev_compatible;
        }
        else
        {
            if (hwinfo->gpu_info.n_dev_compatible > 1)
            {
                GMX_LOG(mdlog.warning).asParagraph().appendText("More than one compatible GPU is available, but GROMACS can only use one of them. Using a single thread-MPI rank.");
            }
            return 1;
        }
    }
    else
    {
        return 0;
    }
}

static bool
gmxSmtIsEnabled(const gmx::HardwareTopology &hwTop)
{
    return (hwTop.supportLevel() >= gmx::HardwareTopology::SupportLevel::Basic &&
            hwTop.machine().sockets[0].cores[0].hwThreads.size() > 1);
}
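/* Note that this only inspects the first core of the first socket, on the
 * assumption that SMT is enabled or disabled uniformly across the machine. */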

namespace
{

class SingleRankChecker
{
    public:
        SingleRankChecker() : value_(false), reasons_() {}
        /*! \brief Call this function for each possible condition
            under which a single rank is required, along with a string
            describing the constraint when it is applied. */
        void applyConstraint(bool condition, const char *description)
        {
            if (condition)
            {
                value_ = true;
                reasons_.push_back(gmx::formatString("%s only supports a single rank.", description));
            }
        }
        //! After applying any conditions, is a single rank required?
        bool mustUseOneRank() const
        {
            return value_;
        }
        /*! \brief Return a formatted string to use when writing a
            message when a single rank is required (or empty if no
            constraint exists). */
        std::string getMessage() const
        {
            return formatAndJoin(reasons_, "\n", gmx::IdentityFormatter());
        }
    private:
        bool                     value_;
        std::vector<std::string> reasons_;
};

} // namespace

/* Get the number of MPI ranks to use for thread-MPI based on how many
 * were requested, which algorithms we're using,
 * and how many particles there are.
 * At this point we have already called check_and_update_hw_opt.
 * Thus all options should be internally consistent and consistent
 * with the hardware, except that ntmpi could be larger than #GPU.
 */
int get_nthreads_mpi(const gmx_hw_info_t *hwinfo,
                     gmx_hw_opt_t        *hw_opt,
                     const t_inputrec    *inputrec,
                     const gmx_mtop_t    *mtop,
                     const gmx::MDLogger &mdlog,
                     gmx_bool             bUseGpu,
                     bool                 doMembed)
{
    int                          nthreads_hw, nthreads_tot_max, nrank, ngpu;
    int                          min_atoms_per_mpi_rank;

    const gmx::CpuInfo          &cpuInfo = *hwinfo->cpuInfo;
    const gmx::HardwareTopology &hwTop   = *hwinfo->hardwareTopology;

    {
        /* Check if an algorithm does not support parallel simulation. */
        // TODO This might work better if e.g. implemented algorithms
        // had to define a function that returns such requirements,
        // and a description string.
        SingleRankChecker checker;
        checker.applyConstraint(inputrec->eI == eiLBFGS, "L-BFGS minimization");
        checker.applyConstraint(inputrec->coulombtype == eelEWALD, "Plain Ewald electrostatics");
        checker.applyConstraint(doMembed, "Membrane embedding");
        if (checker.mustUseOneRank())
        {
            std::string message = checker.getMessage();
            if (hw_opt->nthreads_tmpi > 1)
            {
                gmx_fatal(FARGS, "%s However, you asked for more than 1 thread-MPI rank, so mdrun cannot continue. Choose a single rank, or a different algorithm.", message.c_str());
            }
            GMX_LOG(mdlog.warning).asParagraph().appendTextFormatted("%s Choosing to use only a single thread-MPI rank.", message.c_str());
            return 1;
        }
    }

    if (hw_opt->nthreads_tmpi > 0)
    {
        /* Trivial, return right away */
        return hw_opt->nthreads_tmpi;
    }

    nthreads_hw = hwinfo->nthreads_hw_avail;

    if (nthreads_hw <= 0)
    {
        /* This should normally not happen, but if it does, we handle it */
        gmx_fatal(FARGS, "The number of available hardware threads cannot be detected, please specify the number of MPI ranks and the number of OpenMP threads (if supported) manually with options -ntmpi and -ntomp, respectively");
    }

    /* How many total (#tMPI*#OpenMP) threads can we start? */
    if (hw_opt->nthreads_tot > 0)
    {
        nthreads_tot_max = hw_opt->nthreads_tot;
    }
    else
    {
        nthreads_tot_max = nthreads_hw;
    }

    ngpu = getMaxGpuUsable(mdlog, hwinfo, inputrec->cutoff_scheme, bUseGpu);

    if (inputrec->cutoff_scheme == ecutsGROUP)
    {
        /* We checked this before, but it doesn't hurt to do it once more */
        GMX_RELEASE_ASSERT(hw_opt->nthreads_omp == 1, "The group scheme only supports one OpenMP thread per rank");
    }

    nrank = get_tmpi_omp_thread_division(hwinfo, hw_opt, nthreads_tot_max, ngpu);

    if (inputrec->eI == eiNM || EI_TPI(inputrec->eI))
    {
        /* Dims/steps are divided over the nodes instead of splitting the atoms.
         * With NM we can't have more ranks than #atoms*#dim. With TPI it's
         * unlikely we have fewer atoms than ranks, and if so, communication
         * would become a bottleneck, so we set the limit to 1 atom/rank.
         */
        min_atoms_per_mpi_rank = 1;
    }
    else
    {
        if (ngpu >= 1)
        {
            min_atoms_per_mpi_rank = min_atoms_per_gpu;
        }
        else
        {
            min_atoms_per_mpi_rank = min_atoms_per_mpi_thread;
        }
    }

    if (mtop->natoms/nrank < min_atoms_per_mpi_rank)
    {
        int nrank_new;

        /* The rank count was chosen automatically, but there are too few
           atoms per rank, so we need to reduce the rank count */
        nrank_new = std::max(1, mtop->natoms/min_atoms_per_mpi_rank);

        /* Avoid partial use of Hyper-Threading */
        if (gmxSmtIsEnabled(hwTop) &&
            nrank_new > nthreads_hw/2 && nrank_new < nthreads_hw)
        {
            nrank_new = nthreads_hw/2;
        }
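        /* For example, on 16 hardware threads (8 cores with 2-way SMT),
         * nrank_new = 11 would leave some cores half-used, so it is
         * rounded down to 16/2 = 8 here. */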

        /* If the user specified the total thread count, ensure this is
         * divisible by the number of ranks.
         * It is quite likely that we have too many total threads compared
         * to the size of the system, but if the user asked for this many
         * threads we should respect that.
         */
        while (hw_opt->nthreads_tot > 0 &&
               hw_opt->nthreads_tot % nrank_new != 0)
        {
            nrank_new--;
        }

        /* Avoid large prime numbers in the rank count */
        if (nrank_new >= 6)
        {
            /* Use only 6,8,10 with additional factors of 2 */
            int fac;

            fac = 2;
            while (3*fac*2 <= nrank_new)
            {
                fac *= 2;
            }

            nrank_new = (nrank_new/fac)*fac;
        }
        else
        {
            /* Avoid 5, since a small system won't fit 5 domains along
             * a dimension. This might lead to wasting some cores, but this
             * will have a small impact in this regime of very small systems.
             */
            nrank_new = std::min(4, nrank_new);
        }
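        /* For example, nrank_new = 13 gives fac = 4 (since 3*2*2 <= 13 but
         * 3*4*2 > 13), so the count is rounded down to (13/4)*4 = 12. */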

        nrank = nrank_new;

        /* We reduced the number of tMPI ranks, which means we might violate
         * our own efficiency checks if we simply use all hardware threads.
         */
        if (bHasOmpSupport && hw_opt->nthreads_omp <= 0 && hw_opt->nthreads_tot <= 0)
        {
            /* The user set neither the total nor the OpenMP thread count,
             * we should use all hardware threads, unless we will violate
             * our own efficiency limitation on the thread count.
             */
            int  nt_omp_max;

            nt_omp_max = nthreads_omp_efficient_max(nrank, cpuInfo, ngpu >= 1);

            if (nrank*nt_omp_max < hwinfo->nthreads_hw_avail)
            {
                /* Limit the number of OpenMP threads to start */
                hw_opt->nthreads_omp = nt_omp_max;
            }
        }

        fprintf(stderr, "\n");
        fprintf(stderr, "NOTE: Parallelization is limited by the small number of atoms,\n");
        fprintf(stderr, "      only starting %d thread-MPI ranks.\n", nrank);
        fprintf(stderr, "      You can use the -nt and/or -ntmpi option to optimize the number of threads.\n\n");
    }

    return nrank;
}
#endif /* GMX_THREAD_MPI */

void check_resource_division_efficiency(const gmx_hw_info_t *hwinfo,
                                        const gmx_hw_opt_t  *hw_opt,
                                        gmx_bool             bNtOmpOptionSet,
                                        t_commrec           *cr,
                                        const gmx::MDLogger &mdlog)
{
#if GMX_OPENMP && GMX_MPI
    int         nth_omp_min, nth_omp_max, ngpu;
    char        buf[1000];
#if GMX_THREAD_MPI
    const char *mpi_option = " (option -ntmpi)";
#else
    const char *mpi_option = "";
#endif

    /* This function should be called after thread-MPI (when configured) and
     * OpenMP have been initialized. Check that here.
     */
#if GMX_THREAD_MPI
    GMX_RELEASE_ASSERT(nthreads_omp_faster_default >= nthreads_omp_mpi_ok_max, "Inconsistent OpenMP thread count default values");
    GMX_RELEASE_ASSERT(hw_opt->nthreads_tmpi >= 1, "Must have at least one thread-MPI rank");
#endif
    GMX_RELEASE_ASSERT(gmx_omp_nthreads_get(emntDefault) >= 1, "Must have at least one OpenMP thread");

    nth_omp_min = gmx_omp_nthreads_get(emntDefault);
    nth_omp_max = gmx_omp_nthreads_get(emntDefault);
    ngpu        = hw_opt->gpu_opt.n_dev_use;

    /* Thread-MPI seems to have a bug with reduce on 1 node, so use a cond. */
    if (cr->nnodes + cr->npmenodes > 1)
    {
        int count[3], count_max[3];

        count[0] = -nth_omp_min;
        count[1] =  nth_omp_max;
        count[2] =  ngpu;

        MPI_Allreduce(count, count_max, 3, MPI_INT, MPI_MAX, cr->mpi_comm_mysim);

        /* In case of an inhomogeneous run setup we use the maximum counts */
        nth_omp_min = -count_max[0];
        nth_omp_max =  count_max[1];
        ngpu        =  count_max[2];
    }
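    /* Note the sign trick above: reducing with MPI_MAX over -nth_omp_min
     * yields the global minimum, so one Allreduce covers both extremes. */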

    int nthreads_omp_mpi_ok_min;

    if (ngpu == 0)
    {
        nthreads_omp_mpi_ok_min = nthreads_omp_mpi_ok_min_cpu;
    }
    else
    {
        /* With GPUs we set the minimum number of OpenMP threads to 2 to catch
         * cases where the user specifies #ranks == #cores.
         */
        nthreads_omp_mpi_ok_min = nthreads_omp_mpi_ok_min_gpu;
    }

    if (DOMAINDECOMP(cr) && cr->nnodes > 1)
    {
        if (nth_omp_max < nthreads_omp_mpi_ok_min ||
            (!(ngpu > 0 && !gmx_gpu_sharing_supported()) &&
             nth_omp_max > nthreads_omp_mpi_ok_max))
        {
            /* Note that we print target_max here, not ok_max */
            sprintf(buf, "Your choice of number of MPI ranks and amount of resources results in using %d OpenMP threads per rank, which is most likely inefficient. The optimum is usually between %d and %d threads per rank.",
                    nth_omp_max,
                    nthreads_omp_mpi_ok_min,
                    nthreads_omp_mpi_target_max);

            if (bNtOmpOptionSet)
            {
                GMX_LOG(mdlog.warning).asParagraph().appendTextFormatted("NOTE: %s", buf);
            }
            else
            {
                /* This fatal error, and the one below, is nasty, but it's
                 * probably the only way to ensure that all users don't waste
                 * a lot of resources, since many users don't read logs/stderr.
                 */
                gmx_fatal(FARGS, "%s If you want to run with this setup, specify the -ntomp option. But we suggest to change the number of MPI ranks%s.", buf, mpi_option);
            }
        }
    }
    else
    {
        const gmx::CpuInfo &cpuInfo = *hwinfo->cpuInfo;

        /* No domain decomposition (or only one domain) */
        if (!(ngpu > 0 && !gmx_gpu_sharing_supported()) &&
            nth_omp_max > nthreads_omp_faster(cpuInfo, ngpu > 0))
        {
            /* To arrive here, the user/system set #ranks and/or #OMPthreads */
            gmx_bool bEnvSet;
            char     buf2[256];

            bEnvSet = (getenv("OMP_NUM_THREADS") != nullptr);

            if (bNtOmpOptionSet || bEnvSet)
            {
                sprintf(buf2, "You requested %d OpenMP threads", nth_omp_max);
            }
            else
            {
                sprintf(buf2, "Your choice of %d MPI rank%s and the use of %d total threads %sleads to the use of %d OpenMP threads",
                        cr->nnodes + cr->npmenodes,
                        cr->nnodes + cr->npmenodes == 1 ? "" : "s",
                        hw_opt->nthreads_tot > 0 ? hw_opt->nthreads_tot : hwinfo->nthreads_hw_avail,
                        hwinfo->nphysicalnode > 1 ? "on a node " : "",
                        nth_omp_max);
            }
            sprintf(buf, "%s, whereas we expect the optimum to be with more MPI ranks with %d to %d OpenMP threads.",
                    buf2, nthreads_omp_mpi_ok_min, nthreads_omp_mpi_target_max);

            /* We cannot quit with a fatal error when OMP_NUM_THREADS is set
             * with different values per rank or node, since in that case
             * the user cannot set -ntomp to override the error.
             */
            if (bNtOmpOptionSet || (bEnvSet && nth_omp_min != nth_omp_max))
            {
                GMX_LOG(mdlog.warning).asParagraph().appendTextFormatted("NOTE: %s", buf);
            }
            else
            {
                gmx_fatal(FARGS, "%s If you want to run with this many OpenMP threads, specify the -ntomp option. But we suggest to increase the number of MPI ranks%s.", buf, mpi_option);
            }
        }
    }
#else /* GMX_OPENMP && GMX_MPI */
    /* No OpenMP and/or MPI: it doesn't make much sense to check */
    GMX_UNUSED_VALUE(hw_opt);
    GMX_UNUSED_VALUE(bNtOmpOptionSet);
    GMX_UNUSED_VALUE(cr);
    /* Check if we have more than 1 physical core, if detected,
     * or more than 1 hardware thread if physical cores were not detected.
     */
    if (!GMX_OPENMP && !GMX_MPI && hwinfo->hardwareTopology->numberOfCores() > 1)
    {
        GMX_LOG(mdlog.warning).asParagraph().appendText("NOTE: GROMACS was compiled without OpenMP and (thread-)MPI support, can only use a single CPU core");
    }
#endif /* GMX_OPENMP && GMX_MPI */
}

static void print_hw_opt(FILE *fp, const gmx_hw_opt_t *hw_opt)
{
    fprintf(fp, "hw_opt: nt %d ntmpi %d ntomp %d ntomp_pme %d gpu_id '%s'\n",
            hw_opt->nthreads_tot,
            hw_opt->nthreads_tmpi,
            hw_opt->nthreads_omp,
            hw_opt->nthreads_omp_pme,
            hw_opt->gpu_opt.gpu_id != nullptr ? hw_opt->gpu_opt.gpu_id : "");
}
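/* This writes a single debug line, e.g. (values illustrative only):
 *     hw_opt: nt 8 ntmpi 2 ntomp 4 ntomp_pme 4 gpu_id '01'
 */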

//! \brief Return if any GPU ID (e.g. in a user-supplied string) is repeated
static gmx_bool anyGpuIdIsRepeated(const gmx_gpu_opt_t *gpu_opt)
{
    /* Loop over IDs in the string */
    for (int i = 0; i < gpu_opt->n_dev_use - 1; ++i)
    {
        /* Look for the ID in location i in the following part of the
           string */
        for (int j = i + 1; j < gpu_opt->n_dev_use; ++j)
        {
            if (gpu_opt->dev_use[i] == gpu_opt->dev_use[j])
            {
                /* Same ID found in locations i and j */
                return TRUE;
            }
        }
    }

    return FALSE;
}

/* Checks we can do when we don't (yet) know the cut-off scheme */
void check_and_update_hw_opt_1(gmx_hw_opt_t    *hw_opt,
                               const t_commrec *cr,
                               int              nPmeRanks)
{
    /* Currently hw_opt only contains default settings or settings supplied
     * by the user on the command line.
     */
    if (hw_opt->nthreads_omp < 0)
    {
        gmx_fatal(FARGS, "The number of OpenMP threads supplied on the command line is %d, which is negative and not allowed", hw_opt->nthreads_omp);
    }

    /* Check for OpenMP settings stored in environment variables, which can
     * potentially be different on different MPI ranks.
     */
    gmx_omp_nthreads_read_env(&hw_opt->nthreads_omp, SIMMASTER(cr));

    /* Check restrictions on the user-supplied options before modifying them.
     * TODO: Put the user values in a const struct and preserve them.
     */
#if !GMX_THREAD_MPI
    if (hw_opt->nthreads_tot > 0)
    {
        gmx_fatal(FARGS, "Setting the total number of threads is only supported with thread-MPI and GROMACS was compiled without thread-MPI");
    }
    if (hw_opt->nthreads_tmpi > 0)
    {
        gmx_fatal(FARGS, "Setting the number of thread-MPI ranks is only supported with thread-MPI and GROMACS was compiled without thread-MPI");
    }
#endif

    if (bHasOmpSupport)
    {
        /* Check restrictions on PME thread related options set by the user */

        if (hw_opt->nthreads_omp_pme > 0 && hw_opt->nthreads_omp <= 0)
        {
            gmx_fatal(FARGS, "You need to specify -ntomp in addition to -ntomp_pme");
        }

        if (hw_opt->nthreads_omp_pme >= 1 &&
            hw_opt->nthreads_omp_pme != hw_opt->nthreads_omp &&
            nPmeRanks <= 0)
        {
            /* This can result in a fatal error on many MPI ranks,
             * but since the thread count can differ per rank,
             * we can't easily avoid this.
             */
            gmx_fatal(FARGS, "You need to explicitly specify the number of PME ranks (-npme) when using different numbers of OpenMP threads for PP and PME ranks");
        }
    }
    else
    {
        /* GROMACS was configured without OpenMP support */

        if (hw_opt->nthreads_omp > 1 || hw_opt->nthreads_omp_pme > 1)
        {
            gmx_fatal(FARGS, "More than 1 OpenMP thread requested, but GROMACS was compiled without OpenMP support");
        }
        hw_opt->nthreads_omp     = 1;
        hw_opt->nthreads_omp_pme = 1;
    }

    if (hw_opt->nthreads_tot > 0 && hw_opt->nthreads_omp_pme <= 0)
    {
        /* We have the same number of OpenMP threads for PP and PME ranks,
         * thus we can perform several consistency checks.
         */
        if (hw_opt->nthreads_tmpi > 0 &&
            hw_opt->nthreads_omp > 0 &&
            hw_opt->nthreads_tot != hw_opt->nthreads_tmpi*hw_opt->nthreads_omp)
        {
            gmx_fatal(FARGS, "The total number of threads requested (%d) does not match the thread-MPI ranks (%d) times the OpenMP threads (%d) requested",
                      hw_opt->nthreads_tot, hw_opt->nthreads_tmpi, hw_opt->nthreads_omp);
        }

        if (hw_opt->nthreads_tmpi > 0 &&
            hw_opt->nthreads_tot % hw_opt->nthreads_tmpi != 0)
        {
            gmx_fatal(FARGS, "The total number of threads requested (%d) is not divisible by the number of thread-MPI ranks requested (%d)",
                      hw_opt->nthreads_tot, hw_opt->nthreads_tmpi);
        }

        if (hw_opt->nthreads_omp > 0 &&
            hw_opt->nthreads_tot % hw_opt->nthreads_omp != 0)
        {
            gmx_fatal(FARGS, "The total number of threads requested (%d) is not divisible by the number of OpenMP threads requested (%d)",
                      hw_opt->nthreads_tot, hw_opt->nthreads_omp);
        }
    }

    if (hw_opt->nthreads_tot == 1)
    {
        hw_opt->nthreads_tmpi = 1;

        if (hw_opt->nthreads_omp > 1)
        {
            gmx_fatal(FARGS, "You requested %d OpenMP threads with %d total threads",
                      hw_opt->nthreads_omp, hw_opt->nthreads_tot);
        }
        hw_opt->nthreads_omp     = 1;
        hw_opt->nthreads_omp_pme = 1;
    }

    if (hw_opt->gpu_opt.n_dev_use > 0 && !bGPUBinary)
    {
        gmx_fatal(FARGS, "GPU IDs have been selected, but %s was compiled without GPU support!",
                  gmx::getProgramContext().displayName());
    }

    if (!gmx_multiple_gpu_per_node_supported() && 1 < hw_opt->gpu_opt.n_dev_use)
    {
        gmx_fatal(FARGS, "The %s implementation only supports using exactly one PP rank per node", getGpuImplementationString());
    }
    if (!gmx_gpu_sharing_supported() && anyGpuIdIsRepeated(&hw_opt->gpu_opt))
    {
        gmx_fatal(FARGS, "The %s implementation only supports using exactly one PP rank per GPU", getGpuImplementationString());
    }

#if GMX_THREAD_MPI
    if (hw_opt->gpu_opt.n_dev_use > 0 && hw_opt->nthreads_tmpi == 0)
    {
        /* Set the number of MPI threads equal to the number of GPUs */
        hw_opt->nthreads_tmpi = hw_opt->gpu_opt.n_dev_use;

        if (hw_opt->nthreads_tot > 0 &&
            hw_opt->nthreads_tmpi > hw_opt->nthreads_tot)
        {
            /* We have more GPUs than total threads requested.
             * We choose to (later) generate a mismatch error,
             * instead of launching more threads than requested.
             */
            hw_opt->nthreads_tmpi = hw_opt->nthreads_tot;
        }
    }
#endif

    if (debug)
    {
        print_hw_opt(debug, hw_opt);
    }

    /* Asserting this simplifies the hardware resource division later
     * on. */
    GMX_RELEASE_ASSERT(!(hw_opt->nthreads_omp_pme >= 1 && hw_opt->nthreads_omp <= 0),
                       "PME thread count should only be set when the normal thread count is also set");
}

/* Checks we can do when we know the cut-off scheme */
void check_and_update_hw_opt_2(gmx_hw_opt_t *hw_opt,
                               int           cutoff_scheme)
{
    if (cutoff_scheme == ecutsGROUP)
    {
        /* We only have OpenMP support for PME-only nodes */
        if (hw_opt->nthreads_omp > 1)
        {
            gmx_fatal(FARGS, "OpenMP threads have been requested with cut-off scheme %s, but these are only supported with cut-off scheme %s",
                      ecutscheme_names[cutoff_scheme],
                      ecutscheme_names[ecutsVERLET]);
        }
        hw_opt->nthreads_omp = 1;
    }
}
876 void check_and_update_hw_opt_3(gmx_hw_opt_t gmx_unused
*hw_opt
)
879 GMX_RELEASE_ASSERT(hw_opt
->nthreads_tmpi
>= 1, "Must have at least one thread-MPI rank");
881 /* If the user set the total number of threads on the command line
882 * and did not specify the number of OpenMP threads, set the latter here.
884 if (hw_opt
->nthreads_tot
> 0 && hw_opt
->nthreads_omp
<= 0)
886 hw_opt
->nthreads_omp
= hw_opt
->nthreads_tot
/hw_opt
->nthreads_tmpi
;
888 if (!bHasOmpSupport
&& hw_opt
->nthreads_omp
> 1)
890 gmx_fatal(FARGS
, "You (indirectly) asked for OpenMP threads by setting -nt > -ntmpi, but GROMACS was compiled without OpenMP support");
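    /* For example, "-nt 8 -ntmpi 2" with no -ntomp results above in
     * nthreads_omp = 8/2 = 4 OpenMP threads per thread-MPI rank. */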

    GMX_RELEASE_ASSERT(bHasOmpSupport || hw_opt->nthreads_omp == 1, "Without OpenMP support, only one thread per rank can be used");

    /* We are done with updating nthreads_omp, we can set nthreads_omp_pme */
    if (hw_opt->nthreads_omp_pme <= 0 && hw_opt->nthreads_omp > 0)
    {
        hw_opt->nthreads_omp_pme = hw_opt->nthreads_omp;
    }

    if (debug)
    {
        print_hw_opt(debug, hw_opt);
    }
}