 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016 by the GROMACS development team.
 * Copyright (c) 2017,2018,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 * \brief This file contains function definitions necessary for
 * managing automatic load balance of PME calculations (Coulomb and LJ).
 *
 * \author Berk Hess <hess@kth.se>
 * \ingroup module_ewald
 */
47 #include "pme_load_balancing.h"
54 #include "gromacs/domdec/dlb.h"
55 #include "gromacs/domdec/domdec.h"
56 #include "gromacs/domdec/domdec_network.h"
57 #include "gromacs/domdec/domdec_struct.h"
58 #include "gromacs/domdec/partition.h"
59 #include "gromacs/ewald/ewald_utils.h"
60 #include "gromacs/ewald/pme.h"
61 #include "gromacs/fft/calcgrid.h"
62 #include "gromacs/gmxlib/network.h"
63 #include "gromacs/math/functions.h"
64 #include "gromacs/math/vec.h"
65 #include "gromacs/mdlib/dispersioncorrection.h"
66 #include "gromacs/mdlib/forcerec.h"
67 #include "gromacs/mdtypes/commrec.h"
68 #include "gromacs/mdtypes/forcerec.h"
69 #include "gromacs/mdtypes/inputrec.h"
70 #include "gromacs/mdtypes/interaction_const.h"
71 #include "gromacs/mdtypes/md_enums.h"
72 #include "gromacs/mdtypes/state.h"
73 #include "gromacs/nbnxm/gpu_data_mgmt.h"
74 #include "gromacs/nbnxm/nbnxm.h"
75 #include "gromacs/pbcutil/pbc.h"
76 #include "gromacs/timing/wallcycle.h"
77 #include "gromacs/timing/walltime_accounting.h"
78 #include "gromacs/utility/cstringutil.h"
79 #include "gromacs/utility/fatalerror.h"
80 #include "gromacs/utility/gmxassert.h"
81 #include "gromacs/utility/logger.h"
82 #include "gromacs/utility/smalloc.h"
83 #include "gromacs/utility/strconvert.h"
85 #include "pme_internal.h"
/*! \brief Parameters and settings for one PP-PME setup */
struct pme_setup_t
{
    real rcut_coulomb;         /**< Coulomb cut-off */
    real rlistOuter;           /**< cut-off for the outer pair-list */
    real rlistInner;           /**< cut-off for the inner pair-list */
    real spacing;              /**< (largest) PME grid spacing */
    ivec grid;                 /**< the PME grid dimensions */
    real grid_efficiency;      /**< inefficiency factor for non-uniform grids <= 1 */
    real ewaldcoeff_q;         /**< Electrostatic Ewald coefficient */
    real ewaldcoeff_lj;        /**< LJ Ewald coefficient, only for the call to send_switchgrid */
    struct gmx_pme_t* pmedata; /**< the data structure used in the PME code */
    int               count;   /**< number of times this setup has been timed */
    double            cycles;  /**< the fastest time for this setup in cycles */
};
/*! \brief After 50 nstlist periods of not observing imbalance: never tune PME */
const int PMETunePeriod = 50;
/*! \brief Trigger PME load balancing at more than 5% PME overload */
const real loadBalanceTriggerFactor = 1.05;
/*! \brief Scale the grid spacing by at most a factor of 1.7.
 *
 * This still leaves room for about a 4-4.5x decrease in the number of grid points, while limiting
 * the cases where large imbalance leads to extreme cutoff scaling for marginal benefits.
 *
 * This should help to avoid:
 * - a large increase in power consumption for little performance gain
 * - increasing communication volume
 */
const real c_maxSpacingScaling = 1.7;
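/* For illustration (approximate numbers): on a roughly cubic box, scaling the largest grid
 * spacing by c_maxSpacingScaling = 1.7 reduces the total number of PME grid points by up to
 * about 1.7^3 ~= 4.9x; after rounding to the FFT-friendly grid sizes returned by calcFftGrid(),
 * the reduction that is actually reachable is typically in the 4-4.5x range mentioned above. */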
/*! \brief In the initial scan, step by grids that are at least a factor 0.8 coarser */
const real gridpointsScaleFactor = 0.8;
/*! \brief In the initial scan, try to skip grids with uneven x/y/z spacing,
 * checking if the "efficiency" is more than 5% worse than the previous grid.
 */
const real relativeEfficiencyFactor = 1.05;
/*! \brief Only (re)run setups that are at most 12% slower than the fastest setup so far */
const real maxRelativeSlowdownAccepted = 1.12;
/*! \brief If setups get more than 2% faster, do another round to avoid
 * choosing a slower setup due to acceleration or fluctuations.
 */
const real maxFluctuationAccepted = 1.02;
//! \brief Number of nstlist long tuning intervals to skip before starting
//         load-balancing at the beginning of the run.
const int c_numFirstTuningIntervalSkip = 5;
//! \brief Number of nstlist long tuning intervals to skip before starting
//         load-balancing at the beginning of the run with separate PME ranks.
const int c_numFirstTuningIntervalSkipWithSepPme = 3;
//! \brief Number of nstlist long tuning intervals to skip after switching to a new setting.
const int c_numPostSwitchTuningIntervalSkip = 1;
//! \brief Number of seconds to delay the tuning at startup to allow processor clocks to ramp up.
const double c_startupTimeDelay = 5.0;
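/* For illustration: with nstlist = 100, the first 5*100 = 500 steps (3*100 = 300 when separate
 * PME ranks are used) and at least the first c_startupTimeDelay = 5 seconds of the run are not
 * used for timing, and tuning stops by default after PMETunePeriod*nstlist = 5000 steps unless
 * observed PP/PME imbalance or DLB activation triggers another balancing round. */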
/*! \brief Enumeration whose values describe the effect limiting the load balancing */
enum epmelb
{
    epmelblimNO,
    epmelblimBOX,
    epmelblimDD,
    epmelblimPMEGRID,
    epmelblimMAXSCALING,
    epmelblimNR
};

/*! \brief Descriptive strings matching ::epmelb */
static const char* pmelblim_str[epmelblimNR] = { "no", "box size", "domain decomposition",
                                                 "PME grid restriction",
                                                 "maximum allowed grid scaling" };
struct pme_load_balancing_t
{
    gmx_bool bSepPMERanks;  /**< do we have separate PME ranks? */
    gmx_bool bActive;       /**< is PME tuning active? */
    int64_t  step_rel_stop; /**< stop the tuning after this value of step_rel */
    gmx_bool bTriggerOnDLB; /**< trigger balancing only on DD DLB */
    gmx_bool bBalance;      /**< are we in the balancing phase, i.e. trying different setups? */
    int      nstage;        /**< the current maximum number of stages */
    bool startupTimeDelayElapsed; /**< Is the start-up delay (c_startupTimeDelay) still in effect? While true, balancing is postponed. */
    real cut_spacing;        /**< the minimum cutoff / PME grid spacing ratio */
    real rcut_vdw;           /**< Vdw cutoff (does not change) */
    real rcut_coulomb_start; /**< Initial electrostatics cutoff */
    real rbufOuter_coulomb;  /**< the outer pairlist buffer size */
    real rbufOuter_vdw;      /**< the outer pairlist buffer size */
    real rbufInner_coulomb;  /**< the inner pairlist buffer size */
    real rbufInner_vdw;      /**< the inner pairlist buffer size */

    matrix                   box_start;     /**< the initial simulation box */
    std::vector<pme_setup_t> setup;         /**< the PME+cutoff setups */
    int                      cur;           /**< the index (in setup) of the current setup */
    int                      fastest;       /**< index of the fastest setup up till now */
    int                      lower_limit;   /**< don't go below this setup index */
    int                      start;         /**< start of setup index range to consider in stage>0 */
    int                      end;           /**< end of setup index range to consider in stage>0 */
    int                      elimited;      /**< was the balancing limited, uses enum above */
    int                      cutoff_scheme; /**< Verlet or group cut-offs */

    int stage; /**< the current stage */

    int    cycles_n;  /**< step cycle counter cumulative count */
    double cycles_c;  /**< step cycle counter cumulative cycles */
    double startTime; /**< time stamp when the balancing was started on the master rank (relative to the UNIX epoch start) */
};
/* TODO The code in this file should call this getter, rather than
 * read bActive anywhere */
bool pme_loadbal_is_active(const pme_load_balancing_t* pme_lb)
{
    return pme_lb != nullptr && pme_lb->bActive;
}
// TODO Return a unique_ptr to pme_load_balancing_t
void pme_loadbal_init(pme_load_balancing_t**     pme_lb_p,
                      t_commrec*                 cr,
                      const gmx::MDLogger&       mdlog,
                      const t_inputrec&          ir,
                      const matrix               box,
                      const interaction_const_t& ic,
                      const nonbonded_verlet_t&  nbv,
                      gmx_pme_t*                 pmedata,
                      gmx_bool                   bUseGPU)
{
    pme_load_balancing_t* pme_lb;
    real                  spm, sp;
    int                   d;
    // Note that we don't (yet) support PME load balancing with LJ-PME only.
    GMX_RELEASE_ASSERT(EEL_PME(ir.coulombtype),
                       "pme_loadbal_init called without PME electrostatics");
    // To avoid complexity, we require a single cut-off with PME for q+LJ.
    // This is checked by grompp, but it doesn't hurt to check again.
    GMX_RELEASE_ASSERT(!(EEL_PME(ir.coulombtype) && EVDW_PME(ir.vdwtype) && ir.rcoulomb != ir.rvdw),
                       "With Coulomb and LJ PME, rcoulomb should be equal to rvdw");

    pme_lb = new pme_load_balancing_t;

    pme_lb->bSepPMERanks = !thisRankHasDuty(cr, DUTY_PME);
    /* Initially we turn balancing on directly, based on PP/PME imbalance */
    pme_lb->bTriggerOnDLB = FALSE;

    /* Any number of stages >= 2 is supported */
    pme_lb->cutoff_scheme = ir.cutoff_scheme;

    pme_lb->rbufOuter_coulomb = nbv.pairlistOuterRadius() - ic.rcoulomb;
    pme_lb->rbufOuter_vdw     = nbv.pairlistOuterRadius() - ic.rvdw;
    pme_lb->rbufInner_coulomb = nbv.pairlistInnerRadius() - ic.rcoulomb;
    pme_lb->rbufInner_vdw     = nbv.pairlistInnerRadius() - ic.rvdw;
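    /* Note: these buffer sizes are the pair-list buffers of the initial setup. They are kept
     * fixed during the balancing, so when a setup increases rcut_coulomb the new list radii
     * become the new cut-off plus the original buffer (see pme_loadbal_increase_cutoff below). */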
    /* Scale box with Ewald wall factor; note that we can't always use
     * pmedata->boxScaler here, as it's not available with separate PME ranks.
     */
    EwaldBoxZScaler boxScaler(ir);
    boxScaler.scaleBox(box, pme_lb->box_start);
    pme_lb->setup.resize(1);

    pme_lb->rcut_vdw           = ic.rvdw;
    pme_lb->rcut_coulomb_start = ir.rcoulomb;

    pme_lb->setup[0].rcut_coulomb  = ic.rcoulomb;
    pme_lb->setup[0].rlistOuter    = nbv.pairlistOuterRadius();
    pme_lb->setup[0].rlistInner    = nbv.pairlistInnerRadius();
    pme_lb->setup[0].grid[XX]      = ir.nkx;
    pme_lb->setup[0].grid[YY]      = ir.nky;
    pme_lb->setup[0].grid[ZZ]      = ir.nkz;
    pme_lb->setup[0].ewaldcoeff_q  = ic.ewaldcoeff_q;
    pme_lb->setup[0].ewaldcoeff_lj = ic.ewaldcoeff_lj;
    if (!pme_lb->bSepPMERanks)
    {
        GMX_RELEASE_ASSERT(pmedata,
                           "On ranks doing both PP and PME we need a valid pmedata object");
        pme_lb->setup[0].pmedata = pmedata;
    }
    /* Set the initial spacing to the largest grid spacing over x, y and z */
    spm = 0;
    for (d = 0; d < DIM; d++)
    {
        sp = norm(pme_lb->box_start[d]) / pme_lb->setup[0].grid[d];
        if (sp > spm)
        {
            spm = sp;
        }
    }
    pme_lb->setup[0].spacing = spm;
    if (ir.fourier_spacing > 0)
    {
        pme_lb->cut_spacing = ir.rcoulomb / ir.fourier_spacing;
    }
    else
    {
        pme_lb->cut_spacing = ir.rcoulomb / pme_lb->setup[0].spacing;
    }
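    /* cut_spacing is the cut-off to grid-spacing ratio that is kept (at least) constant during
     * the balancing: each coarser grid with largest spacing sp gets rcut_coulomb = cut_spacing*sp
     * (see pme_loadbal_increase_cutoff), so the real-space/reciprocal-space accuracy balance
     * of the initial setup is preserved. */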
    pme_lb->lower_limit = 0;
    pme_lb->elimited    = epmelblimNO;

    pme_lb->cycles_n = 0;
    pme_lb->cycles_c = 0;
    // Only the master rank does the timing
    if (!PAR(cr) || (DOMAINDECOMP(cr) && DDMASTER(cr->dd)))
    {
        pme_lb->startTime = gmx_gettime();
    }

    if (!wallcycle_have_counter())
    {
        GMX_LOG(mdlog.warning)
                .appendText(
                        "NOTE: Cycle counters unsupported or not enabled in kernel. Cannot use "
                        "PME-PP balancing.");
    }
    /* Tune with GPUs and/or separate PME ranks.
     * When running only on a CPU without PME ranks, PME tuning will only help
     * with small numbers of atoms in the cut-off sphere.
     */
    pme_lb->bActive = (wallcycle_have_counter() && (bUseGPU || pme_lb->bSepPMERanks));

    /* With GPUs and no separate PME ranks we can't measure the PP/PME
     * imbalance, so we start balancing right away.
     * Otherwise we only start balancing after we observe imbalance.
     */
    pme_lb->bBalance = (pme_lb->bActive && (bUseGPU && !pme_lb->bSepPMERanks));
    pme_lb->step_rel_stop = PMETunePeriod * ir.nstlist;
    /* Delay DD load balancing when GPUs are used */
    if (pme_lb->bActive && DOMAINDECOMP(cr) && cr->dd->nnodes > 1 && bUseGPU)
    {
        /* Lock DLB=auto to off (does nothing when DLB=yes/no).
         * With GPUs and separate PME nodes, we want to first
         * do PME tuning without DLB, since DLB might limit
         * the cut-off, which never improves performance.
         * We allow for DLB + PME tuning after a first round of tuning.
         */
        dd_dlb_lock(cr->dd);
        if (dd_dlb_is_locked(cr->dd))
        {
            GMX_LOG(mdlog.warning)
                    .appendText("NOTE: DLB will not turn on during the first phase of PME tuning");
        }
    }

    *pme_lb_p = pme_lb;
}
/*! \brief Try to increase the cutoff during load balancing */
static gmx_bool pme_loadbal_increase_cutoff(pme_load_balancing_t* pme_lb, int pme_order, const gmx_domdec_t* dd)
{
    real        fac, sp;
    real        tmpr_coulomb, tmpr_vdw;
    int         d;
    gmx_bool    grid_ok;
    pme_setup_t set;

    /* Try to add a new setup with next larger cut-off to the list */
    set.pmedata = nullptr;

    NumPmeDomains numPmeDomains = getNumPmeDomains(dd);
    fac = 1;
    do
    {
        /* Avoid infinite while loop, which can occur at the minimum grid size.
         * Note that in practice load balancing will stop before this point.
         * The factor 2.1 allows for the extreme case in which only grids
         * of powers of 2 are allowed (the current code supports more grids).
         */
        if (fac > 2.1)
        {
            return FALSE;
        }
        fac *= 1.01;

        clear_ivec(set.grid);
        sp = calcFftGrid(nullptr, pme_lb->box_start, fac * pme_lb->setup[pme_lb->cur].spacing,
                         minimalPmeGridSize(pme_order), &set.grid[XX], &set.grid[YY], &set.grid[ZZ]);

        /* As here we can't easily check if one of the PME ranks
         * uses threading, we do a conservative grid check.
         * This means we can't use pme_order or fewer grid lines
         * per PME rank along x, which is not a strong restriction.
         */
        grid_ok = gmx_pme_check_restrictions(pme_order, set.grid[XX], set.grid[YY], set.grid[ZZ],
                                             numPmeDomains.x, true, false);
    } while (sp <= 1.001 * pme_lb->setup[pme_lb->cur].spacing || !grid_ok);
    set.rcut_coulomb = pme_lb->cut_spacing * sp;
    if (set.rcut_coulomb < pme_lb->rcut_coulomb_start)
    {
        /* This is unlikely, but can happen when e.g. continuing from
         * a checkpoint after equilibration where the box shrank a lot.
         * We want to avoid rcoulomb getting smaller than rvdw
         * and there might be more issues with decreasing rcoulomb.
         */
        set.rcut_coulomb = pme_lb->rcut_coulomb_start;
    }
    if (pme_lb->cutoff_scheme == ecutsVERLET)
    {
        /* Never decrease the Coulomb and VdW list buffers */
        set.rlistOuter = std::max(set.rcut_coulomb + pme_lb->rbufOuter_coulomb,
                                  pme_lb->rcut_vdw + pme_lb->rbufOuter_vdw);
        set.rlistInner = std::max(set.rcut_coulomb + pme_lb->rbufInner_coulomb,
                                  pme_lb->rcut_vdw + pme_lb->rbufInner_vdw);
    }
    else
    {
        /* TODO Remove these lines and pme_lb->cutoff_scheme */
        tmpr_coulomb = set.rcut_coulomb + pme_lb->rbufOuter_coulomb;
        tmpr_vdw     = pme_lb->rcut_vdw + pme_lb->rbufOuter_vdw;
        /* Two (known) bugs with cutoff-scheme=group here:
         * - This modification of rlist results in incorrect DD communication.
         * - We should set fr->bTwinRange = (fr->rlistlong > fr->rlist).
         */
        set.rlistOuter = std::min(tmpr_coulomb, tmpr_vdw);
        set.rlistInner = set.rlistOuter;
    }
    /* The grid efficiency is the size wrt a grid with uniform x/y/z spacing */
    set.grid_efficiency = 1;
    for (d = 0; d < DIM; d++)
    {
        set.grid_efficiency *= (set.grid[d] * sp) / norm(pme_lb->box_start[d]);
    }
    /* The Ewald coefficient is inversely proportional to the cut-off */
    set.ewaldcoeff_q = pme_lb->setup[0].ewaldcoeff_q * pme_lb->setup[0].rcut_coulomb / set.rcut_coulomb;
    /* We set ewaldcoeff_lj in set, even when LJ-PME is not used */
    set.ewaldcoeff_lj = pme_lb->setup[0].ewaldcoeff_lj * pme_lb->setup[0].rcut_coulomb / set.rcut_coulomb;
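    /* For illustration: keeping beta*rcut constant keeps the real-space truncation error, which
     * scales roughly as erfc(beta*rcut), unchanged. E.g. increasing rcut_coulomb from 1.0 nm to
     * 1.2 nm scales ewaldcoeff_q by 1.0/1.2, and the reciprocal-space (PME grid) part then needs
     * correspondingly fewer grid points for the same overall accuracy. */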
    fprintf(debug, "PME loadbal: grid %d %d %d, coulomb cutoff %f\n", set.grid[XX],
            set.grid[YY], set.grid[ZZ], set.rcut_coulomb);

    pme_lb->setup.push_back(set);

    return TRUE;
}
/*! \brief Print the PME grid */
static void print_grid(FILE* fp_err, FILE* fp_log, const char* pre, const char* desc, const pme_setup_t* set, double cycles)
{
    auto buf = gmx::formatString("%-11s%10s pme grid %d %d %d, coulomb cutoff %.3f", pre, desc,
                                 set->grid[XX], set->grid[YY], set->grid[ZZ], set->rcut_coulomb);
    if (cycles >= 0)
    {
        buf += gmx::formatString(": %.1f M-cycles", cycles * 1e-6);
    }
    if (fp_err != nullptr)
    {
        fprintf(fp_err, "\r%s\n", buf.c_str());
    }
    if (fp_log != nullptr)
    {
        fprintf(fp_log, "%s\n", buf.c_str());
    }
}
/*! \brief Return the index of the last setup used in PME load balancing */
static int pme_loadbal_end(pme_load_balancing_t* pme_lb)
{
    /* In the initial stage only the setup count is known; end is not set yet */
    if (pme_lb->end > 0)
    {
        return pme_lb->end;
    }
    else
    {
        return pme_lb->setup.size();
    }
}
/*! \brief Print descriptive string about what limits PME load balancing */
static void print_loadbal_limited(FILE* fp_err, FILE* fp_log, int64_t step, pme_load_balancing_t* pme_lb)
{
    auto buf = gmx::formatString(
            "step %4s: the %s limits the PME load balancing to a coulomb cut-off of %.3f",
            gmx::int64ToString(step).c_str(), pmelblim_str[pme_lb->elimited],
            pme_lb->setup[pme_loadbal_end(pme_lb) - 1].rcut_coulomb);
    if (fp_err != nullptr)
    {
        fprintf(fp_err, "\r%s\n", buf.c_str());
    }
    if (fp_log != nullptr)
    {
        fprintf(fp_log, "%s\n", buf.c_str());
    }
}
/*! \brief Switch load balancing to stage 1
 *
 * In this stage, only reasonably fast setups are run again. */
static void switch_to_stage1(pme_load_balancing_t* pme_lb)
{
    /* Increase start until we find a setup that is not slower than
     * maxRelativeSlowdownAccepted times the fastest setup.
     */
    pme_lb->start = pme_lb->lower_limit;
    while (pme_lb->start + 1 < gmx::ssize(pme_lb->setup)
           && (pme_lb->setup[pme_lb->start].count == 0
               || pme_lb->setup[pme_lb->start].cycles
                          > pme_lb->setup[pme_lb->fastest].cycles * maxRelativeSlowdownAccepted))
    {
        pme_lb->start++;
    }

    /* While increasing start, we might have skipped setups that we did not
     * time during stage 0. We want to extend the range for stage 1 to include
     * any skipped setups that lie between setups that were measured to be
     * acceptably fast and too slow.
     */
    while (pme_lb->start > pme_lb->lower_limit && pme_lb->setup[pme_lb->start - 1].count == 0)
    {
        pme_lb->start--;
    }

    /* Decrease end only with setups that we timed and that are slow. */
    pme_lb->end = pme_lb->setup.size();
    if (pme_lb->setup[pme_lb->end - 1].count > 0
        && pme_lb->setup[pme_lb->end - 1].cycles
                   > pme_lb->setup[pme_lb->fastest].cycles * maxRelativeSlowdownAccepted)
    {
        pme_lb->end--;
    }

    /* Next we want to choose setup pme_lb->end-1, but as we will decrease
     * pme_lb->cur by one right after returning, we set cur to end.
     */
    pme_lb->cur = pme_lb->end;
}
/*! \brief Process the timings and try to adjust the PME grid and Coulomb cut-off
 *
 * The adjustment is done to generate a different non-bonded PP and PME load.
 * With separate PME ranks (PP and PME on different processes) or with
 * a GPU (PP on GPU, PME on CPU), PP and PME run on different resources
 * and changing the load will affect the load balance and performance.
 * The total time for a set of integration steps is monitored and a range
 * of grid/cut-off setups is scanned. After calling pme_load_balance many
 * times and acquiring enough statistics, the best performing setup is chosen.
 * Here we try to take into account fluctuations and changes due to external
 * factors as well as DD load balancing.
 */
static void pme_load_balance(pme_load_balancing_t*          pme_lb,
                             t_commrec*                     cr,
                             FILE*                          fp_err,
                             FILE*                          fp_log,
                             const gmx::MDLogger&           mdlog,
                             const t_inputrec&              ir,
                             const matrix                   box,
                             gmx::ArrayRef<const gmx::RVec> x,
                             double                         cycles,
                             interaction_const_t*           ic,
                             struct nonbonded_verlet_t*     nbv,
                             struct gmx_pme_t**             pmedata,
                             int64_t                        step)
{
    gmx_bool     OK;
    pme_setup_t* set;
    double       cycles_fast;
    char         buf[STRLEN], sbuf[22];
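    /* Rough outline of the balancing (sketch, see the stage logic below):
     *   stage 0:      scan towards coarser PME grids / longer cut-offs, adding a new setup each
     *                 call until a setup is clearly too slow or a limit (box, DD, grid) is hit;
     *   stage 1..n-2: rerun only the setups that were not much slower than the fastest one;
     *   last stage:   rerun the remaining candidates once more, then select the fastest setup.
     */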
576 gmx_sumd(1, &cycles
, cr
);
577 cycles
/= cr
->nnodes
;
580 set
= &pme_lb
->setup
[pme_lb
->cur
];
583 /* Skip the first c_numPostSwitchTuningIntervalSkip cycles because the first step
584 * after a switch is much slower due to allocation and/or caching effects.
586 if (set
->count
% (c_numPostSwitchTuningIntervalSkip
+ 1) != 0)
    sprintf(buf, "step %4s: ", gmx_step_str(step, sbuf));
    print_grid(fp_err, fp_log, buf, "timed with", set, cycles);

    GMX_RELEASE_ASSERT(set->count > c_numPostSwitchTuningIntervalSkip, "We should skip cycles");
    if (set->count == (c_numPostSwitchTuningIntervalSkip + 1))
    {
        set->cycles = cycles;
    }
    if (cycles * maxFluctuationAccepted < set->cycles && pme_lb->stage == pme_lb->nstage - 1)
    {
        /* The performance went up a lot (due to e.g. DD load balancing).
         * Add a stage, keep the minima, but rescan all setups.
         */
        pme_lb->nstage++;

        fprintf(debug,
                "The performance for grid %d %d %d went from %.3f to %.1f M-cycles, this "
                "is more than %f\n"
                "Increased the number of stages to %d"
                " and ignoring the previous performance\n",
                set->grid[XX], set->grid[YY], set->grid[ZZ], set->cycles * 1e-6,
                cycles * 1e-6, maxFluctuationAccepted, pme_lb->nstage);
    }
    set->cycles = std::min(set->cycles, cycles);
    if (set->cycles < pme_lb->setup[pme_lb->fastest].cycles)
    {
        pme_lb->fastest = pme_lb->cur;

        if (DOMAINDECOMP(cr))
        {
            /* We found a new fastest setting, ensure that with subsequent
             * shorter cut-off's the dynamic load balancing does not make
             * the use of the current cut-off impossible. This solution is
             * a trade-off, as the PME load balancing and DD domain size
             * load balancing can interact in complex ways.
             * With the Verlet kernels, DD load imbalance will usually be
             * mainly due to bonded interaction imbalance, which will often
             * quickly push the domain boundaries beyond the limit for the
             * optimal, PME load balanced, cut-off. But it could be that
             * better overall performance can be obtained with a slightly
             * shorter cut-off and better DD load balancing.
             */
            set_dd_dlb_max_cutoff(cr, pme_lb->setup[pme_lb->fastest].rlistOuter);
        }
    }
    cycles_fast = pme_lb->setup[pme_lb->fastest].cycles;
    /* Check in stage 0 if we should stop scanning grids.
     * Stop when the time is more than a factor maxRelativeSlowdownAccepted longer than for the fastest.
     */
    if (pme_lb->stage == 0 && pme_lb->cur > 0
        && cycles > pme_lb->setup[pme_lb->fastest].cycles * maxRelativeSlowdownAccepted)
    {
        pme_lb->setup.resize(pme_lb->cur + 1);
        /* Done with scanning, go to stage 1 */
        switch_to_stage1(pme_lb);
    }
    if (pme_lb->stage == 0)
    {
        int gridsize_start = set->grid[XX] * set->grid[YY] * set->grid[ZZ];

        do
        {
            if (pme_lb->cur + 1 < gmx::ssize(pme_lb->setup))
            {
                /* We had already generated the next setup */
                OK = TRUE;
            }
            else
            {
                /* Find the next setup */
                OK = pme_loadbal_increase_cutoff(pme_lb, ir.pme_order, cr->dd);
                if (!OK)
                {
                    pme_lb->elimited = epmelblimPMEGRID;
                }
            }

            if (OK
                && pme_lb->setup[pme_lb->cur + 1].spacing > c_maxSpacingScaling * pme_lb->setup[0].spacing)
            {
                OK               = FALSE;
                pme_lb->elimited = epmelblimMAXSCALING;
            }

            if (OK && ir.pbcType != PbcType::No)
            {
                OK = (gmx::square(pme_lb->setup[pme_lb->cur + 1].rlistOuter)
                      <= max_cutoff2(ir.pbcType, box));
                if (!OK)
                {
                    pme_lb->elimited = epmelblimBOX;
                }
            }

            if (OK)
            {
                pme_lb->cur++;

                if (DOMAINDECOMP(cr))
                {
                    OK = change_dd_cutoff(cr, box, x, pme_lb->setup[pme_lb->cur].rlistOuter);
                    if (!OK)
                    {
                        /* Failed: do not use this setup */
                        pme_lb->cur--;
                        pme_lb->elimited = epmelblimDD;
                    }
                }
            }
            if (!OK)
            {
                /* We hit the upper limit for the cut-off,
                 * the setup should not go further than cur.
                 */
                pme_lb->setup.resize(pme_lb->cur + 1);
                print_loadbal_limited(fp_err, fp_log, step, pme_lb);
                /* Switch to the next stage */
                switch_to_stage1(pme_lb);
            }
        } while (OK
                 && !(pme_lb->setup[pme_lb->cur].grid[XX] * pme_lb->setup[pme_lb->cur].grid[YY]
                                      * pme_lb->setup[pme_lb->cur].grid[ZZ]
                              < gridsize_start * gridpointsScaleFactor
                      && pme_lb->setup[pme_lb->cur].grid_efficiency
                                 < pme_lb->setup[pme_lb->cur - 1].grid_efficiency * relativeEfficiencyFactor));
    }
    if (pme_lb->stage > 0 && pme_lb->end == 1)
    {
        pme_lb->cur   = pme_lb->lower_limit;
        pme_lb->stage = pme_lb->nstage;
    }
    else if (pme_lb->stage > 0 && pme_lb->end > 1)
    {
        /* If stage = nstage-1:
         *   scan over all setups, rerunning only those setups
         *   which are not much slower than the fastest.
         * Note that we loop backward to minimize the risk of the cut-off
         * getting limited by DD DLB, since the DLB cut-off limit is set
         * to the fastest PME setup.
         */
        do
        {
            if (pme_lb->cur > pme_lb->start)
            {
                pme_lb->cur--;
            }
            else
            {
                pme_lb->stage++;
                pme_lb->cur = pme_lb->end - 1;
            }
        } while (pme_lb->stage == pme_lb->nstage - 1 && pme_lb->setup[pme_lb->cur].count > 0
                 && pme_lb->setup[pme_lb->cur].cycles > cycles_fast * maxRelativeSlowdownAccepted);
    }

    if (pme_lb->stage == pme_lb->nstage)
    {
        /* We are done optimizing, use the fastest setup we found */
        pme_lb->cur = pme_lb->fastest;
    }
    if (DOMAINDECOMP(cr) && pme_lb->stage > 0)
    {
        OK = change_dd_cutoff(cr, box, x, pme_lb->setup[pme_lb->cur].rlistOuter);
        if (!OK)
        {
            /* For some reason the chosen cut-off is incompatible with DD.
             * We should continue scanning a more limited range of cut-off's.
             */
            if (pme_lb->cur > 1 && pme_lb->stage == pme_lb->nstage)
            {
                /* stage=nstage says we're finished, but we should continue
                 * balancing, so we set back stage which was just incremented.
                 */
                pme_lb->stage--;
            }
            if (pme_lb->cur <= pme_lb->fastest)
            {
                /* This should not happen, as we set limits on the DLB bounds.
                 * But we implement a complete failsafe solution anyhow.
                 */
                GMX_LOG(mdlog.warning)
                        .appendTextFormatted(
                                "The fastest PP/PME load balancing setting (cutoff %.3f nm) is no "
                                "longer available due to DD DLB or box size limitations",
                                pme_lb->setup[pme_lb->fastest].rcut_coulomb);
                pme_lb->fastest = pme_lb->lower_limit;
                pme_lb->start   = pme_lb->lower_limit;
            }
            /* Limit the range to below the current cut-off, scan from start */
            pme_lb->end      = pme_lb->cur;
            pme_lb->cur      = pme_lb->start;
            pme_lb->elimited = epmelblimDD;
            print_loadbal_limited(fp_err, fp_log, step, pme_lb);
        }
    }
    /* Change the Coulomb cut-off and the PME grid */
    set = &pme_lb->setup[pme_lb->cur];

    ic->rcoulomb = set->rcut_coulomb;
    nbv->changePairlistRadii(set->rlistOuter, set->rlistInner);
    ic->ewaldcoeff_q = set->ewaldcoeff_q;
    /* TODO: centralize the code that sets the potential shifts */
    if (ic->coulomb_modifier == eintmodPOTSHIFT)
    {
        GMX_RELEASE_ASSERT(ic->rcoulomb != 0, "Cutoff radius cannot be zero");
        ic->sh_ewald = std::erfc(ic->ewaldcoeff_q * ic->rcoulomb) / ic->rcoulomb;
    }
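    /* For illustration: with the potential-shift modifier the real-space Ewald potential used is
     * q_i*q_j*(erfc(beta*r)/r - sh_ewald), so setting sh_ewald = erfc(beta*rc)/rc makes the
     * potential go exactly to zero at the new cut-off rc = rcoulomb chosen by the balancing. */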
    if (EVDW_PME(ic->vdwtype))
    {
        /* We have PME for both Coulomb and VdW, set rvdw equal to rcoulomb */
        ic->rvdw          = set->rcut_coulomb;
        ic->ewaldcoeff_lj = set->ewaldcoeff_lj;
        if (ic->vdw_modifier == eintmodPOTSHIFT)
        {
            ic->dispersion_shift.cpot = -1.0 / gmx::power6(static_cast<double>(ic->rvdw));
            ic->repulsion_shift.cpot  = -1.0 / gmx::power12(static_cast<double>(ic->rvdw));
            real crc2                 = gmx::square(ic->ewaldcoeff_lj * ic->rvdw);
            ic->sh_lj_ewald =
                    (std::exp(-crc2) * (1 + crc2 + 0.5 * crc2 * crc2) - 1) / gmx::power6(ic->rvdw);
        }
    }
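    /* Note (sketch of the reasoning): crc2 = (beta_lj*rvdw)^2, and together with the plain
     * dispersion/repulsion shifts above this expression provides the constant that makes the
     * LJ-PME real-space potential vanish at the (possibly new) cut-off rvdw when the
     * potential-shift modifier is used. */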
    /* We always re-initialize the tables whether they are used or not */
    init_interaction_const_tables(nullptr, ic, ir.tabext);

    Nbnxm::gpu_pme_loadbal_update_param(nbv, ic);
    if (!pme_lb->bSepPMERanks)
    {
        /* CPU PME keeps a list of allocated pmedata's, that's why pme_lb->setup[pme_lb->cur].pmedata is not always nullptr.
         * GPU PME, however, currently needs gmx_pme_reinit to always be called on load balancing
         * (pme_gpu_reinit might be not sufficiently decoupled from gmx_pme_init).
         * This can lead to a lot of reallocations for PME GPU.
         * Would be nicer if the allocated grid list was hidden within a single pmedata structure.
         */
        if ((pme_lb->setup[pme_lb->cur].pmedata == nullptr)
            || pme_gpu_task_enabled(pme_lb->setup[pme_lb->cur].pmedata))
        {
            /* Generate a new PME data structure,
             * copying part of the old pointers.
             */
            gmx_pme_reinit(&set->pmedata, cr, pme_lb->setup[0].pmedata, &ir, set->grid,
                           set->ewaldcoeff_q, set->ewaldcoeff_lj);
        }
        *pmedata = set->pmedata;
    }
    else
    {
        /* Tell our PME-only rank to switch grid */
        gmx_pme_send_switchgrid(cr, set->grid, set->ewaldcoeff_q, set->ewaldcoeff_lj);
    }

    print_grid(nullptr, debug, "", "switched to", set, -1);
    if (pme_lb->stage == pme_lb->nstage)
    {
        print_grid(fp_err, fp_log, "", "optimal", set, -1);
    }
}
/*! \brief Prepare for another round of PME load balancing
 *
 * \param[in,out] pme_lb       Pointer to PME load balancing struct
 * \param[in]     bDlbUnlocked TRUE if DLB was locked and is now unlocked
 *
 * If the conditions (e.g. DLB off/on, CPU/GPU throttling etc.) changed,
 * the PP/PME balance might change and re-balancing can improve performance.
 * This function adds 2 stages and adjusts the considered setup range.
 */
static void continue_pme_loadbal(pme_load_balancing_t* pme_lb, gmx_bool bDlbUnlocked)
{
    /* Add 2 tuning stages, keep the detected end of the setup range */
    pme_lb->nstage += 2;
    if (bDlbUnlocked && pme_lb->bSepPMERanks)
    {
        /* With separate PME ranks, DLB should always lower the PP load and
         * can only increase the PME load (more communication and imbalance),
         * so we only need to scan longer cut-off's.
         */
        pme_lb->lower_limit = pme_lb->cur;
    }
    pme_lb->start = pme_lb->lower_limit;
}
void pme_loadbal_do(pme_load_balancing_t*          pme_lb,
                    t_commrec*                     cr,
                    FILE*                          fp_err,
                    FILE*                          fp_log,
                    const gmx::MDLogger&           mdlog,
                    const t_inputrec&              ir,
                    t_forcerec*                    fr,
                    const matrix                   box,
                    gmx::ArrayRef<const gmx::RVec> x,
                    gmx_wallcycle_t                wcycle,
                    int64_t                        step,
                    int64_t                        step_rel,
                    gmx_bool*                      bPrinting,
                    bool                           useGpuPmePpCommunication)
{
    assert(pme_lb != nullptr);

    if (!pme_lb->bActive)
    {
        return;
    }
    int    n_prev      = pme_lb->cycles_n;
    double cycles_prev = pme_lb->cycles_c;
    wallcycle_get(wcycle, ewcSTEP, &pme_lb->cycles_n, &pme_lb->cycles_c);
    /* Before the first step we haven't done any steps yet.
     * Also handle cases where ir.init_step % ir.nstlist != 0.
     * We also want to skip a number of steps and seconds while
     * the CPU and GPU (when used) performance stabilizes.
     */
    if (!PAR(cr) || (DOMAINDECOMP(cr) && DDMASTER(cr->dd)))
    {
        pme_lb->startupTimeDelayElapsed = (gmx_gettime() - pme_lb->startTime < c_startupTimeDelay);
    }
    if (DOMAINDECOMP(cr))
    {
        dd_bcast(cr->dd, sizeof(bool), &pme_lb->startupTimeDelayElapsed);
    }

    if (pme_lb->cycles_n == 0 || step_rel < c_numFirstTuningIntervalSkip * ir.nstlist
        || pme_lb->startupTimeDelayElapsed)
    {
        return;
    }
    /* Sanity check, we expect nstlist cycle counts */
    if (pme_lb->cycles_n - n_prev != ir.nstlist)
    {
        /* We could return here, but it's safer to issue an error and quit */
        gmx_incons("pme_loadbal_do called at an interval != nstlist");
    }
    /* PME grid + cut-off optimization with GPUs or PME ranks */
    if (!pme_lb->bBalance && pme_lb->bSepPMERanks)
    {
        if (pme_lb->bTriggerOnDLB)
        {
            pme_lb->bBalance = dd_dlb_is_on(cr->dd);
        }
        /* We should ignore the first timing to avoid timing allocation
         * overhead. And since the PME load balancing is called just
         * before DD repartitioning, the ratio returned by dd_pme_f_ratio
         * is not over the last nstlist steps, but the nstlist steps before
         * that. So the first useful ratio is available at step_rel=3*nstlist.
         */
        else if (step_rel >= c_numFirstTuningIntervalSkipWithSepPme * ir.nstlist)
        {
            GMX_ASSERT(DOMAINDECOMP(cr), "Domain decomposition should be active here");
            if (DDMASTER(cr->dd))
            {
                /* If the PME rank load is too high, start tuning. If
                   PME-PP direct GPU communication is active,
                   unconditionally start tuning since the ratio will be
                   unreliable due to CPU-GPU asynchronicity in the codepath. */
                pme_lb->bBalance = useGpuPmePpCommunication
                                           ? true
                                           : (dd_pme_f_ratio(cr->dd) >= loadBalanceTriggerFactor);
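                /* i.e. with CPU-measured timings, balancing starts once the PME/PP force-time
                 * ratio reported by dd_pme_f_ratio() exceeds loadBalanceTriggerFactor = 1.05,
                 * meaning the PME ranks are more than about 5% overloaded relative to the PP ranks. */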
            }
            dd_bcast(cr->dd, sizeof(gmx_bool), &pme_lb->bBalance);
        }
    }
    pme_lb->bActive = (pme_lb->bBalance || step_rel <= pme_lb->step_rel_stop);
    /* The location in the code of this balancing termination is strange.
     * You would expect to have it after the call to pme_load_balance()
     * below, since there pme_lb->stage is updated.
     * But when terminating directly after deciding on and selecting the
     * optimal setup, DLB will turn on right away if it was locked before.
     * This might be due to PME reinitialization. So we check stage here
     * to allow for another nstlist steps with DLB locked to stabilize
     * the performance.
     */
    if (pme_lb->bBalance && pme_lb->stage == pme_lb->nstage)
    {
        pme_lb->bBalance = FALSE;

        if (DOMAINDECOMP(cr) && dd_dlb_is_locked(cr->dd))
        {
            /* Unlock the DLB=auto, DLB is allowed to activate */
            dd_dlb_unlock(cr->dd);
            GMX_LOG(mdlog.warning)
                    .appendText("NOTE: DLB can now turn on, when beneficial");

            /* We don't deactivate the tuning yet, since we will balance again
             * after DLB gets turned on, if it does within PMETunePeriod.
             */
            continue_pme_loadbal(pme_lb, TRUE);
            pme_lb->bTriggerOnDLB = TRUE;
            pme_lb->step_rel_stop = step_rel + PMETunePeriod * ir.nstlist;
        }
        else
        {
            /* We're completely done with PME tuning */
            pme_lb->bActive = FALSE;
        }

        if (DOMAINDECOMP(cr))
        {
            /* Set the cut-off limit to the final selected cut-off,
             * so we don't have artificial DLB limits.
             * This also ensures that we won't disable the currently
             * optimal setting during a second round of PME balancing.
             */
            set_dd_dlb_max_cutoff(cr, fr->nbv->pairlistOuterRadius());
        }
    }
    if (pme_lb->bBalance)
    {
        /* We might not have collected nstlist steps in cycles yet,
         * since init_step might not be a multiple of nstlist,
         * but the first data collected is skipped anyhow.
         */
        pme_load_balance(pme_lb, cr, fp_err, fp_log, mdlog, ir, box, x,
                         pme_lb->cycles_c - cycles_prev, fr->ic, fr->nbv.get(), &fr->pmedata, step);

        /* Update the deprecated rlist in forcerec to stay in sync with fr->nbv */
        fr->rlist = fr->nbv->pairlistOuterRadius();

        if (ir.eDispCorr != edispcNO)
        {
            fr->dispersionCorrection->setParameters(*fr->ic);
        }
    }
    if (!pme_lb->bBalance && (!pme_lb->bSepPMERanks || step_rel > pme_lb->step_rel_stop))
    {
        /* We have just deactivated the balancing and we're not measuring PP/PME
         * imbalance during the first steps of the run: deactivate the tuning.
         */
        pme_lb->bActive = FALSE;
    }
    if (!(pme_lb->bActive) && DOMAINDECOMP(cr) && dd_dlb_is_locked(cr->dd))
    {
        /* Make sure DLB is allowed when we deactivate PME tuning */
        dd_dlb_unlock(cr->dd);
        GMX_LOG(mdlog.warning)
                .appendText("NOTE: DLB can now turn on, when beneficial");
    }

    *bPrinting = pme_lb->bBalance;
}
/*! \brief Return the product of the number of PME grid points in each dimension */
static int pme_grid_points(const pme_setup_t* setup)
{
    return setup->grid[XX] * setup->grid[YY] * setup->grid[ZZ];
}
/*! \brief Print one load-balancing setting */
static void print_pme_loadbal_setting(FILE* fplog, const char* name, const pme_setup_t* setup)
{
    fprintf(fplog, "   %-7s %6.3f nm %6.3f nm     %3d %3d %3d   %5.3f nm  %5.3f nm\n", name,
            setup->rcut_coulomb, setup->rlistInner, setup->grid[XX], setup->grid[YY],
            setup->grid[ZZ], setup->spacing, 1 / setup->ewaldcoeff_q);
}
/*! \brief Print all load-balancing settings */
static void print_pme_loadbal_settings(pme_load_balancing_t* pme_lb,
                                       FILE*                 fplog,
                                       const gmx::MDLogger&  mdlog,
                                       gmx_bool              bNonBondedOnGPU)
{
    double pp_ratio, grid_ratio;
    real   pp_ratio_temporary;

    pp_ratio_temporary = pme_lb->setup[pme_lb->cur].rlistInner / pme_lb->setup[0].rlistInner;
    pp_ratio           = gmx::power3(pp_ratio_temporary);
    grid_ratio         = pme_grid_points(&pme_lb->setup[pme_lb->cur])
                 / static_cast<double>(pme_grid_points(&pme_lb->setup[0]));
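    /* Sketch of what these ratios mean: the PP (pair-interaction) cost grows roughly with the
     * volume of the pair-list sphere, i.e. with rlist^3, hence pp_ratio is the cube of the
     * final/initial inner list radius ratio; the PME cost scales roughly with the number of
     * grid points, hence grid_ratio = gridpoints_final / gridpoints_initial. */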
    fprintf(fplog, "\n");
    fprintf(fplog, "       P P   -   P M E   L O A D   B A L A N C I N G\n");
    fprintf(fplog, "\n");
    /* Here we only warn when the optimal setting is the last one */
    if (pme_lb->elimited != epmelblimNO && pme_lb->cur == pme_loadbal_end(pme_lb) - 1)
    {
        fprintf(fplog, " NOTE: The PP/PME load balancing was limited by the %s,\n",
                pmelblim_str[pme_lb->elimited]);
        fprintf(fplog, "       you might not have reached a good load balance.\n");
        if (pme_lb->elimited == epmelblimDD)
        {
            fprintf(fplog, "       Try different mdrun -dd settings or lower the -dds value.\n");
        }
        fprintf(fplog, "\n");
    }
    fprintf(fplog, " PP/PME load balancing changed the cut-off and PME settings:\n");
    fprintf(fplog, "           particle-particle                    PME\n");
    fprintf(fplog, "            rcoulomb  rlist            grid      spacing   1/beta\n");
    print_pme_loadbal_setting(fplog, "initial", &pme_lb->setup[0]);
    print_pme_loadbal_setting(fplog, "final", &pme_lb->setup[pme_lb->cur]);
    fprintf(fplog, " cost-ratio           %4.2f             %4.2f\n", pp_ratio, grid_ratio);
    fprintf(fplog, " (note that these numbers concern only part of the total PP and PME load)\n");
    if (pp_ratio > 1.5 && !bNonBondedOnGPU)
    {
        GMX_LOG(mdlog.warning)
                .appendText(
                        "NOTE: PME load balancing increased the non-bonded workload by more than "
                        "50%.\n"
                        "      For better performance, use (more) PME ranks (mdrun -npme),\n"
                        "      or if you are beyond the scaling limit, use fewer total ranks (or "
                        "nodes).");
    }

    fprintf(fplog, "\n");
}
void pme_loadbal_done(pme_load_balancing_t* pme_lb, FILE* fplog, const gmx::MDLogger& mdlog, gmx_bool bNonBondedOnGPU)
{
    if (fplog != nullptr && (pme_lb->cur > 0 || pme_lb->elimited != epmelblimNO))
    {
        print_pme_loadbal_settings(pme_lb, fplog, mdlog, bNonBondedOnGPU);
    }

    delete pme_lb;
}