/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief Declares implementation functions and types for the domain
 * decomposition module.
 *
 * \author Berk Hess <hess@kth.se>
 *
 * \ingroup module_domdec
 */
#ifndef GMX_DOMDEC_DOMDEC_INTERNAL_H
#define GMX_DOMDEC_DOMDEC_INTERNAL_H

#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/mdlib/updategroupscog.h"
#include "gromacs/timing/cyclecounter.h"
#include "gromacs/topology/block.h"

#define DD_NLOAD_MAX 9
//! Indices to communicate in a dimension
struct gmx_domdec_ind_t
{
    /*! \brief The numbers of charge groups to send and receive for each
     * cell that requires communication, the last entry contains the total
     * number of atoms that need to be communicated.
     */
    int              nsend[DD_MAXIZONE+2] = {};
    int              nrecv[DD_MAXIZONE+2] = {};
    //! The charge groups to send
    std::vector<int> index;
    /* The atom range for non-in-place communication */
    int              cell2at0[DD_MAXIZONE] = {};
    int              cell2at1[DD_MAXIZONE] = {};
};
//! Things relating to index communication
struct gmx_domdec_comm_dim_t
{
    /* Returns the number of grid pulses (the number of domains in the halo along this dimension) */
    int numPulses() const
    {
        return ind.size();
    }

    /**< For dlb, for use with edlbAUTO */
    int                           np_dlb = 0;
    /**< The indices to communicate, size np */
    std::vector<gmx_domdec_ind_t> ind;
    /**< Can we receive data in place? */
    bool                          receiveInPlace = false;
};
/*! \brief Load balancing data along a dim used on the master rank of that dim */
struct RowMaster
{
    struct Bounds
    {
        /**< State var.: max lower bound., incl. neighbors */
        real cellFracLowerMax = 0;
        /**< State var.: min upper bound., incl. neighbors */
        real cellFracUpperMin = 0;
        /**< Temp. var.: lower limit for cell boundary */
        real boundMin = 0;
        /**< Temp. var.: upper limit for cell boundary */
        real boundMax = 0;
    };

    /**< Temp. var.: is this cell size at the limit */
    std::vector<bool>   isCellMin;
    /**< State var.: cell boundaries, box relative */
    std::vector<real>   cellFrac;
    /**< Temp. var.: old cell size */
    std::vector<real>   oldCellFrac;
    /**< Cell bounds */
    std::vector<Bounds> bounds;
    /**< State var.: is DLB limited in this row */
    bool                dlbIsLimited = false;
    /**< Temp. var. */
    std::vector<real>   buf_ncd;
};
/*! \brief Struct for managing cell sizes with DLB along a dimension */
struct DDCellsizesWithDlb
{
    /**< Cell row root struct, only available on the first rank in a row */
    std::unique_ptr<RowMaster> rowMaster;
    /**< The cell sizes, in fractions, along a row, not available on the first rank in a row */
    std::vector<real>          fracRow;
    /**< The lower corner, in fractions, in triclinic space */
    real                       fracLower = 0;
    /**< The upper corner, in fractions, in triclinic space */
    real                       fracUpper = 1;
    /**< The maximum lower corner among all our neighbors */
    real                       fracLowerMax = 0;
    /**< The minimum upper corner among all our neighbors */
    real                       fracUpperMin = 0;
};
/*! \brief Struct for compute load communication
 *
 * Here floats are accurate enough, since these variables
 * only influence the load balancing, not the actual MD results.
 */
struct domdec_load_t
{
    /**< The number of load recordings */
    int    nload = 0;
    /**< Scan of the sum of load over dimensions */
    float *load = nullptr;
    /**< The sum of the load over the ranks up to our current dimension */
    float  sum = 0;
    /**< The maximum over the ranks contributing to \p sum */
    float  max = 0;
    /**< Like \p sum, but takes the maximum when the load balancing is limited */
    float  sum_m = 0;
    /**< Minimum cell volume, relative to the box */
    float  cvol_min = 0;
    /**< The PP time during which PME can overlap */
    float  mdf = 0;
    /**< The PME-only rank load */
    float  pme = 0;
    /**< Bit flags that tell if DLB was limited, per dimension */
    int    flags = 0;
};
/*! \brief Data needed to sort an atom to the desired location in the local state */
struct gmx_cgsort_t
{
    /**< Neighborsearch grid cell index */
    int nsc = 0;
    /**< Global atom/charge group index */
    int ind_gl = 0;
    /**< Local atom/charge group index */
    int ind = 0;
};

/*! \brief Temporary buffers for sorting atoms */
struct gmx_domdec_sort_t
{
    /**< Sorted array of indices */
    std::vector<gmx_cgsort_t> sorted;
    /**< Array of stationary atom/charge group indices */
    std::vector<gmx_cgsort_t> stationary;
    /**< Array of moved atom/charge group indices */
    std::vector<gmx_cgsort_t> moved;
    /**< Integer buffer for sorting */
    std::vector<int>          intBuffer;
};
/*! \brief Manages atom ranges and order for the local state atom vectors */
class DDAtomRanges
{
    public:
        /*! \brief The local state atom order
         *
         * This enum determines the order of the atoms in the local state.
         * Type::Home and Type::Zones should be first and second,
         * the others can be ordered as wanted.
         */
        enum class Type : int
        {
            Home,        /**< The home atoms */
            Zones,       /**< All zones in the eighth shell */
            Vsites,      /**< Atoms communicated for virtual sites */
            Constraints, /**< Atoms communicated for constraints */
            Number       /**< The number of entries, not an atom range type */
        };

        /*! \brief Returns the start atom index for range \p rangeType */
        int start(Type rangeType) const
        {
            if (rangeType == Type::Home)
            {
                return 0;
            }
            else
            {
                return end_[static_cast<int>(rangeType) - 1];
            }
        }

        /*! \brief Returns the end atom index for range \p rangeType */
        int end(Type rangeType) const
        {
            return end_[static_cast<int>(rangeType)];
        }

        /*! \brief Returns the number of home atoms */
        int numHomeAtoms() const
        {
            return end_[static_cast<int>(Type::Home)];
        }

        /*! \brief Returns the total number of atoms */
        int numAtomsTotal() const
        {
            return end_[static_cast<int>(Type::Number) - 1];
        }

        /*! \brief Sets the end index of range \p rangeType to \p end
         *
         * This should be called either with Type::Home or with a type
         * that is larger than that passed in the previous call to setEnd.
         * A release assertion for these conditions is present.
         */
        void setEnd(Type rangeType,
                    int  end)
        {
            GMX_RELEASE_ASSERT(rangeType == Type::Home || rangeType > lastTypeSet_, "Can only set either home or a larger type than the last one");

            for (int i = static_cast<int>(rangeType); i < static_cast<int>(Type::Number); i++)
            {
                end_[i] = end;
            }

            lastTypeSet_ = rangeType;
        }

    private:
        /*! \brief The list of end atom indices */
        std::array<int, static_cast<int>(Type::Number)> end_;
        Type                                             lastTypeSet_ = Type::Number;
};
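
/*
 * Illustrative usage sketch for DDAtomRanges; documentation only, not part of
 * the interface above. The count variables (numHomeAtoms, numAtomsWithZones,
 * numAtomsWithVsites, numAtomsWithConstraints) are hypothetical values a
 * caller would have computed during partitioning.
 *
 * \code
 *   DDAtomRanges ranges;
 *   ranges.setEnd(DDAtomRanges::Type::Home,        numHomeAtoms);
 *   ranges.setEnd(DDAtomRanges::Type::Zones,       numAtomsWithZones);
 *   ranges.setEnd(DDAtomRanges::Type::Vsites,      numAtomsWithVsites);
 *   ranges.setEnd(DDAtomRanges::Type::Constraints, numAtomsWithConstraints);
 *
 *   // Atoms communicated only for constraints occupy the half-open range
 *   // [start, end) in the local state:
 *   int start = ranges.start(DDAtomRanges::Type::Constraints);
 *   int end   = ranges.end(DDAtomRanges::Type::Constraints);
 * \endcode
 */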
/*! \brief Enum of dynamic load balancing states
 *
 * Allowed DLB states and transitions
 * - initialization at startup:
 *                             -> offUser ("-dlb no")
 *                             -> onUser  ("-dlb yes")
 *                             -> offCanTurnOn ("-dlb auto")
 *
 * - in automatic mode (i.e. initial state offCanTurnOn):
 *   offCanTurnOn         -> onCanTurnOff
 *   offCanTurnOn         -> offForever
 *   offCanTurnOn         -> offTemporarilyLocked
 *   offTemporarilyLocked -> offCanTurnOn
 *   onCanTurnOff         -> offCanTurnOn
 */
enum class DlbState
{
    offUser,              /**< DLB is permanently off per user request */
    offForever,           /**< DLB is off due to a runtime condition (not supported or causes performance loss) and will never be turned on */
    offCanTurnOn,         /**< DLB is off and will turn on on imbalance */
    offTemporarilyLocked, /**< DLB is off and temporarily can't turn on */
    onCanTurnOff,         /**< DLB is on and can turn off when slow */
    onUser,               /**< DLB is permanently on per user request */
    nr                    /**< The number of DLB states */
};
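
/*
 * Illustrative sketch only: the states above split into a "DLB currently on"
 * and a "DLB currently off" group. A predicate along the lines below makes
 * that grouping explicit; the function itself is an assumption of this
 * comment, not a declaration of this header.
 *
 * \code
 *   static bool dlbStateIsOn(DlbState state)
 *   {
 *       return state == DlbState::onCanTurnOff || state == DlbState::onUser;
 *   }
 * \endcode
 */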
/*! \brief The PME domain decomposition for one dimension */
struct gmx_ddpme_t
{
    /**< The dimension */
    int      dim = 0;
    /**< Tells if DD and PME dims match */
    gmx_bool dim_match = false;
    /**< The number of PME ranks/domains in this dimension */
    int      nslab = 0;
    /**< Cell sizes for determining the PME comm. with SLB */
    real    *slb_dim_f = nullptr;
    /**< The minimum pp node location, size nslab */
    int     *pp_min = nullptr;
    /**< The maximum pp node location, size nslab */
    int     *pp_max = nullptr;
    /**< The maximum shift for coordinate redistribution in PME */
    int      maxshift = 0;
};
/*! \brief Zone bounds and communication heights, used with staggered grids under dynamic load balancing */
struct gmx_ddzone_t
{
    /**< The minimum bottom of this zone */
    real min0 = 0;
    /**< The maximum top of this zone */
    real max1 = 0;
    /**< The minimum top of this zone */
    real min1 = 0;
    /**< The maximum bottom communication height for this zone */
    real mch0 = 0;
    /**< The maximum top communication height for this zone */
    real mch1 = 0;
    /**< The bottom value of the first cell in this zone */
    real p1_0 = 0;
    /**< The top value of the first cell in this zone */
    real p1_1 = 0;
    /**< Bool disguised as a real, 1 when the above data has been set, 0 otherwise */
    real dataSet = 0;
};

/*! \brief The number of reals in gmx_ddzone_t */
constexpr int c_ddzoneNumReals = 8;
template<typename T> class DDBufferAccess;
/*! \brief Temporary storage container that minimizes (re)allocation and clearing
 *
 * This is only the storage, actual access happens through DDBufferAccess.
 * All methods check if the buffer is (not) in use.
 */
template<typename T>
class DDBuffer
{
    private:
        /*! \brief Returns a buffer of size \p numElements, the elements are undefined */
        gmx::ArrayRef<T> resize(size_t numElements)
        {
            GMX_ASSERT(isInUse_, "Should only operate on acquired buffers");

            if (numElements > buffer_.size())
            {
                buffer_.resize(numElements);
            }

            return gmx::arrayRefFromArray(buffer_.data(), numElements);
        }

        /*! \brief Acquire the buffer for use with size set to \p numElements, the elements are undefined */
        gmx::ArrayRef<T> acquire(size_t numElements)
        {
            GMX_RELEASE_ASSERT(!isInUse_, "Should only request free buffers");
            isInUse_ = true;

            return resize(numElements);
        }

        /*! \brief Releases the buffer, buffer_ should not be used after this */
        void release()
        {
            GMX_RELEASE_ASSERT(isInUse_, "Should only release buffers in use");
            isInUse_ = false;
        }

        std::vector<T> buffer_;          /**< The actual memory buffer */
        bool           isInUse_ = false; /**< Flag that tells whether the buffer is in use */

        friend class DDBufferAccess<T>;
};
/*! \brief Class that manages access to a temporary memory buffer */
template<typename T>
class DDBufferAccess
{
    public:
        /*! \brief Constructor, returns a buffer of size \p numElements, element values are undefined
         *
         * \note The actual memory buffer \p ddBuffer can not be used to
         *       create other DDBufferAccess objects until the one created
         *       here is destroyed.
         */
        DDBufferAccess(DDBuffer<T> &ddBuffer,
                       size_t       numElements) :
            ddBuffer_(ddBuffer)
        {
            buffer = ddBuffer_.acquire(numElements);
        }

        ~DDBufferAccess()
        {
            ddBuffer_.release();
        }

        /*! \brief Resizes the buffer to \p numElements, new elements are undefined
         *
         * \note The buffer arrayref is updated after this call.
         */
        void resize(size_t numElements)
        {
            buffer = ddBuffer_.resize(numElements);
        }

    private:
        DDBuffer<T>      &ddBuffer_; /**< Reference to the storage class */

    public:
        gmx::ArrayRef<T>  buffer;    /**< The access to the memory buffer */
};
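
/*
 * Illustrative usage sketch for DDBuffer/DDBufferAccess; documentation only.
 * It assumes a gmx_domdec_comm_t instance named comm (declared further below,
 * it owns DDBuffer members) and a caller-provided count numAtomsToSend.
 *
 * \code
 *   {
 *       // Acquiring asserts that comm->intBuffer is not already in use.
 *       DDBufferAccess<int> flagAccess(comm->intBuffer, numAtomsToSend);
 *       gmx::ArrayRef<int>  flags = flagAccess.buffer;
 *
 *       // ... fill and communicate flags ...
 *
 *       flagAccess.resize(2*numAtomsToSend); // flagAccess.buffer is updated
 *   }
 *   // Destruction of flagAccess releases comm->intBuffer for reuse.
 * \endcode
 */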
/*! \brief Temporary buffer for setting up communication over one pulse and all zones in the halo */
struct dd_comm_setup_work_t
{
    /**< The local atom group indices to send */
    std::vector<int>       localAtomGroupBuffer;
    /**< Buffer for collecting the global atom group indices to send */
    std::vector<int>       atomGroupBuffer;
    /**< Buffer for collecting the atom group positions to send */
    std::vector<gmx::RVec> positionBuffer;
    /**< The number of atoms contained in the atom groups to send */
    int                    nat = 0;
    /**< The number of atom groups to send for the last zone */
    int                    nsend_zone = 0;
};
/*! \brief Struct for domain decomposition communication
 *
 * This struct contains most information about domain decomposition
 * communication setup, some communication buffers, some statistics
 * and also the setup for the communication between particle-particle
 * and PME only ranks.
 *
 * All arrays are indexed with 0 to dd->ndim (not Cartesian indexing),
 * unless stated otherwise.
 */
struct gmx_domdec_comm_t // NOLINT (clang-analyzer-optin.performance.Padding)
{
    /* PME and Cartesian communicator stuff */
    /**< The number of decomposition dimensions for PME, 0: no PME */
    int         npmedecompdim = 0;
    /**< The number of ranks doing PME (PP/PME or only PME) */
    int         npmenodes = 0;
    /**< The number of PME ranks/domains along x */
    int         npmenodes_x = 0;
    /**< The number of PME ranks/domains along y */
    int         npmenodes_y = 0;
    /**< Use Cartesian communication between PP and PME ranks */
    gmx_bool    bCartesianPP_PME = false;
    /**< Cartesian grid for combined PP+PME ranks */
    ivec        ntot = { };
    /**< The number of dimensions for the PME setup that are Cartesian */
    int         cartpmedim = 0;
    /**< The PME ranks, size npmenodes */
    int        *pmenodes = nullptr;
    /**< The Cartesian index to sim rank conversion, used with bCartesianPP_PME */
    int        *ddindex2simnodeid = nullptr;
    /**< The 1D or 2D PME domain decomposition setup */
    gmx_ddpme_t ddpme[2];
    /* The DD particle-particle nodes only */
    /**< Use a Cartesian communicator for PP */
    gmx_bool bCartesianPP = false;
    /**< The Cartesian index to DD rank conversion, used with bCartesianPP */
    int     *ddindex2ddnodeid = nullptr;

    /* The DLB state, used for reloading old states, during e.g. EM */
    /**< The global charge groups, this defines the DD state (except for the DLB state) */
    t_block cgs_gl = { };

    /* Charge group / atom sorting */
    /**< Data structure for cg/atom sorting */
    std::unique_ptr<gmx_domdec_sort_t> sort;

    //! True when update groups are used
    bool useUpdateGroups = false;
    //! Update atom grouping for each molecule type
    std::vector<gmx::RangePartitioning> updateGroupingPerMoleculetype;
    //! Centers of mass of local update groups
    std::unique_ptr<gmx::UpdateGroupsCog> updateGroupsCog;

    /* Are there charge groups? */
    bool haveInterDomainBondeds          = false; /**< Are there inter-domain bonded interactions? */
    bool haveInterDomainMultiBodyBondeds = false; /**< Are there inter-domain multi-body interactions? */

    /* Data for the optional bonded interaction atom communication range */
    /**< Only communicate atoms beyond the non-bonded cut-off when they are involved in bonded interactions with non-local atoms */
    gmx_bool  bBondComm = false;
    /**< Links between cg's through bonded interactions */
    t_blocka *cglink = nullptr;
    /**< Local cg availability, TODO: remove when group scheme is removed */
    char     *bLocalCG = nullptr;

    /* The DLB state, possible values are defined above */
    DlbState dlbState;
    /* With dlbState=DlbState::offCanTurnOn, should we check whether to turn DLB on at the next DD partitioning? */
    gmx_bool bCheckWhetherToTurnDlbOn = false;
    /* The first DD count since we are running without DLB */
    int      ddPartioningCountFirstDlbOff = 0;
    /* Cell sizes for static load balancing, first index cartesian */
    real **slb_frac = nullptr;

    /* The width of the communicated boundaries */
    /**< Cut-off for multi-body interactions, also 2-body bonded when \p cutoff_mbody > \p cutoff */
    real     cutoff_mbody = 0;
    /**< Cut-off for non-bonded/2-body interactions */
    real     cutoff = 0;
    /**< The minimum guaranteed cell-size, Cartesian indexing */
    rvec     cellsize_min = { };
    /**< The minimum guaranteed cell-size with dlb=auto */
    rvec     cellsize_min_dlb = { };
    /**< The lower limit for the DD cell size with DLB */
    real     cellsize_limit = 0;
    /**< Effectively no NB cut-off limit with DLB for systems without PBC? */
    gmx_bool bVacDLBNoLimit = false;

    /** With PME load balancing we set limits on DLB */
    gmx_bool bPMELoadBalDLBLimits = false;
    /** DLB needs to take into account that we want to allow this maximum
     *  cut-off (for PME load balancing), this could limit cell boundaries.
     */
    real PMELoadBal_max_cutoff = 0;

    /**< tric_dir from \p gmx_ddbox_t is only stored here because dd_get_ns_ranges needs it */
    ivec tric_dir = { };
    /**< box lower corner, required with dim's without pbc and -gcom */
    rvec box0 = { };
    /**< box size, required with dim's without pbc and -gcom */
    rvec box_size = { };

    /**< The DD cell lower corner, in triclinic space */
    rvec cell_x0 = { };
    /**< The DD cell upper corner, in triclinic space */
    rvec cell_x1 = { };

    /**< The old \p cell_x0, to check cg displacements */
    rvec old_cell_x0 = { };
    /**< The old \p cell_x1, to check cg displacements */
    rvec old_cell_x1 = { };
    /** The communication setup and charge group boundaries for the zones */
    gmx_domdec_zones_t zones;

    /* The zone limits for DD dimensions 1 and 2 (not 0), determined from
     * cell boundaries of neighboring cells for staggered grids when using
     * dynamic load balancing.
     */
    /**< Zone limits for dim 1 with staggered grids */
    gmx_ddzone_t zone_d1[2];
    /**< Zone limits for dim 2 with staggered grids */
    gmx_ddzone_t zone_d2[2][2];

    /** The coordinate/force communication setup and indices */
    gmx_domdec_comm_dim_t cd[DIM];
    /** The maximum number of cells to communicate with in one dimension */
    int                   maxpulse = 0;

    /** Which cg distribution is stored on the master node,
     *  stored as DD partitioning call count.
     */
    int64_t master_cg_ddp_count = 0;

    /** The number of cg's received from the direct neighbors */
    int zone_ncg1[DD_MAXZONE] = {0};

    /** The atom ranges in the local state */
    DDAtomRanges atomRanges;

    /** Array for signalling if atoms have moved to another domain */
    std::vector<int> movedBuffer;

    /** Communication int buffer for general use */
    DDBuffer<int> intBuffer;

    /** Communication rvec buffer for general use */
    DDBuffer<gmx::RVec> rvecBuffer;
    /* Temporary storage for thread parallel communication setup */
    /**< Thread-local work data */
    std::vector<dd_comm_setup_work_t> dth;

    /* Communication buffer only used with multiple grid pulses */
    /**< Another rvec comm. buffer */
    DDBuffer<gmx::RVec> rvecBuffer2;

    /* Communication buffers for local redistribution */
    /**< Charge group flag comm. buffers */
    std::array<std::vector<int>, DIM*2>       cggl_flag;
    /**< Charge group center comm. buffers */
    std::array<std::vector<gmx::RVec>, DIM*2> cgcm_state;

    /* Cell sizes for dynamic load balancing */
    std::vector<DDCellsizesWithDlb> cellsizesWithDlb;

    /* Stuff for load communication */
    /**< Should we record the load */
    gmx_bool       bRecordLoad = false;
    /**< The recorded load data */
    domdec_load_t *load = nullptr;
    /**< The number of MPI ranks sharing the GPU our rank is using */
    int            nrank_gpu_shared = 0;
    /**< The MPI load communicator */
    MPI_Comm      *mpi_comm_load = nullptr;
    /**< The MPI load communicator for ranks sharing a GPU */
    MPI_Comm       mpi_comm_gpu_shared;

    /* Information for managing the dynamic load balancing */
    /**< Maximum DLB scaling per load balancing step in percent */
    int dlb_scale_lim = 0;

    /**< Struct for timing the force load balancing region */
    BalanceRegion *balanceRegion = nullptr;
    /* Cycle counters over nstlist steps */
    /**< Total cycles counted */
    float  cycl[ddCyclNr] = { };
    /**< The number of cycle recordings */
    int    cycl_n[ddCyclNr] = { };
    /**< The maximum cycle count */
    float  cycl_max[ddCyclNr] = { };

    /** Flop counter (0=no, 1=yes, 2=with (eFlop-1)*5% noise) */
    int    eFlop = 0;
    /**< Total flops counted */
    double flop = 0.0;
    /**< The number of flop recordings */
    int    flop_n = 0;

    /** How many times did we have load measurements */
    int n_load_have = 0;
    /** How many times have we collected the load measurements */
    int n_load_collect = 0;

    /* Cycle count history for DLB checks */
    /**< The averaged cycles per step over the last nstlist steps before turning on DLB */
    float   cyclesPerStepBeforeDLB = 0;
    /**< The running average of the cycles per step during DLB */
    float   cyclesPerStepDlbExpAverage = 0;
    /**< Have we turned off DLB (after turning DLB on)? */
    bool    haveTurnedOffDlb = false;
    /**< The DD step at which we last measured that DLB off was faster than DLB on, 0 if there was no such step */
    int64_t dlbSlowerPartitioningCount = 0;

    /* Statistics for atoms */
    /**< The atoms per range, summed over the steps */
    double sum_nat[static_cast<int>(DDAtomRanges::Type::Number)] = { };
    /* Statistics for calls and times */
    /**< The number of partitioning calls */
    int    ndecomp = 0;
    /**< The number of load recordings */
    int    nload = 0;
    /**< Total MD step time */
    double load_step = 0.0;
    /**< Total PP force time */
    double load_sum = 0.0;
    /**< Max \p load_sum over the ranks */
    double load_max = 0.0;
    /**< Was load balancing limited, per DD dim */
    ivec   load_lim = { };
    /**< Total time on PP done during PME overlap time */
    double load_mdf = 0.0;
    /**< Total time on our PME-only rank */
    double load_pme = 0.0;

    /** The last partition step */
    int64_t partition_step = 0;

    /**< Step interval for dumping the local+non-local atoms to pdb */
    int nstDDDump = 0;
    /**< Step interval for dumping the DD grid to pdb */
    int nstDDDumpGrid = 0;
    /**< DD debug print level: 0, 1, 2 */
    int DD_debug = 0;
};
/*! \brief DD zone permutation
 *
 * Zone permutation from the Cartesian x-major/z-minor order to an order
 * that leads to consecutive charge groups for neighbor searching.
 * TODO: remove when the group scheme is removed
 */
static const int zone_perm[3][4] = { {0, 0, 0, 0}, {1, 0, 0, 0}, {3, 0, 1, 2} };
/*! \brief DD zone reordering to Cartesian order
 *
 * Index to reorder the zones such that they end up in Cartesian order
 * with dimension index 0 major and dimension index 2 minor.
 */
static const int zone_reorder_cartesian[DD_MAXZONE] = { 0, 1, 3, 2, 5, 4, 6, 7 };
/* dd_zo and dd_zp3 are set up such that i zones with non-zero
 * components see only j zones with that component 0.
 */
/*! \brief Returns the DD cut-off distance for multi-body interactions */
real dd_cutoff_multibody(const gmx_domdec_t *dd);

/*! \brief Returns the DD cut-off distance for two-body interactions */
real dd_cutoff_twobody(const gmx_domdec_t *dd);
/*! \brief Returns the domain index given the number of domains and the domain coordinates
 *
 * This order is required to minimize the coordinate communication in PME
 * which uses decomposition in the x direction.
 */
static inline int dd_index(const ivec numDomains,
                           const ivec domainCoordinates)
{
    return ((domainCoordinates[XX]*numDomains[YY] + domainCoordinates[YY])*numDomains[ZZ]) + domainCoordinates[ZZ];
}
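
/*
 * Worked example (illustrative only): with numDomains = {4, 3, 2} the mapping
 * is x-major, z-minor, e.g.
 *   {0, 0, 0} -> 0,   {0, 0, 1} -> 1,   {0, 1, 0} -> 2,
 *   {1, 0, 0} -> 6,   {3, 2, 1} -> (3*3 + 2)*2 + 1 = 23 (the last domain).
 */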
/*! Returns the size of the buffer to hold fractional cell boundaries for DD dimension index dimIndex */
static inline int ddCellFractionBufferSize(const gmx_domdec_t *dd,
                                           int                 dimIndex)
{
    return dd->nc[dd->dim[dimIndex]] + 1 + dimIndex*2 + 1 + dimIndex;
}
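
/*
 * Worked example (illustrative only): for DD dimension index 1 whose grid
 * dimension has 4 domains (dd->nc[dd->dim[1]] == 4), the buffer size is
 * 4 + 1 + 1*2 + 1 + 1 = 9.
 */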
/*! \brief Maximum number of ranks for using send/recv for state scattering and gathering
 *
 * Use separate MPI send and receive commands
 * when #ranks <= c_maxNumRanksUseSendRecvForScatterAndGather
 * This saves memory (and some copying for small #ranks).
 * For high parallelization scatter and gather calls are used.
 */
static constexpr int c_maxNumRanksUseSendRecvForScatterAndGather = 4;
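
/*
 * Illustrative sketch of how this constant is typically applied when choosing
 * the communication pattern; the branch bodies are placeholders, not code
 * from this module.
 *
 * \code
 *   if (dd->nnodes <= c_maxNumRanksUseSendRecvForScatterAndGather)
 *   {
 *       // loop over ranks with point-to-point MPI send/receive calls,
 *       // avoiding the count/displacement buffers a gatherv/scatterv needs
 *   }
 *   else
 *   {
 *       // use collective scatter/gather calls
 *   }
 * \endcode
 */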
/*! \brief Make DD cells larger by this factor than the limit to avoid rounding issues */
static constexpr double DD_CELL_MARGIN = 1.0001;

/*! \brief Factor for checking DD cell size limitation during DLB, should be in between 1 and DD_CELL_MARGIN */
static constexpr double DD_CELL_MARGIN2 = 1.00005;

/*! \brief With pressure scaling, keep cell sizes 2% above the limit to allow for some scaling */
static constexpr double DD_PRES_SCALE_MARGIN = 1.02;