/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/* IMPORTANT FOR DEVELOPERS:
 *
 * Triclinic pme stuff isn't entirely trivial, and we've experienced
 * some bugs during development (many of them due to me). To avoid
 * this in the future, please check the following things if you make
 * changes in this file:
 *
 * 1. You should obtain identical (at least to the PME precision)
 *    energies, forces, and virial for
 *    a rectangular box and a triclinic one where the z (or y) axis is
 *    tilted a whole box side. For instance you could use a rectangular
 *    and a triclinic box of that kind (an illustrative pair is sketched
 *    in the comment below).
 *
 * 2. You should check the energy conservation in a triclinic box.
 *
 * It might seem like overkill, but better safe than sorry.
 */
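/* Illustrative only (the concrete test boxes are not reproduced here): one
 * possible pair of boxes for check 1 above, in the GROMACS convention where
 * the rows of the matrix are the box vectors, is a rectangular box and the
 * same box with the z axis tilted by a whole box side in x and y:
 *
 *     const matrix boxRectangular = { { 2, 0, 0 }, { 0, 2, 0 }, { 0, 0, 6 } };
 *     const matrix boxTriclinic   = { { 2, 0, 0 }, { 0, 2, 0 }, { 2, 2, 6 } };
 *
 * Energies, forces and the virial computed in the two boxes should agree to
 * PME precision.
 */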
#include "gmxpre.h"

#include "config.h"

#include <cstdio>

#include <memory>
#include <vector>

#include "gromacs/domdec/domdec.h"
#include "gromacs/ewald/pme.h"
#include "gromacs/fft/parallel_3dfft.h"
#include "gromacs/fileio/pdbio.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/gpu_utils/hostallocator.h"
#include "gromacs/math/gmxcomplex.h"
#include "gromacs/math/units.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/forceoutput.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/state_propagator_data_gpu.h"
#include "gromacs/timing/cyclecounter.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/futil.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/gmxomp.h"
#include "gromacs/utility/smalloc.h"

#include "pme_gpu_internal.h"
#include "pme_internal.h"
#include "pme_pp_communication.h"
//! Contains information about the PP ranks that partner this PME rank.
struct PpRanks
{
    //! The MPI rank ID of this partner PP rank.
    int rankId;
    //! The number of atoms to communicate with this partner PP rank.
    int numAtoms;
};
/*! \brief Master PP-PME communication data structure */
struct gmx_pme_pp
{
    MPI_Comm             mpi_comm_mysim; /**< MPI communicator for this simulation */
    std::vector<PpRanks> ppRanks;        /**< The PP partner ranks */
    int                  peerRankId;     /**< The peer PP rank id */
    /**< Vectors of A- and B-state parameters used to transfer vectors to PME ranks */
    gmx::PaddedHostVector<real> chargeA;
    std::vector<real>           chargeB;
    std::vector<real>           sqrt_c6A;
    std::vector<real>           sqrt_c6B;
    std::vector<real>           sigmaA;
    std::vector<real>           sigmaB;
    gmx::HostVector<gmx::RVec>  x; /**< Vector of atom coordinates to transfer to PME ranks */
    std::vector<gmx::RVec>      f; /**< Vector of atom forces received from PME ranks */
    /**< Vectors of MPI objects used in non-blocking communication between multiple PP ranks per PME rank */
    std::vector<MPI_Request> req;
    std::vector<MPI_Status>  stat;
};
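/* Illustrative only: the per-atom vectors above are sized to the sum of
 * PpRanks::numAtoms over all partner ranks, and each partner's data is
 * received contiguously at its own offset, schematically:
 *
 *     int offset = 0;
 *     for (const auto &sender : pme_pp->ppRanks)
 *     {
 *         // receive sender.numAtoms entries into buffer[offset..offset+numAtoms)
 *         offset += sender.numAtoms;
 *     }
 *
 * This is the layout assumed by gmx_pme_recv_coeffs_coords() and
 * gmx_pme_send_force_vir_ener() below.
 */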
/*! \brief Initialize the PME-only side of the PME <-> PP communication */
static std::unique_ptr<gmx_pme_pp> gmx_pme_pp_init(const t_commrec *cr)
{
    auto pme_pp = std::make_unique<gmx_pme_pp>();

#if GMX_MPI
    int rank;

    pme_pp->mpi_comm_mysim = cr->mpi_comm_mysim;
    MPI_Comm_rank(cr->mpi_comm_mygroup, &rank);
    auto ppRanks = get_pme_ddranks(cr, rank);
    pme_pp->ppRanks.reserve(ppRanks.size());
    for (const auto &ppRankId : ppRanks)
    {
        pme_pp->ppRanks.push_back({ppRankId, 0});
    }
    // The peer PP rank is the last one.
    pme_pp->peerRankId = pme_pp->ppRanks.back().rankId;
    pme_pp->req.resize(eCommType_NR*pme_pp->ppRanks.size());
    pme_pp->stat.resize(eCommType_NR*pme_pp->ppRanks.size());
#else
    GMX_UNUSED_VALUE(cr);
#endif

    return pme_pp;
}
/*! \brief Reset the performance counters of this PME-only rank */
static void reset_pmeonly_counters(gmx_wallcycle_t           wcycle,
                                   gmx_walltime_accounting_t walltime_accounting,
                                   t_nrnb                   *nrnb,
                                   int64_t                   step,
                                   bool                      useGpuForPme)
{
    /* Reset all the counters related to performance over the run */
    wallcycle_stop(wcycle, ewcRUN);
    wallcycle_reset_all(wcycle);
    init_nrnb(nrnb);
    wallcycle_start(wcycle, ewcRUN);
    walltime_accounting_reset_time(walltime_accounting, step);

    if (useGpuForPme)
    {
        resetGpuProfiler();
    }
}
/*! \brief Called by PME-only ranks to switch to another PME grid size */
static gmx_pme_t *gmx_pmeonly_switch(std::vector<gmx_pme_t *> *pmedata,
                                     const ivec grid_size,
                                     real ewaldcoeff_q, real ewaldcoeff_lj,
                                     const t_commrec *cr, const t_inputrec *ir)
{
    GMX_ASSERT(pmedata, "Bad PME tuning list pointer");
    for (auto &pme : *pmedata)
    {
        GMX_ASSERT(pme, "Bad PME tuning list element pointer");
        if (pme->nkx == grid_size[XX] &&
            pme->nky == grid_size[YY] &&
            pme->nkz == grid_size[ZZ])
        {
            /* Here we have found an existing PME data structure that suits us.
             * However, in the GPU case, we have to reinitialize it - there's only one GPU structure.
             * This should not cause actual GPU reallocations, at least (the allocated buffers are never shrunk).
             * So, just some grid size updates in the GPU kernel parameters.
             * TODO: this should be something like gmx_pme_update_split_params()
             */
            gmx_pme_reinit(&pme, cr, pme, ir, grid_size, ewaldcoeff_q, ewaldcoeff_lj);
            return pme;
        }
    }

    const auto &pme          = pmedata->back();
    gmx_pme_t  *newStructure = nullptr;
    // Copy last structure with new grid params
    gmx_pme_reinit(&newStructure, cr, pme, ir, grid_size, ewaldcoeff_q, ewaldcoeff_lj);
    pmedata->push_back(newStructure);
    return newStructure;
}
/*! \brief Called by PME-only ranks to receive coefficients and coordinates
 *
 * \param[in,out] pme_pp     PME-PP communication structure.
 * \param[out] natoms        Number of received atoms.
 * \param[out] box           System box, if received.
 * \param[out] maxshift_x    Maximum shift in X direction, if received.
 * \param[out] maxshift_y    Maximum shift in Y direction, if received.
 * \param[out] lambda_q      Free-energy lambda for electrostatics, if received.
 * \param[out] lambda_lj     Free-energy lambda for Lennard-Jones, if received.
 * \param[out] bEnerVir      Set to true if this is an energy/virial calculation step, otherwise set to false.
 * \param[out] step          MD integration step number.
 * \param[out] grid_size     PME grid size, if received.
 * \param[out] ewaldcoeff_q  Ewald cut-off parameter for electrostatics, if received.
 * \param[out] ewaldcoeff_lj Ewald cut-off parameter for Lennard-Jones, if received.
 * \param[out] atomSetChanged Set to true only if the local domain atom data (charges/coefficients)
 *                            has been received (after DD) and should be reinitialized. Otherwise not changed.
 *
 * \retval pmerecvqxX             All parameters were set, chargeA and chargeB can be NULL.
 * \retval pmerecvqxFINISH        No parameters were set.
 * \retval pmerecvqxSWITCHGRID    Only grid_size and *ewaldcoeff were set.
 * \retval pmerecvqxRESETCOUNTERS *step was set.
 */
static int gmx_pme_recv_coeffs_coords(gmx_pme_pp *pme_pp,
                                      int        *natoms,
                                      matrix      box,
                                      int        *maxshift_x,
                                      int        *maxshift_y,
                                      real       *lambda_q,
                                      real       *lambda_lj,
                                      gmx_bool   *bEnerVir,
                                      int64_t    *step,
                                      ivec       *grid_size,
                                      real       *ewaldcoeff_q,
                                      real       *ewaldcoeff_lj,
                                      bool       *atomSetChanged)
{
    int status = -1;
    int nat    = 0;

#if GMX_MPI
    unsigned int flags    = 0;
    int          messages = 0;

    do
    {
        gmx_pme_comm_n_box_t cnb;
        cnb.flags = 0;

        /* Receive the send count, box and time step from the peer PP node */
        MPI_Recv(&cnb, sizeof(cnb), MPI_BYTE,
                 pme_pp->peerRankId, eCommType_CNB,
                 pme_pp->mpi_comm_mysim, MPI_STATUS_IGNORE);

        /* We accumulate all received flags */
        flags |= cnb.flags;

        *step = cnb.step;

        if (debug)
        {
            fprintf(debug, "PME only rank receiving:%s%s%s%s%s\n",
                    (cnb.flags & PP_PME_CHARGE)        ? " charges" : "",
                    (cnb.flags & PP_PME_COORD)         ? " coordinates" : "",
                    (cnb.flags & PP_PME_FINISH)        ? " finish" : "",
                    (cnb.flags & PP_PME_SWITCHGRID)    ? " switch grid" : "",
                    (cnb.flags & PP_PME_RESETCOUNTERS) ? " reset counters" : "");
        }

        if (cnb.flags & PP_PME_FINISH)
        {
            status = pmerecvqxFINISH;
        }

        if (cnb.flags & PP_PME_SWITCHGRID)
        {
            /* Special case, receive the new parameters and return */
            copy_ivec(cnb.grid_size, *grid_size);
            *ewaldcoeff_q  = cnb.ewaldcoeff_q;
            *ewaldcoeff_lj = cnb.ewaldcoeff_lj;

            status = pmerecvqxSWITCHGRID;
        }

        if (cnb.flags & PP_PME_RESETCOUNTERS)
        {
            /* Special case, receive the step (set above) and return */
            status = pmerecvqxRESETCOUNTERS;
        }

        if (cnb.flags & (PP_PME_CHARGE | PP_PME_SQRTC6 | PP_PME_SIGMA))
        {
            *atomSetChanged = true;

            /* Receive the send counts from the other PP nodes */
            for (auto &sender : pme_pp->ppRanks)
            {
                if (sender.rankId == pme_pp->peerRankId)
                {
                    sender.numAtoms = cnb.natoms;
                }
                else
                {
                    MPI_Irecv(&sender.numAtoms, sizeof(sender.numAtoms),
                              MPI_BYTE,
                              sender.rankId, eCommType_CNB,
                              pme_pp->mpi_comm_mysim, &pme_pp->req[messages++]);
                }
            }
            MPI_Waitall(messages, pme_pp->req.data(), pme_pp->stat.data());
            messages = 0;

            nat = 0;
            for (const auto &sender : pme_pp->ppRanks)
            {
                nat += sender.numAtoms;
            }

            if (cnb.flags & PP_PME_CHARGE)
            {
                pme_pp->chargeA.resizeWithPadding(nat);
            }
            if (cnb.flags & PP_PME_CHARGEB)
            {
                pme_pp->chargeB.resize(nat);
            }
            if (cnb.flags & PP_PME_SQRTC6)
            {
                pme_pp->sqrt_c6A.resize(nat);
            }
            if (cnb.flags & PP_PME_SQRTC6B)
            {
                pme_pp->sqrt_c6B.resize(nat);
            }
            if (cnb.flags & PP_PME_SIGMA)
            {
                pme_pp->sigmaA.resize(nat);
            }
            if (cnb.flags & PP_PME_SIGMAB)
            {
                pme_pp->sigmaB.resize(nat);
            }
            pme_pp->x.resize(nat);
            pme_pp->f.resize(nat);

            /* maxshift is sent when the charges are sent */
            *maxshift_x = cnb.maxshift_x;
            *maxshift_y = cnb.maxshift_y;

            /* Receive the charges in place */
            for (int q = 0; q < eCommType_NR; q++)
            {
                real *bufferPtr;

                if (!(cnb.flags & (PP_PME_CHARGE<<q)))
                {
                    continue;
                }
                switch (q)
                {
                    case eCommType_ChargeA: bufferPtr = pme_pp->chargeA.data();  break;
                    case eCommType_ChargeB: bufferPtr = pme_pp->chargeB.data();  break;
                    case eCommType_SQRTC6A: bufferPtr = pme_pp->sqrt_c6A.data(); break;
                    case eCommType_SQRTC6B: bufferPtr = pme_pp->sqrt_c6B.data(); break;
                    case eCommType_SigmaA:  bufferPtr = pme_pp->sigmaA.data();   break;
                    case eCommType_SigmaB:  bufferPtr = pme_pp->sigmaB.data();   break;
                    default: gmx_incons("Wrong eCommType");
                }
                nat = 0;
                for (const auto &sender : pme_pp->ppRanks)
                {
                    if (sender.numAtoms > 0)
                    {
                        MPI_Irecv(bufferPtr + nat,
                                  sender.numAtoms*sizeof(real),
                                  MPI_BYTE,
                                  sender.rankId, q,
                                  pme_pp->mpi_comm_mysim,
                                  &pme_pp->req[messages++]);
                        nat += sender.numAtoms;
                        if (debug)
                        {
                            fprintf(debug, "Received from PP rank %d: %d %s\n",
                                    sender.rankId, sender.numAtoms,
                                    (q == eCommType_ChargeA ||
                                     q == eCommType_ChargeB) ? "charges" : "params");
                        }
                    }
                }
            }
        }

        if (cnb.flags & PP_PME_COORD)
        {
            /* The box, FE flag and lambda are sent along with the coordinates
             */
            copy_mat(cnb.box, box);
            *lambda_q  = cnb.lambda_q;
            *lambda_lj = cnb.lambda_lj;
            *bEnerVir  = ((cnb.flags & PP_PME_ENER_VIR) != 0U);

            /* Receive the coordinates in place */
            nat = 0;
            for (const auto &sender : pme_pp->ppRanks)
            {
                if (sender.numAtoms > 0)
                {
                    MPI_Irecv(pme_pp->x[nat],
                              sender.numAtoms*sizeof(rvec),
                              MPI_BYTE,
                              sender.rankId, eCommType_COORD,
                              pme_pp->mpi_comm_mysim, &pme_pp->req[messages++]);
                    nat += sender.numAtoms;
                    if (debug)
                    {
                        fprintf(debug, "Received from PP rank %d: %d "
                                "coordinates\n",
                                sender.rankId, sender.numAtoms);
                    }
                }
            }

            status = pmerecvqxX;
        }

        /* Wait for the coordinates and/or charges to arrive */
        MPI_Waitall(messages, pme_pp->req.data(), pme_pp->stat.data());
        messages = 0;
    }
    while (status == -1);
#else
    GMX_UNUSED_VALUE(pme_pp);
    GMX_UNUSED_VALUE(box);
    GMX_UNUSED_VALUE(maxshift_x);
    GMX_UNUSED_VALUE(maxshift_y);
    GMX_UNUSED_VALUE(lambda_q);
    GMX_UNUSED_VALUE(lambda_lj);
    GMX_UNUSED_VALUE(bEnerVir);
    GMX_UNUSED_VALUE(step);
    GMX_UNUSED_VALUE(grid_size);
    GMX_UNUSED_VALUE(ewaldcoeff_q);
    GMX_UNUSED_VALUE(ewaldcoeff_lj);
    GMX_UNUSED_VALUE(atomSetChanged);

    status = pmerecvqxX;
#endif

    if (status == pmerecvqxX)
    {
        *natoms = nat;
    }

    return status;
}
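/* Illustrative only: the return values above are consumed by gmx_pmeonly()
 * further down in this file; schematically the PME-only rank loops like
 *
 *     do
 *     {
 *         ret = gmx_pme_recv_coeffs_coords(...);
 *         // pmerecvqxSWITCHGRID    -> switch to the requested PME grid and keep receiving
 *         // pmerecvqxRESETCOUNTERS -> reset cycle/flop counters and keep receiving
 *     }
 *     while (ret == pmerecvqxSWITCHGRID || ret == pmerecvqxRESETCOUNTERS);
 *     // pmerecvqxFINISH -> leave the time-step loop;
 *     // pmerecvqxX      -> do the PME mesh work and return the results with
 *     //                    gmx_pme_send_force_vir_ener().
 */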
/*! \brief Send the PME mesh force, virial and energy to the PP-only ranks. */
static void gmx_pme_send_force_vir_ener(gmx_pme_pp *pme_pp,
                                        const PmeOutput &output,
                                        real dvdlambda_q, real dvdlambda_lj,
                                        float cycles)
{
#if GMX_MPI
    gmx_pme_comm_vir_ene_t cve;
    int                    messages, ind_start, ind_end;

    /* Now the evaluated forces have to be transferred to the PP nodes */
    messages = 0;
    ind_end  = 0;
    for (const auto &receiver : pme_pp->ppRanks)
    {
        ind_start = ind_end;
        ind_end   = ind_start + receiver.numAtoms;
        if (MPI_Isend(const_cast<void *>(static_cast<const void *>(output.forces_[ind_start])),
                      (ind_end - ind_start)*sizeof(rvec), MPI_BYTE,
                      receiver.rankId, 0,
                      pme_pp->mpi_comm_mysim, &pme_pp->req[messages++]) != 0)
        {
            gmx_comm("MPI_Isend failed in do_pmeonly");
        }
    }

    /* send virial and energy to our last PP node */
    copy_mat(output.coulombVirial_,      cve.vir_q);
    copy_mat(output.lennardJonesVirial_, cve.vir_lj);
    cve.energy_q     = output.coulombEnergy_;
    cve.energy_lj    = output.lennardJonesEnergy_;
    cve.dvdlambda_q  = dvdlambda_q;
    cve.dvdlambda_lj = dvdlambda_lj;
    /* check for the signals to send back to a PP node */
    cve.stop_cond    = gmx_get_stop_condition();

    cve.cycles = cycles;

    if (debug)
    {
        fprintf(debug, "PME rank sending to PP rank %d: virial and energy\n",
                pme_pp->peerRankId);
    }
    MPI_Isend(&cve, sizeof(cve), MPI_BYTE,
              pme_pp->peerRankId, 1,
              pme_pp->mpi_comm_mysim, &pme_pp->req[messages++]);

    /* Wait for the forces to arrive */
    MPI_Waitall(messages, pme_pp->req.data(), pme_pp->stat.data());
#else
    gmx_call("MPI not enabled");
    GMX_UNUSED_VALUE(pme_pp);
    GMX_UNUSED_VALUE(output);
    GMX_UNUSED_VALUE(dvdlambda_q);
    GMX_UNUSED_VALUE(dvdlambda_lj);
    GMX_UNUSED_VALUE(cycles);
#endif
}
int gmx_pmeonly(struct gmx_pme_t *pme,
                const t_commrec *cr, t_nrnb *mynrnb,
                gmx_wallcycle *wcycle,
                gmx_walltime_accounting_t walltime_accounting,
                t_inputrec *ir, PmeRunMode runMode)
{
    int      ret;
    int      natoms     = 0;
    matrix   box;
    real     lambda_q   = 0;
    real     lambda_lj  = 0;
    int      maxshift_x = 0, maxshift_y = 0;
    real     dvdlambda_q, dvdlambda_lj;
    float    cycles;
    int      count      = 0;
    gmx_bool bEnerVir   = FALSE;
    int64_t  step;

    /* This data will only be used with PME tuning, i.e. switching PME grids */
    std::vector<gmx_pme_t *> pmedata;
    pmedata.push_back(pme);

    auto        pme_pp        = gmx_pme_pp_init(cr);
    //TODO the variable below should be queried from the task assignment info
    const bool  useGpuForPme  = (runMode == PmeRunMode::GPU) || (runMode == PmeRunMode::Mixed);
    const void *commandStream = useGpuForPme ? pme_gpu_get_device_stream(pme) : nullptr;
    const void *gpuContext    = useGpuForPme ? pme_gpu_get_device_context(pme) : nullptr;
    const int   paddingSize   = pme_gpu_get_padding_size(pme);
    if (useGpuForPme)
    {
        changePinningPolicy(&pme_pp->chargeA, pme_get_pinning_policy());
        changePinningPolicy(&pme_pp->x, pme_get_pinning_policy());
    }

    // Unconditionally initialize the StatePropagatorDataGpu object to get more verbose message if it is used from CPU builds
    auto stateGpu = std::make_unique<gmx::StatePropagatorDataGpu>(commandStream, gpuContext, GpuApiCallBehavior::Sync, paddingSize);

    do /****** this is a quasi-loop over time steps! */
    {
        /* The reason for having a loop here is PME grid tuning/switching */
        do
        {
            /* Domain decomposition */
            ivec newGridSize;
            bool atomSetChanged = false;
            real ewaldcoeff_q   = 0, ewaldcoeff_lj = 0;
            ret = gmx_pme_recv_coeffs_coords(pme_pp.get(),
                                             &natoms,
                                             box,
                                             &maxshift_x, &maxshift_y,
                                             &lambda_q, &lambda_lj,
                                             &bEnerVir,
                                             &step,
                                             &newGridSize,
                                             &ewaldcoeff_q,
                                             &ewaldcoeff_lj,
                                             &atomSetChanged);

            if (ret == pmerecvqxSWITCHGRID)
            {
                /* Switch the PME grid to newGridSize */
                pme = gmx_pmeonly_switch(&pmedata, newGridSize, ewaldcoeff_q, ewaldcoeff_lj, cr, ir);
            }

            if (atomSetChanged)
            {
                gmx_pme_reinit_atoms(pme, natoms, pme_pp->chargeA.data());
                if (useGpuForPme)
                {
                    stateGpu->reinit(natoms, natoms);
                    pme_gpu_set_device_x(pme, stateGpu->getCoordinates());
                }
            }

            if (ret == pmerecvqxRESETCOUNTERS)
            {
                /* Reset the cycle and flop counters */
                reset_pmeonly_counters(wcycle, walltime_accounting, mynrnb, step, useGpuForPme);
            }
        }
        while (ret == pmerecvqxSWITCHGRID || ret == pmerecvqxRESETCOUNTERS);

        if (ret == pmerecvqxFINISH)
        {
            /* We should stop: break out of the loop */
            break;
        }

        if (count == 0)
        {
            wallcycle_start(wcycle, ewcRUN);
            walltime_accounting_start_time(walltime_accounting);
        }

        wallcycle_start(wcycle, ewcPMEMESH);

        dvdlambda_q  = 0;
        dvdlambda_lj = 0;

        // TODO Make a struct of array refs onto these per-atom fields
        // of pme_pp (maybe box, energy and virial, too; and likewise
        // from mdatoms for the other call to gmx_pme_do), so we have
        // fewer lines of code and less parameter passing.
        const int pmeFlags = GMX_PME_DO_ALL_F | (bEnerVir ? GMX_PME_CALC_ENER_VIR : 0);
        PmeOutput output   = {{}, false, 0, {{0}}, 0, {{0}}};
        if (useGpuForPme)
        {
            const bool boxChanged              = false;
            const bool useGpuPmeForceReduction = false;
            //TODO this should be set properly by gmx_pme_recv_coeffs_coords,
            // or maybe use inputrecDynamicBox(ir), at the very least - change this when this codepath is tested!
            pme_gpu_prepare_computation(pme, boxChanged, box, wcycle, pmeFlags, useGpuPmeForceReduction);
            stateGpu->copyCoordinatesToGpu(gmx::ArrayRef<gmx::RVec>(pme_pp->x), gmx::StatePropagatorDataGpu::AtomLocality::All);

            pme_gpu_launch_spread(pme, wcycle);
            pme_gpu_launch_complex_transforms(pme, wcycle);
            pme_gpu_launch_gather(pme, wcycle, PmeForceOutputHandling::Set);
            output = pme_gpu_wait_finish_task(pme, pmeFlags, wcycle);
            pme_gpu_reinit_computation(pme, wcycle);
        }
        else
        {
            GMX_ASSERT(pme_pp->x.size() == static_cast<size_t>(natoms), "The coordinate buffer should have size natoms");

            gmx_pme_do(pme, pme_pp->x, pme_pp->f,
                       pme_pp->chargeA.data(), pme_pp->chargeB.data(),
                       pme_pp->sqrt_c6A.data(), pme_pp->sqrt_c6B.data(),
                       pme_pp->sigmaA.data(), pme_pp->sigmaB.data(), box,
                       cr, maxshift_x, maxshift_y, mynrnb, wcycle,
                       output.coulombVirial_, output.lennardJonesVirial_,
                       &output.coulombEnergy_, &output.lennardJonesEnergy_,
                       lambda_q, lambda_lj, &dvdlambda_q, &dvdlambda_lj,
                       pmeFlags);
            output.forces_ = pme_pp->f;
        }

        cycles = wallcycle_stop(wcycle, ewcPMEMESH);

        gmx_pme_send_force_vir_ener(pme_pp.get(), output,
                                    dvdlambda_q, dvdlambda_lj, cycles);

        count++;
    } /***** end of quasi-loop, we stop with the break above */
    while (TRUE);

    walltime_accounting_end_time(walltime_accounting);

    return 0;
}
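/* Illustrative only (a sketch, not the actual runner code): a rank that has
 * been assigned only the PME duty enters the service loop above instead of
 * the MD loop, roughly as
 *
 *     if (!thisRankHasDuty(cr, DUTY_PP))
 *     {
 *         gmx_pmeonly(pme, cr, nrnb, wcycle, walltime_accounting, inputrec, pmeRunMode);
 *     }
 *
 * where the variable names are placeholders for the caller's own data.
 */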