/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/essentialdynamics/edsam.h"
#include "gromacs/ewald/pme.h"
#include "gromacs/fileio/copyrite.h"
#include "gromacs/fileio/txtdump.h"
#include "gromacs/gmxlib/chargegroup.h"
#include "gromacs/gmxlib/disre.h"
#include "gromacs/gmxlib/gmx_omp_nthreads.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/gmxlib/orires.h"
#include "gromacs/gmxlib/nonbonded/nb_free_energy.h"
#include "gromacs/gmxlib/nonbonded/nb_kernel.h"
#include "gromacs/gmxlib/nonbonded/nonbonded.h"
#include "gromacs/imd/imd.h"
#include "gromacs/legacyheaders/types/commrec.h"
#include "gromacs/listed-forces/bonded.h"
#include "gromacs/math/units.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdlib/calcmu.h"
#include "gromacs/mdlib/constr.h"
#include "gromacs/mdlib/force.h"
#include "gromacs/mdlib/forcerec.h"
#include "gromacs/mdlib/genborn.h"
#include "gromacs/mdlib/mdrun.h"
#include "gromacs/mdlib/nb_verlet.h"
#include "gromacs/mdlib/nbnxn_atomdata.h"
#include "gromacs/mdlib/nbnxn_gpu_data_mgmt.h"
#include "gromacs/mdlib/nbnxn_grid.h"
#include "gromacs/mdlib/nbnxn_search.h"
#include "gromacs/mdlib/qmmm.h"
#include "gromacs/mdlib/update.h"
#include "gromacs/mdlib/nbnxn_kernels/nbnxn_kernel_gpu_ref.h"
#include "gromacs/mdlib/nbnxn_kernels/nbnxn_kernel_ref.h"
#include "gromacs/mdlib/nbnxn_kernels/simd_2xnn/nbnxn_kernel_simd_2xnn.h"
#include "gromacs/mdlib/nbnxn_kernels/simd_4xn/nbnxn_kernel_simd_4xn.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/pbcutil/mshift.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/pulling/pull.h"
#include "gromacs/pulling/pull_rotation.h"
#include "gromacs/timing/cyclecounter.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/timing/wallcyclereporting.h"
#include "gromacs/timing/walltime_accounting.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/utility/sysinfo.h"

#include "nbnxn_gpu.h"
#if defined(GMX_USE_OPENCL)
// Have OpenCL support
static const bool useCuda   = false;
static const bool useOpenCL = true;
#elif defined(GMX_GPU)
// Have CUDA support
static const bool useCuda   = true;
static const bool useOpenCL = false;
#else
// No GPU support of either flavour
static const bool useCuda   = false;
static const bool useOpenCL = false;
#endif
void print_time(FILE                     *out,
                gmx_walltime_accounting_t walltime_accounting,
                gmx_int64_t               step,
                t_inputrec               *ir,
                t_commrec gmx_unused     *cr)
{
    char   timebuf[STRLEN];
    double dt, elapsed_seconds, time_per_step;

#ifndef GMX_THREAD_MPI
#endif

    fprintf(out, "step %s", gmx_step_str(step, buf));
    if ((step >= ir->nstlist))
    {
        double seconds_since_epoch = gmx_gettime();

        elapsed_seconds = seconds_since_epoch - walltime_accounting_get_start_time_stamp(walltime_accounting);
        time_per_step   = elapsed_seconds/(step - ir->init_step + 1);
        dt              = (ir->nsteps + ir->init_step - step) * time_per_step;

        finish = (time_t) (seconds_since_epoch + dt);
        gmx_ctime_r(&finish, timebuf, STRLEN);
        sprintf(buf, "%s", timebuf);
        buf[strlen(buf)-1] = '\0';
        fprintf(out, ", will finish %s", buf);

        fprintf(out, ", remaining wall clock time: %5d s ", (int)dt);

        fprintf(out, " performance: %.1f ns/day ",
                ir->delta_t/1000*24*60*60/time_per_step);
    }
#ifndef GMX_THREAD_MPI
#endif
}
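/*
 * Worked example of the estimate above (added for illustration; the numbers
 * are hypothetical, not from the original source): if 1000 steps beyond
 * ir->init_step took 50 s of wall clock time, time_per_step = 0.05 s.
 * With 9000 steps remaining, dt = 9000 * 0.05 s = 450 s until completion.
 * The performance line converts ir->delta_t (ps) to ns and divides the
 * seconds in a day by the seconds per step:
 *   ns/day = (delta_t/1000) * 24*60*60 / time_per_step.
 */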
void print_date_and_time(FILE *fplog, int nodeid, const char *title,
                         double the_time)
{
    char time_string[STRLEN];

    char   timebuf[STRLEN];
    time_t temp_time = (time_t) the_time;

    gmx_ctime_r(&temp_time, timebuf, STRLEN);
    for (i = 0; timebuf[i] >= ' '; i++)
    {
        time_string[i] = timebuf[i];
    }
    time_string[i] = '\0';

    fprintf(fplog, "%s on rank %d %s\n", title, nodeid, time_string);
}
void print_start(FILE *fplog, t_commrec *cr,
                 gmx_walltime_accounting_t walltime_accounting,
                 const char *name)
{
    sprintf(buf, "Started %s", name);
    print_date_and_time(fplog, cr->nodeid, buf,
                        walltime_accounting_get_start_time_stamp(walltime_accounting));
}
static void sum_forces(int start, int end, rvec f[], rvec flr[])
{
    pr_rvecs(debug, 0, "fsr", f+start, end-start);
    pr_rvecs(debug, 0, "flr", flr+start, end-start);

    for (i = start; (i < end); i++)
    {
        rvec_inc(f[i], flr[i]);
    }
}
/*
 * calc_f_el calculates forces due to an electric field.
 *
 * force is kJ mol^-1 nm^-1 = e * kJ mol^-1 nm^-1 / e
 *
 * Et[] contains the parameters for the time dependent
 * part of the field.
 * Ex[] contains the parameters for
 * the spatial dependent part of the field.
 * The function should return the energy due to the electric field
 * (if any) but for now returns 0.
 *
 * There can be problems with the virial.
 * Since the field is not self-consistent this is unavoidable.
 * For neutral molecules the virial is correct within this approximation.
 * For neutral systems with many charged molecules the error is small.
 * But for systems with a net charge or a few charged molecules
 * the error can be significant when the field is high.
 * Solution: implement a self-consistent electric field into PME.
 */
static void calc_f_el(FILE *fp, int start, int homenr,
                      real charge[], rvec f[],
                      t_cosines Ex[], t_cosines Et[], double t)
{
    for (m = 0; (m < DIM); m++)
    {
        /* Time dependence: a Gaussian-enveloped pulse when pulse parameters
         * are given... */
        Ext[m] = cos(Et[m].a[0]*(t-t0))*exp(-sqr(t-t0)/(2.0*sqr(Et[m].a[2])));

        /* ...otherwise a plain cosine. */
        Ext[m] = cos(Et[m].a[0]*t);

        /* Convert the field strength from V/nm to MD-units */
        Ext[m] *= Ex[m].a[0]*FIELDFAC;
        for (i = start; (i < start+homenr); i++)
        {
            f[i][m] += charge[i]*Ext[m];
        }
    }

    fprintf(fp, "%10g %10g %10g %10g #FIELD\n", t,
            Ext[XX]/FIELDFAC, Ext[YY]/FIELDFAC, Ext[ZZ]/FIELDFAC);
}
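/*
 * Illustrative sketch (not part of the original source): the applied field per
 * dimension is E_m(t) = E0 * cos(omega*(t - t0)) * exp(-(t - t0)^2/(2*sigma^2))
 * for the pulsed case, and each home atom then receives F_i,m = q_i * E_m(t).
 * A minimal standalone helper, assuming MD units and hypothetical parameter
 * names (kept out of the build on purpose):
 */
#if 0
static double pulsed_field(double E0, double omega, double t, double t0, double sigma)
{
    /* Gaussian envelope centred at t0 with width sigma; omega in rad/ps */
    return E0*cos(omega*(t - t0))*exp(-(t - t0)*(t - t0)/(2.0*sigma*sigma));
}
#endif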
static void calc_virial(int start, int homenr, rvec x[], rvec f[],
                        tensor vir_part, t_graph *graph, matrix box,
                        t_nrnb *nrnb, const t_forcerec *fr, int ePBC)
{
    /* The short-range virial from surrounding boxes */
    calc_vir(SHIFTS, fr->shift_vec, fr->fshift, vir_part, ePBC == epbcSCREW, box);
    inc_nrnb(nrnb, eNR_VIRIAL, SHIFTS);

    /* Calculate partial virial, for local atoms only, based on short range.
     * Total virial is computed in global_stat, called from do_md
     */
    f_calc_vir(start, start+homenr, x, f, vir_part, graph, box);
    inc_nrnb(nrnb, eNR_VIRIAL, homenr);

    /* Add position restraint contribution */
    for (i = 0; i < DIM; i++)
    {
        vir_part[i][i] += fr->vir_diag_posres[i];
    }

    /* Add wall contribution */
    for (i = 0; i < DIM; i++)
    {
        vir_part[i][ZZ] += fr->vir_wall_z[i];
    }

    pr_rvecs(debug, 0, "vir_part", vir_part, DIM);
}
static void pull_potential_wrapper(t_commrec      *cr,
                                   t_inputrec     *ir,
                                   matrix box, rvec x[],
                                   rvec f[],
                                   tensor vir_force,
                                   t_mdatoms      *mdatoms,
                                   gmx_enerdata_t *enerd,
                                   real           *lambda,
                                   double          t,
                                   gmx_wallcycle_t wcycle)
{
    /* Calculate the center of mass forces, this requires communication,
     * which is why pull_potential is called close to other communication.
     * The virial contribution is calculated directly,
     * which is why we call pull_potential after calc_virial.
     */
    wallcycle_start(wcycle, ewcPULLPOT);
    set_pbc(&pbc, ir->ePBC, box);
    enerd->term[F_COM_PULL] +=
        pull_potential(ir->pull_work, mdatoms, &pbc,
                       cr, t, lambda[efptRESTRAINT], x, f, vir_force, &dvdl);
    enerd->dvdl_lin[efptRESTRAINT] += dvdl;
    wallcycle_stop(wcycle, ewcPULLPOT);
}
static void pme_receive_force_ener(t_commrec      *cr,
                                   gmx_wallcycle_t wcycle,
                                   gmx_enerdata_t *enerd,
                                   t_forcerec     *fr)
{
    real  e_q, e_lj, dvdl_q, dvdl_lj;
    float cycles_ppdpme, cycles_seppme;

    cycles_ppdpme = wallcycle_stop(wcycle, ewcPPDURINGPME);
    dd_cycles_add(cr->dd, cycles_ppdpme, ddCyclPPduringPME);

    /* In case of node-splitting, the PP nodes receive the long-range
     * forces, virial and energy from the PME nodes here.
     */
    wallcycle_start(wcycle, ewcPP_PMEWAITRECVF);
    gmx_pme_receive_f(cr, fr->f_novirsum, fr->vir_el_recip, &e_q,
                      fr->vir_lj_recip, &e_lj, &dvdl_q, &dvdl_lj,
                      &cycles_seppme);
    enerd->term[F_COUL_RECIP] += e_q;
    enerd->term[F_LJ_RECIP]   += e_lj;
    enerd->dvdl_lin[efptCOUL] += dvdl_q;
    enerd->dvdl_lin[efptVDW]  += dvdl_lj;

    dd_cycles_add(cr->dd, cycles_seppme, ddCyclPME);

    wallcycle_stop(wcycle, ewcPP_PMEWAITRECVF);
}
static void print_large_forces(FILE *fp, t_mdatoms *md, t_commrec *cr,
                               gmx_int64_t step, real pforce, rvec *x, rvec *f)
{
    char buf[STEPSTRSIZE];

    for (i = 0; i < md->homenr; i++)
    {
        /* We also catch NAN, if the compiler does not optimize this away. */
        if (fn2 >= pf2 || fn2 != fn2)
        {
            fprintf(fp, "step %s atom %6d x %8.3f %8.3f %8.3f force %12.5e\n",
                    gmx_step_str(step, buf),
                    ddglatnr(cr->dd, i), x[i][XX], x[i][YY], x[i][ZZ], sqrt(fn2));
        }
    }
}
static void post_process_forces(t_commrec      *cr,
                                gmx_int64_t     step,
                                t_nrnb         *nrnb, gmx_wallcycle_t wcycle,
                                gmx_localtop_t *top,
                                matrix box, rvec x[],
                                rvec f[],
                                tensor vir_force,
                                t_mdatoms      *mdatoms,
                                t_graph        *graph,
                                t_forcerec     *fr, gmx_vsite_t *vsite,
                                int             flags)
{
    /* Spread the mesh force on virtual sites to the other particles...
     * This is parallellized. MPI communication is performed
     * if the constructing atoms aren't local.
     */
    wallcycle_start(wcycle, ewcVSITESPREAD);
    spread_vsite_f(vsite, x, fr->f_novirsum, NULL,
                   (flags & GMX_FORCE_VIRIAL), fr->vir_el_recip,
                   nrnb,
                   &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
    wallcycle_stop(wcycle, ewcVSITESPREAD);

    if (flags & GMX_FORCE_VIRIAL)
    {
        /* Now add the forces, this is local */
        /* With domain decomposition: */
        sum_forces(0, fr->f_novirsum_n, f, fr->f_novirsum);
        /* ...otherwise over the home atoms only: */
        sum_forces(0, mdatoms->homenr,
                   f, fr->f_novirsum);

        if (EEL_FULL(fr->eeltype))
        {
            /* Add the mesh contribution to the virial */
            m_add(vir_force, fr->vir_el_recip, vir_force);
        }
        if (EVDW_PME(fr->vdwtype))
        {
            /* Add the mesh contribution to the virial */
            m_add(vir_force, fr->vir_lj_recip, vir_force);
        }

        pr_rvecs(debug, 0, "vir_force", vir_force, DIM);
    }

    if (fr->print_force >= 0)
    {
        print_large_forces(stderr, mdatoms, cr, step, fr->print_force, x, f);
    }
}
static void do_nb_verlet(t_forcerec          *fr,
                         interaction_const_t *ic,
                         gmx_enerdata_t      *enerd,
                         int flags, int ilocality,
                         int clearF,
                         t_nrnb              *nrnb,
                         gmx_wallcycle_t      wcycle)
{
    int                       enr_nbnxn_kernel_ljc, enr_nbnxn_kernel_lj;
    nonbonded_verlet_group_t *nbvg;
    gmx_bool                  bUsingGpuKernels;

    if (!(flags & GMX_FORCE_NONBONDED))
    {
        /* skip non-bonded calculation */
        return;
    }

    nbvg = &fr->nbv->grp[ilocality];

    /* GPU kernel launch overhead is already timed separately */
    if (fr->cutoff_scheme != ecutsVERLET)
    {
        gmx_incons("Invalid cut-off scheme passed!");
    }

    bUsingGpuKernels = (nbvg->kernel_type == nbnxnk8x8x8_GPU);

    if (!bUsingGpuKernels)
    {
        wallcycle_sub_start(wcycle, ewcsNONBONDED);
    }
    switch (nbvg->kernel_type)
    {
        case nbnxnk4x4_PlainC:
            nbnxn_kernel_ref(&nbvg->nbl_lists,
                             enerd->grpp.ener[egCOULSR],
                             fr->bBHAM ?
                             enerd->grpp.ener[egBHAMSR] :
                             enerd->grpp.ener[egLJSR]);
            break;

        case nbnxnk4xN_SIMD_4xN:
            nbnxn_kernel_simd_4xn(&nbvg->nbl_lists,
                                  enerd->grpp.ener[egCOULSR],
                                  fr->bBHAM ?
                                  enerd->grpp.ener[egBHAMSR] :
                                  enerd->grpp.ener[egLJSR]);
            break;

        case nbnxnk4xN_SIMD_2xNN:
            nbnxn_kernel_simd_2xnn(&nbvg->nbl_lists,
                                   enerd->grpp.ener[egCOULSR],
                                   fr->bBHAM ?
                                   enerd->grpp.ener[egBHAMSR] :
                                   enerd->grpp.ener[egLJSR]);
            break;

        case nbnxnk8x8x8_GPU:
            nbnxn_gpu_launch_kernel(fr->nbv->gpu_nbv, nbvg->nbat, flags, ilocality);
            break;

        case nbnxnk8x8x8_PlainC:
            nbnxn_kernel_gpu_ref(nbvg->nbl_lists.nbl[0],
                                 nbvg->nbat->out[0].f,
                                 enerd->grpp.ener[egCOULSR],
                                 fr->bBHAM ?
                                 enerd->grpp.ener[egBHAMSR] :
                                 enerd->grpp.ener[egLJSR]);
            break;

        default:
            gmx_incons("Invalid nonbonded kernel type passed!");
    }
    if (!bUsingGpuKernels)
    {
        wallcycle_sub_stop(wcycle, ewcsNONBONDED);
    }

    if (EEL_RF(ic->eeltype) || ic->eeltype == eelCUT)
    {
        enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_RF;
    }
    else if ((!bUsingGpuKernels && nbvg->ewald_excl == ewaldexclAnalytical) ||
             (bUsingGpuKernels && nbnxn_gpu_is_kernel_ewald_analytical(fr->nbv->gpu_nbv)))
    {
        enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_EWALD;
    }
    else
    {
        enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_TAB;
    }
    enr_nbnxn_kernel_lj = eNR_NBNXN_LJ;
    if (flags & GMX_FORCE_ENERGY)
    {
        /* In eNR_??? the nbnxn F+E kernels are always the F kernel + 1 */
        enr_nbnxn_kernel_ljc += 1;
        enr_nbnxn_kernel_lj  += 1;
    }

    inc_nrnb(nrnb, enr_nbnxn_kernel_ljc,
             nbvg->nbl_lists.natpair_ljq);
    inc_nrnb(nrnb, enr_nbnxn_kernel_lj,
             nbvg->nbl_lists.natpair_lj);
    /* The Coulomb-only kernels are offset -eNR_NBNXN_LJ_RF+eNR_NBNXN_RF */
    inc_nrnb(nrnb, enr_nbnxn_kernel_ljc-eNR_NBNXN_LJ_RF+eNR_NBNXN_RF,
             nbvg->nbl_lists.natpair_q);

    if (ic->vdw_modifier == eintmodFORCESWITCH)
    {
        /* We add up the switch cost separately */
        inc_nrnb(nrnb, eNR_NBNXN_ADD_LJ_FSW+((flags & GMX_FORCE_ENERGY) ? 1 : 0),
                 nbvg->nbl_lists.natpair_ljq + nbvg->nbl_lists.natpair_lj);
    }
    if (ic->vdw_modifier == eintmodPOTSWITCH)
    {
        /* We add up the switch cost separately */
        inc_nrnb(nrnb, eNR_NBNXN_ADD_LJ_PSW+((flags & GMX_FORCE_ENERGY) ? 1 : 0),
                 nbvg->nbl_lists.natpair_ljq + nbvg->nbl_lists.natpair_lj);
    }
    if (ic->vdwtype == evdwPME)
    {
        /* We add up the LJ Ewald cost separately */
        inc_nrnb(nrnb, eNR_NBNXN_ADD_LJ_EWALD+((flags & GMX_FORCE_ENERGY) ? 1 : 0),
                 nbvg->nbl_lists.natpair_ljq + nbvg->nbl_lists.natpair_lj);
    }
}
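/*
 * Illustration of the flop-count bookkeeping above (added for clarity, not in
 * the original source): with reaction-field electrostatics and
 * GMX_FORCE_ENERGY set, the natpair_ljq pairs are booked under
 * eNR_NBNXN_LJ_RF+1 (the force+energy variant of the force-only counter),
 * natpair_lj under eNR_NBNXN_LJ+1, and the Coulomb-only natpair_q pairs under
 * the counter shifted by (eNR_NBNXN_RF - eNR_NBNXN_LJ_RF), i.e. eNR_NBNXN_RF+1.
 */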
static void do_nb_verlet_fep(nbnxn_pairlist_set_t *nbl_lists,
                             t_forcerec           *fr,
                             rvec x[], rvec f[],
                             t_mdatoms            *mdatoms,
                             t_lambda             *fepvals,
                             real                 *lambda,
                             gmx_enerdata_t       *enerd,
                             int flags, t_nrnb    *nrnb,
                             gmx_wallcycle_t       wcycle)
{
    nb_kernel_data_t kernel_data;

    real dvdl_nb[efptNR];

    /* Add short-range interactions */
    donb_flags |= GMX_NONBONDED_DO_SR;

    /* Currently all group scheme kernels always calculate (shift-)forces */
    if (flags & GMX_FORCE_FORCES)
    {
        donb_flags |= GMX_NONBONDED_DO_FORCE;
    }
    if (flags & GMX_FORCE_VIRIAL)
    {
        donb_flags |= GMX_NONBONDED_DO_SHIFTFORCE;
    }
    if (flags & GMX_FORCE_ENERGY)
    {
        donb_flags |= GMX_NONBONDED_DO_POTENTIAL;
    }
    if (flags & GMX_FORCE_DO_LR)
    {
        donb_flags |= GMX_NONBONDED_DO_LR;
    }

    kernel_data.flags  = donb_flags;
    kernel_data.lambda = lambda;
    kernel_data.dvdl   = dvdl_nb;

    kernel_data.energygrp_elec = enerd->grpp.ener[egCOULSR];
    kernel_data.energygrp_vdw  = enerd->grpp.ener[egLJSR];

    /* reset free energy components */
    for (i = 0; i < efptNR; i++)
    {
        dvdl_nb[i] = 0;
    }

    assert(gmx_omp_nthreads_get(emntNonbonded) == nbl_lists->nnbl);

    wallcycle_sub_start(wcycle, ewcsNONBONDED);
#pragma omp parallel for schedule(static) num_threads(nbl_lists->nnbl)
    for (th = 0; th < nbl_lists->nnbl; th++)
    {
        try
        {
            gmx_nb_free_energy_kernel(nbl_lists->nbl_fep[th],
                                      x, f, fr, mdatoms, &kernel_data, nrnb);
        }
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
    }

    if (fepvals->sc_alpha != 0)
    {
        enerd->dvdl_nonlin[efptVDW]  += dvdl_nb[efptVDW];
        enerd->dvdl_nonlin[efptCOUL] += dvdl_nb[efptCOUL];
    }
    else
    {
        enerd->dvdl_lin[efptVDW]  += dvdl_nb[efptVDW];
        enerd->dvdl_lin[efptCOUL] += dvdl_nb[efptCOUL];
    }

    /* If we do foreign lambda and we have soft-core interactions
     * we have to recalculate the (non-linear) energies contributions.
     */
    if (fepvals->n_lambda > 0 && (flags & GMX_FORCE_DHDL) && fepvals->sc_alpha != 0)
    {
        kernel_data.flags          = (donb_flags & ~(GMX_NONBONDED_DO_FORCE | GMX_NONBONDED_DO_SHIFTFORCE)) | GMX_NONBONDED_DO_FOREIGNLAMBDA;
        kernel_data.lambda         = lam_i;
        kernel_data.energygrp_elec = enerd->foreign_grpp.ener[egCOULSR];
        kernel_data.energygrp_vdw  = enerd->foreign_grpp.ener[egLJSR];
        /* Note that we add to kernel_data.dvdl, but ignore the result */

        for (i = 0; i < enerd->n_lambda; i++)
        {
            for (j = 0; j < efptNR; j++)
            {
                lam_i[j] = (i == 0 ? lambda[j] : fepvals->all_lambda[j][i-1]);
            }
            reset_foreign_enerdata(enerd);
#pragma omp parallel for schedule(static) num_threads(nbl_lists->nnbl)
            for (th = 0; th < nbl_lists->nnbl; th++)
            {
                try
                {
                    gmx_nb_free_energy_kernel(nbl_lists->nbl_fep[th],
                                              x, f, fr, mdatoms, &kernel_data, nrnb);
                }
                GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
            }

            sum_epot(&(enerd->foreign_grpp), enerd->foreign_term);
            enerd->enerpart_lambda[i] += enerd->foreign_term[F_EPOT];
        }
    }

    wallcycle_sub_stop(wcycle, ewcsNONBONDED);
}
gmx_bool use_GPU(const nonbonded_verlet_t *nbv)
{
    return nbv != NULL && nbv->bUseGPU;
}
void do_force_cutsVERLET(FILE *fplog, t_commrec *cr,
                         t_inputrec *inputrec,
                         gmx_int64_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
                         gmx_localtop_t *top,
                         gmx_groups_t gmx_unused *groups,
                         matrix box, rvec x[], history_t *hist,
                         rvec f[], tensor vir_force,
                         t_mdatoms *mdatoms,
                         gmx_enerdata_t *enerd, t_fcdata *fcd,
                         real *lambda, t_graph *graph,
                         t_forcerec *fr, interaction_const_t *ic,
                         gmx_vsite_t *vsite, rvec mu_tot,
                         double t, FILE *field, gmx_edsam_t ed,
                         gmx_bool bBornRadii,
                         int flags)
{
    gmx_bool            bStateChanged, bNS, bFillGrid, bCalcCGCM;
    gmx_bool            bDoLongRange, bDoForces, bSepLRF, bUseGPU, bUseOrEmulGPU;
    gmx_bool            bDiffKernels = FALSE;
    rvec                vzero, box_diag;
    float               cycles_pme, cycles_force, cycles_wait_gpu;
    /* TODO To avoid loss of precision, float can't be used for a
     * cycle count. Build an object that can do this right and perhaps
     * also be used by gmx_wallcycle_t */
    gmx_cycles_t        cycleCountBeforeLocalWorkCompletes = 0;
    nonbonded_verlet_t *nbv;

    homenr = mdatoms->homenr;

    clear_mat(vir_force);

    if (DOMAINDECOMP(cr))
    {
        cg1 = cr->dd->ncg_tot;
    }

    bStateChanged = (flags & GMX_FORCE_STATECHANGED);
    bNS           = (flags & GMX_FORCE_NS) && (fr->bAllvsAll == FALSE);
    bFillGrid     = (bNS && bStateChanged);
    bCalcCGCM     = (bFillGrid && !DOMAINDECOMP(cr));
    bDoLongRange  = (fr->bTwinRange && bNS && (flags & GMX_FORCE_DO_LR));
    bDoForces     = (flags & GMX_FORCE_FORCES);
    bSepLRF       = (bDoLongRange && bDoForces && (flags & GMX_FORCE_SEPLRF));
    bUseGPU       = fr->nbv->bUseGPU;
    bUseOrEmulGPU = bUseGPU || (nbv->grp[0].kernel_type == nbnxnk8x8x8_PlainC);

    update_forcerec(fr, box);

    if (inputrecNeedMutot(inputrec))
    {
        /* Calculate total (local) dipole moment in a temporary common array.
         * This makes it possible to sum them over nodes faster.
         */
        calc_mu(start, homenr,
                x, mdatoms->chargeA, mdatoms->chargeB, mdatoms->nChargePerturbed,
                mu, mu+DIM);
    }

    if (fr->ePBC != epbcNONE)
    {
        /* Compute shift vectors every step,
         * because of pressure coupling or box deformation!
         */
        if ((flags & GMX_FORCE_DYNAMICBOX) && bStateChanged)
        {
            calc_shifts(box, fr->shift_vec);
        }

        put_atoms_in_box_omp(fr->ePBC, box, homenr, x);
        inc_nrnb(nrnb, eNR_SHIFTX, homenr);
    }
    else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph)
    {
        unshift_self(graph, box, x);
    }

    nbnxn_atomdata_copy_shiftvec(flags & GMX_FORCE_DYNAMICBOX,
                                 fr->shift_vec, nbv->grp[0].nbat);

    if (!(cr->duty & DUTY_PME))
    {
        /* Send particle coordinates to the pme nodes.
         * Since this is only implemented for domain decomposition
         * and domain decomposition does not use the graph,
         * we do not need to worry about shifting.
         */
        wallcycle_start(wcycle, ewcPP_PMESENDX);

        bBS = (inputrec->nwall == 2);
        svmul(inputrec->wall_ewald_zfac, boxs[ZZ], boxs[ZZ]);

        if (EEL_PME(fr->eeltype))
        {
            pme_flags |= GMX_PME_DO_COULOMB;
        }
        if (EVDW_PME(fr->vdwtype))
        {
            pme_flags |= GMX_PME_DO_LJ;
        }

        gmx_pme_send_coordinates(cr, bBS ? boxs : box, x,
                                 mdatoms->nChargePerturbed, mdatoms->nTypePerturbed, lambda[efptCOUL], lambda[efptVDW],
                                 (flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY)),
                                 pme_flags, step);

        wallcycle_stop(wcycle, ewcPP_PMESENDX);
    }

    /* do gridding for pair search */
    if (graph && bStateChanged)
    {
        /* Calculate intramolecular shift vectors to make molecules whole */
        mk_mshift(fplog, graph, fr->ePBC, box, x);
    }

    box_diag[XX] = box[XX][XX];
    box_diag[YY] = box[YY][YY];
    box_diag[ZZ] = box[ZZ][ZZ];

    wallcycle_start(wcycle, ewcNS);

    wallcycle_sub_start(wcycle, ewcsNBS_GRID_LOCAL);
    nbnxn_put_on_grid(nbv->nbs, fr->ePBC, box,
                      0, mdatoms->homenr, -1, fr->cginfo, x,
                      nbv->grp[eintLocal].kernel_type,
                      nbv->grp[eintLocal].nbat);
    wallcycle_sub_stop(wcycle, ewcsNBS_GRID_LOCAL);

    wallcycle_sub_start(wcycle, ewcsNBS_GRID_NONLOCAL);
    nbnxn_put_on_grid_nonlocal(nbv->nbs, domdec_zones(cr->dd),
                               nbv->grp[eintNonlocal].kernel_type,
                               nbv->grp[eintNonlocal].nbat);
    wallcycle_sub_stop(wcycle, ewcsNBS_GRID_NONLOCAL);

    if (nbv->ngrp == 1 ||
        nbv->grp[eintNonlocal].nbat == nbv->grp[eintLocal].nbat)
    {
        nbnxn_atomdata_set(nbv->grp[eintLocal].nbat, eatAll,
                           nbv->nbs, mdatoms, fr->cginfo);
    }
    else
    {
        nbnxn_atomdata_set(nbv->grp[eintLocal].nbat, eatLocal,
                           nbv->nbs, mdatoms, fr->cginfo);
        nbnxn_atomdata_set(nbv->grp[eintNonlocal].nbat, eatAll,
                           nbv->nbs, mdatoms, fr->cginfo);
    }

    wallcycle_stop(wcycle, ewcNS);

    /* initialize the GPU atom data and copy shift vector */
    wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
    nbnxn_gpu_init_atomdata(nbv->gpu_nbv, nbv->grp[eintLocal].nbat);
    wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);

    wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
    nbnxn_gpu_upload_shiftvec(nbv->gpu_nbv, nbv->grp[eintLocal].nbat);
    wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);

    /* do local pair search */
    wallcycle_start_nocount(wcycle, ewcNS);
    wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_LOCAL);
    nbnxn_make_pairlist(nbv->nbs, nbv->grp[eintLocal].nbat,
                        nbv->min_ci_balanced,
                        &nbv->grp[eintLocal].nbl_lists,
                        nbv->grp[eintLocal].kernel_type,
                        nrnb);
    wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_LOCAL);

    /* initialize local pair-list on the GPU */
    nbnxn_gpu_init_pairlist(nbv->gpu_nbv,
                            nbv->grp[eintLocal].nbl_lists.nbl[0],
                            eintLocal);
    wallcycle_stop(wcycle, ewcNS);

    wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
    wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
    nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatLocal, FALSE, x,
                                    nbv->grp[eintLocal].nbat);
    wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
    wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);

    wallcycle_start(wcycle, ewcLAUNCH_GPU_NB);
    /* launch local nonbonded F on GPU */
    do_nb_verlet(fr, ic, enerd, flags, eintLocal, enbvClearFNo,
                 step, nrnb, wcycle);
    wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);

    /* Communicate coordinates and sum dipole if necessary +
       do non-local pair search */
    if (DOMAINDECOMP(cr))
    {
        bDiffKernels = (nbv->grp[eintNonlocal].kernel_type !=
                        nbv->grp[eintLocal].kernel_type);

        /* With GPU+CPU non-bonded calculations we need to copy
         * the local coordinates to the non-local nbat struct
         * (in CPU format) as the non-local kernel call also
         * calculates the local - non-local interactions.
         */
        wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
        wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
        nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatLocal, TRUE, x,
                                        nbv->grp[eintNonlocal].nbat);
        wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
        wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);

        wallcycle_start_nocount(wcycle, ewcNS);
        wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_NONLOCAL);

        nbnxn_grid_add_simple(nbv->nbs, nbv->grp[eintNonlocal].nbat);

        nbnxn_make_pairlist(nbv->nbs, nbv->grp[eintNonlocal].nbat,
                            nbv->min_ci_balanced,
                            &nbv->grp[eintNonlocal].nbl_lists,
                            nbv->grp[eintNonlocal].kernel_type,
                            nrnb);
        wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_NONLOCAL);

        if (nbv->grp[eintNonlocal].kernel_type == nbnxnk8x8x8_GPU)
        {
            /* initialize non-local pair-list on the GPU */
            nbnxn_gpu_init_pairlist(nbv->gpu_nbv,
                                    nbv->grp[eintNonlocal].nbl_lists.nbl[0],
                                    eintNonlocal);
        }
        wallcycle_stop(wcycle, ewcNS);

        wallcycle_start(wcycle, ewcMOVEX);
        dd_move_x(cr->dd, box, x);
        /* When we don't need the total dipole we sum it in global_stat */
        if (bStateChanged && inputrecNeedMutot(inputrec))
        {
            gmx_sumd(2*DIM, mu, cr);
        }
        wallcycle_stop(wcycle, ewcMOVEX);

        wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
        wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
        nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatNonlocal, FALSE, x,
                                        nbv->grp[eintNonlocal].nbat);
        wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
        cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);

        if (bUseGPU && !bDiffKernels)
        {
            wallcycle_start(wcycle, ewcLAUNCH_GPU_NB);
            /* launch non-local nonbonded F on GPU */
            do_nb_verlet(fr, ic, enerd, flags, eintNonlocal, enbvClearFNo,
                         step, nrnb, wcycle);
            cycles_force += wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
        }
    }

    /* launch D2H copy-back F */
    wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
    if (DOMAINDECOMP(cr) && !bDiffKernels)
    {
        nbnxn_gpu_launch_cpyback(nbv->gpu_nbv, nbv->grp[eintNonlocal].nbat,
                                 flags, eatNonlocal);
    }
    nbnxn_gpu_launch_cpyback(nbv->gpu_nbv, nbv->grp[eintLocal].nbat,
                             flags, eatLocal);
    cycles_force += wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);

    if (bStateChanged && inputrecNeedMutot(inputrec))
    {
        gmx_sumd(2*DIM, mu, cr);

        for (i = 0; i < 2; i++)
        {
            for (j = 0; j < DIM; j++)
            {
                fr->mu_tot[i][j] = mu[i*DIM + j];
            }
        }
    }
    if (fr->efep == efepNO)
    {
        copy_rvec(fr->mu_tot[0], mu_tot);
    }
    else
    {
        for (j = 0; j < DIM; j++)
        {
            mu_tot[j] =
                (1.0 - lambda[efptCOUL])*fr->mu_tot[0][j] +
                lambda[efptCOUL]*fr->mu_tot[1][j];
        }
    }

    /* Reset energies */
    reset_enerdata(fr, bNS, enerd, MASTER(cr));
    clear_rvecs(SHIFTS, fr->fshift);

    if (DOMAINDECOMP(cr) && !(cr->duty & DUTY_PME))
    {
        wallcycle_start(wcycle, ewcPPDURINGPME);
        dd_force_flop_start(cr->dd, nrnb);
    }

    /* Enforced rotation has its own cycle counter that starts after the collective
     * coordinates have been communicated. It is added to ddCyclF to allow
     * for proper load-balancing */
    wallcycle_start(wcycle, ewcROT);
    do_rotation(cr, inputrec, box, x, t, step, wcycle, bNS);
    wallcycle_stop(wcycle, ewcROT);

    /* Start the force cycle counter.
     * This counter is stopped after do_force_lowlevel.
     * No parallel communication should occur while this counter is running,
     * since that will interfere with the dynamic load balancing.
     */
    wallcycle_start(wcycle, ewcFORCE);

    /* Reset forces for which the virial is calculated separately:
     * PME/Ewald forces if necessary */
    if (fr->bF_NoVirSum)
    {
        if (flags & GMX_FORCE_VIRIAL)
        {
            fr->f_novirsum = fr->f_novirsum_alloc;
            clear_rvecs(fr->f_novirsum_n, fr->f_novirsum);
            clear_rvecs(homenr, fr->f_novirsum+start);
        }
        /* We are not calculating the pressure so we do not need
         * a separate array for forces that do not contribute
         * to the pressure.
         */
    }

    /* Clear the short- and long-range forces */
    clear_rvecs(fr->natoms_force_constr, f);
    if (bSepLRF && do_per_step(step, inputrec->nstcalclr))
    {
        clear_rvecs(fr->natoms_force_constr, fr->f_twin);
    }

    clear_rvec(fr->vir_diag_posres);

    if (inputrec->bPull && pull_have_constraint(inputrec->pull_work))
    {
        clear_pull_forces(inputrec->pull_work);
    }

    /* We calculate the non-bonded forces, when done on the CPU, here.
     * We do this before calling do_force_lowlevel, because in that
     * function, the listed forces are calculated before PME, which
     * does communication. With this order, non-bonded and listed
     * force calculation imbalance can be balanced out by the domain
     * decomposition load balancing.
     */

    /* Maybe we should move this into do_force_lowlevel */
    do_nb_verlet(fr, ic, enerd, flags, eintLocal, enbvClearFYes,
                 step, nrnb, wcycle);

    if (fr->efep != efepNO)
    {
        /* Calculate the local and non-local free energy interactions here.
         * Happens here on the CPU both with and without GPU.
         */
        if (fr->nbv->grp[eintLocal].nbl_lists.nbl_fep[0]->nrj > 0)
        {
            do_nb_verlet_fep(&fr->nbv->grp[eintLocal].nbl_lists,
                             fr, x, f, mdatoms,
                             inputrec->fepvals, lambda,
                             enerd, flags, nrnb, wcycle);
        }

        if (DOMAINDECOMP(cr) &&
            fr->nbv->grp[eintNonlocal].nbl_lists.nbl_fep[0]->nrj > 0)
        {
            do_nb_verlet_fep(&fr->nbv->grp[eintNonlocal].nbl_lists,
                             fr, x, f, mdatoms,
                             inputrec->fepvals, lambda,
                             enerd, flags, nrnb, wcycle);
        }
    }

    if (!bUseOrEmulGPU || bDiffKernels)
    {
        if (DOMAINDECOMP(cr))
        {
            do_nb_verlet(fr, ic, enerd, flags, eintNonlocal,
                         bDiffKernels ? enbvClearFYes : enbvClearFNo,
                         step, nrnb, wcycle);
        }

        aloc = eintNonlocal;

        /* Add all the non-bonded force to the normal force array.
         * This can be split into a local and a non-local part when overlapping
         * communication with calculation with domain decomposition.
         */
        cycles_force += wallcycle_stop(wcycle, ewcFORCE);
        wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
        wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
        nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatAll, nbv->grp[aloc].nbat, f);
        wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
        cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
        wallcycle_start_nocount(wcycle, ewcFORCE);

        /* if there are multiple fshift output buffers reduce them */
        if ((flags & GMX_FORCE_VIRIAL) &&
            nbv->grp[aloc].nbl_lists.nnbl > 1)
        {
            /* This is not in a subcounter because it takes a
               negligible and constant-sized amount of time */
            nbnxn_atomdata_add_nbat_fshift_to_fshift(nbv->grp[aloc].nbat,
                                                     fr->fshift);
        }
    }

    /* update QMMMrec, if necessary */
    update_QMMMrec(cr, fr, x, mdatoms, box, top);

    /* Compute the bonded and non-bonded energies and optionally forces */
    do_force_lowlevel(fr, inputrec, &(top->idef),
                      cr, nrnb, wcycle, mdatoms,
                      x, hist, f, bSepLRF ? fr->f_twin : f, enerd, fcd, top, fr->born,
                      inputrec->fepvals, lambda, graph, &(top->excls), fr->mu_tot,
                      flags, &cycles_pme);

    if (do_per_step(step, inputrec->nstcalclr))
    {
        /* Add the long range forces to the short range forces */
        for (i = 0; i < fr->natoms_force_constr; i++)
        {
            rvec_add(fr->f_twin[i], f[i], f[i]);
        }
    }

    cycles_force += wallcycle_stop(wcycle, ewcFORCE);

    do_flood(cr, inputrec, x, f, ed, box, step, bNS);

    if (bUseOrEmulGPU && !bDiffKernels)
    {
        /* wait for non-local forces (or calculate in emulation mode) */
        if (DOMAINDECOMP(cr))
        {
            wallcycle_start(wcycle, ewcWAIT_GPU_NB_NL);
            nbnxn_gpu_wait_for_gpu(nbv->gpu_nbv,
                                   enerd->grpp.ener[egLJSR], enerd->grpp.ener[egCOULSR],
                                   fr->fshift);
            cycles_tmp       = wallcycle_stop(wcycle, ewcWAIT_GPU_NB_NL);
            cycles_wait_gpu += cycles_tmp;
            cycles_force    += cycles_tmp;

            wallcycle_start_nocount(wcycle, ewcFORCE);
            do_nb_verlet(fr, ic, enerd, flags, eintNonlocal, enbvClearFYes,
                         step, nrnb, wcycle);
            cycles_force += wallcycle_stop(wcycle, ewcFORCE);

            wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
            wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
            /* skip the reduction if there was no non-local work to do */
            if (nbv->grp[eintNonlocal].nbl_lists.nbl[0]->nsci > 0)
            {
                nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatNonlocal,
                                               nbv->grp[eintNonlocal].nbat, f);
            }
            wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
            cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
        }
    }

    if (bDoForces && DOMAINDECOMP(cr))
    {
        if (bUseGPU && useCuda)
        {
            /* We are done with the CPU compute, but the GPU local non-bonded
             * kernel can still be running while we communicate the forces.
             * We start a counter here, so we can, hopefully, time the rest
             * of the GPU kernel execution and data transfer.
             */
            cycleCountBeforeLocalWorkCompletes = gmx_cycles_read();
        }

        /* Communicate the forces */
        wallcycle_start(wcycle, ewcMOVEF);
        dd_move_f(cr->dd, f, fr->fshift);
        /* We should not update the shift forces here,
         * since f_twin is already included in f.
         */
        dd_move_f(cr->dd, fr->f_twin, NULL);
        wallcycle_stop(wcycle, ewcMOVEF);
    }

    /* wait for local forces (or calculate in emulation mode) */
    if (bUseGPU && useCuda)
    {
        float       cycles_tmp, cycles_wait_est;
        const float cuda_api_overhead_margin = 50000.0f; /* cycles */

        wallcycle_start(wcycle, ewcWAIT_GPU_NB_L);
        nbnxn_gpu_wait_for_gpu(nbv->gpu_nbv,
                               enerd->grpp.ener[egLJSR], enerd->grpp.ener[egCOULSR],
                               fr->fshift);
        cycles_tmp = wallcycle_stop(wcycle, ewcWAIT_GPU_NB_L);

        if (bDoForces && DOMAINDECOMP(cr))
        {
            cycles_wait_est = gmx_cycles_read() - cycleCountBeforeLocalWorkCompletes;

            if (cycles_tmp < cuda_api_overhead_margin)
            {
                /* We measured few cycles, it could be that the kernel
                 * and transfer finished earlier and there was no actual
                 * wait time, only API call overhead.
                 * Then the actual time could be anywhere between 0 and
                 * cycles_wait_est. As a compromise, we use half the time.
                 */
                cycles_wait_est *= 0.5f;
            }
        }
        else
        {
            /* No force communication so we actually timed the wait */
            cycles_wait_est = cycles_tmp;
        }
        /* Even though this is after dd_move_f, the actual task we are
         * waiting for runs asynchronously with dd_move_f and we usually
         * have nothing to balance it with, so we can and should add
         * the time to the force time for load balancing.
         */
        cycles_force    += cycles_wait_est;
        cycles_wait_gpu += cycles_wait_est;
    }
    else if (bUseGPU && useOpenCL)
    {
        wallcycle_start(wcycle, ewcWAIT_GPU_NB_L);
        nbnxn_gpu_wait_for_gpu(nbv->gpu_nbv,
                               enerd->grpp.ener[egLJSR], enerd->grpp.ener[egCOULSR],
                               fr->fshift);
        cycles_wait_gpu += wallcycle_stop(wcycle, ewcWAIT_GPU_NB_L);
    }

    /* now clear the GPU outputs while we finish the step on the CPU */
    wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
    nbnxn_gpu_clear_outputs(nbv->gpu_nbv, flags);
    wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);

    wallcycle_start_nocount(wcycle, ewcFORCE);
    do_nb_verlet(fr, ic, enerd, flags, eintLocal,
                 DOMAINDECOMP(cr) ? enbvClearFNo : enbvClearFYes,
                 step, nrnb, wcycle);
    wallcycle_stop(wcycle, ewcFORCE);

    wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
    wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
    nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatLocal,
                                   nbv->grp[eintLocal].nbat, f);
    wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
    wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);

    if (DOMAINDECOMP(cr))
    {
        dd_force_flop_stop(cr->dd, nrnb);
        dd_cycles_add(cr->dd, cycles_force-cycles_pme, ddCyclF);
        dd_cycles_add(cr->dd, cycles_wait_gpu, ddCyclWaitGPU);
    }

    if (inputrecElecField(inputrec))
    {
        /* Compute forces due to electric field */
        calc_f_el(MASTER(cr) ? field : NULL,
                  start, homenr, mdatoms->chargeA, fr->f_novirsum,
                  inputrec->ex, inputrec->et, t);
    }

    /* If we have NoVirSum forces, but we do not calculate the virial,
     * we sum fr->f_novirsum=f later.
     */
    if (vsite && !(fr->bF_NoVirSum && !(flags & GMX_FORCE_VIRIAL)))
    {
        wallcycle_start(wcycle, ewcVSITESPREAD);
        spread_vsite_f(vsite, x, f, fr->fshift, FALSE, NULL, nrnb,
                       &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
        wallcycle_stop(wcycle, ewcVSITESPREAD);

        wallcycle_start(wcycle, ewcVSITESPREAD);
        spread_vsite_f(vsite, x, fr->f_twin, NULL, FALSE, NULL,
                       nrnb,
                       &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
        wallcycle_stop(wcycle, ewcVSITESPREAD);
    }

    if (flags & GMX_FORCE_VIRIAL)
    {
        /* Calculation of the virial must be done after vsites! */
        calc_virial(0, mdatoms->homenr, x, f,
                    vir_force, graph, box, nrnb, fr, inputrec->ePBC);
    }

    if (inputrec->bPull && pull_have_potential(inputrec->pull_work))
    {
        /* Since the COM pulling is always done mass-weighted, no forces are
         * applied to vsites and this call can be done after vsite spreading.
         */
        pull_potential_wrapper(cr, inputrec, box, x,
                               f, vir_force, mdatoms, enerd, lambda, t,
                               wcycle);
    }

    /* Add the forces from enforced rotation potentials (if any) */
    wallcycle_start(wcycle, ewcROTadd);
    enerd->term[F_COM_PULL] += add_rot_forces(inputrec->rot, f, cr, step, t);
    wallcycle_stop(wcycle, ewcROTadd);

    /* Add forces from interactive molecular dynamics (IMD), if bIMD == TRUE. */
    IMD_apply_forces(inputrec->bIMD, inputrec->imd, cr, f, wcycle);

    if (PAR(cr) && !(cr->duty & DUTY_PME))
    {
        /* In case of node-splitting, the PP nodes receive the long-range
         * forces, virial and energy from the PME nodes here.
         */
        pme_receive_force_ener(cr, wcycle, enerd, fr);
    }

    post_process_forces(cr, step, nrnb, wcycle,
                        top, box, x, f, vir_force, mdatoms, graph, fr, vsite,
                        flags);

    /* Sum the potential energy terms from group contributions */
    sum_epot(&(enerd->grpp), enerd->term);
}
void do_force_cutsGROUP(FILE *fplog, t_commrec *cr,
                        t_inputrec *inputrec,
                        gmx_int64_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
                        gmx_localtop_t *top,
                        gmx_groups_t *groups,
                        matrix box, rvec x[], history_t *hist,
                        rvec f[], tensor vir_force,
                        t_mdatoms *mdatoms,
                        gmx_enerdata_t *enerd, t_fcdata *fcd,
                        real *lambda, t_graph *graph,
                        t_forcerec *fr, gmx_vsite_t *vsite, rvec mu_tot,
                        double t, FILE *field, gmx_edsam_t ed,
                        gmx_bool bBornRadii,
                        int flags)
{
    gmx_bool bStateChanged, bNS, bFillGrid, bCalcCGCM;
    gmx_bool bDoLongRangeNS, bDoForces, bSepLRF;
    float    cycles_pme, cycles_force;

    homenr = mdatoms->homenr;

    clear_mat(vir_force);

    if (DOMAINDECOMP(cr))
    {
        cg1 = cr->dd->ncg_tot;
    }

    bStateChanged  = (flags & GMX_FORCE_STATECHANGED);
    bNS            = (flags & GMX_FORCE_NS) && (fr->bAllvsAll == FALSE);
    /* Should we update the long-range neighborlists at this step? */
    bDoLongRangeNS = fr->bTwinRange && bNS;
    /* Should we perform the long-range nonbonded evaluation inside the neighborsearching? */
    bFillGrid      = (bNS && bStateChanged);
    bCalcCGCM      = (bFillGrid && !DOMAINDECOMP(cr));
    bDoForces      = (flags & GMX_FORCE_FORCES);
    bSepLRF        = ((inputrec->nstcalclr > 1) && bDoForces &&
                      (flags & GMX_FORCE_SEPLRF) && (flags & GMX_FORCE_DO_LR));

    update_forcerec(fr, box);

    if (inputrecNeedMutot(inputrec))
    {
        /* Calculate total (local) dipole moment in a temporary common array.
         * This makes it possible to sum them over nodes faster.
         */
        calc_mu(start, homenr,
                x, mdatoms->chargeA, mdatoms->chargeB, mdatoms->nChargePerturbed,
                mu, mu+DIM);
    }

    if (fr->ePBC != epbcNONE)
    {
        /* Compute shift vectors every step,
         * because of pressure coupling or box deformation!
         */
        if ((flags & GMX_FORCE_DYNAMICBOX) && bStateChanged)
        {
            calc_shifts(box, fr->shift_vec);
        }

        put_charge_groups_in_box(fplog, cg0, cg1, fr->ePBC, box,
                                 &(top->cgs), x, fr->cg_cm);
        inc_nrnb(nrnb, eNR_CGCM, homenr);
        inc_nrnb(nrnb, eNR_RESETX, cg1-cg0);
    }
    else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph)
    {
        unshift_self(graph, box, x);
    }

    calc_cgcm(fplog, cg0, cg1, &(top->cgs), x, fr->cg_cm);
    inc_nrnb(nrnb, eNR_CGCM, homenr);

    if (bCalcCGCM && gmx_debug_at)
    {
        pr_rvecs(debug, 0, "cgcm", fr->cg_cm, top->cgs.nr);
    }

    if (!(cr->duty & DUTY_PME))
    {
        /* Send particle coordinates to the pme nodes.
         * Since this is only implemented for domain decomposition
         * and domain decomposition does not use the graph,
         * we do not need to worry about shifting.
         */
        wallcycle_start(wcycle, ewcPP_PMESENDX);

        bBS = (inputrec->nwall == 2);
        copy_mat(box, boxs);
        svmul(inputrec->wall_ewald_zfac, boxs[ZZ], boxs[ZZ]);

        if (EEL_PME(fr->eeltype))
        {
            pme_flags |= GMX_PME_DO_COULOMB;
        }
        if (EVDW_PME(fr->vdwtype))
        {
            pme_flags |= GMX_PME_DO_LJ;
        }

        gmx_pme_send_coordinates(cr, bBS ? boxs : box, x,
                                 mdatoms->nChargePerturbed, mdatoms->nTypePerturbed, lambda[efptCOUL], lambda[efptVDW],
                                 (flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY)),
                                 pme_flags, step);

        wallcycle_stop(wcycle, ewcPP_PMESENDX);
    }
#endif /* GMX_MPI */

    /* Communicate coordinates and sum dipole if necessary */
    if (DOMAINDECOMP(cr))
    {
        wallcycle_start(wcycle, ewcMOVEX);
        dd_move_x(cr->dd, box, x);
        wallcycle_stop(wcycle, ewcMOVEX);
    }

    if (inputrecNeedMutot(inputrec))
    {
        gmx_sumd(2*DIM, mu, cr);

        for (i = 0; i < 2; i++)
        {
            for (j = 0; j < DIM; j++)
            {
                fr->mu_tot[i][j] = mu[i*DIM + j];
            }
        }
    }
    if (fr->efep == efepNO)
    {
        copy_rvec(fr->mu_tot[0], mu_tot);
    }
    else
    {
        for (j = 0; j < DIM; j++)
        {
            mu_tot[j] =
                (1.0 - lambda[efptCOUL])*fr->mu_tot[0][j] + lambda[efptCOUL]*fr->mu_tot[1][j];
        }
    }

    /* Reset energies */
    reset_enerdata(fr, bNS, enerd, MASTER(cr));
    clear_rvecs(SHIFTS, fr->fshift);

    wallcycle_start(wcycle, ewcNS);

    if (graph && bStateChanged)
    {
        /* Calculate intramolecular shift vectors to make molecules whole */
        mk_mshift(fplog, graph, fr->ePBC, box, x);
    }

    /* Do the actual neighbour searching */
    ns(fplog, fr, box,
       groups, top, mdatoms,
       cr, nrnb, bFillGrid,
       bDoLongRangeNS);

    wallcycle_stop(wcycle, ewcNS);

    if (inputrec->implicit_solvent && bNS)
    {
        make_gb_nblist(cr, inputrec->gb_algorithm,
                       x, box, fr, &top->idef, graph, fr->born);
    }

    if (DOMAINDECOMP(cr) && !(cr->duty & DUTY_PME))
    {
        wallcycle_start(wcycle, ewcPPDURINGPME);
        dd_force_flop_start(cr->dd, nrnb);
    }

    /* Enforced rotation has its own cycle counter that starts after the collective
     * coordinates have been communicated. It is added to ddCyclF to allow
     * for proper load-balancing */
    wallcycle_start(wcycle, ewcROT);
    do_rotation(cr, inputrec, box, x, t, step, wcycle, bNS);
    wallcycle_stop(wcycle, ewcROT);

    /* Start the force cycle counter.
     * This counter is stopped after do_force_lowlevel.
     * No parallel communication should occur while this counter is running,
     * since that will interfere with the dynamic load balancing.
     */
    wallcycle_start(wcycle, ewcFORCE);

    /* Reset forces for which the virial is calculated separately:
     * PME/Ewald forces if necessary */
    if (fr->bF_NoVirSum)
    {
        if (flags & GMX_FORCE_VIRIAL)
        {
            fr->f_novirsum = fr->f_novirsum_alloc;
            clear_rvecs(fr->f_novirsum_n, fr->f_novirsum);
            clear_rvecs(homenr, fr->f_novirsum+start);
        }
        /* We are not calculating the pressure so we do not need
         * a separate array for forces that do not contribute
         * to the pressure.
         */
    }

    /* Clear the short- and long-range forces */
    clear_rvecs(fr->natoms_force_constr, f);
    if (bSepLRF && do_per_step(step, inputrec->nstcalclr))
    {
        clear_rvecs(fr->natoms_force_constr, fr->f_twin);
    }

    clear_rvec(fr->vir_diag_posres);

    if (inputrec->bPull && pull_have_constraint(inputrec->pull_work))
    {
        clear_pull_forces(inputrec->pull_work);
    }

    /* update QMMMrec, if necessary */
    update_QMMMrec(cr, fr, x, mdatoms, box, top);

    /* Compute the bonded and non-bonded energies and optionally forces */
    do_force_lowlevel(fr, inputrec, &(top->idef),
                      cr, nrnb, wcycle, mdatoms,
                      x, hist, f, bSepLRF ? fr->f_twin : f, enerd, fcd, top, fr->born,
                      inputrec->fepvals, lambda,
                      graph, &(top->excls), fr->mu_tot,
                      flags, &cycles_pme);

    if (do_per_step(step, inputrec->nstcalclr))
    {
        /* Add the long range forces to the short range forces */
        for (i = 0; i < fr->natoms_force_constr; i++)
        {
            rvec_add(fr->f_twin[i], f[i], f[i]);
        }
    }

    cycles_force = wallcycle_stop(wcycle, ewcFORCE);

    do_flood(cr, inputrec, x, f, ed, box, step, bNS);

    if (DOMAINDECOMP(cr))
    {
        dd_force_flop_stop(cr->dd, nrnb);
        dd_cycles_add(cr->dd, cycles_force-cycles_pme, ddCyclF);
    }

    if (inputrecElecField(inputrec))
    {
        /* Compute forces due to electric field */
        calc_f_el(MASTER(cr) ? field : NULL,
                  start, homenr, mdatoms->chargeA, fr->f_novirsum,
                  inputrec->ex, inputrec->et, t);
    }

    /* Communicate the forces */
    if (DOMAINDECOMP(cr))
    {
        wallcycle_start(wcycle, ewcMOVEF);
        dd_move_f(cr->dd, f, fr->fshift);
        /* Do we need to communicate the separate force array
         * for terms that do not contribute to the single sum virial?
         * Position restraints and electric fields do not introduce
         * inter-cg forces, only full electrostatics methods do.
         * When we do not calculate the virial, fr->f_novirsum = f,
         * so we have already communicated these forces.
         */
        if (EEL_FULL(fr->eeltype) && cr->dd->n_intercg_excl &&
            (flags & GMX_FORCE_VIRIAL))
        {
            dd_move_f(cr->dd, fr->f_novirsum, NULL);
        }
        /* We should not update the shift forces here,
         * since f_twin is already included in f.
         */
        dd_move_f(cr->dd, fr->f_twin, NULL);
        wallcycle_stop(wcycle, ewcMOVEF);
    }

    /* If we have NoVirSum forces, but we do not calculate the virial,
     * we sum fr->f_novirsum=f later.
     */
    if (vsite && !(fr->bF_NoVirSum && !(flags & GMX_FORCE_VIRIAL)))
    {
        wallcycle_start(wcycle, ewcVSITESPREAD);
        spread_vsite_f(vsite, x, f, fr->fshift, FALSE, NULL, nrnb,
                       &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
        wallcycle_stop(wcycle, ewcVSITESPREAD);

        wallcycle_start(wcycle, ewcVSITESPREAD);
        spread_vsite_f(vsite, x, fr->f_twin, NULL, FALSE, NULL,
                       nrnb,
                       &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
        wallcycle_stop(wcycle, ewcVSITESPREAD);
    }

    if (flags & GMX_FORCE_VIRIAL)
    {
        /* Calculation of the virial must be done after vsites! */
        calc_virial(0, mdatoms->homenr, x, f,
                    vir_force, graph, box, nrnb, fr, inputrec->ePBC);
    }

    if (inputrec->bPull && pull_have_potential(inputrec->pull_work))
    {
        pull_potential_wrapper(cr, inputrec, box, x,
                               f, vir_force, mdatoms, enerd, lambda, t,
                               wcycle);
    }

    /* Add the forces from enforced rotation potentials (if any) */
    wallcycle_start(wcycle, ewcROTadd);
    enerd->term[F_COM_PULL] += add_rot_forces(inputrec->rot, f, cr, step, t);
    wallcycle_stop(wcycle, ewcROTadd);

    /* Add forces from interactive molecular dynamics (IMD), if bIMD == TRUE. */
    IMD_apply_forces(inputrec->bIMD, inputrec->imd, cr, f, wcycle);

    if (PAR(cr) && !(cr->duty & DUTY_PME))
    {
        /* In case of node-splitting, the PP nodes receive the long-range
         * forces, virial and energy from the PME nodes here.
         */
        pme_receive_force_ener(cr, wcycle, enerd, fr);
    }

    post_process_forces(cr, step, nrnb, wcycle,
                        top, box, x, f, vir_force, mdatoms, graph, fr, vsite,
                        flags);

    /* Sum the potential energy terms from group contributions */
    sum_epot(&(enerd->grpp), enerd->term);
}
void do_force(FILE *fplog, t_commrec *cr,
              t_inputrec *inputrec,
              gmx_int64_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
              gmx_localtop_t *top,
              gmx_groups_t *groups,
              matrix box, rvec x[], history_t *hist,
              rvec f[], tensor vir_force,
              t_mdatoms *mdatoms,
              gmx_enerdata_t *enerd, t_fcdata *fcd,
              real *lambda, t_graph *graph,
              t_forcerec *fr,
              gmx_vsite_t *vsite, rvec mu_tot,
              double t, FILE *field, gmx_edsam_t ed,
              gmx_bool bBornRadii,
              int flags)
{
    /* modify force flag if not doing nonbonded */
    if (!fr->bNonbonded)
    {
        flags &= ~GMX_FORCE_NONBONDED;
    }

    switch (inputrec->cutoff_scheme)
    {
        case ecutsVERLET:
            do_force_cutsVERLET(fplog, cr, inputrec,
                                step, nrnb, wcycle,
                                top, groups,
                                box, x, hist,
                                f, vir_force, mdatoms,
                                enerd, fcd,
                                lambda, graph,
                                fr, fr->ic,
                                vsite, mu_tot,
                                t, field, ed,
                                bBornRadii,
                                flags);
            break;
        case ecutsGROUP:
            do_force_cutsGROUP(fplog, cr, inputrec,
                               step, nrnb, wcycle,
                               top, groups,
                               box, x, hist,
                               f, vir_force, mdatoms,
                               enerd, fcd,
                               lambda, graph,
                               fr, vsite, mu_tot,
                               t, field, ed,
                               bBornRadii,
                               flags);
            break;
        default:
            gmx_incons("Invalid cut-off scheme passed!");
    }
}
void do_constrain_first(FILE *fplog, gmx_constr_t constr,
                        t_inputrec *ir, t_mdatoms *md,
                        t_state *state, t_commrec *cr, t_nrnb *nrnb,
                        t_forcerec *fr, gmx_localtop_t *top)
{
    int  i, m, start, end;
    real dt = ir->delta_t;

    snew(savex, state->natoms);

    fprintf(debug, "vcm: start=%d, homenr=%d, end=%d\n",
            start, md->homenr, end);

    /* Do a first constrain to reset particles... */
    step = ir->init_step;
    {
        char buf[STEPSTRSIZE];
        fprintf(fplog, "\nConstraining the starting coordinates (step %s)\n",
                gmx_step_str(step, buf));
    }

    /* constrain the current position */
    constrain(NULL, TRUE, FALSE, constr, &(top->idef),
              ir, cr, step, 0, 1.0, md,
              state->x, state->x, NULL,
              fr->bMolPBC, state->box,
              state->lambda[efptBONDED], &dvdl_dum,
              NULL, NULL, nrnb, econqCoord);

    /* constrain the inital velocity, and save it */
    /* also may be useful if we need the ekin from the halfstep for velocity verlet */
    constrain(NULL, TRUE, FALSE, constr, &(top->idef),
              ir, cr, step, 0, 1.0, md,
              state->x, state->v, state->v,
              fr->bMolPBC, state->box,
              state->lambda[efptBONDED], &dvdl_dum,
              NULL, NULL, nrnb, econqVeloc);

    /* constrain the inital velocities at t-dt/2 */
    if (EI_STATE_VELOCITY(ir->eI) && ir->eI != eiVV)
    {
        for (i = start; (i < end); i++)
        {
            for (m = 0; (m < DIM); m++)
            {
                /* Reverse the velocity */
                state->v[i][m] = -state->v[i][m];
                /* Store the position at t-dt in buf */
                savex[i][m] = state->x[i][m] + dt*state->v[i][m];
            }
        }

        /* Shake the positions at t=-dt with the positions at t=0
         * as reference coordinates.
         */
        {
            char buf[STEPSTRSIZE];
            fprintf(fplog, "\nConstraining the coordinates at t0-dt (step %s)\n",
                    gmx_step_str(step, buf));
        }

        constrain(NULL, TRUE, FALSE, constr, &(top->idef),
                  ir, cr, step, -1, 1.0, md,
                  state->x, savex, NULL,
                  fr->bMolPBC, state->box,
                  state->lambda[efptBONDED], &dvdl_dum,
                  state->v, NULL, nrnb, econqCoord);

        for (i = start; i < end; i++)
        {
            for (m = 0; m < DIM; m++)
            {
                /* Re-reverse the velocities */
                state->v[i][m] = -state->v[i][m];
            }
        }
    }
}
static void
integrate_table(real vdwtab[], real scale, int offstart, int rstart, int rend,
                double *enerout, double *virout)
{
    double enersum, virsum;
    double invscale, invscale2, invscale3;
    double r, ea, eb, ec, pa, pb, pc, pd;

    invscale  = 1.0/scale;
    invscale2 = invscale*invscale;
    invscale3 = invscale*invscale2;

    /* Following summation derived from cubic spline definition,
     * Numerical Recipes in C, second edition, p. 113-116. Exact for
     * the cubic spline. We first calculate the negative of the
     * energy from rvdw to rvdw_switch, assuming that g(r)=1, and then
     * add the more standard, abrupt cutoff correction to that result,
     * yielding the long-range correction for a switched function. We
     * perform both the pressure and energy loops at the same time for
     * simplicity, as the computational cost is low. */

    /* Since the dispersion table has been scaled down a factor
     * 6.0 and the repulsion a factor 12.0 to compensate for the
     * c6/c12 parameters inside nbfp[] being scaled up (to save
     * flops in kernels), we need to correct for this.
     */

    for (ri = rstart; ri < rend; ++ri)
    {
        r  = ri*invscale;
        ea = invscale3;
        eb = 2.0*invscale2*r;
        ec = invscale*r*r;

        pa = invscale3;
        pb = 3.0*invscale2*r;
        pc = 3.0*invscale*r*r;
        pd = r*r*r;

        /* this "8" is from the packing in the vdwtab array - perhaps
           should be defined? */
        offset = 8*ri + offstart;
        y0     = vdwtab[offset];
        f      = vdwtab[offset+1];
        g      = vdwtab[offset+2];
        h      = vdwtab[offset+3];

        enersum += y0*(ea/3 + eb/2 + ec) + f*(ea/4 + eb/3 + ec/2) + g*(ea/5 + eb/4 + ec/3) + h*(ea/6 + eb/5 + ec/4);
        virsum  += f*(pa/4 + pb/3 + pc/2 + pd) + 2*g*(pa/5 + pb/4 + pc/3 + pd/2) + 3*h*(pa/6 + pb/5 + pc/4 + pd/3);
    }
    *enerout = 4.0*M_PI*enersum*tabfactor;
    *virout  = 4.0*M_PI*virsum*tabfactor;
}
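/*
 * Illustration of the summation above (added for clarity; not part of the
 * original source). On table interval ri the spline is
 * V(u) = y0 + f*u + g*u^2 + h*u^3 with u in [0,1) and r = (ri + u)/scale,
 * so the per-interval energy integral
 *   int_0^1 r^2 V(u) (1/scale) du = sum_n c_n int_0^1 u^n (ea*u^2 + eb*u + ec) du,
 * with c_n = {y0, f, g, h}, reduces to exactly the (ea/3 + eb/2 + ec)-style
 * weights accumulated in enersum; virsum accumulates the analogous integral
 * for the virial using pa..pd. The final 4*pi*tabfactor converts the radial
 * integral to the full spherical-shell correction and undoes the 6/12 table
 * scaling mentioned in the comment above.
 */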
2187 void calc_enervirdiff(FILE *fplog
, int eDispCorr
, t_forcerec
*fr
)
2189 double eners
[2], virs
[2], enersum
, virsum
;
2190 double r0
, rc3
, rc9
;
2192 real scale
, *vdwtab
;
2194 fr
->enershiftsix
= 0;
2195 fr
->enershifttwelve
= 0;
2196 fr
->enerdiffsix
= 0;
2197 fr
->enerdifftwelve
= 0;
2199 fr
->virdifftwelve
= 0;
2201 if (eDispCorr
!= edispcNO
)
2203 for (i
= 0; i
< 2; i
++)
2208 if ((fr
->vdw_modifier
== eintmodPOTSHIFT
) ||
2209 (fr
->vdw_modifier
== eintmodPOTSWITCH
) ||
2210 (fr
->vdw_modifier
== eintmodFORCESWITCH
) ||
2211 (fr
->vdwtype
== evdwSHIFT
) ||
2212 (fr
->vdwtype
== evdwSWITCH
))
2214 if (((fr
->vdw_modifier
== eintmodPOTSWITCH
) ||
2215 (fr
->vdw_modifier
== eintmodFORCESWITCH
) ||
2216 (fr
->vdwtype
== evdwSWITCH
)) && fr
->rvdw_switch
== 0)
2219 "With dispersion correction rvdw-switch can not be zero "
2220 "for vdw-type = %s", evdw_names
[fr
->vdwtype
]);
2223 /* TODO This code depends on the logic in tables.c that
2224 constructs the table layout, which should be made
2225 explicit in future cleanup. */
2226 GMX_ASSERT(fr
->nblists
[0].table_vdw
->interaction
== GMX_TABLE_INTERACTION_VDWREP_VDWDISP
,
2227 "Dispersion-correction code needs a table with both repulsion and dispersion terms");
2228 scale
= fr
->nblists
[0].table_vdw
->scale
;
2229 vdwtab
= fr
->nblists
[0].table_vdw
->data
;
2231 /* Round the cut-offs to exact table values for precision */
2232 ri0
= static_cast<int>(floor(fr
->rvdw_switch
*scale
));
2233 ri1
= static_cast<int>(ceil(fr
->rvdw
*scale
));
2235 /* The code below has some support for handling force-switching, i.e.
2236 * when the force (instead of potential) is switched over a limited
2237 * region. This leads to a constant shift in the potential inside the
2238 * switching region, which we can handle by adding a constant energy
2239 * term in the force-switch case just like when we do potential-shift.
2241 * For now this is not enabled, but to keep the functionality in the
2242 * code we check separately for switch and shift. When we do force-switch
2243 * the shifting point is rvdw_switch, while it is the cutoff when we
2244 * have a classical potential-shift.
2246 * For a pure potential-shift the potential has a constant shift
2247 * all the way out to the cutoff, and that is it. For other forms
2248 * we need to calculate the constant shift up to the point where we
2249 * start modifying the potential.
2251 ri0
= (fr
->vdw_modifier
== eintmodPOTSHIFT
) ? ri1
: ri0
;
            if ((fr->vdw_modifier == eintmodFORCESWITCH) ||
                (fr->vdwtype == evdwSHIFT))
            {
                /* Determine the constant energy shift below rvdw_switch.
                 * Table has a scale factor since we have scaled it down to compensate
                 * for scaling-up c6/c12 with the derivative factors to save flops in analytical kernels.
                 */
                fr->enershiftsix    = (real)(-1.0/(rc3*rc3)) - 6.0*vdwtab[8*ri0];
                fr->enershifttwelve = (real)( 1.0/(rc9*rc3)) - 12.0*vdwtab[8*ri0 + 4];
            }
            else if (fr->vdw_modifier == eintmodPOTSHIFT)
            {
                fr->enershiftsix    = (real)(-1.0/(rc3*rc3));
                fr->enershifttwelve = (real)( 1.0/(rc9*rc3));
            }
            /* Add the constant part from 0 to rvdw_switch.
             * This integration from 0 to rvdw_switch overcounts the number
             * of interactions by 1, as it also counts the self interaction.
             * We will correct for this later.
             */
            eners[0] += 4.0*M_PI*fr->enershiftsix*rc3/3.0;
            eners[1] += 4.0*M_PI*fr->enershifttwelve*rc3/3.0;
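            /* For reference: the factor here is just the volume integral of
             * the constant shift over the sphere r < r0,
             *     int_0^r0 4*pi*r^2*enershift dr = (4*pi/3)*r0^3*enershift,
             * with rc3 = r0^3 at this point in the code.
             */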
            /* Calculate the contribution in the range [r0,r1] where we
             * modify the potential. For a pure potential-shift modifier we will
             * have ri0==ri1, and there will not be any contribution here.
             */
            for (i = 0; i < 2; i++)
            {
                enersum = 0;
                virsum  = 0;
                integrate_table(vdwtab, scale, (i == 0 ? 0 : 4), ri0, ri1, &enersum, &virsum);
                eners[i] -= enersum;
                virs[i]  -= virsum;
            }
            /* Alright: Above we compensated by REMOVING the parts outside r0
             * corresponding to the ideal VdW 1/r6 and 1/r12 potentials.
             *
             * Regardless of whether r0 is the point where we start switching,
             * or the cutoff where we calculated the constant shift, we include
             * all the parts we are missing out to infinity from r0 by
             * calculating the analytical dispersion correction.
             */
            eners[0] += -4.0*M_PI/(3.0*rc3);
            eners[1] +=  4.0*M_PI/(9.0*rc9);
            virs[0]  +=  8.0*M_PI/rc3;
            virs[1]  += -16.0*M_PI/(3.0*rc9);
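            /* For reference, these are the analytical tail integrals of the
             * plain r^-6 and r^-12 terms (per pair, per unit density):
             *     int_r0^inf 4*pi*r^2*(-1/r^6)  dr = -4*pi/(3*r0^3)
             *     int_r0^inf 4*pi*r^2*( 1/r^12) dr =  4*pi/(9*r0^9)
             * and for the virial, with dV/dr equal to 6/r^7 and -12/r^13,
             *     int_r0^inf 4*pi*r^3*dV/dr dr =   8*pi/r0^3       (dispersion)
             *                                  = -16*pi/(3*r0^9)   (repulsion)
             */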
        }
        else if (fr->vdwtype == evdwCUT ||
                 EVDW_PME(fr->vdwtype) ||
                 fr->vdwtype == evdwUSER)
        {
            if (fr->vdwtype == evdwUSER && fplog)
            {
                fprintf(fplog,
                        "WARNING: using dispersion correction with user tables\n");
            }

            /* Note that with LJ-PME, the dispersion correction is multiplied
             * by the difference between the actual C6 and the value of C6
             * that would produce the combination rule.
             * This means the normal energy and virial difference formulas
             * can be used here.
             */

            rc3 = fr->rvdw*fr->rvdw*fr->rvdw;
            rc9 = rc3*rc3*rc3;

            /* Contribution beyond the cut-off */
            eners[0] += -4.0*M_PI/(3.0*rc3);
            eners[1] +=  4.0*M_PI/(9.0*rc9);
            if (fr->vdw_modifier == eintmodPOTSHIFT)
            {
                /* Contribution within the cut-off */
                eners[0] += -4.0*M_PI/(3.0*rc3);
                eners[1] +=  4.0*M_PI/(3.0*rc9);
            }
            /* Contribution beyond the cut-off */
            virs[0]  +=  8.0*M_PI/rc3;
            virs[1]  += -16.0*M_PI/(3.0*rc9);
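            /* For reference: potential-shift lowers the potential inside the
             * cut-off by its value at rc, i.e. by -1/rc^6 and 1/rc^12.
             * Integrating that constant over the cut-off sphere gives the
             * extra energy terms added above:
             *     (4*pi/3)*rc^3*(-1/rc^6)  = -4*pi/(3*rc^3)
             *     (4*pi/3)*rc^3*( 1/rc^12) =  4*pi/(3*rc^9)
             * The shift leaves the force unchanged, so only the
             * beyond-cut-off part contributes to the virial.
             */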
        }
        else
        {
            gmx_fatal(FARGS,
                      "Dispersion correction is not implemented for vdw-type = %s",
                      evdw_names[fr->vdwtype]);
        }

        /* When we deprecate the group kernels the code below can go too */
        if (fr->vdwtype == evdwPME && fr->cutoff_scheme == ecutsGROUP)
        {
            /* Calculate self-interaction coefficient (assuming that
             * the reciprocal-space contribution is constant in the
             * region that contributes to the self-interaction).
             */
            fr->enershiftsix = pow(fr->ewaldcoeff_lj, 6) / 6.0;

            eners[0] += -pow(sqrt(M_PI)*fr->ewaldcoeff_lj, 3)/3.0;
            virs[0]  +=  pow(sqrt(M_PI)*fr->ewaldcoeff_lj, 3);
        }

        fr->enerdiffsix    = eners[0];
        fr->enerdifftwelve = eners[1];
        /* The 0.5 is due to the Gromacs definition of the virial */
        fr->virdiffsix     = 0.5*virs[0];
        fr->virdifftwelve  = 0.5*virs[1];
    }
}
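/* The quantities stored above (enerdiffsix/twelve, virdiffsix/twelve) are
 * per pair and per unit density; calc_dispcorr() below converts them into
 * actual energy, virial and pressure corrections by multiplying with the
 * pair count, the number density and the system-averaged C6/C12.
 */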
void calc_dispcorr(t_inputrec *ir, t_forcerec *fr,
                   int natoms,
                   matrix box, real lambda, tensor pres, tensor virial,
                   real *prescorr, real *enercorr, real *dvdlcorr)
{
    gmx_bool bCorrAll, bCorrPres;
    real     dvdlambda, invvol, dens, ninter, avcsix, avctwelve, enerdiff, svir = 0, spres = 0;
    int      m;

    *prescorr = 0;
    *enercorr = 0;
    *dvdlcorr = 0;

    clear_mat(virial);
    clear_mat(pres);

    if (ir->eDispCorr != edispcNO)
    {
        bCorrAll  = (ir->eDispCorr == edispcAllEner ||
                     ir->eDispCorr == edispcAllEnerPres);
        bCorrPres = (ir->eDispCorr == edispcEnerPres ||
                     ir->eDispCorr == edispcAllEnerPres);

        invvol = 1/det(box);
        if (fr->n_tpi)
        {
            /* Only correct for the interactions with the inserted molecule */
            dens   = (natoms - fr->n_tpi)*invvol;
            ninter = fr->n_tpi;
        }
        else
        {
            dens   = natoms*invvol;
            ninter = 0.5*natoms;
        }
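        /* Pair-count bookkeeping: with N atoms there are N*(N-1)/2 ~ N^2/2
         * pairs, so using ninter = N/2 together with dens = N/V makes
         * ninter*dens = N^2/(2V), the factor multiplying the per-pair,
         * per-density corrections below. For test-particle insertion only
         * the pairs involving the inserted molecule are corrected, hence
         * the different dens and ninter above.
         */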
        if (ir->efep == efepNO)
        {
            avcsix    = fr->avcsix[0];
            avctwelve = fr->avctwelve[0];
        }
        else
        {
            avcsix    = (1 - lambda)*fr->avcsix[0]    + lambda*fr->avcsix[1];
            avctwelve = (1 - lambda)*fr->avctwelve[0] + lambda*fr->avctwelve[1];
        }
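        /* With free energy the average coefficients are interpolated
         * linearly in lambda,
         *     <C6>(lambda) = (1 - lambda)*<C6>_A + lambda*<C6>_B,
         * so d<C6>/dlambda = <C6>_B - <C6>_A, which is why dvdlambda below
         * accumulates (avcsix[1] - avcsix[0])*enerdiff (and the analogous
         * C12 term).
         */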
        enerdiff   = ninter*(dens*fr->enerdiffsix - fr->enershiftsix);
        *enercorr += avcsix*enerdiff;
        dvdlambda  = 0.0;
        if (ir->efep != efepNO)
        {
            dvdlambda += (fr->avcsix[1] - fr->avcsix[0])*enerdiff;
        }
        if (bCorrAll)
        {
            enerdiff   = ninter*(dens*fr->enerdifftwelve - fr->enershifttwelve);
            *enercorr += avctwelve*enerdiff;
            if (fr->efep != efepNO)
            {
                dvdlambda += (fr->avctwelve[1] - fr->avctwelve[0])*enerdiff;
            }
        }
        if (bCorrPres)
        {
            svir = ninter*dens*avcsix*fr->virdiffsix/3.0;
            if (ir->eDispCorr == edispcAllEnerPres)
            {
                svir += ninter*dens*avctwelve*fr->virdifftwelve/3.0;
            }
            /* The factor 2 is because of the Gromacs virial definition */
            spres = -2.0*invvol*svir*PRESFAC;

            for (m = 0; m < DIM; m++)
            {
                virial[m][m] += svir;
                pres[m][m]   += spres;
            }
            *prescorr += spres;
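            /* Factor bookkeeping: with the GROMACS virial convention
             * Xi = -1/2 sum r (x) F (hence the 0.5 in calc_enervirdiff) the
             * pressure tensor is P = (2/V)*(Ekin - Xi), so adding svir to a
             * diagonal virial element shifts the corresponding pressure
             * element by -2*svir/V; PRESFAC converts to bar.
             */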
        }

        /* Can't currently control when it prints, for now, just print when debugging */
        if (debug)
        {
            if (bCorrAll)
            {
                fprintf(debug, "Long Range LJ corr.: <C6> %10.4e, <C12> %10.4e\n",
                        avcsix, avctwelve);
            }
            if (bCorrPres)
            {
                fprintf(debug,
                        "Long Range LJ corr.: Epot %10g, Pres: %10g, Vir: %10g\n",
                        *enercorr, spres, svir);
            }
            else
            {
                fprintf(debug, "Long Range LJ corr.: Epot %10g\n", *enercorr);
            }
        }

        if (fr->efep != efepNO)
        {
            *dvdlcorr += dvdlambda;
        }
    }
}
void do_pbc_first(FILE *fplog, matrix box, t_forcerec *fr,
                  t_graph *graph, rvec x[])
{
    if (fplog)
    {
        fprintf(fplog, "Removing pbc first time\n");
    }
    calc_shifts(box, fr->shift_vec);
    if (graph)
    {
        mk_mshift(fplog, graph, fr->ePBC, box, x);
        if (gmx_debug_at)
        {
            p_graph(debug, "do_pbc_first 1", graph);
        }
        shift_self(graph, box, x);
        /* By doing an extra mk_mshift the molecules that are broken
         * because they were e.g. imported from another software
         * will be made whole again. Such are the healing powers
         * of GROMACS.
         */
        mk_mshift(fplog, graph, fr->ePBC, box, x);
        if (gmx_debug_at)
        {
            p_graph(debug, "do_pbc_first 2", graph);
        }
    }
    if (fplog)
    {
        fprintf(fplog, "Done rmpbc\n");
    }
}
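/* Make molecules whole again, molecule block by molecule block: a bonded
 * graph is built once per molecule type and reused for every copy in the
 * block, with 'as' tracking the first atom of the current copy. Molecules
 * consisting of a single atom (or, except on the first call, a single
 * charge group) need no PBC treatment and are simply skipped.
 */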
static void low_do_pbc_mtop(FILE *fplog, int ePBC, matrix box,
                            const gmx_mtop_t *mtop, rvec x[],
                            gmx_bool bFirst)
{
    t_graph        *graph;
    int             mb, as, mol;
    gmx_molblock_t *molb;

    if (bFirst && fplog)
    {
        fprintf(fplog, "Removing pbc first time\n");
    }

    snew(graph, 1);
    as = 0;
    for (mb = 0; mb < mtop->nmolblock; mb++)
    {
        molb = &mtop->molblock[mb];
        if (molb->natoms_mol == 1 ||
            (!bFirst && mtop->moltype[molb->type].cgs.nr == 1))
        {
            /* Just one atom or charge group in the molecule, no PBC required */
            as += molb->nmol*molb->natoms_mol;
        }
        else
        {
            /* Pass NULL instead of fplog to avoid graph prints for each molecule type */
            mk_graph_ilist(NULL, mtop->moltype[molb->type].ilist,
                           0, molb->natoms_mol, FALSE, FALSE, graph);

            for (mol = 0; mol < molb->nmol; mol++)
            {
                mk_mshift(fplog, graph, ePBC, box, x+as);

                shift_self(graph, box, x+as);
                /* The molecule is whole now.
                 * We don't need the second mk_mshift call as in do_pbc_first,
                 * since we no longer need this graph.
                 */
                as += molb->natoms_mol;
            }
            done_graph(graph);
        }
    }
    sfree(graph);
}
void do_pbc_first_mtop(FILE *fplog, int ePBC, matrix box,
                       const gmx_mtop_t *mtop, rvec x[])
{
    low_do_pbc_mtop(fplog, ePBC, box, mtop, x, TRUE);
}

void do_pbc_mtop(FILE *fplog, int ePBC, matrix box,
                 gmx_mtop_t *mtop, rvec x[])
{
    low_do_pbc_mtop(fplog, ePBC, box, mtop, x, FALSE);
}
// TODO This can be cleaned up a lot, and move back to runner.cpp
void finish_run(FILE *fplog, t_commrec *cr,
                t_inputrec *inputrec,
                t_nrnb nrnb[], gmx_wallcycle_t wcycle,
                gmx_walltime_accounting_t walltime_accounting,
                nonbonded_verlet_t *nbv,
                gmx_bool bWriteStat)
{
    t_nrnb *nrnb_tot = NULL;
    double  delta_t  = 0;
    double  nbfs     = 0, mflop = 0;
    double  elapsed_time,
            elapsed_time_over_all_ranks,
            elapsed_time_over_all_threads,
            elapsed_time_over_all_threads_over_all_ranks;

    if (cr->nnodes > 1)
    {
        snew(nrnb_tot, 1);
#if GMX_MPI
        MPI_Allreduce(nrnb->n, nrnb_tot->n, eNRNB, MPI_DOUBLE, MPI_SUM,
                      cr->mpi_comm_mysim);
#endif
    }
    else
    {
        nrnb_tot = nrnb;
    }

    elapsed_time                                 = walltime_accounting_get_elapsed_time(walltime_accounting);
    elapsed_time_over_all_ranks                  = elapsed_time;
    elapsed_time_over_all_threads                = walltime_accounting_get_elapsed_time_over_all_threads(walltime_accounting);
    elapsed_time_over_all_threads_over_all_ranks = elapsed_time_over_all_threads;
#if GMX_MPI
    if (cr->nnodes > 1)
    {
        /* reduce elapsed_time over all MPI ranks in the current simulation */
        MPI_Allreduce(&elapsed_time,
                      &elapsed_time_over_all_ranks,
                      1, MPI_DOUBLE, MPI_SUM,
                      cr->mpi_comm_mysim);
        elapsed_time_over_all_ranks /= cr->nnodes;
        /* Reduce elapsed_time_over_all_threads over all MPI ranks in the
         * current simulation. */
        MPI_Allreduce(&elapsed_time_over_all_threads,
                      &elapsed_time_over_all_threads_over_all_ranks,
                      1, MPI_DOUBLE, MPI_SUM,
                      cr->mpi_comm_mysim);
    }
#endif
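    /* After these reductions elapsed_time_over_all_ranks holds the average
     * wall-clock time per MPI rank, while
     * elapsed_time_over_all_threads_over_all_ranks is the sum over every
     * thread of every rank (roughly the consumed core time); both feed the
     * performance summary printed below.
     */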
    if (SIMMASTER(cr))
    {
        print_flop(fplog, nrnb_tot, &nbfs, &mflop);
    }
    if (cr->nnodes > 1)
    {
        sfree(nrnb_tot);
    }

    if ((cr->duty & DUTY_PP) && DOMAINDECOMP(cr))
    {
        print_dd_statistics(cr, inputrec, fplog);
    }

    /* TODO Move the responsibility for any scaling by thread counts
     * to the code that handled the thread region, so that there's a
     * mechanism to keep cycle counting working during the transition
     * to task parallelism. */
    int nthreads_pp  = gmx_omp_nthreads_get(emntNonbonded);
    int nthreads_pme = gmx_omp_nthreads_get(emntPME);
    wallcycle_scale_by_num_threads(wcycle, cr->duty == DUTY_PME, nthreads_pp, nthreads_pme);
    auto cycle_sum(wallcycle_sum(cr, wcycle));

    if (SIMMASTER(cr))
    {
        struct gmx_wallclock_gpu_t *gputimes = use_GPU(nbv) ? nbnxn_gpu_get_timings(nbv->gpu_nbv) : NULL;

        wallcycle_print(fplog, cr->nnodes, cr->npmenodes, nthreads_pp, nthreads_pme,
                        elapsed_time_over_all_ranks,
                        wcycle, cycle_sum, gputimes);

        if (EI_DYNAMICS(inputrec->eI))
        {
            delta_t = inputrec->delta_t;
        }

        if (fplog)
        {
            print_perf(fplog, elapsed_time_over_all_threads_over_all_ranks,
                       elapsed_time_over_all_ranks,
                       walltime_accounting_get_nsteps_done(walltime_accounting),
                       delta_t, nbfs, mflop);
        }
        if (bWriteStat)
        {
            print_perf(stderr, elapsed_time_over_all_threads_over_all_ranks,
                       elapsed_time_over_all_ranks,
                       walltime_accounting_get_nsteps_done(walltime_accounting),
                       delta_t, nbfs, mflop);
        }
    }
}
extern void initialize_lambdas(FILE *fplog, t_inputrec *ir, int *fep_state, real *lambda, double *lam0)
{
    /* this function works, but could probably use a logic rewrite to keep all the different
       types of efep straight. */

    int       i;
    t_lambda *fep = ir->fepvals;

    if ((ir->efep == efepNO) && (ir->bSimTemp == FALSE))
    {
        for (i = 0; i < efptNR; i++)
        {
            lambda[i] = 0.0;
            if (lam0)
            {
                lam0[i] = 0.0;
            }
        }
        return;
    }
    else
    {
        *fep_state = fep->init_fep_state; /* this might overwrite the checkpoint
                                             if checkpoint is set -- a kludge is in for now */
        for (i = 0; i < efptNR; i++)
        {
            /* overwrite lambda state with init_lambda for now for backwards compatibility */
            if (fep->init_lambda >= 0) /* if it's -1, it was never initialized */
            {
                lambda[i] = fep->init_lambda;
                if (lam0)
                {
                    lam0[i] = lambda[i];
                }
            }
            else
            {
                lambda[i] = fep->all_lambda[i][*fep_state];
                if (lam0)
                {
                    lam0[i] = lambda[i];
                }
            }
        }
        if (ir->bSimTemp)
        {
            /* need to rescale control temperatures to match current state */
            for (i = 0; i < ir->opts.ngtc; i++)
            {
                if (ir->opts.ref_t[i] > 0)
                {
                    ir->opts.ref_t[i] = ir->simtempvals->temperatures[*fep_state];
                }
            }
        }
    }

    /* Send to the log the information on the current lambdas */
    if (fplog != NULL)
    {
        fprintf(fplog, "Initial vector of lambda components:[ ");
        for (i = 0; i < efptNR; i++)
        {
            fprintf(fplog, "%10.4f ", lambda[i]);
        }
        fprintf(fplog, "]\n");
    }
    return;
}
void init_md(FILE *fplog,
             t_commrec *cr, t_inputrec *ir, const gmx_output_env_t *oenv,
             double *t, double *t0,
             real *lambda, int *fep_state, double *lam0,
             t_nrnb *nrnb, gmx_mtop_t *mtop,
             gmx_update_t *upd,
             int nfile, const t_filenm fnm[],
             gmx_mdoutf_t *outf, t_mdebin **mdebin,
             tensor force_vir, tensor shake_vir, rvec mu_tot,
             gmx_bool *bSimAnn, t_vcm **vcm, unsigned long Flags,
             gmx_wallcycle_t wcycle)
{
    int i;

    /* Initial values */
    *t = *t0 = ir->init_t;

    *bSimAnn = FALSE;
    for (i = 0; i < ir->opts.ngtc; i++)
    {
        /* set bSimAnn if any group is being annealed */
        if (ir->opts.annealing[i] != eannNO)
        {
            *bSimAnn = TRUE;
        }
    }
    if (*bSimAnn)
    {
        update_annealing_target_temp(&(ir->opts), ir->init_t);
    }

    /* Initialize lambda variables */
    initialize_lambdas(fplog, ir, fep_state, lambda, lam0);

    if (upd)
    {
        *upd = init_update(ir);
    }

    if (vcm != NULL)
    {
        *vcm = init_vcm(fplog, &mtop->groups, ir);
    }

    if (EI_DYNAMICS(ir->eI) && !(Flags & MD_APPENDFILES))
    {
        if (ir->etc == etcBERENDSEN)
        {
            please_cite(fplog, "Berendsen84a");
        }
        if (ir->etc == etcVRESCALE)
        {
            please_cite(fplog, "Bussi2007a");
        }
        if (ir->eI == eiSD1)
        {
            please_cite(fplog, "Goga2012");
        }
    }
    if ((ir->et[XX].n > 0) || (ir->et[YY].n > 0) || (ir->et[ZZ].n > 0))
    {
        please_cite(fplog, "Caleman2008a");
    }

    init_nrnb(nrnb);

    if (nfile != -1)
    {
        *outf = init_mdoutf(fplog, nfile, fnm, Flags, cr, ir, mtop, oenv, wcycle);

        *mdebin = init_mdebin((Flags & MD_APPENDFILES) ? NULL : mdoutf_get_fp_ene(*outf),
                              mtop, ir, mdoutf_get_fp_dhdl(*outf));
    }

    /* Initiate variables */
    clear_mat(force_vir);
    clear_mat(shake_vir);