/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2017,2018 by the GROMACS development team.
 * Copyright (c) 2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "gmxpre.h"

#include "nrnb.h"

#include <cstdlib>
#include <cstring>

#include <algorithm>
#include <vector>

#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/utility/arraysize.h"

typedef struct
{
    const char* name;
    int         flop;
} t_nrnb_data;

static const t_nrnb_data nbdata[eNRNB] = {
    /* These are re-used for different NB kernels, since there are so many.
     * The actual number of flops is set dynamically.
     */
    { "NB VdW [V&F]", 1 },
    { "NB VdW [F]", 1 },
    { "NB Elec. [V&F]", 1 },
    { "NB Elec. [F]", 1 },
    { "NB Elec. [W3,V&F]", 1 },
    { "NB Elec. [W3,F]", 1 },
    { "NB Elec. [W3-W3,V&F]", 1 },
    { "NB Elec. [W3-W3,F]", 1 },
    { "NB Elec. [W4,V&F]", 1 },
    { "NB Elec. [W4,F]", 1 },
    { "NB Elec. [W4-W4,V&F]", 1 },
    { "NB Elec. [W4-W4,F]", 1 },
    { "NB VdW & Elec. [V&F]", 1 },
    { "NB VdW & Elec. [F]", 1 },
    { "NB VdW & Elec. [W3,V&F]", 1 },
    { "NB VdW & Elec. [W3,F]", 1 },
    { "NB VdW & Elec. [W3-W3,V&F]", 1 },
    { "NB VdW & Elec. [W3-W3,F]", 1 },
    { "NB VdW & Elec. [W4,V&F]", 1 },
    { "NB VdW & Elec. [W4,F]", 1 },
    { "NB VdW & Elec. [W4-W4,V&F]", 1 },
    { "NB VdW & Elec. [W4-W4,F]", 1 },

    { "NB Generic kernel", 1 },
    { "NB Generic charge grp kernel", 1 },
    { "NB Free energy kernel", 1 },
90 { "Pair Search distance check", 9 }, /* nbnxn pair dist. check */
91 /* nbnxn kernel flops are based on inner-loops without exclusion checks.
92 * Plain Coulomb runs through the RF kernels, except with GPUs.
93 * invsqrt is counted as 6 flops: 1 for _mm_rsqt_ps + 5 for iteration.
94 * The flops are equal for plain-C, x86 SIMD and GPUs, except for:
95 * - plain-C kernel uses one flop more for Coulomb-only (F) than listed
96 * - x86 SIMD LJ geom-comb.rule kernels (fastest) use 2 more flops
97 * - x86 SIMD LJ LB-comb.rule kernels (fast) use 3 (8 for F+E) more flops
98 * - GPU always does exclusions, which requires 2-4 flops, but as invsqrt
99 * is always counted as 6 flops, this roughly compensates.
101 { "NxN RF Elec. + LJ [F]", 38 }, /* nbnxn kernel LJ+RF, no ener */
102 { "NxN RF Elec. + LJ [V&F]", 54 },
103 { "NxN QSTab Elec. + LJ [F]", 41 }, /* nbnxn kernel LJ+tab, no en */
104 { "NxN QSTab Elec. + LJ [V&F]", 59 },
105 { "NxN Ewald Elec. + LJ [F]", 66 }, /* nbnxn kernel LJ+Ewald, no en */
106 { "NxN Ewald Elec. + LJ [V&F]", 107 },
107 { "NxN LJ [F]", 33 }, /* nbnxn kernel LJ, no ener */
108 { "NxN LJ [V&F]", 43 },
109 { "NxN RF Electrostatics [F]", 31 }, /* nbnxn kernel RF, no ener */
110 { "NxN RF Electrostatics [V&F]", 36 },
111 { "NxN QSTab Elec. [F]", 34 }, /* nbnxn kernel tab, no ener */
112 { "NxN QSTab Elec. [V&F]", 41 },
113 { "NxN Ewald Elec. [F]", 61 }, /* nbnxn kernel Ewald, no ener */
114 { "NxN Ewald Elec. [V&F]", 84 },
115 /* The switch function flops should be added to the LJ kernels above */
116 { "NxN LJ add F-switch [F]", 12 }, /* extra cost for LJ F-switch */
117 { "NxN LJ add F-switch [V&F]", 22 },
118 { "NxN LJ add P-switch [F]", 27 }, /* extra cost for LJ P-switch */
119 { "NxN LJ add P-switch [V&F]", 20 },
120 { "NxN LJ add LJ Ewald [F]", 36 }, /* extra cost for LJ Ewald */
121 { "NxN LJ add LJ Ewald [V&F]", 33 },
122 { "1,4 nonbonded interactions", 90 },
123 { "Calc Weights", 36 },
124 { "Spread Q", 6 },
125 { "Spread Q Bspline", 2 },
126 { "Gather F", 23 },
127 { "Gather F Bspline", 6 },
128 { "3D-FFT", 8 },
129 { "Convolution", 4 },
130 { "Solve PME", 64 },
131 { "NS-Pairs", 21 },
132 { "Reset In Box", 3 },
133 { "Shift-X", 6 },
134 { "CG-CoM", 3 },
135 { "Sum Forces", 1 },
136 { "Bonds", 59 },
137 { "G96Bonds", 44 },
138 { "FENE Bonds", 58 },
139 { "Tab. Bonds", 62 },
140 { "Restraint Potential", 86 },
141 { "Linear Angles", 57 },
142 { "Angles", 168 },
143 { "G96Angles", 150 },
144 { "Quartic Angles", 160 },
145 { "Tab. Angles", 169 },
146 { "Propers", 229 },
147 { "Impropers", 208 },
148 { "RB-Dihedrals", 247 },
149 { "Four. Dihedrals", 247 },
150 { "Tab. Dihedrals", 227 },
151 { "Dist. Restr.", 200 },
152 { "Orient. Restr.", 200 },
153 { "Dihedral Restr.", 200 },
154 { "Pos. Restr.", 50 },
155 { "Flat-bottom posres", 50 },
156 { "Angle Restr.", 191 },
157 { "Angle Restr. Z", 164 },
158 { "Morse Potent.", 83 },
159 { "Cubic Bonds", 54 },
160 { "Walls", 31 },
161 { "Polarization", 59 },
162 { "Anharmonic Polarization", 72 },
163 { "Water Pol.", 62 },
164 { "Thole Pol.", 296 },
165 { "Virial", 18 },
166 { "Update", 31 },
167 { "Ext.ens. Update", 54 },
168 { "Stop-CM", 10 },
169 { "P-Coupling", 6 },
170 { "Calc-Ekin", 27 },
171 { "Lincs", 60 },
172 { "Lincs-Mat", 4 },
173 { "Shake", 30 },
174 { "Constraint-V", 9 },
175 { "Shake-Init", 10 },
176 { "Constraint-Vir", 24 },
177 { "Settle", 370 },
178 { "Virtual Site 1", 1 },
179 { "Virtual Site 2", 23 },
180 { "Virtual Site 2fd", 63 },
181 { "Virtual Site 3", 37 },
182 { "Virtual Site 3fd", 95 },
183 { "Virtual Site 3fad", 176 },
184 { "Virtual Site 3out", 87 },
185 { "Virtual Site 4fd", 110 },
186 { "Virtual Site 4fdn", 254 },
187 { "Virtual Site N", 15 },
188 { "CMAP", 1700 }, // Estimate!
189 { "Urey-Bradley", 183 },
190 { "Cross-Bond-Bond", 163 },
191 { "Cross-Bond-Angle", 163 }
static void pr_two(FILE* out, int c, int i)
{
    if (i < 10)
    {
        fprintf(out, "%c0%1d", c, i);
    }
    else
    {
        fprintf(out, "%c%2d", c, i);
    }
}

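/* Prints dt (in seconds) as a compact days/hours/minutes/seconds string,
 * omitting leading fields that are zero. For example (illustrative),
 * dt = 93784 s is 1 day, 2 h, 3 min, 4 s and prints as "1d02h03:04".
 */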
static void pr_difftime(FILE* out, double dt)
{
    int      ndays, nhours, nmins, nsecs;
    gmx_bool bPrint, bPrinted;

    ndays    = static_cast<int>(dt / (24 * 3600));
    dt       = dt - 24 * 3600 * ndays;
    nhours   = static_cast<int>(dt / 3600);
    dt       = dt - 3600 * nhours;
    nmins    = static_cast<int>(dt / 60);
    dt       = dt - nmins * 60;
    nsecs    = static_cast<int>(dt);
    bPrint   = (ndays > 0);
    bPrinted = bPrint;
    if (bPrint)
    {
        fprintf(out, "%d", ndays);
    }
    bPrint = bPrint || (nhours > 0);
    if (bPrint)
    {
        if (bPrinted)
        {
            pr_two(out, 'd', nhours);
        }
        else
        {
            fprintf(out, "%d", nhours);
        }
    }
    bPrinted = bPrinted || bPrint;
    bPrint   = bPrint || (nmins > 0);
    if (bPrint)
    {
        if (bPrinted)
        {
            pr_two(out, 'h', nmins);
        }
        else
        {
            fprintf(out, "%d", nmins);
        }
    }
    bPrinted = bPrinted || bPrint;
    if (bPrinted)
    {
        pr_two(out, ':', nsecs);
    }
    else
    {
        fprintf(out, "%ds", nsecs);
    }
    fprintf(out, "\n");
}

void clear_nrnb(t_nrnb* nrnb)
{
    int i;

    for (i = 0; (i < eNRNB); i++)
    {
        nrnb->n[i] = 0.0;
    }
}

void add_nrnb(t_nrnb* dest, t_nrnb* s1, t_nrnb* s2)
{
    int i;

    for (i = 0; (i < eNRNB); i++)
    {
        dest->n[i] = s1->n[i] + s2->n[i];
    }
}

void print_nrnb(FILE* out, t_nrnb* nrnb)
{
    int i;

    for (i = 0; (i < eNRNB); i++)
    {
        if (nrnb->n[i] > 0)
        {
            fprintf(out, " %-26s %10.0f.\n", nbdata[i].name, nrnb->n[i]);
        }
    }
}

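/* Typically reached through the inc_nrnb() convenience macro (declared in
 * nrnb.h), which supplies __FILE__ and __LINE__ for the debug printout below.
 */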
void _inc_nrnb(t_nrnb* nrnb, int enr, int inc, char gmx_unused* file, int gmx_unused line)
{
    nrnb->n[enr] += inc;
#ifdef DEBUG_NRNB
    printf("nrnb %15s(%2d) incremented with %8d from file %s line %d\n", nbdata[enr].name, enr, inc,
           file, line);
#endif
}

/* Returns whether enr is the index of a full nbnxn VdW kernel */
static gmx_bool nrnb_is_nbnxn_vdw_kernel(int enr)
{
    return (enr >= eNR_NBNXN_LJ_RF && enr <= eNR_NBNXN_LJ_E);
}

/* Returns whether enr is the index of an nbnxn kernel addition (LJ modification) */
static gmx_bool nrnb_is_nbnxn_kernel_addition(int enr)
{
    return (enr >= eNR_NBNXN_ADD_LJ_FSW && enr <= eNR_NBNXN_ADD_LJ_EWALD_E);
}

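/* Sums the non-bonded kernel counts into *nbfs (in millions of interactions),
 * weighting the water kernels by the number of atom pairs they compute per
 * listed pair (3 for W3, 9 for W3-W3, 4 for W4, 10 for W4-W4), sums the flop
 * counts of all counters into *mflop, and prints a table to out when it is
 * non-null.
 */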
void print_flop(FILE* out, t_nrnb* nrnb, double* nbfs, double* mflop)
{
    int         i, j;
    double      mni, frac, tfrac, tflop;
    const char* myline =
            "-----------------------------------------------------------------------------";

    *nbfs = 0.0;
    for (i = 0; (i < eNR_NBKERNEL_TOTAL_NR); i++)
    {
        if (std::strstr(nbdata[i].name, "W3-W3") != nullptr)
        {
            *nbfs += 9e-6 * nrnb->n[i];
        }
        else if (std::strstr(nbdata[i].name, "W3") != nullptr)
        {
            *nbfs += 3e-6 * nrnb->n[i];
        }
        else if (std::strstr(nbdata[i].name, "W4-W4") != nullptr)
        {
            *nbfs += 10e-6 * nrnb->n[i];
        }
        else if (std::strstr(nbdata[i].name, "W4") != nullptr)
        {
            *nbfs += 4e-6 * nrnb->n[i];
        }
        else
        {
            *nbfs += 1e-6 * nrnb->n[i];
        }
    }
    tflop = 0;
    for (i = 0; (i < eNRNB); i++)
    {
        tflop += 1e-6 * nrnb->n[i] * nbdata[i].flop;
    }

    if (tflop == 0)
    {
        fprintf(out, "No MEGA Flopsen this time\n");
        return;
    }
    if (out)
    {
        fprintf(out, "\n\tM E G A - F L O P S   A C C O U N T I N G\n\n");
    }

    if (out)
    {
        fprintf(out, " NB=Group-cutoff nonbonded kernels    NxN=N-by-N cluster Verlet kernels\n");
        fprintf(out, " RF=Reaction-Field  VdW=Van der Waals  QSTab=quadratic-spline table\n");
        fprintf(out, " W3=SPC/TIP3p  W4=TIP4p (single or pairs)\n");
        fprintf(out, " V&F=Potential and force  V=Potential only  F=Force only\n\n");

        fprintf(out, " %-32s %16s %15s %7s\n", "Computing:", "M-Number", "M-Flops", "% Flops");
        fprintf(out, "%s\n", myline);
    }
    *mflop = 0.0;
    tfrac  = 0.0;
    for (i = 0; (i < eNRNB); i++)
    {
        mni = 1e-6 * nrnb->n[i];
        /* Skip empty entries and nbnxn additional flops,
         * which have been added to the kernel entry.
         */
        if (mni > 0 && !nrnb_is_nbnxn_kernel_addition(i))
        {
            int flop;

            flop = nbdata[i].flop;
            if (nrnb_is_nbnxn_vdw_kernel(i))
            {
                /* Possibly add the cost of an LJ switch/Ewald function */
                for (j = eNR_NBNXN_ADD_LJ_FSW; j <= eNR_NBNXN_ADD_LJ_EWALD; j += 2)
                {
                    int e_kernel_add;

                    /* Select the force or energy flop count */
                    e_kernel_add = j + ((i - eNR_NBNXN_LJ_RF) % 2);

                    if (nrnb->n[e_kernel_add] > 0)
                    {
                        flop += nbdata[e_kernel_add].flop;
                    }
                }
            }
            *mflop += mni * flop;
            frac = 100.0 * mni * flop / tflop;
            tfrac += frac;
            if (out != nullptr)
            {
                fprintf(out, " %-32s %16.6f %15.3f %6.1f\n", nbdata[i].name, mni, mni * flop, frac);
            }
        }
    }
    if (out)
    {
        fprintf(out, "%s\n", myline);
        fprintf(out, " %-32s %16s %15.3f %6.1f\n", "Total", "", *mflop, tfrac);
        fprintf(out, "%s\n\n", myline);

        if (nrnb->n[eNR_NBKERNEL_GENERIC] > 0)
        {
            fprintf(out,
                    "WARNING: Using the slow generic C kernel. This is fine if you are\n"
                    "comparing different implementations or MD software. Routine\n"
                    "simulations should use a different non-bonded setup for much better\n"
                    "performance.\n\n");
        }
    }
}

void print_perf(FILE* out,
                double  time_per_thread,
                double  time_per_node,
                int64_t nsteps,
                double  delta_t,
                double  nbfs,
                double  mflop)
{
    double wallclocktime;

    fprintf(out, "\n");

    if (time_per_node > 0)
    {
        fprintf(out, "%12s %12s %12s %10s\n", "", "Core t (s)", "Wall t (s)", "(%)");
        fprintf(out, "%12s %12.3f %12.3f %10.1f\n", "Time:", time_per_thread, time_per_node,
                100.0 * time_per_thread / time_per_node);
        /* only print day-hour-sec format if time_per_node is more than 30 min */
        if (time_per_node > 30 * 60)
        {
            fprintf(out, "%12s %12s", "", "");
            pr_difftime(out, time_per_node);
        }
        if (delta_t > 0)
        {
            mflop         = mflop / time_per_node;
            wallclocktime = nsteps * delta_t;

            if (getenv("GMX_DETAILED_PERF_STATS") == nullptr)
            {
                fprintf(out, "%12s %12s %12s\n", "", "(ns/day)", "(hour/ns)");
                fprintf(out, "%12s %12.3f %12.3f\n", "Performance:",
                        wallclocktime * 24 * 3.6 / time_per_node,
                        1000 * time_per_node / (3600 * wallclocktime));
            }
            else
            {
                fprintf(out, "%12s %12s %12s %12s %12s\n", "", "(Mnbf/s)",
                        (mflop > 1000) ? "(GFlops)" : "(MFlops)", "(ns/day)", "(hour/ns)");
                fprintf(out, "%12s %12.3f %12.3f %12.3f %12.3f\n", "Performance:", nbfs / time_per_node,
                        (mflop > 1000) ? (mflop / 1000) : mflop,
                        wallclocktime * 24 * 3.6 / time_per_node,
                        1000 * time_per_node / (3600 * wallclocktime));
            }
        }
        else
        {
            if (getenv("GMX_DETAILED_PERF_STATS") == nullptr)
            {
                fprintf(out, "%12s %14s\n", "", "(steps/hour)");
                fprintf(out, "%12s %14.1f\n", "Performance:", nsteps * 3600.0 / time_per_node);
            }
            else
            {
                fprintf(out, "%12s %12s %12s %14s\n", "", "(Mnbf/s)",
                        (mflop > 1000) ? "(GFlops)" : "(MFlops)", "(steps/hour)");
                fprintf(out, "%12s %12.3f %12.3f %14.1f\n", "Performance:", nbfs / time_per_node,
                        (mflop > 1000) ? (mflop / 1000) : mflop, nsteps * 3600.0 / time_per_node);
            }
        }
    }
}

int cost_nrnb(int enr)
{
    return nbdata[enr].flop;
}

const char* nrnb_str(int enr)
{
    return nbdata[enr].name;
}

static const int force_index[] = {
    eNR_BONDS, eNR_ANGLES, eNR_PROPER, eNR_IMPROPER, eNR_RB,
    eNR_DISRES, eNR_ORIRES, eNR_POSRES, eNR_FBPOSRES, eNR_NS,
};
#define NFORCE_INDEX asize(force_index)

static const int constr_index[] = { eNR_SHAKE,  eNR_SHAKE_RIJ,  eNR_SETTLE, eNR_UPDATE,
                                    eNR_PCOUPL, eNR_CONSTR_VIR, eNR_CONSTR_V };
#define NCONSTR_INDEX asize(constr_index)

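/* Prints, for each rank, its cost ftot[i] as a percentage of the per-rank
 * average fav, and returns the largest such percentage (unb). The trailing
 * "Scaling" value 10000/unb is therefore 100 * average / maximum, i.e. 100%
 * when the load is perfectly balanced over the ranks.
 */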
static double pr_av(FILE* log, t_commrec* cr, double fav, const double ftot[], const char* title)
{
    int    i, perc;
    double dperc, unb;

    unb = 0;
    if (fav > 0)
    {
        fav /= cr->nnodes - cr->npmenodes;
        fprintf(log, "\n %-26s", title);
        for (i = 0; (i < cr->nnodes); i++)
        {
            dperc = (100.0 * ftot[i]) / fav;
            unb   = std::max(unb, dperc);
            perc  = static_cast<int>(dperc);
            fprintf(log, "%3d ", perc);
        }
        if (unb > 0)
        {
            perc = static_cast<int>(10000.0 / unb);
            fprintf(log, "%6d%%\n\n", perc);
        }
        else
        {
            fprintf(log, "\n\n");
        }
    }
    return unb;
}

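/* Prints a per-counter load-balance table over all ranks, followed by the
 * aggregated force and constraint cost balance and a total scaling estimate.
 */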
void pr_load(FILE* log, t_commrec* cr, t_nrnb nrnb[])
{
    t_nrnb av;

    std::vector<double> ftot(cr->nnodes);
    std::vector<double> stot(cr->nnodes);
    for (int i = 0; (i < cr->nnodes); i++)
    {
        add_nrnb(&av, &av, &(nrnb[i]));
        /* Cost due to forces */
        for (int j = 0; (j < eNR_NBKERNEL_TOTAL_NR); j++)
        {
            ftot[i] += nrnb[i].n[j] * cost_nrnb(j);
        }
        for (int j = 0; (j < NFORCE_INDEX); j++)
        {
            ftot[i] += nrnb[i].n[force_index[j]] * cost_nrnb(force_index[j]);
        }
        /* Due to shake */
        for (int j = 0; (j < NCONSTR_INDEX); j++)
        {
            stot[i] += nrnb[i].n[constr_index[j]] * cost_nrnb(constr_index[j]);
        }
    }
    for (int j = 0; (j < eNRNB); j++)
    {
        av.n[j] = av.n[j] / static_cast<double>(cr->nnodes - cr->npmenodes);
    }

    fprintf(log, "\nDetailed load balancing info in percentage of average\n");

    fprintf(log, " Type                 RANK:");
    for (int i = 0; (i < cr->nnodes); i++)
    {
        fprintf(log, "%3d ", i);
    }
    fprintf(log, "Scaling\n");
    fprintf(log, "---------------------------");
    for (int i = 0; (i < cr->nnodes); i++)
    {
        fprintf(log, "----");
    }
    fprintf(log, "-------\n");

    for (int j = 0; (j < eNRNB); j++)
    {
        double unb = 100.0;
        if (av.n[j] > 0)
        {
            double dperc;
            int    perc;

            fprintf(log, " %-26s", nrnb_str(j));
            for (int i = 0; (i < cr->nnodes); i++)
            {
                dperc = (100.0 * nrnb[i].n[j]) / av.n[j];
                unb   = std::max(unb, dperc);
                perc  = static_cast<int>(dperc);
                fprintf(log, "%3d ", perc);
            }
            if (unb > 0)
            {
                perc = static_cast<int>(10000.0 / unb);
                fprintf(log, "%6d%%\n", perc);
            }
            else
            {
                fprintf(log, "\n");
            }
        }
    }
    double fav = 0;
    double sav = 0;
    for (int i = 0; (i < cr->nnodes); i++)
    {
        fav += ftot[i];
        sav += stot[i];
    }
    double uf = pr_av(log, cr, fav, ftot.data(), "Total Force");
    double us = pr_av(log, cr, sav, stot.data(), "Total Constr.");

    double unb = (uf * fav + us * sav) / (fav + sav);
    if (unb > 0)
    {
        unb = 10000.0 / unb;
        fprintf(log, "\nTotal Scaling: %.0f%% of max performance\n\n", unb);
    }
}