/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "nbnxn_search.h"

#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/math/functions.h"
#include "gromacs/math/utilities.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdlib/nb_verlet.h"
#include "gromacs/mdlib/nbnxn_atomdata.h"
#include "gromacs/mdlib/nbnxn_consts.h"
#include "gromacs/mdlib/nbnxn_grid.h"
#include "gromacs/mdlib/nbnxn_internal.h"
#include "gromacs/mdlib/nbnxn_simd.h"
#include "gromacs/mdlib/nbnxn_util.h"
#include "gromacs/mdlib/ns.h"
#include "gromacs/mdtypes/group.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/simd/simd.h"
#include "gromacs/simd/vector_operations.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxomp.h"
#include "gromacs/utility/smalloc.h"
using namespace gmx; // TODO: Remove when this file is moved into gmx namespace
/* We shift the i-particles backward for PBC.
 * This leads to more conditionals than shifting forward.
 * We do this to get more balanced pair lists.
 */
static const bool pbc_shift_backward = true;
static void nbs_cycle_clear(nbnxn_cycle_t *cc)
{
    for (int i = 0; i < enbsCCnr; i++)
    {
        cc[i].count = 0;
        cc[i].c     = 0;
    }
}
static double Mcyc_av(const nbnxn_cycle_t *cc)
{
    return (double)cc->c*1e-6/cc->count;
}
static void nbs_cycle_print(FILE *fp, const nbnxn_search_t nbs)
{
    fprintf(fp, "ns %4d grid %4.1f search %4.1f red.f %5.3f",
            nbs->cc[enbsCCgrid].count,
            Mcyc_av(&nbs->cc[enbsCCgrid]),
            Mcyc_av(&nbs->cc[enbsCCsearch]),
            Mcyc_av(&nbs->cc[enbsCCreducef]));

    if (nbs->nthread_max > 1)
    {
        if (nbs->cc[enbsCCcombine].count > 0)
        {
            fprintf(fp, " comb %5.2f",
                    Mcyc_av(&nbs->cc[enbsCCcombine]));
        }
        fprintf(fp, " s. th");
        for (int t = 0; t < nbs->nthread_max; t++)
        {
            fprintf(fp, " %4.1f",
                    Mcyc_av(&nbs->work[t].cc[enbsCCsearch]));
        }
    }
}
/* Converts an i-cluster index to a j-cluster index for a j-cluster size of
 * 2^na_cj_2log atoms (the simple-list i-cluster size is 4 atoms).
 */
static gmx_inline int ci_to_cj(int ci, int na_cj_2log)
{
    switch (na_cj_2log)
    {
        case 2: return ci;      break;
        case 3: return (ci>>1); break;
        case 1: return (ci<<1); break;
    }

    return 0;
}
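
/* Editor's note (worked example, not part of the original source): with the
 * 4-atom i-clusters used here, na_cj_2log = 3 means 8-atom j-clusters, so
 * i-clusters 6 and 7 both map to j-cluster 3 (6>>1 == 7>>1 == 3), while
 * na_cj_2log = 1 (2-atom j-clusters) maps i-cluster 6 to j-cluster 12.
 */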
/* Returns the j-cluster index corresponding to the i-cluster index */
template<int cj_size> static gmx_inline int ci_to_cj(int ci)
{
    if (cj_size == 2)
    {
        return ci << 1;
    }
    else if (cj_size == 4)
    {
        return ci;
    }
    else if (cj_size == 8)
    {
        return ci >> 1;
    }
    else
    {
        GMX_ASSERT(false, "Only j-cluster sizes 2, 4, 8 are implemented");
        return -1;
    }
}
/* Returns the index in the coordinate array corresponding to the i-cluster index */
template<int cj_size> static gmx_inline int x_ind_ci(int ci)
{
    if (cj_size <= 4)
    {
        /* Coordinates are stored packed in groups of 4 */
        return ci*STRIDE_P4;
    }
    else if (cj_size == 8)
    {
        /* Coordinates packed in 8, i-cluster size is half the packing width */
        return (ci >> 1)*STRIDE_P8 + (ci & 1)*(c_packX8 >> 1);
    }
    else
    {
        GMX_ASSERT(false, "Only j-cluster sizes 2, 4, 8 are implemented");
        return -1;
    }
}
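
/* Editor's note (worked example, not part of the original source): for
 * cj_size == 8 the coordinates are packed per 8 atoms, so the 4-atom
 * i-cluster 5 starts at (5>>1)*STRIDE_P8 + (5&1)*(c_packX8>>1), i.e. at an
 * offset of half a pack (c_packX8/2 = 4) into the third 8-atom pack.
 */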
/* Returns the index in the coordinate array corresponding to the j-cluster index */
template<int cj_size> static gmx_inline int x_ind_cj(int cj)
{
    if (cj_size == 2)
    {
        /* Coordinates are stored packed in groups of 4 */
        return (cj >> 1)*STRIDE_P4 + (cj & 1)*(c_packX4 >> 1);
    }
    else if (cj_size <= 4)
    {
        /* Coordinates are stored packed in groups of 4 */
        return cj*STRIDE_P4;
    }
    else if (cj_size == 8)
    {
        /* Coordinates are stored packed in groups of 8 */
        return cj*STRIDE_P8;
    }
    else
    {
        GMX_ASSERT(false, "Only j-cluster sizes 2, 4, 8 are implemented");
        return -1;
    }
}
/* The 6 functions below are only introduced to make the code more readable */

static gmx_inline int ci_to_cj_simd_4xn(int ci)
{
    return ci_to_cj<GMX_SIMD_REAL_WIDTH>(ci);
}

static gmx_inline int x_ind_ci_simd_4xn(int ci)
{
    return x_ind_ci<GMX_SIMD_REAL_WIDTH>(ci);
}

static gmx_inline int x_ind_cj_simd_4xn(int cj)
{
    return x_ind_cj<GMX_SIMD_REAL_WIDTH>(cj);
}

static gmx_inline int ci_to_cj_simd_2xnn(int ci)
{
    return ci_to_cj<GMX_SIMD_REAL_WIDTH/2>(ci);
}

static gmx_inline int x_ind_ci_simd_2xnn(int ci)
{
    return x_ind_ci<GMX_SIMD_REAL_WIDTH/2>(ci);
}

static gmx_inline int x_ind_cj_simd_2xnn(int cj)
{
    return x_ind_cj<GMX_SIMD_REAL_WIDTH/2>(cj);
}
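
/* Editor's note (clarification, assuming the usual kernel layouts): the 4xN
 * kernels use j-clusters of GMX_SIMD_REAL_WIDTH atoms, while the 2xNN kernels
 * process two i-atoms per register and therefore use j-clusters of half the
 * SIMD width, hence the <GMX_SIMD_REAL_WIDTH/2> instantiations above.
 */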
gmx_bool nbnxn_kernel_pairlist_simple(int nb_kernel_type)
{
    if (nb_kernel_type == nbnxnkNotSet)
    {
        gmx_fatal(FARGS, "Non-bonded kernel type not set for Verlet-style pair-list.");
    }

    switch (nb_kernel_type)
    {
        case nbnxnk8x8x8_GPU:
        case nbnxnk8x8x8_PlainC:
            return FALSE;

        case nbnxnk4x4_PlainC:
        case nbnxnk4xN_SIMD_4xN:
        case nbnxnk4xN_SIMD_2xNN:
            return TRUE;

        default:
            gmx_incons("Invalid nonbonded kernel type passed!");
            return FALSE;
    }
}
/* Initializes a single t_nblist data structure for free-energy interactions */
static void nbnxn_init_pairlist_fep(t_nblist *nl)
{
    nl->type      = GMX_NBLIST_INTERACTION_FREE_ENERGY;
    nl->igeometry = GMX_NBLIST_GEOMETRY_PARTICLE_PARTICLE;
    /* The interaction functions are set in the free energy kernel function */
}
void nbnxn_init_search(nbnxn_search_t            * nbs_ptr,
                       ivec                      *n_dd_cells,
                       struct gmx_domdec_zones_t *zones,
                       gmx_bool                   bFEP,
                       int                        nthread_max)
{
    nbnxn_search_t nbs;
    int            ngrid;

    snew(nbs, 1);
    *nbs_ptr = nbs;

    nbs->bFEP   = bFEP;

    nbs->DomDec = (n_dd_cells != NULL);

    clear_ivec(nbs->dd_dim);
    ngrid = 1;
    if (nbs->DomDec)
    {
        nbs->zones = zones;

        for (int d = 0; d < DIM; d++)
        {
            if ((*n_dd_cells)[d] > 1)
            {
                nbs->dd_dim[d] = 1;
                /* Each grid matches a DD zone */
                ngrid *= 2;
            }
        }
    }

    nbnxn_grids_init(nbs, ngrid);

    nbs->cell_nalloc = 0;

    nbs->nthread_max = nthread_max;

    /* Initialize the work data structures for each thread */
    snew(nbs->work, nbs->nthread_max);
    for (int t = 0; t < nbs->nthread_max; t++)
    {
        nbs->work[t].cxy_na           = NULL;
        nbs->work[t].cxy_na_nalloc    = 0;
        nbs->work[t].sort_work        = NULL;
        nbs->work[t].sort_work_nalloc = 0;

        snew(nbs->work[t].nbl_fep, 1);
        nbnxn_init_pairlist_fep(nbs->work[t].nbl_fep);
    }

    /* Initialize detailed nbsearch cycle counting */
    nbs->print_cycles = (getenv("GMX_NBNXN_CYCLE") != 0);
    nbs->search_count = 0;
    nbs_cycle_clear(nbs->cc);
    for (int t = 0; t < nbs->nthread_max; t++)
    {
        nbs_cycle_clear(nbs->work[t].cc);
    }
}
static void init_buffer_flags(nbnxn_buffer_flags_t *flags,
                              int                   natoms)
{
    flags->nflag = (natoms + NBNXN_BUFFERFLAG_SIZE - 1)/NBNXN_BUFFERFLAG_SIZE;
    if (flags->nflag > flags->flag_nalloc)
    {
        flags->flag_nalloc = over_alloc_large(flags->nflag);
        srenew(flags->flag, flags->flag_nalloc);
    }
    for (int b = 0; b < flags->nflag; b++)
    {
        bitmask_clear(&(flags->flag[b]));
    }
}
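
/* Editor's note (illustrative example, not part of the original source):
 * nflag = (natoms + NBNXN_BUFFERFLAG_SIZE - 1)/NBNXN_BUFFERFLAG_SIZE is a
 * ceiling division; e.g. with a flag block size of 16, 1000 atoms give
 * (1000 + 15)/16 = 63 flag blocks, the last one covering only 8 atoms.
 */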
/* Determines the cell range along one dimension that
 * the bounding box b0 - b1 sees.
 */
static void get_cell_range(real b0, real b1,
                           int nc, real c0, real s, real invs,
                           real d2, real r2, int *cf, int *cl)
{
    *cf = std::max(static_cast<int>((b0 - c0)*invs), 0);

    while (*cf > 0 && d2 + gmx::square((b0 - c0) - (*cf-1+1)*s) < r2)
    {
        (*cf)--;
    }

    *cl = std::min(static_cast<int>((b1 - c0)*invs), nc-1);
    while (*cl < nc-1 && d2 + gmx::square((*cl+1)*s - (b1 - c0)) < r2)
    {
        (*cl)++;
    }
}
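
/* Editor's note (illustrative example, not part of the original source): with
 * cell size s = 1.0 (invs = 1.0), grid origin c0 = 0 and a bounding box
 * spanning b0 = 2.3 to b1 = 3.1, the initial range is cells 2..3; the while
 * loops then extend it to neighboring cells whose closest face is still
 * within the cut-off, i.e. while d2 plus the squared gap stays below r2.
 */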
/* Reference code calculating the distance^2 between two bounding boxes */
static float box_dist2(float bx0, float bx1, float by0,
                       float by1, float bz0, float bz1,
                       const nbnxn_bb_t *bb)
{
    float d2;
    float dl, dh, dm, dm0;

    d2 = 0;

    dl  = bx0 - bb->upper[BB_X];
    dh  = bb->lower[BB_X] - bx1;
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0f);
    d2 += dm0*dm0;

    dl  = by0 - bb->upper[BB_Y];
    dh  = bb->lower[BB_Y] - by1;
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0f);
    d2 += dm0*dm0;

    dl  = bz0 - bb->upper[BB_Z];
    dh  = bb->lower[BB_Z] - bz1;
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0f);
    d2 += dm0*dm0;

    return d2;
}
/* Plain C code calculating the distance^2 between two bounding boxes */
static float subc_bb_dist2(int si, const nbnxn_bb_t *bb_i_ci,
                           int csj, const nbnxn_bb_t *bb_j_all)
{
    const nbnxn_bb_t *bb_i, *bb_j;
    float             d2;
    float             dl, dh, dm, dm0;

    bb_i = bb_i_ci  + si;
    bb_j = bb_j_all + csj;

    d2 = 0;

    dl  = bb_i->lower[BB_X] - bb_j->upper[BB_X];
    dh  = bb_j->lower[BB_X] - bb_i->upper[BB_X];
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0f);
    d2 += dm0*dm0;

    dl  = bb_i->lower[BB_Y] - bb_j->upper[BB_Y];
    dh  = bb_j->lower[BB_Y] - bb_i->upper[BB_Y];
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0f);
    d2 += dm0*dm0;

    dl  = bb_i->lower[BB_Z] - bb_j->upper[BB_Z];
    dh  = bb_j->lower[BB_Z] - bb_i->upper[BB_Z];
    dm  = std::max(dl, dh);
    dm0 = std::max(dm, 0.0f);
    d2 += dm0*dm0;

    return d2;
}
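
/* Editor's note (worked example, not part of the original source): for two
 * boxes that overlap in x and y but are separated by a 0.3 nm gap in z, dl and
 * dh are negative in x and y (dm0 = 0 there), dm0 = 0.3 in z, and the function
 * returns d2 = 0.3*0.3 = 0.09 nm^2.
 */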
#if NBNXN_SEARCH_BB_SIMD4

/* 4-wide SIMD code for bb distance for bb format xyz0 */
static float subc_bb_dist2_simd4(int si, const nbnxn_bb_t *bb_i_ci,
                                 int csj, const nbnxn_bb_t *bb_j_all)
{
    // TODO: During SIMDv2 transition only some archs use namespace (remove when done)
    using namespace gmx;

    Simd4Float bb_i_S0, bb_i_S1;
    Simd4Float bb_j_S0, bb_j_S1;
    Simd4Float dl_S;
    Simd4Float dh_S;
    Simd4Float dm_S;
    Simd4Float dm0_S;

    bb_i_S0 = load4(&bb_i_ci[si].lower[0]);
    bb_i_S1 = load4(&bb_i_ci[si].upper[0]);
    bb_j_S0 = load4(&bb_j_all[csj].lower[0]);
    bb_j_S1 = load4(&bb_j_all[csj].upper[0]);

    dl_S    = bb_i_S0 - bb_j_S1;
    dh_S    = bb_j_S0 - bb_i_S1;

    dm_S    = max(dl_S, dh_S);
    dm0_S   = max(dm_S, simd4SetZeroF());

    return dotProduct(dm0_S, dm0_S);
}
483 /* Calculate bb bounding distances of bb_i[si,...,si+3] and store them in d2 */
484 #define SUBC_BB_DIST2_SIMD4_XXXX_INNER(si, bb_i, d2) \
488 Simd4Float dx_0, dy_0, dz_0; \
489 Simd4Float dx_1, dy_1, dz_1; \
491 Simd4Float mx, my, mz; \
492 Simd4Float m0x, m0y, m0z; \
494 Simd4Float d2x, d2y, d2z; \
495 Simd4Float d2s, d2t; \
497 shi = si*NNBSBB_D*DIM; \
499 xi_l = load4(bb_i+shi+0*STRIDE_PBB); \
500 yi_l = load4(bb_i+shi+1*STRIDE_PBB); \
501 zi_l = load4(bb_i+shi+2*STRIDE_PBB); \
502 xi_h = load4(bb_i+shi+3*STRIDE_PBB); \
503 yi_h = load4(bb_i+shi+4*STRIDE_PBB); \
504 zi_h = load4(bb_i+shi+5*STRIDE_PBB); \
506 dx_0 = xi_l - xj_h; \
507 dy_0 = yi_l - yj_h; \
508 dz_0 = zi_l - zj_h; \
510 dx_1 = xj_l - xi_h; \
511 dy_1 = yj_l - yi_h; \
512 dz_1 = zj_l - zi_h; \
514 mx = max(dx_0, dx_1); \
515 my = max(dy_0, dy_1); \
516 mz = max(dz_0, dz_1); \
518 m0x = max(mx, zero); \
519 m0y = max(my, zero); \
520 m0z = max(mz, zero); \
529 store4(d2+si, d2t); \
532 /* 4-wide SIMD code for nsi bb distances for bb format xxxxyyyyzzzz */
533 static void subc_bb_dist2_simd4_xxxx(const float *bb_j
,
534 int nsi
, const float *bb_i
,
537 // TODO: During SIMDv2 transition only some archs use namespace (remove when done)
540 Simd4Float xj_l
, yj_l
, zj_l
;
541 Simd4Float xj_h
, yj_h
, zj_h
;
542 Simd4Float xi_l
, yi_l
, zi_l
;
543 Simd4Float xi_h
, yi_h
, zi_h
;
549 xj_l
= Simd4Float(bb_j
[0*STRIDE_PBB
]);
550 yj_l
= Simd4Float(bb_j
[1*STRIDE_PBB
]);
551 zj_l
= Simd4Float(bb_j
[2*STRIDE_PBB
]);
552 xj_h
= Simd4Float(bb_j
[3*STRIDE_PBB
]);
553 yj_h
= Simd4Float(bb_j
[4*STRIDE_PBB
]);
554 zj_h
= Simd4Float(bb_j
[5*STRIDE_PBB
]);
556 /* Here we "loop" over si (0,STRIDE_PBB) from 0 to nsi with step STRIDE_PBB.
557 * But as we know the number of iterations is 1 or 2, we unroll manually.
559 SUBC_BB_DIST2_SIMD4_XXXX_INNER(0, bb_i
, d2
);
560 if (STRIDE_PBB
< nsi
)
562 SUBC_BB_DIST2_SIMD4_XXXX_INNER(STRIDE_PBB
, bb_i
, d2
);
#endif /* NBNXN_SEARCH_BB_SIMD4 */
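
/* Editor's note (clarification of the xxxxyyyyzzzz layout assumed above): the
 * bounding-box corners of STRIDE_PBB (4) clusters are stored transposed, e.g.
 * x_lo[0..3], y_lo[0..3], z_lo[0..3], x_hi[0..3], y_hi[0..3], z_hi[0..3],
 * so a single 4-wide load fetches the same corner component of 4 clusters.
 */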
/* Returns if any atom pair from two clusters is within distance sqrt(rl2) */
static gmx_inline gmx_bool
clusterpair_in_range(const nbnxn_list_work_t *work,
                     int si,
                     int csj, int stride, const real *x_j,
                     real rl2)
{
#if !GMX_SIMD4_HAVE_REAL

    /* Plain C version.
     * All coordinates are stored as xyzxyz...
     */

    const real *x_i = work->x_ci;

    for (int i = 0; i < c_nbnxnGpuClusterSize; i++)
    {
        int i0 = (si*c_nbnxnGpuClusterSize + i)*DIM;
        for (int j = 0; j < c_nbnxnGpuClusterSize; j++)
        {
            int j0 = (csj*c_nbnxnGpuClusterSize + j)*stride;

            real d2 = gmx::square(x_i[i0] - x_j[j0]) + gmx::square(x_i[i0+1] - x_j[j0+1]) + gmx::square(x_i[i0+2] - x_j[j0+2]);

            if (d2 < rl2)
            {
                return TRUE;
            }
        }
    }

    return FALSE;

#else /* !GMX_SIMD4_HAVE_REAL */
604 /* 4-wide SIMD version.
605 * A cluster is hard-coded to 8 atoms.
606 * The coordinates x_i are stored as xxxxyyyy..., x_j is stored xyzxyz...
607 * Using 8-wide AVX(2) is not faster on Intel Sandy Bridge and Haswell.
609 assert(c_nbnxnGpuClusterSize
== 8);
611 Simd4Real rc2_S
= Simd4Real(rl2
);
613 const real
*x_i
= work
->x_ci_simd
;
615 int dim_stride
= c_nbnxnGpuClusterSize
*DIM
;
616 Simd4Real ix_S0
= load4(x_i
+ si
*dim_stride
+ 0*GMX_SIMD4_WIDTH
);
617 Simd4Real iy_S0
= load4(x_i
+ si
*dim_stride
+ 1*GMX_SIMD4_WIDTH
);
618 Simd4Real iz_S0
= load4(x_i
+ si
*dim_stride
+ 2*GMX_SIMD4_WIDTH
);
619 Simd4Real ix_S1
= load4(x_i
+ si
*dim_stride
+ 3*GMX_SIMD4_WIDTH
);
620 Simd4Real iy_S1
= load4(x_i
+ si
*dim_stride
+ 4*GMX_SIMD4_WIDTH
);
621 Simd4Real iz_S1
= load4(x_i
+ si
*dim_stride
+ 5*GMX_SIMD4_WIDTH
);
623 /* We loop from the outer to the inner particles to maximize
624 * the chance that we find a pair in range quickly and return.
626 int j0
= csj
*c_nbnxnGpuClusterSize
;
627 int j1
= j0
+ c_nbnxnGpuClusterSize
- 1;
630 Simd4Real jx0_S
, jy0_S
, jz0_S
;
631 Simd4Real jx1_S
, jy1_S
, jz1_S
;
633 Simd4Real dx_S0
, dy_S0
, dz_S0
;
634 Simd4Real dx_S1
, dy_S1
, dz_S1
;
635 Simd4Real dx_S2
, dy_S2
, dz_S2
;
636 Simd4Real dx_S3
, dy_S3
, dz_S3
;
647 Simd4Bool wco_any_S01
, wco_any_S23
, wco_any_S
;
649 jx0_S
= Simd4Real(x_j
[j0
*stride
+0]);
650 jy0_S
= Simd4Real(x_j
[j0
*stride
+1]);
651 jz0_S
= Simd4Real(x_j
[j0
*stride
+2]);
653 jx1_S
= Simd4Real(x_j
[j1
*stride
+0]);
654 jy1_S
= Simd4Real(x_j
[j1
*stride
+1]);
655 jz1_S
= Simd4Real(x_j
[j1
*stride
+2]);
657 /* Calculate distance */
658 dx_S0
= ix_S0
- jx0_S
;
659 dy_S0
= iy_S0
- jy0_S
;
660 dz_S0
= iz_S0
- jz0_S
;
661 dx_S1
= ix_S1
- jx0_S
;
662 dy_S1
= iy_S1
- jy0_S
;
663 dz_S1
= iz_S1
- jz0_S
;
664 dx_S2
= ix_S0
- jx1_S
;
665 dy_S2
= iy_S0
- jy1_S
;
666 dz_S2
= iz_S0
- jz1_S
;
667 dx_S3
= ix_S1
- jx1_S
;
668 dy_S3
= iy_S1
- jy1_S
;
669 dz_S3
= iz_S1
- jz1_S
;
671 /* rsq = dx*dx+dy*dy+dz*dz */
672 rsq_S0
= norm2(dx_S0
, dy_S0
, dz_S0
);
673 rsq_S1
= norm2(dx_S1
, dy_S1
, dz_S1
);
674 rsq_S2
= norm2(dx_S2
, dy_S2
, dz_S2
);
675 rsq_S3
= norm2(dx_S3
, dy_S3
, dz_S3
);
677 wco_S0
= (rsq_S0
< rc2_S
);
678 wco_S1
= (rsq_S1
< rc2_S
);
679 wco_S2
= (rsq_S2
< rc2_S
);
680 wco_S3
= (rsq_S3
< rc2_S
);
682 wco_any_S01
= wco_S0
|| wco_S1
;
683 wco_any_S23
= wco_S2
|| wco_S3
;
684 wco_any_S
= wco_any_S01
|| wco_any_S23
;
686 if (anyTrue(wco_any_S
))
697 #endif /* !GMX_SIMD4_HAVE_REAL */
/* Returns the j sub-cell for index cj_ind */
static int nbl_cj(const nbnxn_pairlist_t *nbl, int cj_ind)
{
    return nbl->cj4[cj_ind/c_nbnxnGpuJgroupSize].cj[cj_ind & (c_nbnxnGpuJgroupSize - 1)];
}

/* Returns the i-interaction mask of the j sub-cell for index cj_ind */
static unsigned int nbl_imask0(const nbnxn_pairlist_t *nbl, int cj_ind)
{
    return nbl->cj4[cj_ind/c_nbnxnGpuJgroupSize].imei[0].imask;
}
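
/* Editor's note (worked example, not part of the original source): with
 * c_nbnxnGpuJgroupSize == 4, j-cluster index cj_ind = 10 lives in cj4 group
 * 10/4 = 2 at slot 10 & 3 = 2, so nbl_cj() returns nbl->cj4[2].cj[2].
 */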
/* Ensures there is enough space for extra exclusion masks */
static void check_excl_space(nbnxn_pairlist_t *nbl, int extra)
{
    if (nbl->nexcl+extra > nbl->excl_nalloc)
    {
        nbl->excl_nalloc = over_alloc_small(nbl->nexcl+extra);
        nbnxn_realloc_void((void **)&nbl->excl,
                           nbl->nexcl*sizeof(*nbl->excl),
                           nbl->excl_nalloc*sizeof(*nbl->excl),
                           nbl->alloc, nbl->free);
    }
}
/* Ensures there is enough space for ncell extra j-cells in the list */
static void check_cell_list_space_simple(nbnxn_pairlist_t *nbl,
                                         int               ncell)
{
    int cj_max;

    cj_max = nbl->ncj + ncell;

    if (cj_max > nbl->cj_nalloc)
    {
        nbl->cj_nalloc = over_alloc_small(cj_max);
        nbnxn_realloc_void((void **)&nbl->cj,
                           nbl->ncj*sizeof(*nbl->cj),
                           nbl->cj_nalloc*sizeof(*nbl->cj),
                           nbl->alloc, nbl->free);
    }
}
743 /* Ensures there is enough space for ncell extra j-clusters in the list */
744 static void check_cell_list_space_supersub(nbnxn_pairlist_t
*nbl
,
    /* We can have at most nsupercell*c_gpuNumClusterPerCell sj lists */
    /* We can store 4 j-subcell - i-supercell pairs in one struct.
     * Since we round down, we need one extra entry.
     */
753 ncj4_max
= ((nbl
->work
->cj_ind
+ ncell
*c_gpuNumClusterPerCell
+ c_nbnxnGpuJgroupSize
- 1)/c_nbnxnGpuJgroupSize
);
755 if (ncj4_max
> nbl
->cj4_nalloc
)
757 nbl
->cj4_nalloc
= over_alloc_small(ncj4_max
);
758 nbnxn_realloc_void((void **)&nbl
->cj4
,
759 nbl
->work
->cj4_init
*sizeof(*nbl
->cj4
),
760 nbl
->cj4_nalloc
*sizeof(*nbl
->cj4
),
761 nbl
->alloc
, nbl
->free
);
764 if (ncj4_max
> nbl
->work
->cj4_init
)
766 for (int j4
= nbl
->work
->cj4_init
; j4
< ncj4_max
; j4
++)
768 /* No i-subcells and no excl's in the list initially */
769 for (w
= 0; w
< c_nbnxnGpuClusterpairSplit
; w
++)
771 nbl
->cj4
[j4
].imei
[w
].imask
= 0U;
772 nbl
->cj4
[j4
].imei
[w
].excl_ind
= 0;
776 nbl
->work
->cj4_init
= ncj4_max
;
/* Sets all exclusion masks for one GPU warp to "no exclusions" */
static void set_no_excls(nbnxn_excl_t *excl)
{
    for (int t = 0; t < c_nbnxnGpuExclSize; t++)
    {
        /* Turn all interaction bits on */
        excl->pair[t] = NBNXN_INTERACTION_MASK_ALL;
    }
}
790 /* Initializes a single nbnxn_pairlist_t data structure */
791 static void nbnxn_init_pairlist(nbnxn_pairlist_t
*nbl
,
793 nbnxn_alloc_t
*alloc
,
798 nbl
->alloc
= nbnxn_alloc_aligned
;
806 nbl
->free
= nbnxn_free_aligned
;
813 nbl
->bSimple
= bSimple
;
828 /* We need one element extra in sj, so alloc initially with 1 */
    GMX_ASSERT(c_nbnxnGpuNumClusterPerSupercluster == c_gpuNumClusterPerCell, "The search code assumes that a super-cluster matches a search grid cell");
837 GMX_ASSERT(sizeof(nbl
->cj4
[0].imei
[0].imask
)*8 >= c_nbnxnGpuJgroupSize
*c_gpuNumClusterPerCell
, "The i super-cluster cluster interaction mask does not contain a sufficient number of bits");
838 GMX_ASSERT(sizeof(nbl
->excl
[0])*8 >= c_nbnxnGpuJgroupSize
*c_gpuNumClusterPerCell
, "The GPU exclusion mask does not contain a sufficient number of bits");
841 nbl
->excl_nalloc
= 0;
843 check_excl_space(nbl
, 1);
845 set_no_excls(&nbl
->excl
[0]);
851 snew_aligned(nbl
->work
->bb_ci
, 1, NBNXN_SEARCH_BB_MEM_ALIGN
);
856 snew_aligned(nbl
->work
->pbb_ci
, c_gpuNumClusterPerCell
/STRIDE_PBB
*NNBSBB_XXXX
, NBNXN_SEARCH_BB_MEM_ALIGN
);
858 snew_aligned(nbl
->work
->bb_ci
, c_gpuNumClusterPerCell
, NBNXN_SEARCH_BB_MEM_ALIGN
);
861 int gpu_clusterpair_nc
= c_gpuNumClusterPerCell
*c_nbnxnGpuClusterSize
*DIM
;
862 snew(nbl
->work
->x_ci
, gpu_clusterpair_nc
);
864 snew_aligned(nbl
->work
->x_ci_simd
,
865 std::max(NBNXN_CPU_CLUSTER_I_SIZE
*DIM
*GMX_SIMD_REAL_WIDTH
,
867 GMX_SIMD_REAL_WIDTH
);
869 snew_aligned(nbl
->work
->d2
, c_gpuNumClusterPerCell
, NBNXN_SEARCH_BB_MEM_ALIGN
);
871 nbl
->work
->sort
= NULL
;
872 nbl
->work
->sort_nalloc
= 0;
873 nbl
->work
->sci_sort
= NULL
;
874 nbl
->work
->sci_sort_nalloc
= 0;
877 void nbnxn_init_pairlist_set(nbnxn_pairlist_set_t
*nbl_list
,
878 gmx_bool bSimple
, gmx_bool bCombined
,
879 nbnxn_alloc_t
*alloc
,
882 nbl_list
->bSimple
= bSimple
;
883 nbl_list
->bCombined
= bCombined
;
885 nbl_list
->nnbl
= gmx_omp_nthreads_get(emntNonbonded
);
887 if (!nbl_list
->bCombined
&&
888 nbl_list
->nnbl
> NBNXN_BUFFERFLAG_MAX_THREADS
)
890 gmx_fatal(FARGS
, "%d OpenMP threads were requested. Since the non-bonded force buffer reduction is prohibitively slow with more than %d threads, we do not allow this. Use %d or less OpenMP threads.",
891 nbl_list
->nnbl
, NBNXN_BUFFERFLAG_MAX_THREADS
, NBNXN_BUFFERFLAG_MAX_THREADS
);
894 snew(nbl_list
->nbl
, nbl_list
->nnbl
);
895 if (bSimple
&& nbl_list
->nnbl
> 1)
897 snew(nbl_list
->nbl_work
, nbl_list
->nnbl
);
899 snew(nbl_list
->nbl_fep
, nbl_list
->nnbl
);
900 /* Execute in order to avoid memory interleaving between threads */
901 #pragma omp parallel for num_threads(nbl_list->nnbl) schedule(static)
902 for (int i
= 0; i
< nbl_list
->nnbl
; i
++)
906 /* Allocate the nblist data structure locally on each thread
907 * to optimize memory access for NUMA architectures.
909 snew(nbl_list
->nbl
[i
], 1);
911 /* Only list 0 is used on the GPU, use normal allocation for i>0 */
912 if (!bSimple
&& i
== 0)
914 nbnxn_init_pairlist(nbl_list
->nbl
[i
], nbl_list
->bSimple
, alloc
, free
);
918 nbnxn_init_pairlist(nbl_list
->nbl
[i
], nbl_list
->bSimple
, NULL
, NULL
);
919 if (bSimple
&& nbl_list
->nnbl
> 1)
921 snew(nbl_list
->nbl_work
[i
], 1);
922 nbnxn_init_pairlist(nbl_list
->nbl_work
[i
], nbl_list
->bSimple
, NULL
, NULL
);
926 snew(nbl_list
->nbl_fep
[i
], 1);
927 nbnxn_init_pairlist_fep(nbl_list
->nbl_fep
[i
]);
929 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
;
933 /* Print statistics of a pair list, used for debug output */
934 static void print_nblist_statistics_simple(FILE *fp
, const nbnxn_pairlist_t
*nbl
,
935 const nbnxn_search_t nbs
, real rl
)
937 const nbnxn_grid_t
*grid
;
941 grid
= &nbs
->grid
[0];
943 fprintf(fp
, "nbl nci %d ncj %d\n",
944 nbl
->nci
, nbl
->ncjInUse
);
945 fprintf(fp
, "nbl na_sc %d rl %g ncp %d per cell %.1f atoms %.1f ratio %.2f\n",
946 nbl
->na_sc
, rl
, nbl
->ncjInUse
, nbl
->ncjInUse
/(double)grid
->nc
,
947 nbl
->ncjInUse
/(double)grid
->nc
*grid
->na_sc
,
948 nbl
->ncjInUse
/(double)grid
->nc
*grid
->na_sc
/(0.5*4.0/3.0*M_PI
*rl
*rl
*rl
*grid
->nc
*grid
->na_sc
/(grid
->size
[XX
]*grid
->size
[YY
]*grid
->size
[ZZ
])));
950 fprintf(fp
, "nbl average j cell list length %.1f\n",
951 0.25*nbl
->ncjInUse
/(double)std::max(nbl
->nci
, 1));
953 for (int s
= 0; s
< SHIFTS
; s
++)
958 for (int i
= 0; i
< nbl
->nci
; i
++)
960 cs
[nbl
->ci
[i
].shift
& NBNXN_CI_SHIFT
] +=
961 nbl
->ci
[i
].cj_ind_end
- nbl
->ci
[i
].cj_ind_start
;
963 int j
= nbl
->ci
[i
].cj_ind_start
;
964 while (j
< nbl
->ci
[i
].cj_ind_end
&&
965 nbl
->cj
[j
].excl
!= NBNXN_INTERACTION_MASK_ALL
)
971 fprintf(fp
, "nbl cell pairs, total: %d excl: %d %.1f%%\n",
972 nbl
->ncj
, npexcl
, 100*npexcl
/(double)std::max(nbl
->ncj
, 1));
973 for (int s
= 0; s
< SHIFTS
; s
++)
977 fprintf(fp
, "nbl shift %2d ncj %3d\n", s
, cs
[s
]);
982 /* Print statistics of a pair lists, used for debug output */
983 static void print_nblist_statistics_supersub(FILE *fp
, const nbnxn_pairlist_t
*nbl
,
984 const nbnxn_search_t nbs
, real rl
)
986 const nbnxn_grid_t
*grid
;
988 int c
[c_gpuNumClusterPerCell
+ 1];
989 double sum_nsp
, sum_nsp2
;
992 /* This code only produces correct statistics with domain decomposition */
993 grid
= &nbs
->grid
[0];
995 fprintf(fp
, "nbl nsci %d ncj4 %d nsi %d excl4 %d\n",
996 nbl
->nsci
, nbl
->ncj4
, nbl
->nci_tot
, nbl
->nexcl
);
997 fprintf(fp
, "nbl na_c %d rl %g ncp %d per cell %.1f atoms %.1f ratio %.2f\n",
998 nbl
->na_ci
, rl
, nbl
->nci_tot
, nbl
->nci_tot
/(double)grid
->nsubc_tot
,
999 nbl
->nci_tot
/(double)grid
->nsubc_tot
*grid
->na_c
,
1000 nbl
->nci_tot
/(double)grid
->nsubc_tot
*grid
->na_c
/(0.5*4.0/3.0*M_PI
*rl
*rl
*rl
*grid
->nsubc_tot
*grid
->na_c
/(grid
->size
[XX
]*grid
->size
[YY
]*grid
->size
[ZZ
])));
1005 for (int si
= 0; si
<= c_gpuNumClusterPerCell
; si
++)
1009 for (int i
= 0; i
< nbl
->nsci
; i
++)
1014 for (int j4
= nbl
->sci
[i
].cj4_ind_start
; j4
< nbl
->sci
[i
].cj4_ind_end
; j4
++)
1016 for (int j
= 0; j
< c_nbnxnGpuJgroupSize
; j
++)
1019 for (int si
= 0; si
< c_gpuNumClusterPerCell
; si
++)
1021 if (nbl
->cj4
[j4
].imei
[0].imask
& (1U << (j
*c_gpuNumClusterPerCell
+ si
)))
1031 sum_nsp2
+= nsp
*nsp
;
1032 nsp_max
= std::max(nsp_max
, nsp
);
1036 sum_nsp
/= nbl
->nsci
;
1037 sum_nsp2
/= nbl
->nsci
;
1039 fprintf(fp
, "nbl #cluster-pairs: av %.1f stddev %.1f max %d\n",
1040 sum_nsp
, std::sqrt(sum_nsp2
- sum_nsp
*sum_nsp
), nsp_max
);
1044 for (b
= 0; b
<= c_gpuNumClusterPerCell
; b
++)
1046 fprintf(fp
, "nbl j-list #i-subcell %d %7d %4.1f\n",
1048 100.0*c
[b
]/(double)(nbl
->ncj4
*c_nbnxnGpuJgroupSize
));
1053 /* Returns a pointer to the exclusion mask for cj4-unit cj4, warp warp */
1054 static void low_get_nbl_exclusions(nbnxn_pairlist_t
*nbl
, int cj4
,
1055 int warp
, nbnxn_excl_t
**excl
)
1057 if (nbl
->cj4
[cj4
].imei
[warp
].excl_ind
== 0)
1059 /* No exclusions set, make a new list entry */
1060 nbl
->cj4
[cj4
].imei
[warp
].excl_ind
= nbl
->nexcl
;
1062 *excl
= &nbl
->excl
[nbl
->cj4
[cj4
].imei
[warp
].excl_ind
];
1063 set_no_excls(*excl
);
1067 /* We already have some exclusions, new ones can be added to the list */
1068 *excl
= &nbl
->excl
[nbl
->cj4
[cj4
].imei
[warp
].excl_ind
];
1072 /* Returns a pointer to the exclusion mask for cj4-unit cj4, warp warp,
1073 * generates a new element and allocates extra memory, if necessary.
1075 static void get_nbl_exclusions_1(nbnxn_pairlist_t
*nbl
, int cj4
,
1076 int warp
, nbnxn_excl_t
**excl
)
1078 if (nbl
->cj4
[cj4
].imei
[warp
].excl_ind
== 0)
1080 /* We need to make a new list entry, check if we have space */
1081 check_excl_space(nbl
, 1);
1083 low_get_nbl_exclusions(nbl
, cj4
, warp
, excl
);
1086 /* Returns pointers to the exclusion masks for cj4-unit cj4 for both warps,
1087 * generates a new element and allocates extra memory, if necessary.
1089 static void get_nbl_exclusions_2(nbnxn_pairlist_t
*nbl
, int cj4
,
1090 nbnxn_excl_t
**excl_w0
,
1091 nbnxn_excl_t
**excl_w1
)
1093 /* Check for space we might need */
1094 check_excl_space(nbl
, 2);
1096 low_get_nbl_exclusions(nbl
, cj4
, 0, excl_w0
);
1097 low_get_nbl_exclusions(nbl
, cj4
, 1, excl_w1
);
1100 /* Sets the self exclusions i=j and pair exclusions i>j */
1101 static void set_self_and_newton_excls_supersub(nbnxn_pairlist_t
*nbl
,
1102 int cj4_ind
, int sj_offset
,
1103 int i_cluster_in_cell
)
1105 nbnxn_excl_t
*excl
[c_nbnxnGpuClusterpairSplit
];
    /* Here we only set the self and double pair exclusions */
1109 assert(c_nbnxnGpuClusterpairSplit
== 2);
1111 get_nbl_exclusions_2(nbl
, cj4_ind
, &excl
[0], &excl
[1]);
1113 /* Only minor < major bits set */
1114 for (int ej
= 0; ej
< nbl
->na_ci
; ej
++)
1117 for (int ei
= ej
; ei
< nbl
->na_ci
; ei
++)
1119 excl
[w
]->pair
[(ej
& (c_nbnxnGpuJgroupSize
-1))*nbl
->na_ci
+ ei
] &=
1120 ~(1U << (sj_offset
*c_gpuNumClusterPerCell
+ i_cluster_in_cell
));
/* Returns a diagonal or off-diagonal interaction mask for plain C lists */
static unsigned int get_imask(gmx_bool rdiag, int ci, int cj)
{
    return (rdiag && ci == cj ? NBNXN_INTERACTION_MASK_DIAG : NBNXN_INTERACTION_MASK_ALL);
}

/* Returns a diagonal or off-diagonal interaction mask for cj-size=2 */
static unsigned int get_imask_simd_j2(gmx_bool rdiag, int ci, int cj)
{
    return (rdiag && ci*2 == cj ? NBNXN_INTERACTION_MASK_DIAG_J2_0 :
            (rdiag && ci*2+1 == cj ? NBNXN_INTERACTION_MASK_DIAG_J2_1 :
             NBNXN_INTERACTION_MASK_ALL));
}

/* Returns a diagonal or off-diagonal interaction mask for cj-size=4 */
static unsigned int get_imask_simd_j4(gmx_bool rdiag, int ci, int cj)
{
    return (rdiag && ci == cj ? NBNXN_INTERACTION_MASK_DIAG : NBNXN_INTERACTION_MASK_ALL);
}

/* Returns a diagonal or off-diagonal interaction mask for cj-size=8 */
static unsigned int get_imask_simd_j8(gmx_bool rdiag, int ci, int cj)
{
    return (rdiag && ci == cj*2 ? NBNXN_INTERACTION_MASK_DIAG_J8_0 :
            (rdiag && ci == cj*2+1 ? NBNXN_INTERACTION_MASK_DIAG_J8_1 :
             NBNXN_INTERACTION_MASK_ALL));
}
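
/* Editor's note (illustrative, not part of the original source): with 4-atom
 * i-clusters, the j8 case has i-clusters 2k and 2k+1 sharing diagonal
 * j-cluster k, and the j2 case has j-clusters 2k and 2k+1 sharing diagonal
 * i-cluster k; the two _0/_1 mask variants select which half of the diagonal
 * block must be removed in each situation.
 */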
#if GMX_SIMD_REAL_WIDTH == 2
#define get_imask_simd_4xn  get_imask_simd_j2
#endif
#if GMX_SIMD_REAL_WIDTH == 4
#define get_imask_simd_4xn  get_imask_simd_j4
#endif
#if GMX_SIMD_REAL_WIDTH == 8
#define get_imask_simd_4xn  get_imask_simd_j8
#define get_imask_simd_2xnn get_imask_simd_j4
#endif
#if GMX_SIMD_REAL_WIDTH == 16
#define get_imask_simd_2xnn get_imask_simd_j8
#endif
1169 /* Plain C code for making a pair list of cell ci vs cell cjf-cjl.
1170 * Checks bounding box distances and possibly atom pair distances.
1172 static void make_cluster_list_simple(const nbnxn_grid_t
*gridj
,
1173 nbnxn_pairlist_t
*nbl
,
1174 int ci
, int cjf
, int cjl
,
1175 gmx_bool remove_sub_diag
,
1177 real rl2
, float rbb2
,
1180 const nbnxn_bb_t
*bb_ci
;
1187 bb_ci
= nbl
->work
->bb_ci
;
1188 x_ci
= nbl
->work
->x_ci
;
1191 while (!InRange
&& cjf
<= cjl
)
1193 d2
= subc_bb_dist2(0, bb_ci
, cjf
, gridj
->bb
);
1196 /* Check if the distance is within the distance where
1197 * we use only the bounding box distance rbb,
1198 * or within the cut-off and there is at least one atom pair
1199 * within the cut-off.
1207 cjf_gl
= gridj
->cell0
+ cjf
;
1208 for (int i
= 0; i
< NBNXN_CPU_CLUSTER_I_SIZE
&& !InRange
; i
++)
1210 for (int j
= 0; j
< NBNXN_CPU_CLUSTER_I_SIZE
; j
++)
1212 InRange
= InRange
||
1213 (gmx::square(x_ci
[i
*STRIDE_XYZ
+XX
] - x_j
[(cjf_gl
*NBNXN_CPU_CLUSTER_I_SIZE
+j
)*STRIDE_XYZ
+XX
]) +
1214 gmx::square(x_ci
[i
*STRIDE_XYZ
+YY
] - x_j
[(cjf_gl
*NBNXN_CPU_CLUSTER_I_SIZE
+j
)*STRIDE_XYZ
+YY
]) +
1215 gmx::square(x_ci
[i
*STRIDE_XYZ
+ZZ
] - x_j
[(cjf_gl
*NBNXN_CPU_CLUSTER_I_SIZE
+j
)*STRIDE_XYZ
+ZZ
]) < rl2
);
1218 *ndistc
+= NBNXN_CPU_CLUSTER_I_SIZE
*NBNXN_CPU_CLUSTER_I_SIZE
;
1231 while (!InRange
&& cjl
> cjf
)
1233 d2
= subc_bb_dist2(0, bb_ci
, cjl
, gridj
->bb
);
1236 /* Check if the distance is within the distance where
1237 * we use only the bounding box distance rbb,
1238 * or within the cut-off and there is at least one atom pair
1239 * within the cut-off.
1247 cjl_gl
= gridj
->cell0
+ cjl
;
1248 for (int i
= 0; i
< NBNXN_CPU_CLUSTER_I_SIZE
&& !InRange
; i
++)
1250 for (int j
= 0; j
< NBNXN_CPU_CLUSTER_I_SIZE
; j
++)
1252 InRange
= InRange
||
1253 (gmx::square(x_ci
[i
*STRIDE_XYZ
+XX
] - x_j
[(cjl_gl
*NBNXN_CPU_CLUSTER_I_SIZE
+j
)*STRIDE_XYZ
+XX
]) +
1254 gmx::square(x_ci
[i
*STRIDE_XYZ
+YY
] - x_j
[(cjl_gl
*NBNXN_CPU_CLUSTER_I_SIZE
+j
)*STRIDE_XYZ
+YY
]) +
1255 gmx::square(x_ci
[i
*STRIDE_XYZ
+ZZ
] - x_j
[(cjl_gl
*NBNXN_CPU_CLUSTER_I_SIZE
+j
)*STRIDE_XYZ
+ZZ
]) < rl2
);
1258 *ndistc
+= NBNXN_CPU_CLUSTER_I_SIZE
*NBNXN_CPU_CLUSTER_I_SIZE
;
1268 for (int cj
= cjf
; cj
<= cjl
; cj
++)
1270 /* Store cj and the interaction mask */
1271 nbl
->cj
[nbl
->ncj
].cj
= gridj
->cell0
+ cj
;
1272 nbl
->cj
[nbl
->ncj
].excl
= get_imask(remove_sub_diag
, ci
, cj
);
1275 /* Increase the closing index in i super-cell list */
1276 nbl
->ci
[nbl
->nci
].cj_ind_end
= nbl
->ncj
;
1280 #ifdef GMX_NBNXN_SIMD_4XN
1281 #include "gromacs/mdlib/nbnxn_search_simd_4xn.h"
1283 #ifdef GMX_NBNXN_SIMD_2XNN
1284 #include "gromacs/mdlib/nbnxn_search_simd_2xnn.h"
1287 /* Plain C or SIMD4 code for making a pair list of super-cell sci vs scj.
1288 * Checks bounding box distances and possibly atom pair distances.
1290 static void make_cluster_list_supersub(const nbnxn_grid_t
*gridi
,
1291 const nbnxn_grid_t
*gridj
,
1292 nbnxn_pairlist_t
*nbl
,
1294 gmx_bool sci_equals_scj
,
1295 int stride
, const real
*x
,
1296 real rl2
, float rbb2
,
1299 nbnxn_list_work_t
*work
= nbl
->work
;
1302 const float *pbb_ci
= work
->pbb_ci
;
1304 const nbnxn_bb_t
*bb_ci
= work
->bb_ci
;
1307 assert(c_nbnxnGpuClusterSize
== gridi
->na_c
);
1308 assert(c_nbnxnGpuClusterSize
== gridj
->na_c
);
1310 /* We generate the pairlist mainly based on bounding-box distances
1311 * and do atom pair distance based pruning on the GPU.
1312 * Only if a j-group contains a single cluster-pair, we try to prune
1313 * that pair based on atom distances on the CPU to avoid empty j-groups.
1315 #define PRUNE_LIST_CPU_ONE 1
1316 #define PRUNE_LIST_CPU_ALL 0
1318 #if PRUNE_LIST_CPU_ONE
1322 float *d2l
= work
->d2
;
1324 for (int subc
= 0; subc
< gridj
->nsubc
[scj
]; subc
++)
1326 int cj4_ind
= nbl
->work
->cj_ind
/c_nbnxnGpuJgroupSize
;
1327 int cj_offset
= nbl
->work
->cj_ind
- cj4_ind
*c_nbnxnGpuJgroupSize
;
1328 nbnxn_cj4_t
*cj4
= &nbl
->cj4
[cj4_ind
];
1330 int cj
= scj
*c_gpuNumClusterPerCell
+ subc
;
1332 int cj_gl
= gridj
->cell0
*c_gpuNumClusterPerCell
+ cj
;
1334 /* Initialize this j-subcell i-subcell list */
1335 cj4
->cj
[cj_offset
] = cj_gl
;
1344 ci1
= gridi
->nsubc
[sci
];
1348 /* Determine all ci1 bb distances in one call with SIMD4 */
1349 subc_bb_dist2_simd4_xxxx(gridj
->pbb
+(cj
>>STRIDE_PBB_2LOG
)*NNBSBB_XXXX
+(cj
& (STRIDE_PBB
-1)),
1351 *ndistc
+= c_nbnxnGpuClusterSize
*2;
1355 unsigned int imask
= 0;
1356 /* We use a fixed upper-bound instead of ci1 to help optimization */
1357 for (int ci
= 0; ci
< c_gpuNumClusterPerCell
; ci
++)
1365 /* Determine the bb distance between ci and cj */
1366 d2l
[ci
] = subc_bb_dist2(ci
, bb_ci
, cj
, gridj
->bb
);
1371 #if PRUNE_LIST_CPU_ALL
1372 /* Check if the distance is within the distance where
1373 * we use only the bounding box distance rbb,
1374 * or within the cut-off and there is at least one atom pair
1375 * within the cut-off. This check is very costly.
1377 *ndistc
+= c_nbnxnGpuClusterSize
*c_nbnxnGpuClusterSize
;
1380 clusterpair_in_range(work
, ci
, cj_gl
, stride
, x
, rl2
)))
1382 /* Check if the distance between the two bounding boxes
1383 * in within the pair-list cut-off.
1388 /* Flag this i-subcell to be taken into account */
1389 imask
|= (1U << (cj_offset
*c_gpuNumClusterPerCell
+ ci
));
1391 #if PRUNE_LIST_CPU_ONE
1399 #if PRUNE_LIST_CPU_ONE
1400 /* If we only found 1 pair, check if any atoms are actually
1401 * within the cut-off, so we could get rid of it.
1403 if (npair
== 1 && d2l
[ci_last
] >= rbb2
&&
1404 !clusterpair_in_range(work
, ci_last
, cj_gl
, stride
, x
, rl2
))
1406 imask
&= ~(1U << (cj_offset
*c_gpuNumClusterPerCell
+ ci_last
));
1413 /* We have a useful sj entry, close it now */
1415 /* Set the exclusions for the ci==sj entry.
1416 * Here we don't bother to check if this entry is actually flagged,
1417 * as it will nearly always be in the list.
1421 set_self_and_newton_excls_supersub(nbl
, cj4_ind
, cj_offset
, subc
);
1424 /* Copy the cluster interaction mask to the list */
1425 for (int w
= 0; w
< c_nbnxnGpuClusterpairSplit
; w
++)
1427 cj4
->imei
[w
].imask
|= imask
;
1430 nbl
->work
->cj_ind
++;
1432 /* Keep the count */
1433 nbl
->nci_tot
+= npair
;
1435 /* Increase the closing index in i super-cell list */
1436 nbl
->sci
[nbl
->nsci
].cj4_ind_end
=
1437 (nbl
->work
->cj_ind
+ c_nbnxnGpuJgroupSize
- 1)/c_nbnxnGpuJgroupSize
;
1442 /* Set all atom-pair exclusions from the topology stored in excl
1443 * as masks in the pair-list for simple list i-entry nbl_ci
1445 static void set_ci_top_excls(const nbnxn_search_t nbs
,
1446 nbnxn_pairlist_t
*nbl
,
1447 gmx_bool diagRemoved
,
1450 const nbnxn_ci_t
*nbl_ci
,
1451 const t_blocka
*excl
)
1455 int cj_ind_first
, cj_ind_last
;
1456 int cj_first
, cj_last
;
1458 int ai
, aj
, si
, ge
, se
;
1459 int found
, cj_ind_0
, cj_ind_1
, cj_ind_m
;
1461 int inner_i
, inner_e
;
1465 if (nbl_ci
->cj_ind_end
== nbl_ci
->cj_ind_start
)
1473 cj_ind_first
= nbl_ci
->cj_ind_start
;
1474 cj_ind_last
= nbl
->ncj
- 1;
1476 cj_first
= nbl
->cj
[cj_ind_first
].cj
;
1477 cj_last
= nbl
->cj
[cj_ind_last
].cj
;
1479 /* Determine how many contiguous j-cells we have starting
1480 * from the first i-cell. This number can be used to directly
1481 * calculate j-cell indices for excluded atoms.
1484 if (na_ci_2log
== na_cj_2log
)
1486 while (cj_ind_first
+ ndirect
<= cj_ind_last
&&
1487 nbl
->cj
[cj_ind_first
+ndirect
].cj
== ci
+ ndirect
)
1492 #if NBNXN_SEARCH_BB_SIMD4
1495 while (cj_ind_first
+ ndirect
<= cj_ind_last
&&
1496 nbl
->cj
[cj_ind_first
+ndirect
].cj
== ci_to_cj(ci
, na_cj_2log
) + ndirect
)
1503 /* Loop over the atoms in the i super-cell */
1504 for (int i
= 0; i
< nbl
->na_sc
; i
++)
1506 ai
= nbs
->a
[ci
*nbl
->na_sc
+i
];
1509 si
= (i
>>na_ci_2log
);
1511 /* Loop over the topology-based exclusions for this i-atom */
1512 for (int eind
= excl
->index
[ai
]; eind
< excl
->index
[ai
+1]; eind
++)
            /* The self exclusions are already set, save some time */
1524 /* Without shifts we only calculate interactions j>i
1525 * for one-way pair-lists.
1527 if (diagRemoved
&& ge
<= ci
*nbl
->na_sc
+ i
)
1532 se
= (ge
>> na_cj_2log
);
1534 /* Could the cluster se be in our list? */
1535 if (se
>= cj_first
&& se
<= cj_last
)
1537 if (se
< cj_first
+ ndirect
)
1539 /* We can calculate cj_ind directly from se */
1540 found
= cj_ind_first
+ se
- cj_first
;
1544 /* Search for se using bisection */
1546 cj_ind_0
= cj_ind_first
+ ndirect
;
1547 cj_ind_1
= cj_ind_last
+ 1;
1548 while (found
== -1 && cj_ind_0
< cj_ind_1
)
1550 cj_ind_m
= (cj_ind_0
+ cj_ind_1
)>>1;
1552 cj_m
= nbl
->cj
[cj_ind_m
].cj
;
1560 cj_ind_1
= cj_ind_m
;
1564 cj_ind_0
= cj_ind_m
+ 1;
1571 inner_i
= i
- (si
<< na_ci_2log
);
1572 inner_e
= ge
- (se
<< na_cj_2log
);
1574 nbl
->cj
[found
].excl
&= ~(1U<<((inner_i
<<na_cj_2log
) + inner_e
));
1582 /* Add a new i-entry to the FEP list and copy the i-properties */
1583 static gmx_inline
void fep_list_new_nri_copy(t_nblist
*nlist
)
1585 /* Add a new i-entry */
1588 assert(nlist
->nri
< nlist
->maxnri
);
1590 /* Duplicate the last i-entry, except for jindex, which continues */
1591 nlist
->iinr
[nlist
->nri
] = nlist
->iinr
[nlist
->nri
-1];
1592 nlist
->shift
[nlist
->nri
] = nlist
->shift
[nlist
->nri
-1];
1593 nlist
->gid
[nlist
->nri
] = nlist
->gid
[nlist
->nri
-1];
1594 nlist
->jindex
[nlist
->nri
] = nlist
->nrj
;
/* For load balancing of the free-energy lists over threads, we set
 * the maximum nrj size of an i-entry to 40. This leads to good
 * load balancing in the worst case scenario of a single perturbed
 * particle on 16 threads, while not introducing significant overhead.
 * Note that half of the perturbed pairs will anyhow end up in very small lists,
 * since non-perturbed i-particles will see few perturbed j-particles.
 */
const int max_nrj_fep = 40;
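
/* Editor's note (illustrative numbers, not part of the original source): a
 * single perturbed particle with ~1000 j-atoms in range would otherwise give
 * one i-entry with ~1000 j's; capping at max_nrj_fep = 40 splits it into ~25
 * i-entries that can be distributed over, e.g., 16 threads.
 */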
1606 /* Exclude the perturbed pairs from the Verlet list. This is only done to avoid
1607 * singularities for overlapping particles (0/0), since the charges and
1608 * LJ parameters have been zeroed in the nbnxn data structure.
1609 * Simultaneously make a group pair list for the perturbed pairs.
1611 static void make_fep_list(const nbnxn_search_t nbs
,
1612 const nbnxn_atomdata_t
*nbat
,
1613 nbnxn_pairlist_t
*nbl
,
1614 gmx_bool bDiagRemoved
,
1616 const nbnxn_grid_t
*gridi
,
1617 const nbnxn_grid_t
*gridj
,
1620 int ci
, cj_ind_start
, cj_ind_end
, cja
, cjr
;
1622 int ngid
, gid_i
= 0, gid_j
, gid
;
1623 int egp_shift
, egp_mask
;
1625 int ind_i
, ind_j
, ai
, aj
;
1627 gmx_bool bFEP_i
, bFEP_i_all
;
1629 if (nbl_ci
->cj_ind_end
== nbl_ci
->cj_ind_start
)
1637 cj_ind_start
= nbl_ci
->cj_ind_start
;
1638 cj_ind_end
= nbl_ci
->cj_ind_end
;
1640 /* In worst case we have alternating energy groups
1641 * and create #atom-pair lists, which means we need the size
1642 * of a cluster pair (na_ci*na_cj) times the number of cj's.
1644 nri_max
= nbl
->na_ci
*nbl
->na_cj
*(cj_ind_end
- cj_ind_start
);
1645 if (nlist
->nri
+ nri_max
> nlist
->maxnri
)
1647 nlist
->maxnri
= over_alloc_large(nlist
->nri
+ nri_max
);
1648 reallocate_nblist(nlist
);
1651 ngid
= nbat
->nenergrp
;
1653 if (static_cast<std::size_t>(ngid
*gridj
->na_cj
) > sizeof(gid_cj
)*8)
1655 gmx_fatal(FARGS
, "The Verlet scheme with %dx%d kernels and free-energy only supports up to %d energy groups",
1656 gridi
->na_c
, gridj
->na_cj
, (sizeof(gid_cj
)*8)/gridj
->na_cj
);
1659 egp_shift
= nbat
->neg_2log
;
1660 egp_mask
= (1<<nbat
->neg_2log
) - 1;
1662 /* Loop over the atoms in the i sub-cell */
1664 for (int i
= 0; i
< nbl
->na_ci
; i
++)
1666 ind_i
= ci
*nbl
->na_ci
+ i
;
1671 nlist
->jindex
[nri
+1] = nlist
->jindex
[nri
];
1672 nlist
->iinr
[nri
] = ai
;
1673 /* The actual energy group pair index is set later */
1674 nlist
->gid
[nri
] = 0;
1675 nlist
->shift
[nri
] = nbl_ci
->shift
& NBNXN_CI_SHIFT
;
1677 bFEP_i
= gridi
->fep
[ci
- gridi
->cell0
] & (1 << i
);
1679 bFEP_i_all
= bFEP_i_all
&& bFEP_i
;
1681 if (nlist
->nrj
+ (cj_ind_end
- cj_ind_start
)*nbl
->na_cj
> nlist
->maxnrj
)
1683 nlist
->maxnrj
= over_alloc_small(nlist
->nrj
+ (cj_ind_end
- cj_ind_start
)*nbl
->na_cj
);
1684 srenew(nlist
->jjnr
, nlist
->maxnrj
);
1685 srenew(nlist
->excl_fep
, nlist
->maxnrj
);
1690 gid_i
= (nbat
->energrp
[ci
] >> (egp_shift
*i
)) & egp_mask
;
1693 for (int cj_ind
= cj_ind_start
; cj_ind
< cj_ind_end
; cj_ind
++)
1695 unsigned int fep_cj
;
1697 cja
= nbl
->cj
[cj_ind
].cj
;
1699 if (gridj
->na_cj
== gridj
->na_c
)
1701 cjr
= cja
- gridj
->cell0
;
1702 fep_cj
= gridj
->fep
[cjr
];
1705 gid_cj
= nbat
->energrp
[cja
];
1708 else if (2*gridj
->na_cj
== gridj
->na_c
)
1710 cjr
= cja
- gridj
->cell0
*2;
1711 /* Extract half of the ci fep/energrp mask */
1712 fep_cj
= (gridj
->fep
[cjr
>>1] >> ((cjr
&1)*gridj
->na_cj
)) & ((1<<gridj
->na_cj
) - 1);
1715 gid_cj
= nbat
->energrp
[cja
>>1] >> ((cja
&1)*gridj
->na_cj
*egp_shift
) & ((1<<(gridj
->na_cj
*egp_shift
)) - 1);
1720 cjr
= cja
- (gridj
->cell0
>>1);
1721 /* Combine two ci fep masks/energrp */
1722 fep_cj
= gridj
->fep
[cjr
*2] + (gridj
->fep
[cjr
*2+1] << gridj
->na_c
);
1725 gid_cj
= nbat
->energrp
[cja
*2] + (nbat
->energrp
[cja
*2+1] << (gridj
->na_c
*egp_shift
));
1729 if (bFEP_i
|| fep_cj
!= 0)
1731 for (int j
= 0; j
< nbl
->na_cj
; j
++)
1733 /* Is this interaction perturbed and not excluded? */
1734 ind_j
= cja
*nbl
->na_cj
+ j
;
1737 (bFEP_i
|| (fep_cj
& (1 << j
))) &&
1738 (!bDiagRemoved
|| ind_j
>= ind_i
))
1742 gid_j
= (gid_cj
>> (j
*egp_shift
)) & egp_mask
;
1743 gid
= GID(gid_i
, gid_j
, ngid
);
1745 if (nlist
->nrj
> nlist
->jindex
[nri
] &&
1746 nlist
->gid
[nri
] != gid
)
1748 /* Energy group pair changed: new list */
1749 fep_list_new_nri_copy(nlist
);
1752 nlist
->gid
[nri
] = gid
;
1755 if (nlist
->nrj
- nlist
->jindex
[nri
] >= max_nrj_fep
)
1757 fep_list_new_nri_copy(nlist
);
1761 /* Add it to the FEP list */
1762 nlist
->jjnr
[nlist
->nrj
] = aj
;
1763 nlist
->excl_fep
[nlist
->nrj
] = (nbl
->cj
[cj_ind
].excl
>> (i
*nbl
->na_cj
+ j
)) & 1;
1766 /* Exclude it from the normal list.
1767 * Note that the charge has been set to zero,
1768 * but we need to avoid 0/0, as perturbed atoms
1769 * can be on top of each other.
1771 nbl
->cj
[cj_ind
].excl
&= ~(1U << (i
*nbl
->na_cj
+ j
));
1777 if (nlist
->nrj
> nlist
->jindex
[nri
])
1779 /* Actually add this new, non-empty, list */
1781 nlist
->jindex
[nlist
->nri
] = nlist
->nrj
;
1788 /* All interactions are perturbed, we can skip this entry */
1789 nbl_ci
->cj_ind_end
= cj_ind_start
;
1790 nbl
->ncjInUse
-= cj_ind_end
- cj_ind_start
;
/* Returns the index of j-cluster cj within its cj4 group */
static gmx_inline int cj_mod_cj4(int cj)
{
    return cj & (c_nbnxnGpuJgroupSize - 1);
}

/* Converts a j-cluster index to a cj4 group index */
static gmx_inline int cj_to_cj4(int cj)
{
    return cj/c_nbnxnGpuJgroupSize;
}

/* Returns the index of a j-atom within a warp */
static gmx_inline int a_mod_wj(int a)
{
    return a & (c_nbnxnGpuClusterSize/c_nbnxnGpuClusterpairSplit - 1);
}
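
/* Editor's note (worked example, not part of the original source): with
 * c_nbnxnGpuClusterSize == 8 and c_nbnxnGpuClusterpairSplit == 2, each warp
 * handles 4 j-atoms, so j-atoms 0-3 map to 0-3 within warp 0 and j-atoms 4-7
 * map to 0-3 within warp 1.
 */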
1812 /* As make_fep_list above, but for super/sub lists. */
1813 static void make_fep_list_supersub(const nbnxn_search_t nbs
,
1814 const nbnxn_atomdata_t
*nbat
,
1815 nbnxn_pairlist_t
*nbl
,
1816 gmx_bool bDiagRemoved
,
1817 const nbnxn_sci_t
*nbl_sci
,
1822 const nbnxn_grid_t
*gridi
,
1823 const nbnxn_grid_t
*gridj
,
1826 int sci
, cj4_ind_start
, cj4_ind_end
, cjr
;
1829 int ind_i
, ind_j
, ai
, aj
;
1833 const nbnxn_cj4_t
*cj4
;
1835 if (nbl_sci
->cj4_ind_end
== nbl_sci
->cj4_ind_start
)
1843 cj4_ind_start
= nbl_sci
->cj4_ind_start
;
1844 cj4_ind_end
= nbl_sci
->cj4_ind_end
;
1846 /* Here we process one super-cell, max #atoms na_sc, versus a list
1847 * cj4 entries, each with max c_nbnxnGpuJgroupSize cj's, each
1848 * of size na_cj atoms.
1849 * On the GPU we don't support energy groups (yet).
1850 * So for each of the na_sc i-atoms, we need max one FEP list
1851 * for each max_nrj_fep j-atoms.
1853 nri_max
= nbl
->na_sc
*nbl
->na_cj
*(1 + ((cj4_ind_end
- cj4_ind_start
)*c_nbnxnGpuJgroupSize
)/max_nrj_fep
);
1854 if (nlist
->nri
+ nri_max
> nlist
->maxnri
)
1856 nlist
->maxnri
= over_alloc_large(nlist
->nri
+ nri_max
);
1857 reallocate_nblist(nlist
);
1860 /* Loop over the atoms in the i super-cluster */
1861 for (int c
= 0; c
< c_gpuNumClusterPerCell
; c
++)
1863 c_abs
= sci
*c_gpuNumClusterPerCell
+ c
;
1865 for (int i
= 0; i
< nbl
->na_ci
; i
++)
1867 ind_i
= c_abs
*nbl
->na_ci
+ i
;
1872 nlist
->jindex
[nri
+1] = nlist
->jindex
[nri
];
1873 nlist
->iinr
[nri
] = ai
;
1874 /* With GPUs, energy groups are not supported */
1875 nlist
->gid
[nri
] = 0;
1876 nlist
->shift
[nri
] = nbl_sci
->shift
& NBNXN_CI_SHIFT
;
1878 bFEP_i
= (gridi
->fep
[c_abs
- gridi
->cell0
*c_gpuNumClusterPerCell
] & (1 << i
));
1880 xi
= nbat
->x
[ind_i
*nbat
->xstride
+XX
] + shx
;
1881 yi
= nbat
->x
[ind_i
*nbat
->xstride
+YY
] + shy
;
1882 zi
= nbat
->x
[ind_i
*nbat
->xstride
+ZZ
] + shz
;
1884 if ((nlist
->nrj
+ cj4_ind_end
- cj4_ind_start
)*c_nbnxnGpuJgroupSize
*nbl
->na_cj
> nlist
->maxnrj
)
1886 nlist
->maxnrj
= over_alloc_small((nlist
->nrj
+ cj4_ind_end
- cj4_ind_start
)*c_nbnxnGpuJgroupSize
*nbl
->na_cj
);
1887 srenew(nlist
->jjnr
, nlist
->maxnrj
);
1888 srenew(nlist
->excl_fep
, nlist
->maxnrj
);
1891 for (int cj4_ind
= cj4_ind_start
; cj4_ind
< cj4_ind_end
; cj4_ind
++)
1893 cj4
= &nbl
->cj4
[cj4_ind
];
1895 for (int gcj
= 0; gcj
< c_nbnxnGpuJgroupSize
; gcj
++)
1897 unsigned int fep_cj
;
1899 if ((cj4
->imei
[0].imask
& (1U << (gcj
*c_gpuNumClusterPerCell
+ c
))) == 0)
1901 /* Skip this ci for this cj */
1905 cjr
= cj4
->cj
[gcj
] - gridj
->cell0
*c_gpuNumClusterPerCell
;
1907 fep_cj
= gridj
->fep
[cjr
];
1909 if (bFEP_i
|| fep_cj
!= 0)
1911 for (int j
= 0; j
< nbl
->na_cj
; j
++)
1913 /* Is this interaction perturbed and not excluded? */
1914 ind_j
= (gridj
->cell0
*c_gpuNumClusterPerCell
+ cjr
)*nbl
->na_cj
+ j
;
1917 (bFEP_i
|| (fep_cj
& (1 << j
))) &&
1918 (!bDiagRemoved
|| ind_j
>= ind_i
))
1922 unsigned int excl_bit
;
1925 get_nbl_exclusions_1(nbl
, cj4_ind
, j
>>2, &excl
);
1927 excl_pair
= a_mod_wj(j
)*nbl
->na_ci
+ i
;
1928 excl_bit
= (1U << (gcj
*c_gpuNumClusterPerCell
+ c
));
1930 dx
= nbat
->x
[ind_j
*nbat
->xstride
+XX
] - xi
;
1931 dy
= nbat
->x
[ind_j
*nbat
->xstride
+YY
] - yi
;
1932 dz
= nbat
->x
[ind_j
*nbat
->xstride
+ZZ
] - zi
;
1934 /* The unpruned GPU list has more than 2/3
1935 * of the atom pairs beyond rlist. Using
1936 * this list will cause a lot of overhead
1937 * in the CPU FEP kernels, especially
1938 * relative to the fast GPU kernels.
1939 * So we prune the FEP list here.
1941 if (dx
*dx
+ dy
*dy
+ dz
*dz
< rlist_fep2
)
1943 if (nlist
->nrj
- nlist
->jindex
[nri
] >= max_nrj_fep
)
1945 fep_list_new_nri_copy(nlist
);
1949 /* Add it to the FEP list */
1950 nlist
->jjnr
[nlist
->nrj
] = aj
;
1951 nlist
->excl_fep
[nlist
->nrj
] = (excl
->pair
[excl_pair
] & excl_bit
) ? 1 : 0;
1955 /* Exclude it from the normal list.
1956 * Note that the charge and LJ parameters have
1957 * been set to zero, but we need to avoid 0/0,
1958 * as perturbed atoms can be on top of each other.
1960 excl
->pair
[excl_pair
] &= ~excl_bit
;
1964 /* Note that we could mask out this pair in imask
1965 * if all i- and/or all j-particles are perturbed.
1966 * But since the perturbed pairs on the CPU will
1967 * take an order of magnitude more time, the GPU
1968 * will finish before the CPU and there is no gain.
1974 if (nlist
->nrj
> nlist
->jindex
[nri
])
1976 /* Actually add this new, non-empty, list */
1978 nlist
->jindex
[nlist
->nri
] = nlist
->nrj
;
1985 /* Set all atom-pair exclusions from the topology stored in excl
1986 * as masks in the pair-list for i-super-cell entry nbl_sci
1988 static void set_sci_top_excls(const nbnxn_search_t nbs
,
1989 nbnxn_pairlist_t
*nbl
,
1990 gmx_bool diagRemoved
,
1992 const nbnxn_sci_t
*nbl_sci
,
1993 const t_blocka
*excl
)
1998 int cj_ind_first
, cj_ind_last
;
1999 int cj_first
, cj_last
;
2001 int ai
, aj
, si
, ge
, se
;
2002 int found
, cj_ind_0
, cj_ind_1
, cj_ind_m
;
2004 nbnxn_excl_t
*nbl_excl
;
2005 int inner_i
, inner_e
, w
;
2011 if (nbl_sci
->cj4_ind_end
== nbl_sci
->cj4_ind_start
)
2019 cj_ind_first
= nbl_sci
->cj4_ind_start
*c_nbnxnGpuJgroupSize
;
2020 cj_ind_last
= nbl
->work
->cj_ind
- 1;
2022 cj_first
= nbl
->cj4
[nbl_sci
->cj4_ind_start
].cj
[0];
2023 cj_last
= nbl_cj(nbl
, cj_ind_last
);
2025 /* Determine how many contiguous j-clusters we have starting
2026 * from the first i-cluster. This number can be used to directly
2027 * calculate j-cluster indices for excluded atoms.
2030 while (cj_ind_first
+ ndirect
<= cj_ind_last
&&
2031 nbl_cj(nbl
, cj_ind_first
+ndirect
) == sci
*c_gpuNumClusterPerCell
+ ndirect
)
2036 /* Loop over the atoms in the i super-cell */
2037 for (int i
= 0; i
< nbl
->na_sc
; i
++)
2039 ai
= nbs
->a
[sci
*nbl
->na_sc
+i
];
2042 si
= (i
>>na_c_2log
);
2044 /* Loop over the topology-based exclusions for this i-atom */
2045 for (int eind
= excl
->index
[ai
]; eind
< excl
->index
[ai
+1]; eind
++)
            /* The self exclusions are already set, save some time */
2057 /* Without shifts we only calculate interactions j>i
2058 * for one-way pair-lists.
2060 if (diagRemoved
&& ge
<= sci
*nbl
->na_sc
+ i
)
2066 /* Could the cluster se be in our list? */
2067 if (se
>= cj_first
&& se
<= cj_last
)
2069 if (se
< cj_first
+ ndirect
)
2071 /* We can calculate cj_ind directly from se */
2072 found
= cj_ind_first
+ se
- cj_first
;
2076 /* Search for se using bisection */
2078 cj_ind_0
= cj_ind_first
+ ndirect
;
2079 cj_ind_1
= cj_ind_last
+ 1;
2080 while (found
== -1 && cj_ind_0
< cj_ind_1
)
2082 cj_ind_m
= (cj_ind_0
+ cj_ind_1
)>>1;
2084 cj_m
= nbl_cj(nbl
, cj_ind_m
);
2092 cj_ind_1
= cj_ind_m
;
2096 cj_ind_0
= cj_ind_m
+ 1;
2103 inner_i
= i
- si
*na_c
;
2104 inner_e
= ge
- se
*na_c
;
2106 if (nbl_imask0(nbl
, found
) & (1U << (cj_mod_cj4(found
)*c_gpuNumClusterPerCell
+ si
)))
2110 get_nbl_exclusions_1(nbl
, cj_to_cj4(found
), w
, &nbl_excl
);
2112 nbl_excl
->pair
[a_mod_wj(inner_e
)*nbl
->na_ci
+inner_i
] &=
2113 ~(1U << (cj_mod_cj4(found
)*c_gpuNumClusterPerCell
+ si
));
2122 /* Reallocate the simple ci list for at least n entries */
2123 static void nb_realloc_ci(nbnxn_pairlist_t
*nbl
, int n
)
2125 nbl
->ci_nalloc
= over_alloc_small(n
);
2126 nbnxn_realloc_void((void **)&nbl
->ci
,
2127 nbl
->nci
*sizeof(*nbl
->ci
),
2128 nbl
->ci_nalloc
*sizeof(*nbl
->ci
),
2129 nbl
->alloc
, nbl
->free
);
2132 /* Reallocate the super-cell sci list for at least n entries */
2133 static void nb_realloc_sci(nbnxn_pairlist_t
*nbl
, int n
)
2135 nbl
->sci_nalloc
= over_alloc_small(n
);
2136 nbnxn_realloc_void((void **)&nbl
->sci
,
2137 nbl
->nsci
*sizeof(*nbl
->sci
),
2138 nbl
->sci_nalloc
*sizeof(*nbl
->sci
),
2139 nbl
->alloc
, nbl
->free
);
2142 /* Make a new ci entry at index nbl->nci */
2143 static void new_ci_entry(nbnxn_pairlist_t
*nbl
, int ci
, int shift
, int flags
)
2145 if (nbl
->nci
+ 1 > nbl
->ci_nalloc
)
2147 nb_realloc_ci(nbl
, nbl
->nci
+1);
2149 nbl
->ci
[nbl
->nci
].ci
= ci
;
2150 nbl
->ci
[nbl
->nci
].shift
= shift
;
2151 /* Store the interaction flags along with the shift */
2152 nbl
->ci
[nbl
->nci
].shift
|= flags
;
2153 nbl
->ci
[nbl
->nci
].cj_ind_start
= nbl
->ncj
;
2154 nbl
->ci
[nbl
->nci
].cj_ind_end
= nbl
->ncj
;
2157 /* Make a new sci entry at index nbl->nsci */
2158 static void new_sci_entry(nbnxn_pairlist_t
*nbl
, int sci
, int shift
)
2160 if (nbl
->nsci
+ 1 > nbl
->sci_nalloc
)
2162 nb_realloc_sci(nbl
, nbl
->nsci
+1);
2164 nbl
->sci
[nbl
->nsci
].sci
= sci
;
2165 nbl
->sci
[nbl
->nsci
].shift
= shift
;
2166 nbl
->sci
[nbl
->nsci
].cj4_ind_start
= nbl
->ncj4
;
2167 nbl
->sci
[nbl
->nsci
].cj4_ind_end
= nbl
->ncj4
;
2170 /* Sort the simple j-list cj on exclusions.
2171 * Entries with exclusions will all be sorted to the beginning of the list.
2173 static void sort_cj_excl(nbnxn_cj_t
*cj
, int ncj
,
2174 nbnxn_list_work_t
*work
)
2178 if (ncj
> work
->cj_nalloc
)
2180 work
->cj_nalloc
= over_alloc_large(ncj
);
2181 srenew(work
->cj
, work
->cj_nalloc
);
2184 /* Make a list of the j-cells involving exclusions */
2186 for (int j
= 0; j
< ncj
; j
++)
2188 if (cj
[j
].excl
!= NBNXN_INTERACTION_MASK_ALL
)
2190 work
->cj
[jnew
++] = cj
[j
];
2193 /* Check if there are exclusions at all or not just the first entry */
2194 if (!((jnew
== 0) ||
2195 (jnew
== 1 && cj
[0].excl
!= NBNXN_INTERACTION_MASK_ALL
)))
2197 for (int j
= 0; j
< ncj
; j
++)
2199 if (cj
[j
].excl
== NBNXN_INTERACTION_MASK_ALL
)
2201 work
->cj
[jnew
++] = cj
[j
];
2204 for (int j
= 0; j
< ncj
; j
++)
2206 cj
[j
] = work
->cj
[j
];
2211 /* Close this simple list i entry */
2212 static void close_ci_entry_simple(nbnxn_pairlist_t
*nbl
)
2216 /* All content of the new ci entry have already been filled correctly,
2217 * we only need to increase the count here (for non empty lists).
2219 jlen
= nbl
->ci
[nbl
->nci
].cj_ind_end
- nbl
->ci
[nbl
->nci
].cj_ind_start
;
2222 sort_cj_excl(nbl
->cj
+nbl
->ci
[nbl
->nci
].cj_ind_start
, jlen
, nbl
->work
);
2224 /* The counts below are used for non-bonded pair/flop counts
2225 * and should therefore match the available kernel setups.
2227 if (!(nbl
->ci
[nbl
->nci
].shift
& NBNXN_CI_DO_COUL(0)))
2229 nbl
->work
->ncj_noq
+= jlen
;
2231 else if ((nbl
->ci
[nbl
->nci
].shift
& NBNXN_CI_HALF_LJ(0)) ||
2232 !(nbl
->ci
[nbl
->nci
].shift
& NBNXN_CI_DO_LJ(0)))
2234 nbl
->work
->ncj_hlj
+= jlen
;
2241 /* Split sci entry for load balancing on the GPU.
2242 * Splitting ensures we have enough lists to fully utilize the whole GPU.
2243 * With progBal we generate progressively smaller lists, which improves
2244 * load balancing. As we only know the current count on our own thread,
2245 * we will need to estimate the current total amount of i-entries.
2246 * As the lists get concatenated later, this estimate depends
2247 * both on nthread and our own thread index.
2249 static void split_sci_entry(nbnxn_pairlist_t
*nbl
,
2251 gmx_bool progBal
, float nsp_tot_est
,
2252 int thread
, int nthread
)
2255 int cj4_start
, cj4_end
, j4len
;
2257 int nsp
, nsp_sci
, nsp_cj4
, nsp_cj4_e
, nsp_cj4_p
;
2263 /* Estimate the total numbers of ci's of the nblist combined
2264 * over all threads using the target number of ci's.
2266 nsp_est
= (nsp_tot_est
*thread
)/nthread
+ nbl
->nci_tot
;
2268 /* The first ci blocks should be larger, to avoid overhead.
2269 * The last ci blocks should be smaller, to improve load balancing.
2270 * The factor 3/2 makes the first block 3/2 times the target average
2271 * and ensures that the total number of blocks end up equal to
2272 * that of equally sized blocks of size nsp_target_av.
2274 nsp_max
= static_cast<int>(nsp_target_av
*(nsp_tot_est
*1.5/(nsp_est
+ nsp_tot_est
)));
2278 nsp_max
= nsp_target_av
;
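/* Editor's note (worked example, not part of the original source): the factor
 * 1.5*nsp_tot_est/(nsp_est + nsp_tot_est) above is ~1.5 when little of the
 * list has been generated (nsp_est small), 1.0 half-way through
 * (nsp_est == nsp_tot_est/2) and ~0.75 near the end, so the split blocks
 * shrink progressively as intended.
 */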
2281 cj4_start
= nbl
->sci
[nbl
->nsci
-1].cj4_ind_start
;
2282 cj4_end
= nbl
->sci
[nbl
->nsci
-1].cj4_ind_end
;
2283 j4len
= cj4_end
- cj4_start
;
2285 if (j4len
> 1 && j4len
*c_gpuNumClusterPerCell
*c_nbnxnGpuJgroupSize
> nsp_max
)
2287 /* Remove the last ci entry and process the cj4's again */
2295 for (int cj4
= cj4_start
; cj4
< cj4_end
; cj4
++)
2297 nsp_cj4_p
= nsp_cj4
;
2298 /* Count the number of cluster pairs in this cj4 group */
2300 for (int p
= 0; p
< c_gpuNumClusterPerCell
*c_nbnxnGpuJgroupSize
; p
++)
2302 nsp_cj4
+= (nbl
->cj4
[cj4
].imei
[0].imask
>> p
) & 1;
2305 /* If adding the current cj4 with nsp_cj4 pairs get us further
2306 * away from our target nsp_max, split the list before this cj4.
2308 if (nsp
> 0 && nsp_max
- nsp
< nsp
+ nsp_cj4
- nsp_max
)
2310 /* Split the list at cj4 */
2311 nbl
->sci
[sci
].cj4_ind_end
= cj4
;
2312 /* Create a new sci entry */
2315 if (nbl
->nsci
+1 > nbl
->sci_nalloc
)
2317 nb_realloc_sci(nbl
, nbl
->nsci
+1);
2319 nbl
->sci
[sci
].sci
= nbl
->sci
[nbl
->nsci
-1].sci
;
2320 nbl
->sci
[sci
].shift
= nbl
->sci
[nbl
->nsci
-1].shift
;
2321 nbl
->sci
[sci
].cj4_ind_start
= cj4
;
2323 nsp_cj4_e
= nsp_cj4_p
;
2329 /* Put the remaining cj4's in the last sci entry */
2330 nbl
->sci
[sci
].cj4_ind_end
= cj4_end
;
2332 /* Possibly balance out the last two sci's
2333 * by moving the last cj4 of the second last sci.
2335 if (nsp_sci
- nsp_cj4_e
>= nsp
+ nsp_cj4_e
)
2337 nbl
->sci
[sci
-1].cj4_ind_end
--;
2338 nbl
->sci
[sci
].cj4_ind_start
--;
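
/* Illustrative sketch (not part of the GROMACS sources, kept under #if 0):
 * the split rule used above, applied to a plain array of per-group pair
 * counts. A new chunk is started before group g when the current chunk is
 * non-empty and adding g would overshoot the target by more than the current
 * shortfall, i.e. when target - nsp < nsp + nsp_g - target. All names and
 * types here are hypothetical.
 */
#if 0
#include <vector>

static std::vector<int> splitAtTarget(const std::vector<int> &groupSizes, int target)
{
    std::vector<int> chunkStart = { 0 };
    int              nsp        = 0;
    for (size_t g = 0; g < groupSizes.size(); g++)
    {
        if (nsp > 0 && target - nsp < nsp + groupSizes[g] - target)
        {
            /* Split before group g, exactly like the cj4 split above */
            chunkStart.push_back(static_cast<int>(g));
            nsp = 0;
        }
        nsp += groupSizes[g];
    }
    return chunkStart;
}
#endif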
/* Close this super/sub list i entry */
static void close_ci_entry_supersub(nbnxn_pairlist_t *nbl,
                                    int nsp_max_av,
                                    gmx_bool progBal, float nsp_tot_est,
                                    int thread, int nthread)
{
    /* All content of the new ci entry has already been filled correctly,
     * we only need to increase the count here (for non-empty lists).
     */
    int j4len = nbl->sci[nbl->nsci].cj4_ind_end - nbl->sci[nbl->nsci].cj4_ind_start;
    if (j4len > 0)
    {
        /* We can only have complete blocks of 4 j-entries in a list,
         * so round the count up before closing.
         */
        nbl->ncj4         = (nbl->work->cj_ind + c_nbnxnGpuJgroupSize - 1)/c_nbnxnGpuJgroupSize;
        nbl->work->cj_ind = nbl->ncj4*c_nbnxnGpuJgroupSize;

        nbl->nsci++;

        if (nsp_max_av > 0)
        {
            /* Measure the size of the new entry and potentially split it */
            split_sci_entry(nbl, nsp_max_av, progBal, nsp_tot_est,
                            thread, nthread);
        }
    }
}
/* Syncs the working array before adding another grid pair to the list */
static void sync_work(nbnxn_pairlist_t *nbl)
{
    if (!nbl->bSimple)
    {
        nbl->work->cj_ind   = nbl->ncj4*c_nbnxnGpuJgroupSize;
        nbl->work->cj4_init = nbl->ncj4;
    }
}
/* Clears an nbnxn_pairlist_t data structure */
static void clear_pairlist(nbnxn_pairlist_t *nbl)
{
    nbl->nci           = 0;
    nbl->nsci          = 0;
    nbl->ncj           = 0;
    nbl->ncjInUse      = 0;
    nbl->ncj4          = 0;
    nbl->nci_tot       = 0;
    nbl->nexcl         = 1;

    nbl->work->ncj_noq = 0;
    nbl->work->ncj_hlj = 0;
}
/* Clears a group scheme pair list */
static void clear_pairlist_fep(t_nblist *nl)
{
    nl->nri = 0;
    nl->nrj = 0;
    if (nl->jindex == NULL)
    {
        snew(nl->jindex, 1);
    }
    nl->jindex[0] = 0;
}
/* Sets a simple list i-cell bounding box, including PBC shift */
static gmx_inline void set_icell_bb_simple(const nbnxn_bb_t *bb, int ci,
                                           real shx, real shy, real shz,
                                           nbnxn_bb_t *bb_ci)
{
    bb_ci->lower[BB_X] = bb[ci].lower[BB_X] + shx;
    bb_ci->lower[BB_Y] = bb[ci].lower[BB_Y] + shy;
    bb_ci->lower[BB_Z] = bb[ci].lower[BB_Z] + shz;
    bb_ci->upper[BB_X] = bb[ci].upper[BB_X] + shx;
    bb_ci->upper[BB_Y] = bb[ci].upper[BB_Y] + shy;
    bb_ci->upper[BB_Z] = bb[ci].upper[BB_Z] + shz;
}
/* Sets the super-cell and sub-cell bounding boxes, including PBC shift */
static void set_icell_bbxxxx_supersub(const float *bb, int ci,
                                      real shx, real shy, real shz,
                                      float *bb_ci)
{
    int ia = ci*(c_gpuNumClusterPerCell >> STRIDE_PBB_2LOG)*NNBSBB_XXXX;
    for (int m = 0; m < (c_gpuNumClusterPerCell >> STRIDE_PBB_2LOG)*NNBSBB_XXXX; m += NNBSBB_XXXX)
    {
        for (int i = 0; i < STRIDE_PBB; i++)
        {
            bb_ci[m+0*STRIDE_PBB+i] = bb[ia+m+0*STRIDE_PBB+i] + shx;
            bb_ci[m+1*STRIDE_PBB+i] = bb[ia+m+1*STRIDE_PBB+i] + shy;
            bb_ci[m+2*STRIDE_PBB+i] = bb[ia+m+2*STRIDE_PBB+i] + shz;
            bb_ci[m+3*STRIDE_PBB+i] = bb[ia+m+3*STRIDE_PBB+i] + shx;
            bb_ci[m+4*STRIDE_PBB+i] = bb[ia+m+4*STRIDE_PBB+i] + shy;
            bb_ci[m+5*STRIDE_PBB+i] = bb[ia+m+5*STRIDE_PBB+i] + shz;
        }
    }
}
/* Sets the super-cell and sub-cell bounding boxes, including PBC shift */
static void set_icell_bb_supersub(const nbnxn_bb_t *bb, int ci,
                                  real shx, real shy, real shz,
                                  nbnxn_bb_t *bb_ci)
{
    for (int i = 0; i < c_gpuNumClusterPerCell; i++)
    {
        set_icell_bb_simple(bb, ci*c_gpuNumClusterPerCell+i,
                            shx, shy, shz,
                            &bb_ci[i]);
    }
}
/* Copies PBC shifted i-cell atom coordinates x,y,z to working array */
static void icell_set_x_simple(int ci,
                               real shx, real shy, real shz,
                               int stride, const real *x,
                               nbnxn_list_work_t *work)
{
    int ia = ci*NBNXN_CPU_CLUSTER_I_SIZE;

    for (int i = 0; i < NBNXN_CPU_CLUSTER_I_SIZE; i++)
    {
        work->x_ci[i*STRIDE_XYZ+XX] = x[(ia+i)*stride+XX] + shx;
        work->x_ci[i*STRIDE_XYZ+YY] = x[(ia+i)*stride+YY] + shy;
        work->x_ci[i*STRIDE_XYZ+ZZ] = x[(ia+i)*stride+ZZ] + shz;
    }
}
/* Copies PBC shifted super-cell atom coordinates x,y,z to working array */
static void icell_set_x_supersub(int ci,
                                 real shx, real shy, real shz,
                                 int stride, const real *x,
                                 nbnxn_list_work_t *work)
{
#if !GMX_SIMD4_HAVE_REAL

    real * x_ci = work->x_ci;

    int    ia   = ci*c_gpuNumClusterPerCell*c_nbnxnGpuClusterSize;
    for (int i = 0; i < c_gpuNumClusterPerCell*c_nbnxnGpuClusterSize; i++)
    {
        x_ci[i*DIM + XX] = x[(ia+i)*stride + XX] + shx;
        x_ci[i*DIM + YY] = x[(ia+i)*stride + YY] + shy;
        x_ci[i*DIM + ZZ] = x[(ia+i)*stride + ZZ] + shz;
    }

#else /* !GMX_SIMD4_HAVE_REAL */

    real * x_ci = work->x_ci_simd;

    for (int si = 0; si < c_gpuNumClusterPerCell; si++)
    {
        for (int i = 0; i < c_nbnxnGpuClusterSize; i += GMX_SIMD4_WIDTH)
        {
            int io = si*c_nbnxnGpuClusterSize + i;
            int ia = ci*c_gpuNumClusterPerCell*c_nbnxnGpuClusterSize + io;
            for (int j = 0; j < GMX_SIMD4_WIDTH; j++)
            {
                x_ci[io*DIM + j + XX*GMX_SIMD4_WIDTH] = x[(ia + j)*stride + XX] + shx;
                x_ci[io*DIM + j + YY*GMX_SIMD4_WIDTH] = x[(ia + j)*stride + YY] + shy;
                x_ci[io*DIM + j + ZZ*GMX_SIMD4_WIDTH] = x[(ia + j)*stride + ZZ] + shz;
            }
        }
    }

#endif /* !GMX_SIMD4_HAVE_REAL */
}
static real minimum_subgrid_size_xy(const nbnxn_grid_t *grid)
{
    if (grid->bSimple)
    {
        return std::min(grid->sx, grid->sy);
    }
    else
    {
        return std::min(grid->sx/c_gpuNumClusterPerCellX,
                        grid->sy/c_gpuNumClusterPerCellY);
    }
}
static real effective_buffer_1x1_vs_MxN(const nbnxn_grid_t *gridi,
                                        const nbnxn_grid_t *gridj)
{
    const real eff_1x1_buffer_fac_overest = 0.1;

    /* Determine an atom-pair list cut-off buffer size for atom pairs,
     * to be added to rlist (including buffer) used for MxN.
     * This is for converting an MxN list to a 1x1 list. This means we can't
     * use the normal buffer estimate, as we have an MxN list in which
     * some atom pairs beyond rlist are missing. We want to capture
     * the beneficial effect of buffering by extra pairs just outside rlist,
     * while removing the useless pairs that are further away from rlist.
     * (Also the buffer could have been set manually not using the estimate.)
     * This buffer size is an overestimate.
     * We add 10% of the smallest grid sub-cell dimensions.
     * Note that the z-size differs per cell and we don't use this,
     * so we overestimate.
     * With PME, the 10% value gives a buffer that is somewhat larger
     * than the effective buffer with a tolerance of 0.005 kJ/mol/ps.
     * Smaller tolerances or using RF lead to a smaller effective buffer,
     * so 10% gives a safe overestimate.
     */
    return eff_1x1_buffer_fac_overest*(minimum_subgrid_size_xy(gridi) +
                                       minimum_subgrid_size_xy(gridj));
}
/* Clusters at the cut-off only increase rlist by 60% of their size */
static real nbnxn_rlist_inc_outside_fac = 0.6;

/* Due to the cluster size the effective pair-list is longer than
 * that of a simple atom pair-list. This function gives the extra distance.
 */
real nbnxn_get_rlist_effective_inc(int cluster_size_j, real atom_density)
{
    int  cluster_size_i;
    real vol_inc_i, vol_inc_j;

    /* We should get this from the setup, but currently it's the same for
     * all setups, including GPUs.
     */
    cluster_size_i = NBNXN_CPU_CLUSTER_I_SIZE;

    vol_inc_i = (cluster_size_i - 1)/atom_density;
    vol_inc_j = (cluster_size_j - 1)/atom_density;

    return nbnxn_rlist_inc_outside_fac*std::cbrt(vol_inc_i + vol_inc_j);
}
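
/* Illustrative sketch (not part of the GROMACS sources, kept under #if 0):
 * evaluating the increase returned above for an assumed atomistic density of
 * roughly 100 atoms/nm^3 and 4-atom i- and j-clusters. The numbers are only
 * an example of plugging values into the formula, not reference values.
 */
#if 0
#include <cmath>

static double exampleRlistInc()
{
    const double fac         = 0.6;  /* nbnxn_rlist_inc_outside_fac            */
    const double atomDensity = 100;  /* atoms/nm^3, assumed for this example   */
    const double volIncI     = (4 - 1)/atomDensity;
    const double volIncJ     = (4 - 1)/atomDensity;

    /* 0.6*cbrt(0.06) is roughly 0.23 nm for this example */
    return fac*std::cbrt(volIncI + volIncJ);
}
#endif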
/* Estimates the interaction volume^2 for non-local interactions */
static real nonlocal_vol2(const struct gmx_domdec_zones_t *zones, rvec ls, real r)
{
    real cl, ca, za;
    real vold_est;
    real vol2_est_tot;

    vol2_est_tot = 0;

    /* Here we simply add up the non-home interaction volume^2 of the
     * 1, 2 or 3 1D decomposition zones. As these volumes are not additive,
     * this is an overestimate, but it would only be significant in the limit
     * of small cells, where we anyhow need to split the lists into
     * as small parts as possible.
     */
    for (int z = 0; z < zones->n; z++)
    {
        if (zones->shift[z][XX] + zones->shift[z][YY] + zones->shift[z][ZZ] == 1)
        {
            cl = 0;
            ca = 1;
            za = 1;
            for (int d = 0; d < DIM; d++)
            {
                if (zones->shift[z][d] == 0)
                {
                    cl += 0.5*ls[d];
                    ca *= ls[d];
                    za *= zones->size[z].x1[d] - zones->size[z].x0[d];
                }
            }

            /* 4 octants of a sphere */
            vold_est  = 0.25*M_PI*r*r*r*r;
            /* 4 quarter pie slices on the edges */
            vold_est += 4*cl*M_PI/6.0*r*r*r;
            /* One rectangular volume on a face */
            vold_est += ca*0.5*r*r;

            vol2_est_tot += vold_est*za;
        }
    }

    return vol2_est_tot;
}
/* Estimates the average size of a full j-list for super/sub setup */
static void get_nsubpair_target(const nbnxn_search_t  nbs,
                                int                   iloc,
                                real                  rlist,
                                int                   min_ci_balanced,
                                int                  *nsubpair_target,
                                float                *nsubpair_tot_est)
{
    /* The target value of 36 seems to be the optimum for Kepler.
     * Maxwell is less sensitive to the exact value.
     */
    const int           nsubpair_target_min = 36;
    const nbnxn_grid_t *grid;
    rvec                ls;
    real                r_eff_sup, vol_est, nsp_est, nsp_est_nl;

    grid = &nbs->grid[0];

    /* We don't need to balance list sizes if:
     * - We didn't request balancing.
     * - The number of grid cells >= the number of lists requested,
     *   since we will always generate at least #cells lists.
     * - We don't have any cells, since then there won't be any lists.
     */
    if (min_ci_balanced <= 0 || grid->nc >= min_ci_balanced || grid->nc == 0)
    {
        /* nsubpair_target==0 signals no balancing */
        *nsubpair_target  = 0;
        *nsubpair_tot_est = 0;

        return;
    }

    ls[XX] = (grid->c1[XX] - grid->c0[XX])/(grid->ncx*c_gpuNumClusterPerCellX);
    ls[YY] = (grid->c1[YY] - grid->c0[YY])/(grid->ncy*c_gpuNumClusterPerCellY);
    ls[ZZ] = grid->na_c/(grid->atom_density*ls[XX]*ls[YY]);

    /* The average length of the diagonal of a sub cell */
    real diagonal = std::sqrt(ls[XX]*ls[XX] + ls[YY]*ls[YY] + ls[ZZ]*ls[ZZ]);

    /* The formulas below are a heuristic estimate of the average nsj per si*/
    r_eff_sup = rlist + nbnxn_rlist_inc_outside_fac*gmx::square((grid->na_c - 1.0)/grid->na_c)*0.5*diagonal;

    if (!nbs->DomDec || nbs->zones->n == 1)
    {
        nsp_est_nl = 0;
    }
    else
    {
        nsp_est_nl =
            gmx::square(grid->atom_density/grid->na_c)*
            nonlocal_vol2(nbs->zones, ls, r_eff_sup);
    }

    if (LOCAL_I(iloc))
    {
        /* Sub-cell interacts with itself */
        vol_est  = ls[XX]*ls[YY]*ls[ZZ];
        /* 6/2 rectangular volume on the faces */
        vol_est += (ls[XX]*ls[YY] + ls[XX]*ls[ZZ] + ls[YY]*ls[ZZ])*r_eff_sup;
        /* 12/2 quarter pie slices on the edges */
        vol_est += 2*(ls[XX] + ls[YY] + ls[ZZ])*0.25*M_PI*gmx::square(r_eff_sup);
        /* 4 octants of a sphere */
        vol_est += 0.5*4.0/3.0*M_PI*gmx::power3(r_eff_sup);

        /* Estimate the number of cluster pairs as the local number of
         * clusters times the volume they interact with times the density.
         */
        nsp_est = grid->nsubc_tot*vol_est*grid->atom_density/grid->na_c;

        /* Subtract the non-local pair count */
        nsp_est -= nsp_est_nl;

        /* For small cut-offs nsp_est will be an underestimate.
         * With DD nsp_est_nl is an overestimate so nsp_est can get negative.
         * So to avoid too small or negative nsp_est we set a minimum of
         * all cells interacting with all 3^3 direct neighbors (3^3-1)/2+1=14.
         * This might be a slight overestimate for small non-periodic groups of
         * atoms as will occur for a local domain with DD, but for small
         * groups of atoms we'll anyhow be limited by nsubpair_target_min,
         * so this overestimation will not matter.
         */
        nsp_est = std::max(nsp_est, grid->nsubc_tot*static_cast<real>(14));

        if (debug)
        {
            fprintf(debug, "nsp_est local %5.1f non-local %5.1f\n",
                    nsp_est, nsp_est_nl);
        }
    }
    else
    {
        nsp_est = nsp_est_nl;
    }

    /* Thus the (average) maximum j-list size should be as follows.
     * Since there is overhead, we shouldn't make the lists too small
     * (and we can't chop up j-groups) so we use a minimum target size of 36.
     */
    *nsubpair_target  = std::max(nsubpair_target_min,
                                 static_cast<int>(nsp_est/min_ci_balanced + 0.5));
    *nsubpair_tot_est = static_cast<int>(nsp_est);

    if (debug)
    {
        fprintf(debug, "nbl nsp estimate %.1f, nsubpair_target %d\n",
                nsp_est, *nsubpair_target);
    }
}
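
/* Illustrative sketch (not part of the GROMACS sources, kept under #if 0):
 * how the final target above combines the pair estimate with the requested
 * list count; the minimum of 36 keeps individual j-lists from becoming too
 * small. All values here are made up for the example.
 */
#if 0
#include <algorithm>

static int exampleNsubpairTarget(float nsp_est, int min_ci_balanced)
{
    const int targetMin = 36;

    /* e.g. nsp_est = 5000 and min_ci_balanced = 200 give a target of 36,
     * since 5000/200 + 0.5 rounds to 25, below the minimum.
     */
    return std::max(targetMin, static_cast<int>(nsp_est/min_ci_balanced + 0.5));
}
#endif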
/* Debug list print function */
static void print_nblist_ci_cj(FILE *fp, const nbnxn_pairlist_t *nbl)
{
    for (int i = 0; i < nbl->nci; i++)
    {
        fprintf(fp, "ci %4d shift %2d ncj %3d\n",
                nbl->ci[i].ci, nbl->ci[i].shift,
                nbl->ci[i].cj_ind_end - nbl->ci[i].cj_ind_start);

        for (int j = nbl->ci[i].cj_ind_start; j < nbl->ci[i].cj_ind_end; j++)
        {
            fprintf(fp, "  cj %5d imask %x\n",
                    nbl->cj[j].cj,
                    nbl->cj[j].excl);
        }
    }
}
/* Debug list print function */
static void print_nblist_sci_cj(FILE *fp, const nbnxn_pairlist_t *nbl)
{
    for (int i = 0; i < nbl->nsci; i++)
    {
        fprintf(fp, "ci %4d shift %2d ncj4 %2d\n",
                nbl->sci[i].sci, nbl->sci[i].shift,
                nbl->sci[i].cj4_ind_end - nbl->sci[i].cj4_ind_start);

        int ncp = 0;
        for (int j4 = nbl->sci[i].cj4_ind_start; j4 < nbl->sci[i].cj4_ind_end; j4++)
        {
            for (int j = 0; j < c_nbnxnGpuJgroupSize; j++)
            {
                fprintf(fp, "  sj %5d imask %x\n",
                        nbl->cj4[j4].cj[j],
                        nbl->cj4[j4].imei[0].imask);
                for (int si = 0; si < c_gpuNumClusterPerCell; si++)
                {
                    if (nbl->cj4[j4].imei[0].imask & (1U << (j*c_gpuNumClusterPerCell + si)))
                    {
                        ncp++;
                    }
                }
            }
        }
        fprintf(fp, "ci %4d shift %2d ncj4 %2d ncp %3d\n",
                nbl->sci[i].sci, nbl->sci[i].shift,
                nbl->sci[i].cj4_ind_end - nbl->sci[i].cj4_ind_start,
                ncp);
    }
}
/* Combine pair lists *nbl generated on multiple threads nblc */
static void combine_nblists(int nnbl, nbnxn_pairlist_t **nbl,
                            nbnxn_pairlist_t *nblc)
{
    int nsci, ncj4, nexcl;

    if (nblc->bSimple)
    {
        gmx_incons("combine_nblists does not support simple lists");
    }

    nsci  = nblc->nsci;
    ncj4  = nblc->ncj4;
    nexcl = nblc->nexcl;
    for (int i = 0; i < nnbl; i++)
    {
        nsci  += nbl[i]->nsci;
        ncj4  += nbl[i]->ncj4;
        nexcl += nbl[i]->nexcl;
    }

    if (nsci > nblc->sci_nalloc)
    {
        nb_realloc_sci(nblc, nsci);
    }
    if (ncj4 > nblc->cj4_nalloc)
    {
        nblc->cj4_nalloc = over_alloc_small(ncj4);
        nbnxn_realloc_void((void **)&nblc->cj4,
                           nblc->ncj4*sizeof(*nblc->cj4),
                           nblc->cj4_nalloc*sizeof(*nblc->cj4),
                           nblc->alloc, nblc->free);
    }
    if (nexcl > nblc->excl_nalloc)
    {
        nblc->excl_nalloc = over_alloc_small(nexcl);
        nbnxn_realloc_void((void **)&nblc->excl,
                           nblc->nexcl*sizeof(*nblc->excl),
                           nblc->excl_nalloc*sizeof(*nblc->excl),
                           nblc->alloc, nblc->free);
    }

    /* Each thread should copy its own data to the combined arrays,
     * as otherwise data will go back and forth between different caches.
     */
#if GMX_OPENMP && !(defined __clang_analyzer__)
    // cppcheck-suppress unreadVariable
    int nthreads = gmx_omp_nthreads_get(emntPairsearch);
#endif

#pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int n = 0; n < nnbl; n++)
    {
        try
        {
            int                     sci_offset;
            int                     cj4_offset;
            int                     excl_offset;
            const nbnxn_pairlist_t *nbli;

            /* Determine the offset in the combined data for our thread */
            sci_offset  = nblc->nsci;
            cj4_offset  = nblc->ncj4;
            excl_offset = nblc->nexcl;

            for (int i = 0; i < n; i++)
            {
                sci_offset  += nbl[i]->nsci;
                cj4_offset  += nbl[i]->ncj4;
                excl_offset += nbl[i]->nexcl;
            }

            nbli = nbl[n];

            for (int i = 0; i < nbli->nsci; i++)
            {
                nblc->sci[sci_offset+i]                = nbli->sci[i];
                nblc->sci[sci_offset+i].cj4_ind_start += cj4_offset;
                nblc->sci[sci_offset+i].cj4_ind_end   += cj4_offset;
            }

            for (int j4 = 0; j4 < nbli->ncj4; j4++)
            {
                nblc->cj4[cj4_offset+j4]                   = nbli->cj4[j4];
                nblc->cj4[cj4_offset+j4].imei[0].excl_ind += excl_offset;
                nblc->cj4[cj4_offset+j4].imei[1].excl_ind += excl_offset;
            }

            for (int j4 = 0; j4 < nbli->nexcl; j4++)
            {
                nblc->excl[excl_offset+j4] = nbli->excl[j4];
            }
        }
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
    }

    for (int n = 0; n < nnbl; n++)
    {
        nblc->nsci    += nbl[n]->nsci;
        nblc->ncj4    += nbl[n]->ncj4;
        nblc->nci_tot += nbl[n]->nci_tot;
        nblc->nexcl   += nbl[n]->nexcl;
    }
}
static void balance_fep_lists(const nbnxn_search_t  nbs,
                              nbnxn_pairlist_set_t *nbl_lists)
{
    int       nnbl;
    int       nri_tot, nrj_tot, nrj_target;
    int       th_dest;
    t_nblist *nbld;

    nnbl = nbl_lists->nnbl;

    if (nnbl == 1)
    {
        /* Nothing to balance */
        return;
    }

    /* Count the total i-lists and pairs */
    nri_tot = 0;
    nrj_tot = 0;
    for (int th = 0; th < nnbl; th++)
    {
        nri_tot += nbl_lists->nbl_fep[th]->nri;
        nrj_tot += nbl_lists->nbl_fep[th]->nrj;
    }

    nrj_target = (nrj_tot + nnbl - 1)/nnbl;

    assert(gmx_omp_nthreads_get(emntNonbonded) == nnbl);

#pragma omp parallel for schedule(static) num_threads(nnbl)
    for (int th = 0; th < nnbl; th++)
    {
        try
        {
            t_nblist *nbl;

            nbl = nbs->work[th].nbl_fep;

            /* Note that here we allocate for the total size, instead of
             * a per-thread estimate (which is hard to obtain).
             */
            if (nri_tot > nbl->maxnri)
            {
                nbl->maxnri = over_alloc_large(nri_tot);
                reallocate_nblist(nbl);
            }
            if (nri_tot > nbl->maxnri || nrj_tot > nbl->maxnrj)
            {
                nbl->maxnrj = over_alloc_small(nrj_tot);
                srenew(nbl->jjnr, nbl->maxnrj);
                srenew(nbl->excl_fep, nbl->maxnrj);
            }

            clear_pairlist_fep(nbl);
        }
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
    }

    /* Loop over the source lists and assign and copy i-entries */
    th_dest = 0;
    nbld    = nbs->work[th_dest].nbl_fep;
    for (int th = 0; th < nnbl; th++)
    {
        t_nblist *nbls;

        nbls = nbl_lists->nbl_fep[th];

        for (int i = 0; i < nbls->nri; i++)
        {
            int nrj;

            /* The number of pairs in this i-entry */
            nrj = nbls->jindex[i+1] - nbls->jindex[i];

            /* Decide if list th_dest is too large and we should proceed
             * to the next destination list.
             */
            if (th_dest+1 < nnbl && nbld->nrj > 0 &&
                nbld->nrj + nrj - nrj_target > nrj_target - nbld->nrj)
            {
                th_dest++;
                nbld = nbs->work[th_dest].nbl_fep;
            }

            nbld->iinr[nbld->nri]  = nbls->iinr[i];
            nbld->gid[nbld->nri]   = nbls->gid[i];
            nbld->shift[nbld->nri] = nbls->shift[i];

            for (int j = nbls->jindex[i]; j < nbls->jindex[i+1]; j++)
            {
                nbld->jjnr[nbld->nrj]     = nbls->jjnr[j];
                nbld->excl_fep[nbld->nrj] = nbls->excl_fep[j];
                nbld->nrj++;
            }
            nbld->nri++;
            nbld->jindex[nbld->nri] = nbld->nrj;
        }
    }

    /* Swap the list pointers */
    for (int th = 0; th < nnbl; th++)
    {
        t_nblist *nbl_tmp;

        nbl_tmp                = nbl_lists->nbl_fep[th];
        nbl_lists->nbl_fep[th] = nbs->work[th].nbl_fep;
        nbs->work[th].nbl_fep  = nbl_tmp;

        if (debug)
        {
            fprintf(debug, "nbl_fep[%d] nri %4d nrj %4d\n",
                    th,
                    nbl_lists->nbl_fep[th]->nri,
                    nbl_lists->nbl_fep[th]->nrj);
        }
    }
}
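
/* Illustrative sketch (not part of the GROMACS sources, kept under #if 0):
 * the rule used above for advancing to the next destination list, applied to
 * plain arrays. An i-entry of size n is moved to the next list when the
 * current one is non-empty and would end up further above the per-list target
 * than it now is below it. All names here are hypothetical.
 */
#if 0
#include <vector>

static std::vector<int> assignToLists(const std::vector<int> &entrySizes, int numLists)
{
    int total = 0;
    for (int n : entrySizes)
    {
        total += n;
    }
    const int        target = (total + numLists - 1)/numLists;

    std::vector<int> listOf(entrySizes.size());
    int              dest = 0, filled = 0;
    for (size_t i = 0; i < entrySizes.size(); i++)
    {
        if (dest + 1 < numLists && filled > 0 &&
            filled + entrySizes[i] - target > target - filled)
        {
            dest++;        /* current list close enough to target, move on */
            filled = 0;
        }
        listOf[i] = dest;
        filled   += entrySizes[i];
    }
    return listOf;
}
#endif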
/* Returns the next ci to be processed by our thread */
static gmx_bool next_ci(const nbnxn_grid_t *grid,
                        int conv,
                        int nth, int ci_block,
                        int *ci_x, int *ci_y,
                        int *ci_b, int *ci)
{
    (*ci_b)++;
    (*ci)++;

    if (*ci_b == ci_block)
    {
        /* Jump to the next block assigned to this task */
        *ci   += (nth - 1)*ci_block;
        *ci_b  = 0;
    }

    if (*ci >= grid->nc*conv)
    {
        return FALSE;
    }

    while (*ci >= grid->cxy_ind[*ci_x*grid->ncy + *ci_y + 1]*conv)
    {
        *ci_y += 1;
        if (*ci_y == grid->ncy)
        {
            *ci_x += 1;
            *ci_y  = 0;
        }
    }

    return TRUE;
}
/* Returns the distance^2 for which we put cell pairs in the list
 * without checking atom pair distances. This is usually < rlist^2.
 */
static float boundingbox_only_distance2(const nbnxn_grid_t *gridi,
                                        const nbnxn_grid_t *gridj,
                                        real                rlist,
                                        gmx_bool            simple)
{
    /* If the distance between two sub-cell bounding boxes is less
     * than this distance, do not check the distance between
     * all particle pairs in the sub-cell, since then it is likely
     * that the box pair has atom pairs within the cut-off.
     * We use the nblist cut-off minus 0.5 times the average x/y diagonal
     * spacing of the sub-cells. Around 40% of the checked pairs are pruned.
     * Using more than 0.5 gains at most 0.5%.
     * If forces are calculated more than twice, the performance gain
     * in the force calculation outweighs the cost of checking.
     * Note that with sub-cell lists, the atom-pair distance check
     * is only performed when only 1 out of 8 sub-cells is within range,
     * this is because the GPU is much faster than the CPU.
     */
    real bbx, bby;
    real rbb2;

    bbx = 0.5*(gridi->sx + gridj->sx);
    bby = 0.5*(gridi->sy + gridj->sy);
    if (!simple)
    {
        bbx /= c_gpuNumClusterPerCellX;
        bby /= c_gpuNumClusterPerCellY;
    }

    rbb2 = std::max(0.0, rlist - 0.5*std::sqrt(bbx*bbx + bby*bby));
    rbb2 = rbb2 * rbb2;

#if !GMX_DOUBLE
    return rbb2;
#else
    return (float)((1+GMX_FLOAT_EPS)*rbb2);
#endif
}
static int get_ci_block_size(const nbnxn_grid_t *gridi,
                             gmx_bool bDomDec, int nth)
{
    const int ci_block_enum      = 5;
    const int ci_block_denom     = 11;
    const int ci_block_min_atoms = 16;
    int       ci_block;

    /* Here we decide how to distribute the blocks over the threads.
     * We use prime numbers to try to avoid that the grid size becomes
     * a multiple of the number of threads, which would lead to some
     * threads getting "inner" pairs and others getting boundary pairs,
     * which in turn will lead to load imbalance between threads.
     * Set the block size as 5/11/ntask times the average number of cells
     * in a y,z slab. This should ensure a quite uniform distribution
     * of the grid parts of the different threads along all three grid
     * zone boundaries with 3D domain decomposition. At the same time
     * the blocks will not become too small.
     */
    ci_block = (gridi->nc*ci_block_enum)/(ci_block_denom*gridi->ncx*nth);

    /* Ensure the blocks are not too small: avoids cache invalidation */
    if (ci_block*gridi->na_sc < ci_block_min_atoms)
    {
        ci_block = (ci_block_min_atoms + gridi->na_sc - 1)/gridi->na_sc;
    }

    /* Without domain decomposition
     * or with less than 3 blocks per task, divide in nth blocks.
     */
    if (!bDomDec || nth*3*ci_block > gridi->nc)
    {
        ci_block = (gridi->nc + nth - 1)/nth;
    }

    if (ci_block > 1 && (nth - 1)*ci_block >= gridi->nc)
    {
        /* Some threads have no work. Although reducing the block size
         * does not decrease the block count on the first few threads,
         * with GPUs better mixing of "upper" cells that have more empty
         * clusters results in a somewhat lower max load over all threads.
         * Without GPUs the regime of so few atoms per thread is less
         * performance relevant, but with 8-wide SIMD the same reasoning
         * applies, since the pair list uses 4 i-atom "sub-clusters".
         */
        ci_block--;
    }

    return ci_block;
}
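
/* Illustrative sketch (not part of the GROMACS sources, kept under #if 0):
 * evaluating the 5/11 block-size heuristic above for a hypothetical grid of
 * 4096 cells with ncx = 16 columns, 8 threads and 4 atoms per super-cell.
 * The domain-decomposition fallback branches are omitted; all numbers are
 * assumptions for the example only.
 */
#if 0
static int exampleCiBlockSize()
{
    const int nc = 4096, ncx = 16, nth = 8, na_sc = 4;

    int block = (nc*5)/(11*ncx*nth);   /* 5/11 of an average y,z slab: 14 here */
    if (block*na_sc < 16)              /* keep at least 16 atoms per block     */
    {
        block = (16 + na_sc - 1)/na_sc;
    }
    return block;                      /* 14 for these example numbers         */
}
#endif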
/* Returns the number of bits to right-shift a cluster index to obtain
 * the corresponding force buffer flag index.
 */
static int getBufferFlagShift(int numAtomsPerCluster)
{
    int bufferFlagShift = 0;
    while ((numAtomsPerCluster << bufferFlagShift) < NBNXN_BUFFERFLAG_SIZE)
    {
        bufferFlagShift++;
    }

    return bufferFlagShift;
}
/* Generates the part of pair-list nbl assigned to our thread */
static void nbnxn_make_pairlist_part(const nbnxn_search_t nbs,
                                     const nbnxn_grid_t *gridi,
                                     const nbnxn_grid_t *gridj,
                                     nbnxn_search_work_t *work,
                                     const nbnxn_atomdata_t *nbat,
                                     const t_blocka *excl,
                                     real rlist,
                                     int nb_kernel_type,
                                     int ci_block,
                                     gmx_bool bFBufferFlag,
                                     int nsubpair_max,
                                     gmx_bool progBal,
                                     float nsubpair_tot_est,
                                     int th, int nth,
                                     nbnxn_pairlist_t *nbl,
                                     t_nblist *nbl_fep)
{
    matrix            box;
    real              rl2, rl_fep2 = 0;
    float             rbb2;
    int               ci_b, ci, ci_x, ci_y, ci_xy, cj;
    ivec              shp;
    int               shift;
    real              shx, shy, shz;
    int               conv_i, cell0_i;
    const nbnxn_bb_t *bb_i = NULL;
#if NBNXN_BBXXXX
    const float      *pbb_i = NULL;
#endif
    const int        *flags_i;
    const float      *bbcz_i, *bbcz_j;
    real              bx0, bx1, by0, by1, bz0, bz1;
    real              bz1_frac;
    real              d2cx, d2z, d2z_cx, d2z_cy, d2zx, d2zxy, d2xy;
    int               cxf, cxl, cyf, cyf_x, cyl;
    int               c0, c1, cs, cf, cl;
    int               na_cj_2log;
    int               ndistc;
    int               ncpcheck;
    int               gridi_flag_shift = 0, gridj_flag_shift = 0;
    gmx_bitmask_t    *gridj_flag       = NULL;
    int               ncj_old_i, ncj_old_j;

    nbs_cycle_start(&work->cc[enbsCCsearch]);

    if (gridj->bSimple != nbl->bSimple)
    {
        gmx_incons("Grid incompatible with pair-list");
    }

    nbl->na_sc = gridj->na_sc;
    nbl->na_ci = gridj->na_c;
    nbl->na_cj = nbnxn_kernel_to_cluster_j_size(nb_kernel_type);
    na_cj_2log = get_2log(nbl->na_cj);

    nbl->rlist = rlist;

    if (bFBufferFlag)
    {
        /* Determine conversion of clusters to flag blocks */
        gridi_flag_shift = getBufferFlagShift(nbl->na_ci);
        gridj_flag_shift = getBufferFlagShift(nbl->na_cj);

        gridj_flag       = work->buffer_flags.flag;
    }

    copy_mat(nbs->box, box);

    rl2 = nbl->rlist*nbl->rlist;

    if (nbs->bFEP && !nbl->bSimple)
    {
        /* Determine an atom-pair list cut-off distance for FEP atom pairs.
         * We should not simply use rlist, since then we would not have
         * the small, effective buffering of the NxN lists.
         * The buffer is an overestimate, but the resulting cost for pairs
         * beyond rlist is negligible compared to the FEP pairs within rlist.
         */
        rl_fep2 = nbl->rlist + effective_buffer_1x1_vs_MxN(gridi, gridj);

        if (debug)
        {
            fprintf(debug, "nbl_fep atom-pair rlist %f\n", rl_fep2);
        }
        rl_fep2 = rl_fep2*rl_fep2;
    }

    rbb2 = boundingbox_only_distance2(gridi, gridj, nbl->rlist, nbl->bSimple);

    if (debug)
    {
        fprintf(debug, "nbl bounding box only distance %f\n", std::sqrt(rbb2));
    }

    /* Set the shift range */
    for (int d = 0; d < DIM; d++)
    {
        /* Check if we need periodicity shifts.
         * Without PBC or with domain decomposition we don't need them.
         */
        if (d >= ePBC2npbcdim(nbs->ePBC) || nbs->dd_dim[d])
        {
            shp[d] = 0;
        }
        else
        {
            if (d == XX &&
                box[XX][XX] - fabs(box[YY][XX]) - fabs(box[ZZ][XX]) < std::sqrt(rl2))
            {
                shp[d] = 2;
            }
            else
            {
                shp[d] = 1;
            }
        }
    }

    if (nbl->bSimple && !gridi->bSimple)
    {
        conv_i  = gridi->na_sc/gridj->na_sc;
        bb_i    = gridi->bb_simple;
        bbcz_i  = gridi->bbcz_simple;
        flags_i = gridi->flags_simple;
    }
    else
    {
        conv_i  = 1;
#if NBNXN_BBXXXX
        if (gridi->bSimple)
        {
            bb_i = gridi->bb;
        }
        else
        {
            pbb_i = gridi->pbb;
        }
#else
        /* We use the normal bounding box format for both grid types */
        bb_i  = gridi->bb;
#endif
        bbcz_i  = gridi->bbcz;
        flags_i = gridi->flags;
    }
    cell0_i = gridi->cell0*conv_i;

    bbcz_j = gridj->bbcz;

    if (conv_i != 1)
    {
        /* Blocks of the conversion factor - 1 give a large repeat count
         * combined with a small block size. This should result in good
         * load balancing for both small and large domains.
         */
        ci_block = conv_i - 1;
    }
    if (debug)
    {
        fprintf(debug, "nbl nc_i %d col.av. %.1f ci_block %d\n",
                gridi->nc, gridi->nc/(double)(gridi->ncx*gridi->ncy), ci_block);
    }

    ndistc   = 0;
    ncpcheck = 0;

    /* Initialize ci_b and ci to 1 before where we want them to start,
     * as they will both be incremented in next_ci.
     */
    ci_b = -1;
    ci   = th*ci_block - 1;
    ci_x = 0;
    ci_y = 0;
    while (next_ci(gridi, conv_i, nth, ci_block, &ci_x, &ci_y, &ci_b, &ci))
    {
        if (nbl->bSimple && flags_i[ci] == 0)
        {
            continue;
        }

        ncj_old_i = nbl->ncj;

        d2cx = 0;
        if (gridj != gridi && shp[XX] == 0)
        {
            if (nbl->bSimple)
            {
                bx1 = bb_i[ci].upper[BB_X];
            }
            else
            {
                bx1 = gridi->c0[XX] + (ci_x+1)*gridi->sx;
            }
            if (bx1 < gridj->c0[XX])
            {
                d2cx = gmx::square(gridj->c0[XX] - bx1);

                if (d2cx >= rl2)
                {
                    continue;
                }
            }
        }

        ci_xy = ci_x*gridi->ncy + ci_y;

        /* Loop over shift vectors in three dimensions */
        for (int tz = -shp[ZZ]; tz <= shp[ZZ]; tz++)
        {
            shz = tz*box[ZZ][ZZ];

            bz0 = bbcz_i[ci*NNBSBB_D  ] + shz;
            bz1 = bbcz_i[ci*NNBSBB_D+1] + shz;

            if (tz == 0)
            {
                d2z = 0;
            }
            else if (tz < 0)
            {
                d2z = gmx::square(bz1);
            }
            else
            {
                d2z = gmx::square(bz0 - box[ZZ][ZZ]);
            }

            d2z_cx = d2z + d2cx;

            if (d2z_cx >= rl2)
            {
                continue;
            }

            bz1_frac = bz1/(gridi->cxy_ind[ci_xy+1] - gridi->cxy_ind[ci_xy]);
            if (bz1_frac < 0)
            {
                bz1_frac = 0;
            }
            /* The check with bz1_frac close to or larger than 1 comes later */

            for (int ty = -shp[YY]; ty <= shp[YY]; ty++)
            {
                shy = ty*box[YY][YY] + tz*box[ZZ][YY];

                if (nbl->bSimple)
                {
                    by0 = bb_i[ci].lower[BB_Y] + shy;
                    by1 = bb_i[ci].upper[BB_Y] + shy;
                }
                else
                {
                    by0 = gridi->c0[YY] + (ci_y  )*gridi->sy + shy;
                    by1 = gridi->c0[YY] + (ci_y+1)*gridi->sy + shy;
                }

                get_cell_range(by0, by1,
                               gridj->ncy, gridj->c0[YY], gridj->sy, gridj->inv_sy,
                               d2z_cx, rl2,
                               &cyf, &cyl);

                if (cyf > cyl)
                {
                    continue;
                }

                d2z_cy = d2z;
                if (by1 < gridj->c0[YY])
                {
                    d2z_cy += gmx::square(gridj->c0[YY] - by1);
                }
                else if (by0 > gridj->c1[YY])
                {
                    d2z_cy += gmx::square(by0 - gridj->c1[YY]);
                }

                for (int tx = -shp[XX]; tx <= shp[XX]; tx++)
                {
                    shift = XYZ2IS(tx, ty, tz);

                    if (pbc_shift_backward && gridi == gridj && shift > CENTRAL)
                    {
                        continue;
                    }

                    shx = tx*box[XX][XX] + ty*box[YY][XX] + tz*box[ZZ][XX];

                    if (nbl->bSimple)
                    {
                        bx0 = bb_i[ci].lower[BB_X] + shx;
                        bx1 = bb_i[ci].upper[BB_X] + shx;
                    }
                    else
                    {
                        bx0 = gridi->c0[XX] + (ci_x  )*gridi->sx + shx;
                        bx1 = gridi->c0[XX] + (ci_x+1)*gridi->sx + shx;
                    }

                    get_cell_range(bx0, bx1,
                                   gridj->ncx, gridj->c0[XX], gridj->sx, gridj->inv_sx,
                                   d2z_cy, rl2,
                                   &cxf, &cxl);

                    if (cxf > cxl)
                    {
                        continue;
                    }

                    if (nbl->bSimple)
                    {
                        new_ci_entry(nbl, cell0_i+ci, shift, flags_i[ci]);
                    }
                    else
                    {
                        new_sci_entry(nbl, cell0_i+ci, shift);
                    }

                    if ((!pbc_shift_backward || (shift == CENTRAL &&
                                                 gridi == gridj)) &&
                        cxf < ci_x)
                    {
                        /* Leave the pairs with i > j.
                         * x is the major index, so skip half of it.
                         */
                        cxf = ci_x;
                    }

                    if (nbl->bSimple)
                    {
                        set_icell_bb_simple(bb_i, ci, shx, shy, shz,
                                            nbl->work->bb_ci);
                    }
                    else
                    {
#if NBNXN_BBXXXX
                        set_icell_bbxxxx_supersub(pbb_i, ci, shx, shy, shz,
                                                  nbl->work->pbb_ci);
#else
                        set_icell_bb_supersub(bb_i, ci, shx, shy, shz,
                                              nbl->work->bb_ci);
#endif
                    }

                    nbs->icell_set_x(cell0_i+ci, shx, shy, shz,
                                     nbat->xstride, nbat->x,
                                     nbl->work);

                    for (int cx = cxf; cx <= cxl; cx++)
                    {
                        d2zx = d2z;
                        if (gridj->c0[XX] + cx*gridj->sx > bx1)
                        {
                            d2zx += gmx::square(gridj->c0[XX] + cx*gridj->sx - bx1);
                        }
                        else if (gridj->c0[XX] + (cx+1)*gridj->sx < bx0)
                        {
                            d2zx += gmx::square(gridj->c0[XX] + (cx+1)*gridj->sx - bx0);
                        }

                        if (gridi == gridj &&
                            cx == ci_x &&
                            (!pbc_shift_backward || shift == CENTRAL) &&
                            cyf < ci_y)
                        {
                            /* Leave the pairs with i > j.
                             * Skip half of y when i and j have the same x.
                             */
                            cyf_x = ci_y;
                        }
                        else
                        {
                            cyf_x = cyf;
                        }

                        for (int cy = cyf_x; cy <= cyl; cy++)
                        {
                            c0 = gridj->cxy_ind[cx*gridj->ncy+cy];
                            c1 = gridj->cxy_ind[cx*gridj->ncy+cy+1];

                            if (pbc_shift_backward &&
                                gridi == gridj &&
                                shift == CENTRAL && c0 < ci)
                            {
                                c0 = ci;
                            }

                            d2zxy = d2zx;
                            if (gridj->c0[YY] + cy*gridj->sy > by1)
                            {
                                d2zxy += gmx::square(gridj->c0[YY] + cy*gridj->sy - by1);
                            }
                            else if (gridj->c0[YY] + (cy+1)*gridj->sy < by0)
                            {
                                d2zxy += gmx::square(gridj->c0[YY] + (cy+1)*gridj->sy - by0);
                            }
                            if (c1 > c0 && d2zxy < rl2)
                            {
                                cs = c0 + static_cast<int>(bz1_frac*(c1 - c0));
                                if (cs >= c1)
                                {
                                    cs = c1 - 1;
                                }

                                d2xy = d2zxy - d2z;

                                /* Find the lowest cell that can possibly
                                 * be within range.
                                 */
                                cf = cs;
                                while (cf > c0 &&
                                       (bbcz_j[cf*NNBSBB_D+1] >= bz0 ||
                                        d2xy + gmx::square(bbcz_j[cf*NNBSBB_D+1] - bz0) < rl2))
                                {
                                    cf--;
                                }

                                /* Find the highest cell that can possibly
                                 * be within range.
                                 */
                                cl = cs;
                                while (cl < c1-1 &&
                                       (bbcz_j[cl*NNBSBB_D] <= bz1 ||
                                        d2xy + gmx::square(bbcz_j[cl*NNBSBB_D] - bz1) < rl2))
                                {
                                    cl++;
                                }

#ifdef NBNXN_REFCODE
                                {
                                    /* Simple reference code, for debugging,
                                     * overrides the more complex code above.
                                     */
                                    cf = c1;
                                    cl = -1;
                                    for (int k = c0; k < c1; k++)
                                    {
                                        if (box_dist2(bx0, bx1, by0, by1, bz0, bz1, bb+k) < rl2 &&
                                            k < cf)
                                        {
                                            cf = k;
                                        }
                                        if (box_dist2(bx0, bx1, by0, by1, bz0, bz1, bb+k) < rl2 &&
                                            k > cl)
                                        {
                                            cl = k;
                                        }
                                    }
                                }
#endif

                                if (gridi == gridj)
                                {
                                    /* We want each atom/cell pair only once,
                                     * only use cj >= ci.
                                     */
                                    if (!pbc_shift_backward || shift == CENTRAL)
                                    {
                                        cf = std::max(cf, ci);
                                    }
                                }

                                if (cf <= cl)
                                {
                                    /* For f buffer flags with simple lists */
                                    ncj_old_j = nbl->ncj;

                                    switch (nb_kernel_type)
                                    {
                                        case nbnxnk4x4_PlainC:
                                            check_cell_list_space_simple(nbl, cl-cf+1);

                                            make_cluster_list_simple(gridj,
                                                                     nbl, ci, cf, cl,
                                                                     (gridi == gridj && shift == CENTRAL),
                                                                     nbat->x,
                                                                     rl2, rbb2,
                                                                     &ndistc);
                                            break;
#ifdef GMX_NBNXN_SIMD_4XN
                                        case nbnxnk4xN_SIMD_4xN:
                                            check_cell_list_space_simple(nbl, ci_to_cj_simd_4xn(cl - cf) + 2);
                                            make_cluster_list_simd_4xn(gridj,
                                                                       nbl, ci, cf, cl,
                                                                       (gridi == gridj && shift == CENTRAL),
                                                                       nbat->x,
                                                                       rl2, rbb2,
                                                                       &ndistc);
                                            break;
#endif
#ifdef GMX_NBNXN_SIMD_2XNN
                                        case nbnxnk4xN_SIMD_2xNN:
                                            check_cell_list_space_simple(nbl, ci_to_cj_simd_2xnn(cl - cf) + 2);
                                            make_cluster_list_simd_2xnn(gridj,
                                                                        nbl, ci, cf, cl,
                                                                        (gridi == gridj && shift == CENTRAL),
                                                                        nbat->x,
                                                                        rl2, rbb2,
                                                                        &ndistc);
                                            break;
#endif
                                        case nbnxnk8x8x8_PlainC:
                                        case nbnxnk8x8x8_GPU:
                                            check_cell_list_space_supersub(nbl, cl-cf+1);
                                            for (cj = cf; cj <= cl; cj++)
                                            {
                                                make_cluster_list_supersub(gridi, gridj,
                                                                           nbl, ci, cj,
                                                                           (gridi == gridj && shift == CENTRAL && ci == cj),
                                                                           nbat->xstride, nbat->x,
                                                                           rl2, rbb2,
                                                                           &ndistc);
                                            }
                                            break;
                                    }
                                    ncpcheck += cl - cf + 1;

                                    if (bFBufferFlag && nbl->ncj > ncj_old_j)
                                    {
                                        int cbf = nbl->cj[ncj_old_j].cj >> gridj_flag_shift;
                                        int cbl = nbl->cj[nbl->ncj-1].cj >> gridj_flag_shift;
                                        for (int cb = cbf; cb <= cbl; cb++)
                                        {
                                            bitmask_init_bit(&gridj_flag[cb], th);
                                        }
                                    }

                                    nbl->ncjInUse += nbl->ncj - ncj_old_j;
                                }
                            }
                        }
                    }

                    /* Set the exclusions for this ci list */
                    if (nbl->bSimple)
                    {
                        set_ci_top_excls(nbs,
                                         nbl,
                                         shift == CENTRAL && gridi == gridj,
                                         na_cj_2log,
                                         &(nbl->ci[nbl->nci]),
                                         excl);

                        if (nbs->bFEP)
                        {
                            make_fep_list(nbs, nbat, nbl,
                                          shift == CENTRAL && gridi == gridj,
                                          &(nbl->ci[nbl->nci]),
                                          gridi, gridj, nbl_fep);
                        }
                    }
                    else
                    {
                        set_sci_top_excls(nbs,
                                          nbl,
                                          shift == CENTRAL && gridi == gridj,
                                          gridj->na_c_2log,
                                          &(nbl->sci[nbl->nsci]),
                                          excl);

                        if (nbs->bFEP)
                        {
                            make_fep_list_supersub(nbs, nbat, nbl,
                                                   shift == CENTRAL && gridi == gridj,
                                                   &(nbl->sci[nbl->nsci]),
                                                   shx, shy, shz,
                                                   rl_fep2,
                                                   gridi, gridj, nbl_fep);
                        }
                    }

                    /* Close this ci list */
                    if (nbl->bSimple)
                    {
                        close_ci_entry_simple(nbl);
                    }
                    else
                    {
                        close_ci_entry_supersub(nbl,
                                                nsubpair_max,
                                                progBal, nsubpair_tot_est,
                                                th, nth);
                    }
                }
            }
        }

        if (bFBufferFlag && nbl->ncj > ncj_old_i)
        {
            bitmask_init_bit(&(work->buffer_flags.flag[(gridi->cell0+ci)>>gridi_flag_shift]), th);
        }
    }

    work->ndistc = ndistc;

    nbs_cycle_stop(&work->cc[enbsCCsearch]);

    GMX_ASSERT(nbl->ncjInUse == nbl->ncj || nbs->bFEP, "Without free-energy all cj pair-list entries should be in use. Note that subsequent code does not make use of the equality, this check is only here to catch bugs");

    if (debug)
    {
        fprintf(debug, "number of distance checks %d\n", ndistc);
        fprintf(debug, "ncpcheck %s %d\n", gridi == gridj ? "local" : "non-local",
                ncpcheck);

        if (nbl->bSimple)
        {
            print_nblist_statistics_simple(debug, nbl, nbs, rlist);
        }
        else
        {
            print_nblist_statistics_supersub(debug, nbl, nbs, rlist);
        }

        if (nbs->bFEP)
        {
            fprintf(debug, "nbl FEP list pairs: %d\n", nbl_fep->nrj);
        }
    }
}
static void reduce_buffer_flags(const nbnxn_search_t        nbs,
                                int                         nsrc,
                                const nbnxn_buffer_flags_t *dest)
{
    for (int s = 0; s < nsrc; s++)
    {
        gmx_bitmask_t * flag = nbs->work[s].buffer_flags.flag;

        for (int b = 0; b < dest->nflag; b++)
        {
            bitmask_union(&(dest->flag[b]), flag[b]);
        }
    }
}
static void print_reduction_cost(const nbnxn_buffer_flags_t *flags, int nout)
{
    int           nelem, nkeep, ncopy, nred, out;
    gmx_bitmask_t mask_0;

    nelem = 0;
    nkeep = 0;
    ncopy = 0;
    nred  = 0;
    bitmask_init_bit(&mask_0, 0);
    for (int b = 0; b < flags->nflag; b++)
    {
        if (bitmask_is_equal(flags->flag[b], mask_0))
        {
            /* Only flag 0 is set, no copy or reduction required */
            nelem++;
            nkeep++;
        }
        else if (!bitmask_is_zero(flags->flag[b]))
        {
            int c = 0;
            for (out = 0; out < nout; out++)
            {
                if (bitmask_is_set(flags->flag[b], out))
                {
                    c++;
                }
            }
            nelem += c;
            if (c == 1)
            {
                ncopy++;
            }
            else
            {
                nred += c;
            }
        }
    }

    fprintf(debug, "nbnxn reduction: #flag %d #list %d elem %4.2f, keep %4.2f copy %4.2f red %4.2f\n",
            flags->nflag, nout,
            nelem/(double)(flags->nflag),
            nkeep/(double)(flags->nflag),
            ncopy/(double)(flags->nflag),
            nred/(double)(flags->nflag));
}
/* Copies the list entries from src to dest when cjStart <= *cjGlobal < cjEnd.
 * *cjGlobal is updated with the cj count in src.
 * When setFlags==true, flag bit t is set in flag for all i and j clusters.
 */
template<bool setFlags>
static void copySelectedListRange(const nbnxn_ci_t * gmx_restrict srcCi,
                                  const nbnxn_pairlist_t * gmx_restrict src,
                                  nbnxn_pairlist_t * gmx_restrict dest,
                                  gmx_bitmask_t *flag,
                                  int iFlagShift, int jFlagShift, int t)
{
    int ncj = srcCi->cj_ind_end - srcCi->cj_ind_start;

    if (dest->nci + 1 >= dest->ci_nalloc)
    {
        nb_realloc_ci(dest, dest->nci + 1);
    }
    check_cell_list_space_simple(dest, ncj);

    dest->ci[dest->nci]              = *srcCi;
    dest->ci[dest->nci].cj_ind_start = dest->ncj;
    dest->ci[dest->nci].cj_ind_end   = dest->ncj + ncj;

    if (setFlags)
    {
        bitmask_init_bit(&flag[srcCi->ci >> iFlagShift], t);
    }

    for (int j = srcCi->cj_ind_start; j < srcCi->cj_ind_end; j++)
    {
        dest->cj[dest->ncj++] = src->cj[j];

        if (setFlags)
        {
            /* NOTE: This is relatively expensive, since this
             * operation is done for all elements in the list,
             * whereas at list generation this is done only
             * once for each flag entry.
             */
            bitmask_init_bit(&flag[src->cj[j].cj >> jFlagShift], t);
        }
    }

    dest->nci++;
}
/* This routine re-balances the pairlists such that all are nearly equally
 * sized. Only whole i-entries are moved between lists. These are moved
 * between the ends of the lists, such that the buffer reduction cost should
 * not change significantly.
 * Note that all original reduction flags are currently kept. This can lead
 * to reduction of parts of the force buffer that could be avoided. But since
 * the original lists are quite balanced, this will only give minor overhead.
 */
static void rebalanceSimpleLists(int                               numLists,
                                 nbnxn_pairlist_t * const * const  srcSet,
                                 nbnxn_pairlist_t                **destSet,
                                 nbnxn_search_work_t              *searchWork)
{
    int ncjTotal = 0;
    for (int s = 0; s < numLists; s++)
    {
        ncjTotal += srcSet[s]->ncjInUse;
    }
    int ncjTarget = (ncjTotal + numLists - 1)/numLists;

#pragma omp parallel num_threads(numLists)
    {
        int t       = gmx_omp_get_thread_num();

        int cjStart = ncjTarget* t;
        int cjEnd   = ncjTarget*(t + 1);

        /* The destination pair-list for task/thread t */
        nbnxn_pairlist_t *dest = destSet[t];

        clear_pairlist(dest);
        dest->bSimple = srcSet[0]->bSimple;
        dest->na_ci   = srcSet[0]->na_ci;
        dest->na_cj   = srcSet[0]->na_cj;

        /* Note that the flags in the work struct (still) contain flags
         * for all entries that are present in srcSet->nbl[t].
         */
        gmx_bitmask_t *flag       = searchWork[t].buffer_flags.flag;

        int            iFlagShift = getBufferFlagShift(dest->na_ci);
        int            jFlagShift = getBufferFlagShift(dest->na_cj);

        int            cjGlobal   = 0;
        for (int s = 0; s < numLists && cjGlobal < cjEnd; s++)
        {
            const nbnxn_pairlist_t *src = srcSet[s];

            if (cjGlobal + src->ncjInUse > cjStart)
            {
                for (int i = 0; i < src->nci && cjGlobal < cjEnd; i++)
                {
                    const nbnxn_ci_t *srcCi = &src->ci[i];
                    int               ncj   = srcCi->cj_ind_end - srcCi->cj_ind_start;
                    if (cjGlobal >= cjStart)
                    {
                        /* If the source list is not our own, we need to set
                         * extra flags (the template bool parameter).
                         */
                        if (s != t)
                        {
                            copySelectedListRange
                            <true>
                                (srcCi, src, dest,
                                flag, iFlagShift, jFlagShift, t);
                        }
                        else
                        {
                            copySelectedListRange
                            <false>
                                (srcCi, src,
                                dest, flag, iFlagShift, jFlagShift, t);
                        }
                    }
                    cjGlobal += ncj;
                }
            }
            else
            {
                cjGlobal += src->ncjInUse;
            }
        }

        dest->ncjInUse = dest->ncj;
    }

    int ncjTotalNew = 0;
    for (int s = 0; s < numLists; s++)
    {
        ncjTotalNew += destSet[s]->ncjInUse;
    }
    GMX_RELEASE_ASSERT(ncjTotalNew == ncjTotal, "The total size of the lists before and after rebalancing should match");
}
/* Returns if the pairlists are so imbalanced that it is worth rebalancing. */
static bool checkRebalanceSimpleLists(const nbnxn_pairlist_set_t *listSet)
{
    int numLists = listSet->nnbl;
    int ncjMax   = 0;
    int ncjTotal = 0;
    for (int s = 0; s < numLists; s++)
    {
        ncjMax    = std::max(ncjMax, listSet->nbl[s]->ncjInUse);
        ncjTotal += listSet->nbl[s]->ncjInUse;
    }
    if (debug)
    {
        fprintf(debug, "Pair-list ncjMax %d ncjTotal %d\n", ncjMax, ncjTotal);
    }
    /* The rebalancing adds 3% extra time to the search. Heuristically we
     * determined that under common conditions the non-bonded kernel balance
     * improvement will outweigh this when the imbalance is more than 3%.
     * But this will, obviously, depend on search vs kernel time and nstlist.
     */
    const real rebalanceTolerance = 1.03;

    return numLists*ncjMax > ncjTotal*rebalanceTolerance;
}
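
/* Illustrative sketch (not part of the GROMACS sources, kept under #if 0):
 * the 3% criterion above compares the largest list against a perfectly
 * balanced split. E.g. four lists with 1000, 950, 980 and 870 j-clusters give
 * ncjMax = 1000 and ncjTotal = 3800, and 4*1000 > 3800*1.03 (= 3914), so
 * rebalancing would run. The numbers are made up for the example.
 */
#if 0
static bool exampleWorthRebalancing()
{
    const int    ncj[4]    = { 1000, 950, 980, 870 };
    const double tolerance = 1.03;

    int ncjMax = 0, ncjTotal = 0;
    for (int n : ncj)
    {
        ncjMax    = (n > ncjMax ? n : ncjMax);
        ncjTotal += n;
    }
    return 4*ncjMax > ncjTotal*tolerance;   /* true for this example */
}
#endif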
/* Perform a count (linear) sort to sort the smaller lists to the end.
 * This avoids load imbalance on the GPU, as large lists will be
 * scheduled and executed first and the smaller lists later.
 * Load balancing between multi-processors only happens at the end
 * and there smaller lists lead to more effective load balancing.
 * The sorting is done on the cj4 count, not on the actual pair counts.
 * Not only does this make the sort faster, but it also results in
 * better load balancing than using a list sorted on exact load.
 * This function swaps the pointer in the pair list to avoid a copy operation.
 */
static void sort_sci(nbnxn_pairlist_t *nbl)
{
    nbnxn_list_work_t *work;
    int                m, s0, s1;
    nbnxn_sci_t       *sci_sort;

    if (nbl->ncj4 <= nbl->nsci)
    {
        /* nsci = 0 or all sci have size 1, sorting won't change the order */
        return;
    }

    work = nbl->work;

    /* We will distinguish differences up to double the average */
    m = (2*nbl->ncj4)/nbl->nsci;

    if (m + 1 > work->sort_nalloc)
    {
        work->sort_nalloc = over_alloc_large(m + 1);
        srenew(work->sort, work->sort_nalloc);
    }

    if (work->sci_sort_nalloc != nbl->sci_nalloc)
    {
        work->sci_sort_nalloc = nbl->sci_nalloc;
        nbnxn_realloc_void((void **)&work->sci_sort,
                           0,
                           work->sci_sort_nalloc*sizeof(*work->sci_sort),
                           nbl->alloc, nbl->free);
    }

    /* Count the entries of each size */
    for (int i = 0; i <= m; i++)
    {
        work->sort[i] = 0;
    }
    for (int s = 0; s < nbl->nsci; s++)
    {
        int i = std::min(m, nbl->sci[s].cj4_ind_end - nbl->sci[s].cj4_ind_start);
        work->sort[i]++;
    }
    /* Calculate the offset for each count */
    s0            = work->sort[m];
    work->sort[m] = 0;
    for (int i = m - 1; i >= 0; i--)
    {
        s1            = work->sort[i];
        work->sort[i] = work->sort[i + 1] + s0;
        s0            = s1;
    }

    /* Sort entries directly into place */
    sci_sort = work->sci_sort;
    for (int s = 0; s < nbl->nsci; s++)
    {
        int i = std::min(m, nbl->sci[s].cj4_ind_end - nbl->sci[s].cj4_ind_start);
        sci_sort[work->sort[i]++] = nbl->sci[s];
    }

    /* Swap the sci pointers so we use the new, sorted list */
    work->sci_sort = nbl->sci;
    nbl->sci       = sci_sort;
}
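
/* Illustrative sketch (not part of the GROMACS sources, kept under #if 0):
 * the same counting sort on a plain vector of sizes, placing larger entries
 * first just like the sci sort above. Sizes are clamped to a maximum bucket m;
 * names and types are hypothetical.
 */
#if 0
#include <algorithm>
#include <vector>

static std::vector<int> sortLargestFirst(const std::vector<int> &sizes, int m)
{
    /* Count entries per (clamped) size */
    std::vector<int> count(m + 1, 0);
    for (int s : sizes)
    {
        count[std::min(m, s)]++;
    }

    /* offset[i] = number of entries with clamped size larger than i,
     * so the biggest entries end up at the front of the output.
     */
    std::vector<int> offset(m + 1, 0);
    for (int i = m - 1; i >= 0; i--)
    {
        offset[i] = offset[i + 1] + count[i + 1];
    }

    /* Place entries directly into their sorted positions */
    std::vector<int> sorted(sizes.size());
    for (int s : sizes)
    {
        int bucket               = std::min(m, s);
        sorted[offset[bucket]++] = s;
    }
    return sorted;
}
#endif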
/* Make a local or non-local pair-list, depending on iloc */
void nbnxn_make_pairlist(const nbnxn_search_t  nbs,
                         nbnxn_atomdata_t     *nbat,
                         const t_blocka       *excl,
                         real                  rlist,
                         int                   min_ci_balanced,
                         nbnxn_pairlist_set_t *nbl_list,
                         int                   iloc,
                         int                   nb_kernel_type,
                         t_nrnb               *nrnb)
{
    nbnxn_grid_t      *gridi, *gridj;
    gmx_bool           bGPUCPU;
    int                nzi, zj0, zj1;
    int                nsubpair_target;
    float              nsubpair_tot_est;
    int                nnbl;
    nbnxn_pairlist_t **nbl;
    int                ci_block;
    gmx_bool           CombineNBLists;
    gmx_bool           progBal;
    int                np_tot, np_noq, np_hlj, nap;

    /* Check if we are running hybrid GPU + CPU nbnxn mode */
    bGPUCPU = (!nbs->grid[0].bSimple && nbl_list->bSimple);

    nnbl            = nbl_list->nnbl;
    nbl             = nbl_list->nbl;
    CombineNBLists  = nbl_list->bCombined;

    if (debug)
    {
        fprintf(debug, "ns making %d nblists\n", nnbl);
    }

    nbat->bUseBufferFlags = (nbat->nout > 1);
    /* We should re-init the flags before making the first list */
    if (nbat->bUseBufferFlags && (LOCAL_I(iloc) || bGPUCPU))
    {
        init_buffer_flags(&nbat->buffer_flags, nbat->natoms);
    }

    if (nbl_list->bSimple)
    {
        switch (nb_kernel_type)
        {
#ifdef GMX_NBNXN_SIMD_4XN
            case nbnxnk4xN_SIMD_4xN:
                nbs->icell_set_x = icell_set_x_simd_4xn;
                break;
#endif
#ifdef GMX_NBNXN_SIMD_2XNN
            case nbnxnk4xN_SIMD_2xNN:
                nbs->icell_set_x = icell_set_x_simd_2xnn;
                break;
#endif
            default:
                nbs->icell_set_x = icell_set_x_simple;
                break;
        }
    }
    else
    {
        /* MSVC 2013 complains about switch statements without case */
        nbs->icell_set_x = icell_set_x_supersub;
    }

    if (LOCAL_I(iloc))
    {
        /* Only zone (grid) 0 vs 0 */
        nzi = 1;
        zj0 = 0;
        zj1 = 1;
    }
    else
    {
        nzi = nbs->zones->nizone;
    }

    if (!nbl_list->bSimple && min_ci_balanced > 0)
    {
        get_nsubpair_target(nbs, iloc, rlist, min_ci_balanced,
                            &nsubpair_target, &nsubpair_tot_est);
    }
    else
    {
        nsubpair_target  = 0;
        nsubpair_tot_est = 0;
    }

    /* Clear all pair-lists */
    for (int th = 0; th < nnbl; th++)
    {
        clear_pairlist(nbl[th]);

        if (nbs->bFEP)
        {
            clear_pairlist_fep(nbl_list->nbl_fep[th]);
        }
    }

    for (int zi = 0; zi < nzi; zi++)
    {
        gridi = &nbs->grid[zi];

        if (NONLOCAL_I(iloc))
        {
            zj0 = nbs->zones->izone[zi].j0;
            zj1 = nbs->zones->izone[zi].j1;
            if (zi == 0)
            {
                zj0++;
            }
        }
        for (int zj = zj0; zj < zj1; zj++)
        {
            gridj = &nbs->grid[zj];

            if (debug)
            {
                fprintf(debug, "ns search grid %d vs %d\n", zi, zj);
            }

            nbs_cycle_start(&nbs->cc[enbsCCsearch]);

            if (nbl[0]->bSimple && !gridi->bSimple)
            {
                /* Hybrid list, determine blocking later */
                ci_block = 0;
            }
            else
            {
                ci_block = get_ci_block_size(gridi, nbs->DomDec, nnbl);
            }

            /* With GPU: generate progressively smaller lists for
             * load balancing for local only or non-local with 2 zones.
             */
            progBal = (LOCAL_I(iloc) || nbs->zones->n <= 2);

#pragma omp parallel for num_threads(nnbl) schedule(static)
            for (int th = 0; th < nnbl; th++)
            {
                try
                {
                    /* Re-init the thread-local work flag data before making
                     * the first list (not an elegant conditional).
                     */
                    if (nbat->bUseBufferFlags && ((zi == 0 && zj == 0) ||
                                                  (bGPUCPU && zi == 0 && zj == 1)))
                    {
                        init_buffer_flags(&nbs->work[th].buffer_flags, nbat->natoms);
                    }

                    if (CombineNBLists && th > 0)
                    {
                        clear_pairlist(nbl[th]);
                    }

                    /* Divide the i super cells equally over the nblists */
                    nbnxn_make_pairlist_part(nbs, gridi, gridj,
                                             &nbs->work[th], nbat, excl,
                                             rlist,
                                             nb_kernel_type,
                                             ci_block,
                                             nbat->bUseBufferFlags,
                                             nsubpair_target,
                                             progBal, nsubpair_tot_est,
                                             th, nnbl,
                                             nbl[th],
                                             nbl_list->nbl_fep[th]);
                }
                GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
            }
            nbs_cycle_stop(&nbs->cc[enbsCCsearch]);

            np_tot = 0;
            np_noq = 0;
            np_hlj = 0;
            for (int th = 0; th < nnbl; th++)
            {
                inc_nrnb(nrnb, eNR_NBNXN_DIST2, nbs->work[th].ndistc);

                if (nbl_list->bSimple)
                {
                    np_tot += nbl[th]->ncj;
                    np_noq += nbl[th]->work->ncj_noq;
                    np_hlj += nbl[th]->work->ncj_hlj;
                }
                else
                {
                    /* This count ignores potential subsequent pair pruning */
                    np_tot += nbl[th]->nci_tot;
                }
            }
            nap                   = nbl[0]->na_ci*nbl[0]->na_cj;
            nbl_list->natpair_ljq = (np_tot - np_noq)*nap - np_hlj*nap/2;
            nbl_list->natpair_lj  = np_noq*nap;
            nbl_list->natpair_q   = np_hlj*nap/2;

            if (CombineNBLists && nnbl > 1)
            {
                nbs_cycle_start(&nbs->cc[enbsCCcombine]);

                combine_nblists(nnbl-1, nbl+1, nbl[0]);

                nbs_cycle_stop(&nbs->cc[enbsCCcombine]);
            }
        }
    }

    if (nbl_list->bSimple)
    {
        if (nnbl > 1 && checkRebalanceSimpleLists(nbl_list))
        {
            rebalanceSimpleLists(nbl_list->nnbl, nbl_list->nbl, nbl_list->nbl_work, nbs->work);

            /* Swap the pointer of the sets of pair lists */
            nbnxn_pairlist_t **tmp = nbl_list->nbl;
            nbl_list->nbl          = nbl_list->nbl_work;
            nbl_list->nbl_work     = tmp;
        }
    }
    else
    {
        /* Sort the entries on size, large ones first */
        if (CombineNBLists || nnbl == 1)
        {
            sort_sci(nbl[0]);
        }
        else
        {
#pragma omp parallel for num_threads(nnbl) schedule(static)
            for (int th = 0; th < nnbl; th++)
            {
                try
                {
                    sort_sci(nbl[th]);
                }
                GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
            }
        }
    }

    if (nbat->bUseBufferFlags)
    {
        reduce_buffer_flags(nbs, nbl_list->nnbl, &nbat->buffer_flags);
    }

    if (nbs->bFEP)
    {
        /* Balance the free-energy lists over all the threads */
        balance_fep_lists(nbs, nbl_list);
    }

    /* Special performance logging stuff (env.var. GMX_NBNXN_CYCLE) */
    if (LOCAL_I(iloc))
    {
        nbs->search_count++;
    }
    if (nbs->print_cycles &&
        (!nbs->DomDec || !LOCAL_I(iloc)) &&
        nbs->search_count % 100 == 0)
    {
        nbs_cycle_print(stderr, nbs);
    }

    /* If we have more than one list, they either got rebalanced (CPU)
     * or combined (GPU), so we should dump the final result to debug.
     */
    if (debug && nbl_list->nnbl > 1)
    {
        if (nbl_list->bSimple)
        {
            for (int t = 0; t < nbl_list->nnbl; t++)
            {
                print_nblist_statistics_simple(debug, nbl_list->nbl[t], nbs, rlist);
            }
        }
        else
        {
            print_nblist_statistics_supersub(debug, nbl_list->nbl[0], nbs, rlist);
        }
    }

    if (debug)
    {
        if (gmx_debug_at)
        {
            if (nbl_list->bSimple)
            {
                for (int t = 0; t < nbl_list->nnbl; t++)
                {
                    print_nblist_ci_cj(debug, nbl_list->nbl[t]);
                }
            }
            else
            {
                print_nblist_sci_cj(debug, nbl_list->nbl[0]);
            }
        }

        if (nbat->bUseBufferFlags)
        {
            print_reduction_cost(&nbat->buffer_flags, nbl_list->nnbl);
        }
    }
}