/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#ifndef _nbnxn_kernel_simd_utils_x86_128s_h_
#define _nbnxn_kernel_simd_utils_x86_128s_h_

#include "gromacs/legacyheaders/types/simple.h"

/* This file contains all functions/macros for the SIMD kernels
 * which have explicit dependencies on the j-cluster size and/or SIMD-width.
 * The functionality which depends on the j-cluster size is:
 *   LJ-parameter lookup
 *   force table lookup
 *   energy group pair energy storage
 */

typedef gmx_simd_int32_t gmx_exclfilter;
static const int filter_stride = GMX_SIMD_INT32_WIDTH/GMX_SIMD_REAL_WIDTH;

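/* Editorial note, assuming the 4-wide 128-bit single-precision SSE target
 * this header covers: GMX_SIMD_INT32_WIDTH == GMX_SIMD_REAL_WIDTH == 4,
 * so filter_stride evaluates to 1 and the exclusion filters are stored at
 * the same stride as the reals.
 */
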
/* Collect element 0 and 1 of the 4 inputs to out0 and out1, respectively */
static gmx_inline void gmx_simdcall
gmx_shuffle_4_ps_fil01_to_2_ps(__m128 in0, __m128 in1, __m128 in2, __m128 in3,
                               __m128 *out0, __m128 *out1)
{
    __m128 _c01, _c23;

    _c01  = _mm_movelh_ps(in0, in1);
    _c23  = _mm_movelh_ps(in2, in3);
    *out0 = _mm_shuffle_ps(_c01, _c23, _MM_SHUFFLE(2, 0, 2, 0));
    *out1 = _mm_shuffle_ps(_c01, _c23, _MM_SHUFFLE(3, 1, 3, 1));
}

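/* Editorial illustration (not part of the original header), with
 * hypothetical lanes in0 = {a0,a1,a2,a3} ... in3 = {d0,d1,d2,d3}:
 * _mm_movelh_ps gives _c01 = {a0,a1,b0,b1} and _c23 = {c0,c1,d0,d1},
 * so the two shuffles above produce
 *     *out0 = {a0,b0,c0,d0}  and  *out1 = {a1,b1,c1,d1}.
 */
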
/* Collect element 2 of the 4 inputs to out */
static gmx_inline __m128 gmx_simdcall
gmx_shuffle_4_ps_fil2_to_1_ps(__m128 in0, __m128 in1, __m128 in2, __m128 in3)
{
    __m128 _c01, _c23;

    _c01 = _mm_shuffle_ps(in0, in1, _MM_SHUFFLE(3, 2, 3, 2));
    _c23 = _mm_shuffle_ps(in2, in3, _MM_SHUFFLE(3, 2, 3, 2));

    return _mm_shuffle_ps(_c01, _c23, _MM_SHUFFLE(2, 0, 2, 0));
}

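/* Editorial illustration (not part of the original header), with the same
 * hypothetical lane names as above: _c01 = {a2,a3,b2,b3} and
 * _c23 = {c2,c3,d2,d3}, so the final shuffle returns {a2,b2,c2,d2},
 * i.e. element 2 of each input.
 */
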
/* Sum the elements within each input register and store the sums in out */
static gmx_inline __m128 gmx_simdcall
gmx_mm_transpose_sum4_pr(__m128 in0, __m128 in1,
                         __m128 in2, __m128 in3)
{
    _MM_TRANSPOSE4_PS(in0, in1, in2, in3);
    in0 = _mm_add_ps(in0, in1);
    in2 = _mm_add_ps(in2, in3);

    return _mm_add_ps(in0, in2);
}

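/* Editorial note: after _MM_TRANSPOSE4_PS, register k holds element k of
 * each original input, so the three adds return
 * {sum(in0), sum(in1), sum(in2), sum(in3)}: one horizontal sum per input.
 */
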
static gmx_inline void
load_lj_pair_params(const real *nbfp, const int *type, int aj,
                    __m128 *c6_S, __m128 *c12_S)
{
    __m128 clj_S[UNROLLJ];
    int    p;

    for (p = 0; p < UNROLLJ; p++)
    {
        /* Here we load 4 aligned floats, but we need just 2 */
        clj_S[p] = gmx_simd_load_r(nbfp+type[aj+p]*nbfp_stride);
    }
    gmx_shuffle_4_ps_fil01_to_2_ps(clj_S[0], clj_S[1], clj_S[2], clj_S[3], c6_S, c12_S);
}

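/* Editorial note, inferred from how the fil01 shuffle is used here: each
 * nbfp entry appears to hold the LJ c6 and c12 parameters in elements 0
 * and 1, so after the shuffle *c6_S and *c12_S hold the c6 and c12
 * parameters for the four j-atoms aj..aj+3.
 */
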
/* The load_table functions below are performance critical.
 * The routines issue UNROLLI*UNROLLJ _mm_load_ps calls.
 * As these all have latencies, scheduling is crucial.
 * The Intel compilers and CPUs seem to do a good job at this.
 * But AMD CPUs perform significantly worse with gcc than with icc.
 * Performance is improved a bit by using the extract function UNROLLJ times,
 * instead of doing an _mm_store_si128 for every i-particle.
 * This is only faster when we use FDV0 formatted tables, where we also need
 * to multiply the index by 4, which can be done by a SIMD bit shift.
 * With single precision AVX, 8 extracts are much slower than 1 store.
 * Because of this, the load_table_f function always takes the ti
 * parameter, which should contain a buffer that is aligned with
 * prepare_table_load_buffer(), but it is only used with full-width
 * AVX_256.
 */

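/* Editorial note: an FDV0-formatted table stores four reals per table
 * point: the force F, the force difference D to the next point, the
 * energy V and a zero pad. That stride of 4 is why the index is
 * left-shifted by 2 below.
 */
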
static gmx_inline void gmx_simdcall
load_table_f(const real *tab_coul_FDV0, gmx_simd_int32_t ti_S, int gmx_unused *ti,
             __m128 *ctab0_S, __m128 *ctab1_S)
{
    int    idx[4];
    __m128 ctab_S[4];

    /* Table has 4 entries, left-shift index by 2 */
    ti_S = _mm_slli_epi32(ti_S, 2);
    /* Without SSE4.1 the extract macro needs an immediate: unroll */
    idx[0]    = gmx_simd_extract_i(ti_S, 0);
    ctab_S[0] = _mm_load_ps(tab_coul_FDV0+idx[0]);
    idx[1]    = gmx_simd_extract_i(ti_S, 1);
    ctab_S[1] = _mm_load_ps(tab_coul_FDV0+idx[1]);
    idx[2]    = gmx_simd_extract_i(ti_S, 2);
    ctab_S[2] = _mm_load_ps(tab_coul_FDV0+idx[2]);
    idx[3]    = gmx_simd_extract_i(ti_S, 3);
    ctab_S[3] = _mm_load_ps(tab_coul_FDV0+idx[3]);

    /* Shuffle the force table entries to a convenient order */
    gmx_shuffle_4_ps_fil01_to_2_ps(ctab_S[0], ctab_S[1], ctab_S[2], ctab_S[3], ctab0_S, ctab1_S);
}

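/* Editorial note: the fil01 shuffle gathers FDV0 elements 0 and 1, so
 * *ctab0_S holds the F values and *ctab1_S the D values of the four
 * loaded table points.
 */
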
static gmx_inline void gmx_simdcall
load_table_f_v(const real *tab_coul_FDV0, gmx_simd_int32_t ti_S, int gmx_unused *ti,
               __m128 *ctab0_S, __m128 *ctab1_S, __m128 *ctabv_S)
{
    int    idx[4];
    __m128 ctab_S[4];

    /* Table has 4 entries, left-shift index by 2 */
    ti_S = _mm_slli_epi32(ti_S, 2);
    /* Without SSE4.1 the extract macro needs an immediate: unroll */
    idx[0]    = gmx_simd_extract_i(ti_S, 0);
    ctab_S[0] = _mm_load_ps(tab_coul_FDV0+idx[0]);
    idx[1]    = gmx_simd_extract_i(ti_S, 1);
    ctab_S[1] = _mm_load_ps(tab_coul_FDV0+idx[1]);
    idx[2]    = gmx_simd_extract_i(ti_S, 2);
    ctab_S[2] = _mm_load_ps(tab_coul_FDV0+idx[2]);
    idx[3]    = gmx_simd_extract_i(ti_S, 3);
    ctab_S[3] = _mm_load_ps(tab_coul_FDV0+idx[3]);

    /* Shuffle the force table entries to a convenient order */
    gmx_shuffle_4_ps_fil01_to_2_ps(ctab_S[0], ctab_S[1], ctab_S[2], ctab_S[3], ctab0_S, ctab1_S);
    *ctabv_S = gmx_shuffle_4_ps_fil2_to_1_ps(ctab_S[0], ctab_S[1], ctab_S[2], ctab_S[3]);
}

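/* Editorial note: the additional fil2 shuffle gathers FDV0 element 2, so
 * *ctabv_S holds the energy V of each of the four loaded table points.
 */
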
static gmx_inline gmx_exclfilter gmx_simdcall
gmx_load1_exclfilter(int e)
{
    return _mm_set1_epi32(e);
}

static gmx_inline gmx_exclfilter gmx_simdcall
gmx_load_exclusion_filter(const unsigned *i)
{
    return gmx_simd_load_i(i);
}

static gmx_inline gmx_simd_bool_t gmx_simdcall
gmx_checkbitmask_pb(gmx_exclfilter m0, gmx_exclfilter m1)
{
    return _mm_castsi128_ps(_mm_cmpeq_epi32(_mm_andnot_si128(m0, m1), _mm_setzero_si128()));
}

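/* Editorial note on gmx_checkbitmask_pb: _mm_andnot_si128(m0, m1) computes
 * (~m0) & m1, so a lane compares equal to zero, and thus tests TRUE,
 * exactly when every bit set in m1 is also set in m0.
 */
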
#endif /* _nbnxn_kernel_simd_utils_x86_128s_h_ */