/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2014,2015,2017,2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

#ifndef GMX_SIMD_IMPL_X86_MIC_SIMD4_DOUBLE_H
#define GMX_SIMD_IMPL_X86_MIC_SIMD4_DOUBLE_H

#include "config.h"

#include <cassert>

#include <cstdint>

#include <immintrin.h>

#include "gromacs/utility/basedefinitions.h"

#include "impl_x86_mic_simd_double.h"

namespace gmx
{
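
// On MIC there is no separate 256-bit register set, so SIMD4 double
// precision uses the low four elements of a full 512-bit register; the
// operations below therefore mask with 0xF (or 0x00FF when viewing the
// same data as 32-bit integers).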

class Simd4Double
{
public:
    Simd4Double() {}

    Simd4Double(double d) : simdInternal_(_mm512_set1_pd(d)) {}

    // Internal utility constructor to simplify return statements
    Simd4Double(__m512d simd) : simdInternal_(simd) {}

    __m512d simdInternal_;
};
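
// Booleans for SIMD4 doubles are kept in the low four bits of a 16-bit
// mask register.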

class Simd4DBool
{
public:
    Simd4DBool() {}

    // Construct from scalar bool
    Simd4DBool(bool b) : simdInternal_(_mm512_int2mask(b ? 0xF : 0x0)) {}

    // Internal utility constructor to simplify return statements
    Simd4DBool(__mmask16 simd) : simdInternal_(simd) {}

    __mmask16 simdInternal_;
};
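
// Aligned load/store of exactly four doubles; the 32-byte alignment
// asserted below corresponds to the width of four packed doubles.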

static inline Simd4Double gmx_simdcall load4(const double* m)
{
    assert(size_t(m) % 32 == 0);
    return { _mm512_mask_extload_pd(_mm512_undefined_pd(), _mm512_int2mask(0xF), m,
                                    _MM_UPCONV_PD_NONE, _MM_BROADCAST_4X8, _MM_HINT_NONE) };
}

static inline void gmx_simdcall store4(double* m, Simd4Double a)
{
    assert(size_t(m) % 32 == 0);
    _mm512_mask_packstorelo_pd(m, _mm512_int2mask(0xF), a.simdInternal_);
}

static inline Simd4Double gmx_simdcall load4U(const double* m)
{
    return { _mm512_mask_loadunpackhi_pd(
            _mm512_mask_loadunpacklo_pd(_mm512_undefined_pd(), _mm512_int2mask(0xF), m),
            _mm512_int2mask(0xF), m + 8) };
}

static inline void gmx_simdcall store4U(double* m, Simd4Double a)
{
    _mm512_mask_packstorelo_pd(m, _mm512_int2mask(0xF), a.simdInternal_);
    _mm512_mask_packstorehi_pd(m + 8, _mm512_int2mask(0xF), a.simdInternal_);
}

static inline Simd4Double gmx_simdcall simd4SetZeroD()
{
    return { _mm512_setzero_pd() };
}
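
// There are no bitwise operations on double-precision registers on MIC, so
// the logical operations below are performed as 32-bit integer operations
// on the low 256 bits (mask 0x00FF) of the register.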

static inline Simd4Double gmx_simdcall operator&(Simd4Double a, Simd4Double b)
{
    return { _mm512_castsi512_pd(_mm512_mask_and_epi32(
            _mm512_undefined_epi32(), _mm512_int2mask(0x00FF), _mm512_castpd_si512(a.simdInternal_),
            _mm512_castpd_si512(b.simdInternal_))) };
}

static inline Simd4Double gmx_simdcall andNot(Simd4Double a, Simd4Double b)
{
    return { _mm512_castsi512_pd(_mm512_mask_andnot_epi32(
            _mm512_undefined_epi32(), _mm512_int2mask(0x00FF), _mm512_castpd_si512(a.simdInternal_),
            _mm512_castpd_si512(b.simdInternal_))) };
}

static inline Simd4Double gmx_simdcall operator|(Simd4Double a, Simd4Double b)
{
    return { _mm512_castsi512_pd(_mm512_mask_or_epi32(
            _mm512_undefined_epi32(), _mm512_int2mask(0x00FF), _mm512_castpd_si512(a.simdInternal_),
            _mm512_castpd_si512(b.simdInternal_))) };
}

static inline Simd4Double gmx_simdcall operator^(Simd4Double a, Simd4Double b)
{
    return { _mm512_castsi512_pd(_mm512_mask_xor_epi32(
            _mm512_undefined_epi32(), _mm512_int2mask(0x00FF), _mm512_castpd_si512(a.simdInternal_),
            _mm512_castpd_si512(b.simdInternal_))) };
}

static inline Simd4Double gmx_simdcall operator+(Simd4Double a, Simd4Double b)
{
    return { _mm512_mask_add_pd(_mm512_undefined_pd(), _mm512_int2mask(0xF), a.simdInternal_,
                                b.simdInternal_) };
}

static inline Simd4Double gmx_simdcall operator-(Simd4Double a, Simd4Double b)
{
    return { _mm512_mask_sub_pd(_mm512_undefined_pd(), _mm512_int2mask(0xF), a.simdInternal_,
                                b.simdInternal_) };
}

static inline Simd4Double gmx_simdcall operator-(Simd4Double x)
{
    return { _mm512_mask_addn_pd(_mm512_undefined_pd(), _mm512_int2mask(0xF), x.simdInternal_,
                                 _mm512_setzero_pd()) };
}

static inline Simd4Double gmx_simdcall operator*(Simd4Double a, Simd4Double b)
{
    return { _mm512_mask_mul_pd(_mm512_undefined_pd(), _mm512_int2mask(0xF), a.simdInternal_,
                                b.simdInternal_) };
}
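
// Fused multiply-add variants: fma = a*b+c, fms = a*b-c, fnma = -a*b+c,
// fnms = -a*b-c.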

static inline Simd4Double gmx_simdcall fma(Simd4Double a, Simd4Double b, Simd4Double c)
{
    return { _mm512_mask_fmadd_pd(a.simdInternal_, _mm512_int2mask(0xF), b.simdInternal_,
                                  c.simdInternal_) };
}

static inline Simd4Double gmx_simdcall fms(Simd4Double a, Simd4Double b, Simd4Double c)
{
    return { _mm512_mask_fmsub_pd(a.simdInternal_, _mm512_int2mask(0xF), b.simdInternal_,
                                  c.simdInternal_) };
}

static inline Simd4Double gmx_simdcall fnma(Simd4Double a, Simd4Double b, Simd4Double c)
{
    return { _mm512_mask_fnmadd_pd(a.simdInternal_, _mm512_int2mask(0xF), b.simdInternal_,
                                   c.simdInternal_) };
}

static inline Simd4Double gmx_simdcall fnms(Simd4Double a, Simd4Double b, Simd4Double c)
{
    return { _mm512_mask_fnmsub_pd(a.simdInternal_, _mm512_int2mask(0xF), b.simdInternal_,
                                   c.simdInternal_) };
}
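
// There is no double-precision rsqrt instruction on MIC; convert to single
// precision, use the 23-bit reciprocal square root approximation, and
// convert back.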

static inline Simd4Double gmx_simdcall rsqrt(Simd4Double x)
{
    return { _mm512_mask_cvtpslo_pd(
            _mm512_undefined_pd(), _mm512_int2mask(0xF),
            _mm512_mask_rsqrt23_ps(_mm512_undefined_ps(), _mm512_int2mask(0xF),
                                   _mm512_mask_cvtpd_pslo(_mm512_undefined_ps(),
                                                          _mm512_int2mask(0xF), x.simdInternal_))) };
}

static inline Simd4Double gmx_simdcall abs(Simd4Double x)
{
    return { _mm512_castsi512_pd(_mm512_mask_andnot_epi32(
            _mm512_undefined_epi32(), _mm512_int2mask(0x00FF),
            _mm512_castpd_si512(_mm512_set1_pd(GMX_DOUBLE_NEGZERO)),
            _mm512_castpd_si512(x.simdInternal_))) };
}

static inline Simd4Double gmx_simdcall max(Simd4Double a, Simd4Double b)
{
    return { _mm512_mask_gmax_pd(_mm512_undefined_pd(), _mm512_int2mask(0xF), a.simdInternal_,
                                 b.simdInternal_) };
}

static inline Simd4Double gmx_simdcall min(Simd4Double a, Simd4Double b)
{
    return { _mm512_mask_gmin_pd(_mm512_undefined_pd(), _mm512_int2mask(0xF), a.simdInternal_,
                                 b.simdInternal_) };
}
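
// round() rounds to the nearest integer value; trunc() rounds toward zero.
// Both use the MIC fixed-point rounding intrinsic without exponent adjustment.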

static inline Simd4Double gmx_simdcall round(Simd4Double x)
{
    return { _mm512_mask_roundfxpnt_adjust_pd(_mm512_undefined_pd(), _mm512_int2mask(0xF),
                                              x.simdInternal_, _MM_FROUND_TO_NEAREST_INT,
                                              _MM_EXPADJ_NONE) };
}

static inline Simd4Double gmx_simdcall trunc(Simd4Double x)
{
    return { _mm512_mask_roundfxpnt_adjust_pd(_mm512_undefined_pd(), _mm512_int2mask(0xF),
                                              x.simdInternal_, _MM_FROUND_TO_ZERO,
                                              _MM_EXPADJ_NONE) };
}
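
// Note that dotProduct only uses the first three elements (mask 0x7), i.e.
// it computes the dot product of three-dimensional vectors.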

static inline double gmx_simdcall dotProduct(Simd4Double a, Simd4Double b)
{
    return _mm512_mask_reduce_add_pd(_mm512_int2mask(7),
                                     _mm512_mask_mul_pd(_mm512_undefined_pd(), _mm512_int2mask(7),
                                                        a.simdInternal_, b.simdInternal_));
}
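
// 4x4 transpose: combine v0/v1 and v2/v3 pairwise into full 512-bit
// registers, interleave the 64-bit lanes, and split the results back into
// the four outputs; v2 and v3 are extracted from the high halves of the
// already-transposed v0 and v1.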

static inline void gmx_simdcall transpose(Simd4Double* v0, Simd4Double* v1, Simd4Double* v2, Simd4Double* v3)
{
    __m512i t0 = _mm512_mask_permute4f128_epi32(_mm512_castpd_si512(v0->simdInternal_), 0xFF00,
                                                _mm512_castpd_si512(v1->simdInternal_), _MM_PERM_BABA);
    __m512i t1 = _mm512_mask_permute4f128_epi32(_mm512_castpd_si512(v2->simdInternal_), 0xFF00,
                                                _mm512_castpd_si512(v3->simdInternal_), _MM_PERM_BABA);

    t0 = _mm512_permutevar_epi32(
            _mm512_set_epi32(15, 14, 7, 6, 13, 12, 5, 4, 11, 10, 3, 2, 9, 8, 1, 0), t0);
    t1 = _mm512_permutevar_epi32(
            _mm512_set_epi32(15, 14, 7, 6, 13, 12, 5, 4, 11, 10, 3, 2, 9, 8, 1, 0), t1);

    v0->simdInternal_ = _mm512_mask_swizzle_pd(_mm512_castsi512_pd(t0), _mm512_int2mask(0xCC),
                                               _mm512_castsi512_pd(t1), _MM_SWIZ_REG_BADC);
    v1->simdInternal_ = _mm512_mask_swizzle_pd(_mm512_castsi512_pd(t1), _mm512_int2mask(0x33),
                                               _mm512_castsi512_pd(t0), _MM_SWIZ_REG_BADC);
    v2->simdInternal_ = _mm512_castps_pd(
            _mm512_permute4f128_ps(_mm512_castpd_ps(v0->simdInternal_), _MM_PERM_DCDC));
    v3->simdInternal_ = _mm512_castps_pd(
            _mm512_permute4f128_ps(_mm512_castpd_ps(v1->simdInternal_), _MM_PERM_DCDC));
}

// Picky, picky, picky:
// icc-16 complains about "Illegal value of immediate argument to intrinsic"
// unless we use
// 1) Ordered-quiet for ==
// 2) Unordered-quiet for !=
// 3) Ordered-signaling for < and <=

static inline Simd4DBool gmx_simdcall operator==(Simd4Double a, Simd4Double b)
{
    return { _mm512_mask_cmp_pd_mask(_mm512_int2mask(0xF), a.simdInternal_, b.simdInternal_,
                                     _CMP_EQ_OQ) };
}

static inline Simd4DBool gmx_simdcall operator!=(Simd4Double a, Simd4Double b)
{
    return { _mm512_mask_cmp_pd_mask(_mm512_int2mask(0xF), a.simdInternal_, b.simdInternal_,
                                     _CMP_NEQ_UQ) };
}

static inline Simd4DBool gmx_simdcall operator<(Simd4Double a, Simd4Double b)
{
    return { _mm512_mask_cmp_pd_mask(_mm512_int2mask(0xF), a.simdInternal_, b.simdInternal_,
                                     _CMP_LT_OS) };
}

static inline Simd4DBool gmx_simdcall operator<=(Simd4Double a, Simd4Double b)
{
    return { _mm512_mask_cmp_pd_mask(_mm512_int2mask(0xF), a.simdInternal_, b.simdInternal_,
                                     _CMP_LE_OS) };
}

static inline Simd4DBool gmx_simdcall operator&&(Simd4DBool a, Simd4DBool b)
{
    return { _mm512_kand(a.simdInternal_, b.simdInternal_) };
}

static inline Simd4DBool gmx_simdcall operator||(Simd4DBool a, Simd4DBool b)
{
    return { _mm512_kor(a.simdInternal_, b.simdInternal_) };
}

static inline bool gmx_simdcall anyTrue(Simd4DBool a)
{
    return (_mm512_mask2int(a.simdInternal_) & 0xF) != 0;
}

static inline Simd4Double gmx_simdcall selectByMask(Simd4Double a, Simd4DBool m)
{
    return { _mm512_mask_mov_pd(_mm512_setzero_pd(), m.simdInternal_, a.simdInternal_) };
}

static inline Simd4Double gmx_simdcall selectByNotMask(Simd4Double a, Simd4DBool m)
{
    return { _mm512_mask_mov_pd(_mm512_setzero_pd(), _mm512_knot(m.simdInternal_), a.simdInternal_) };
}

static inline Simd4Double gmx_simdcall blend(Simd4Double a, Simd4Double b, Simd4DBool sel)
{
    return { _mm512_mask_blend_pd(sel.simdInternal_, a.simdInternal_, b.simdInternal_) };
}

static inline double gmx_simdcall reduce(Simd4Double a)
{
    return _mm512_mask_reduce_add_pd(_mm512_int2mask(0xF), a.simdInternal_);
}
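
// A minimal usage sketch (illustration only, not part of this header's API;
// assumes code compiled for MIC that includes this header via the GROMACS
// SIMD module):
//
//     double mem[4] = { 1.0, 2.0, 3.0, 4.0 };
//     Simd4Double v = load4U(mem);    // unaligned load of four doubles
//     double      s = reduce(v + v);  // horizontal sum: 20.0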

} // namespace gmx

#endif // GMX_SIMD_IMPL_X86_MIC_SIMD4_DOUBLE_H