/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013, by the GROMACS development team, led by
 * David van der Spoel, Berk Hess, Erik Lindahl, and including many
 * others, as listed in the AUTHORS file in the top-level source
 * directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#ifndef _gmx_math_x86_avx_128_fma_double_h_
#define _gmx_math_x86_avx_128_fma_double_h_

#include <immintrin.h> /* AVX */
#ifdef HAVE_X86INTRIN_H
#include <x86intrin.h> /* FMA */
#endif
#ifdef HAVE_INTRIN_H
#include <intrin.h> /* FMA MSVC */
#endif

#include <math.h>

#include "gmx_x86_avx_128_fma.h"


#ifndef M_PI
#  define M_PI 3.14159265358979323846264338327950288
#endif
/************************
 *                      *
 * Simple math routines *
 *                      *
 ************************/
/* 1.0/sqrt(x) */
static gmx_inline __m128d
gmx_mm_invsqrt_pd(__m128d x)
{
    const __m128d half  = _mm_set1_pd(0.5);
    const __m128d three = _mm_set1_pd(3.0);

    /* Lookup instruction only exists in single precision, convert back and forth... */
    __m128d lu = _mm_cvtps_pd(_mm_rsqrt_ps( _mm_cvtpd_ps(x)));

    /* Two Newton-Raphson iterations, lu <- 0.5*lu*(3 - x*lu*lu), for double precision */
    lu = _mm_mul_pd(_mm_mul_pd(half, lu), _mm_nmacc_pd(_mm_mul_pd(lu, lu), x, three));
    return _mm_mul_pd(_mm_mul_pd(half, lu), _mm_nmacc_pd(_mm_mul_pd(lu, lu), x, three));
}
/* 1.0/sqrt(x), done for a pair of arguments to improve throughput */
static void
gmx_mm_invsqrt_pair_pd(__m128d x1, __m128d x2, __m128d *invsqrt1, __m128d *invsqrt2)
{
    const __m128d half   = _mm_set1_pd(0.5);
    const __m128d three  = _mm_set1_pd(3.0);
    const __m128  halff  = _mm_set1_ps(0.5f);
    const __m128  threef = _mm_set1_ps(3.0f);

    __m128  xf, luf;
    __m128d lu1, lu2;

    /* Do first N-R step in float for 2x throughput */
    xf  = _mm_shuffle_ps(_mm_cvtpd_ps(x1), _mm_cvtpd_ps(x2), _MM_SHUFFLE(1, 0, 1, 0));
    luf = _mm_rsqrt_ps(xf);

    luf = _mm_mul_ps(_mm_mul_ps(halff, luf), _mm_nmacc_ps(_mm_mul_ps(luf, luf), xf, threef));

    /* Convert back to double precision and do one more N-R step on each register */
    lu2 = _mm_cvtps_pd(_mm_shuffle_ps(luf, luf, _MM_SHUFFLE(3, 2, 3, 2)));
    lu1 = _mm_cvtps_pd(luf);

    *invsqrt1 = _mm_mul_pd(_mm_mul_pd(half, lu1), _mm_nmacc_pd(_mm_mul_pd(lu1, lu1), x1, three));
    *invsqrt2 = _mm_mul_pd(_mm_mul_pd(half, lu2), _mm_nmacc_pd(_mm_mul_pd(lu2, lu2), x2, three));
}
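/* Illustrative usage sketch (not part of the original header): the pair routine
 * above shares the single-precision lookup and first N-R step between two
 * registers, so feeding it four squared distances at once is cheaper than two
 * separate gmx_mm_invsqrt_pd() calls. The function name and array interface
 * here are hypothetical example code.
 */
static gmx_inline void
gmx_mm_example_invsqrt_pair_usage(const double r2[4], double rinv[4])
{
    __m128d x1, x2, inv1, inv2;

    x1 = _mm_loadu_pd(r2);     /* r2[0], r2[1] */
    x2 = _mm_loadu_pd(r2 + 2); /* r2[2], r2[3] */

    /* One call performs the shared float lookup/N-R step for both registers */
    gmx_mm_invsqrt_pair_pd(x1, x2, &inv1, &inv2);

    _mm_storeu_pd(rinv,     inv1);
    _mm_storeu_pd(rinv + 2, inv2);
}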
/* sqrt(x) - Do NOT use this (but rather invsqrt) if you actually need 1.0/sqrt(x) */
static gmx_inline __m128d
gmx_mm_sqrt_pd(__m128d x)
{
    __m128d mask;
    __m128d res;

    /* sqrt(x) = x * invsqrt(x); zero the invsqrt result where x==0,
     * so sqrt(0) yields 0 instead of 0*inf = NaN.
     */
    mask = _mm_cmpeq_pd(x, _mm_setzero_pd());
    res  = _mm_andnot_pd(mask, gmx_mm_invsqrt_pd(x));

    res  = _mm_mul_pd(x, res);

    return res;
}
/* 1.0/x */
static gmx_inline __m128d
gmx_mm_inv_pd(__m128d x)
{
    const __m128d two = _mm_set1_pd(2.0);

    /* Lookup instruction only exists in single precision, convert back and forth... */
    __m128d lu = _mm_cvtps_pd(_mm_rcp_ps( _mm_cvtpd_ps(x)));

    /* Perform two N-R steps for double precision */
    lu = _mm_mul_pd(lu, _mm_nmacc_pd(lu, x, two));
    return _mm_mul_pd(lu, _mm_nmacc_pd(lu, x, two));
}
static gmx_inline __m128d
gmx_mm_abs_pd(__m128d x)
{
    const __m128d signmask = gmx_mm_castsi128_pd( _mm_set_epi32(0x7FFFFFFF, 0xFFFFFFFF, 0x7FFFFFFF, 0xFFFFFFFF) );

    return _mm_and_pd(x, signmask);
}
/* 2^x function.
 *
 * The fractional part is approximated on [-0.5,0.5] by a rational Padé
 * approximation, 1+2*P(x^2)/(Q(x^2)-P(x^2)), according to the same algorithm
 * as used in the Cephes/netlib math routines, while the integer part is
 * assembled directly into the exponent field of the IEEE754 result.
 */
static __m128d
gmx_mm_exp2_pd(__m128d x)
{
    /* Lower bound: We do not allow numbers that would lead to an IEEE fp representation exponent smaller than -1022. */
    const __m128d arglimit = _mm_set1_pd(1022.0);
    const __m128i expbase  = _mm_set1_epi32(1023);

    const __m128d P2       = _mm_set1_pd(2.30933477057345225087e-2);
    const __m128d P1       = _mm_set1_pd(2.02020656693165307700e1);
    const __m128d P0       = _mm_set1_pd(1.51390680115615096133e3);
    /* Q2 == 1.0 */
    const __m128d Q1       = _mm_set1_pd(2.33184211722314911771e2);
    const __m128d Q0       = _mm_set1_pd(4.36821166879210612817e3);
    const __m128d one      = _mm_set1_pd(1.0);
    const __m128d two      = _mm_set1_pd(2.0);

    __m128d       valuemask;
    __m128i       iexppart;
    __m128d       fexppart;
    __m128d       intpart;
    __m128d       z, z2;
    __m128d       PolyP, PolyQ;

    iexppart = _mm_cvtpd_epi32(x);
    intpart  = _mm_round_pd(x, _MM_FROUND_TO_NEAREST_INT);

    /* The two lowest elements of iexppart now contain 32-bit numbers with a correctly biased exponent.
     * To be able to shift it into the exponent for a double precision number we first need to
     * shuffle so that the lower half contains the first element, and the upper half the second.
     * This should really be done as a zero-extension, but since the next instructions will shift
     * the registers left by 52 bits it doesn't matter what we put there - it will be shifted out.
     * (thus we just use element 2 from iexppart).
     */
    iexppart  = _mm_shuffle_epi32(iexppart, _MM_SHUFFLE(2, 1, 2, 0));

    /* Do the shift operation on the 64-bit registers */
    iexppart  = _mm_add_epi32(iexppart, expbase);
    iexppart  = _mm_slli_epi64(iexppart, 52);

    valuemask = _mm_cmpge_pd(arglimit, gmx_mm_abs_pd(x));
    fexppart  = _mm_and_pd(valuemask, gmx_mm_castsi128_pd(iexppart));

    z         = _mm_sub_pd(x, intpart);
    z2        = _mm_mul_pd(z, z);

    PolyP     = _mm_macc_pd(P2, z2, P1);
    PolyQ     = _mm_add_pd(z2, Q1);
    PolyP     = _mm_macc_pd(PolyP, z2, P0);
    PolyQ     = _mm_macc_pd(PolyQ, z2, Q0);
    PolyP     = _mm_mul_pd(PolyP, z);

    z         = _mm_mul_pd(PolyP, gmx_mm_inv_pd(_mm_sub_pd(PolyQ, PolyP)));
    z         = _mm_macc_pd(two, z, one);

    z         = _mm_mul_pd(z, fexppart);

    return z;
}
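/* Illustrative scalar sketch (not part of the original header) of the exponent
 * trick used above: 2^n for integer n is built by writing the biased exponent
 * n+1023 into bits 52..62 of an IEEE754 double, which is exactly what the
 * shuffle/add/shift sequence on iexppart does for two elements at a time.
 * The helper name is hypothetical example code; valid for -1022 <= n <= 1023.
 */
static gmx_inline double
gmx_mm_example_exp2_intpart(int n)
{
    union { unsigned long long u; double d; } conv;

    conv.u = ((unsigned long long)(n + 1023)) << 52; /* biased exponent, zero mantissa */
    return conv.d;                                   /* equals 2^n */
}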
/* Exponential function. This could be calculated from 2^x as Exp(x)=2^(y), where y=log2(e)*x,
 * but there would then be a small rounding error since we lose some precision due to the
 * multiplication. This would then be magnified a lot by the exponential.
 *
 * Instead, we calculate the fractional part directly as a Padé approximation of
 * Exp(z) on [-0.5,0.5]. We use extended precision arithmetic to calculate the fraction
 * remaining after 2^y, which avoids the precision loss.
 */
static __m128d
gmx_mm_exp_pd(__m128d exparg)
{
    const __m128d argscale = _mm_set1_pd(1.4426950408889634073599);
    /* Lower bound: We do not allow numbers that would lead to an IEEE fp representation exponent smaller than -1022. */
    const __m128d arglimit = _mm_set1_pd(1022.0);
    const __m128i expbase  = _mm_set1_epi32(1023);

    const __m128d invargscale0  = _mm_set1_pd(6.93145751953125e-1);
    const __m128d invargscale1  = _mm_set1_pd(1.42860682030941723212e-6);

    const __m128d P2       = _mm_set1_pd(1.26177193074810590878e-4);
    const __m128d P1       = _mm_set1_pd(3.02994407707441961300e-2);
    /* P0 == 1.0 */
    const __m128d Q3       = _mm_set1_pd(3.00198505138664455042E-6);
    const __m128d Q2       = _mm_set1_pd(2.52448340349684104192E-3);
    const __m128d Q1       = _mm_set1_pd(2.27265548208155028766E-1);
    /* Q0 == 2.0 */
    const __m128d one      = _mm_set1_pd(1.0);
    const __m128d two      = _mm_set1_pd(2.0);

    __m128d       valuemask;
    __m128i       iexppart;
    __m128d       fexppart;
    __m128d       intpart;
    __m128d       x, z, z2;
    __m128d       PolyP, PolyQ;

    x         = _mm_mul_pd(exparg, argscale);

    iexppart  = _mm_cvtpd_epi32(x);
    intpart   = _mm_round_pd(x, _MM_FROUND_TO_NEAREST_INT);

    /* The two lowest elements of iexppart now contain 32-bit numbers with a correctly biased exponent.
     * To be able to shift it into the exponent for a double precision number we first need to
     * shuffle so that the lower half contains the first element, and the upper half the second.
     * This should really be done as a zero-extension, but since the next instructions will shift
     * the registers left by 52 bits it doesn't matter what we put there - it will be shifted out.
     * (thus we just use element 2 from iexppart).
     */
    iexppart  = _mm_shuffle_epi32(iexppart, _MM_SHUFFLE(2, 1, 2, 0));

    /* Do the shift operation on the 64-bit registers */
    iexppart  = _mm_add_epi32(iexppart, expbase);
    iexppart  = _mm_slli_epi64(iexppart, 52);

    valuemask = _mm_cmpge_pd(arglimit, gmx_mm_abs_pd(x));
    fexppart  = _mm_and_pd(valuemask, gmx_mm_castsi128_pd(iexppart));

    /* Extended precision arithmetic: subtract intpart*ln(2) in two parts */
    z         = _mm_sub_pd(exparg, _mm_mul_pd(invargscale0, intpart));
    z         = _mm_sub_pd(z, _mm_mul_pd(invargscale1, intpart));

    z2        = _mm_mul_pd(z, z);

    PolyQ     = _mm_macc_pd(Q3, z2, Q2);
    PolyP     = _mm_macc_pd(P2, z2, P1);
    PolyQ     = _mm_macc_pd(PolyQ, z2, Q1);

    PolyP     = _mm_macc_pd(PolyP, z2, one);
    PolyQ     = _mm_macc_pd(PolyQ, z2, two);

    PolyP     = _mm_mul_pd(PolyP, z);

    z         = _mm_mul_pd(PolyP, gmx_mm_inv_pd(_mm_sub_pd(PolyQ, PolyP)));
    z         = _mm_macc_pd(two, z, one);

    z         = _mm_mul_pd(z, fexppart);

    return z;
}
static __m128d
gmx_mm_log_pd(__m128d x)
{
    /* Same algorithm as cephes library */
    const __m128d expmask    = gmx_mm_castsi128_pd( _mm_set_epi32(0x7FF00000, 0x00000000, 0x7FF00000, 0x00000000) );

    const __m128i expbase_m1 = _mm_set1_epi32(1023-1); /* We want non-IEEE format */

    const __m128d half       = _mm_set1_pd(0.5);
    const __m128d one        = _mm_set1_pd(1.0);
    const __m128d two        = _mm_set1_pd(2.0);
    const __m128d invsq2     = _mm_set1_pd(1.0/sqrt(2.0));

    const __m128d corr1      = _mm_set1_pd(-2.121944400546905827679e-4);
    const __m128d corr2      = _mm_set1_pd(0.693359375);

    const __m128d P5         = _mm_set1_pd(1.01875663804580931796e-4);
    const __m128d P4         = _mm_set1_pd(4.97494994976747001425e-1);
    const __m128d P3         = _mm_set1_pd(4.70579119878881725854e0);
    const __m128d P2         = _mm_set1_pd(1.44989225341610930846e1);
    const __m128d P1         = _mm_set1_pd(1.79368678507819816313e1);
    const __m128d P0         = _mm_set1_pd(7.70838733755885391666e0);

    const __m128d Q4         = _mm_set1_pd(1.12873587189167450590e1);
    const __m128d Q3         = _mm_set1_pd(4.52279145837532221105e1);
    const __m128d Q2         = _mm_set1_pd(8.29875266912776603211e1);
    const __m128d Q1         = _mm_set1_pd(7.11544750618563894466e1);
    const __m128d Q0         = _mm_set1_pd(2.31251620126765340583e1);

    const __m128d R2         = _mm_set1_pd(-7.89580278884799154124e-1);
    const __m128d R1         = _mm_set1_pd(1.63866645699558079767e1);
    const __m128d R0         = _mm_set1_pd(-6.41409952958715622951e1);

    const __m128d S2         = _mm_set1_pd(-3.56722798256324312549E1);
    const __m128d S1         = _mm_set1_pd(3.12093766372244180303E2);
    const __m128d S0         = _mm_set1_pd(-7.69691943550460008604E2);

    __m128d       fexp;
    __m128i       iexp;

    __m128d       mask1, mask2;
    __m128d       corr, t1, t2, q;
    __m128d       zA, yA, xA, zB, yB, xB, z;
    __m128d       polyR, polyS;
    __m128d       polyP1, polyP2, polyQ1, polyQ2;

    /* Separate x into exponent and mantissa, with a mantissa in the range [0.5..1[ (not IEEE754 standard!) */
    fexp   = _mm_and_pd(x, expmask);
    iexp   = gmx_mm_castpd_si128(fexp);
    iexp   = _mm_srli_epi64(iexp, 52);
    iexp   = _mm_sub_epi32(iexp, expbase_m1);
    iexp   = _mm_shuffle_epi32(iexp, _MM_SHUFFLE(1, 1, 2, 0) );
    fexp   = _mm_cvtepi32_pd(iexp);

    x      = _mm_andnot_pd(expmask, x);
    x      = _mm_or_pd(x, one);
    x      = _mm_mul_pd(x, half);

    mask1  = _mm_cmpgt_pd(gmx_mm_abs_pd(fexp), two);
    mask2  = _mm_cmplt_pd(x, invsq2);

    fexp   = _mm_sub_pd(fexp, _mm_and_pd(mask2, one));

    /* If mask1 is set ('A') */
    zA     = _mm_sub_pd(x, half);
    t1     = _mm_blendv_pd( zA, x, mask2 );
    zA     = _mm_sub_pd(t1, half);
    t2     = _mm_blendv_pd( x, zA, mask2 );
    yA     = _mm_mul_pd(half, _mm_add_pd(t2, one));

    xA     = _mm_mul_pd(zA, gmx_mm_inv_pd(yA));
    zA     = _mm_mul_pd(xA, xA);

    /* EVALUATE POLY */
    polyR  = _mm_macc_pd(R2, zA, R1);
    polyR  = _mm_macc_pd(polyR, zA, R0);

    polyS  = _mm_add_pd(zA, S2);
    polyS  = _mm_macc_pd(polyS, zA, S1);
    polyS  = _mm_macc_pd(polyS, zA, S0);

    q      = _mm_mul_pd(polyR, gmx_mm_inv_pd(polyS));
    zA     = _mm_mul_pd(_mm_mul_pd(xA, zA), q);

    zA     = _mm_macc_pd(corr1, fexp, zA);
    zA     = _mm_add_pd(zA, xA);
    zA     = _mm_macc_pd(corr2, fexp, zA);

    /* If mask1 is not set ('B') */
    corr   = _mm_and_pd(mask2, x);
    xB     = _mm_add_pd(x, corr);
    xB     = _mm_sub_pd(xB, one);
    zB     = _mm_mul_pd(xB, xB);

    polyP1 = _mm_macc_pd(P5, zB, P3);
    polyP2 = _mm_macc_pd(P4, zB, P2);
    polyP1 = _mm_macc_pd(polyP1, zB, P1);
    polyP2 = _mm_macc_pd(polyP2, zB, P0);
    polyP1 = _mm_macc_pd(polyP1, xB, polyP2);

    polyQ2 = _mm_macc_pd(Q4, zB, Q2);
    polyQ1 = _mm_add_pd(zB, Q3);
    polyQ1 = _mm_macc_pd(polyQ1, zB, Q1);
    polyQ2 = _mm_macc_pd(polyQ2, zB, Q0);
    polyQ1 = _mm_macc_pd(polyQ1, xB, polyQ2);

    fexp   = _mm_and_pd(fexp, _mm_cmpneq_pd(fexp, _mm_setzero_pd()));

    q      = _mm_mul_pd(polyP1, gmx_mm_inv_pd(polyQ1));
    yB     = _mm_macc_pd(_mm_mul_pd(xB, zB), q, _mm_mul_pd(corr1, fexp));

    yB     = _mm_nmacc_pd(half, zB, yB);
    zB     = _mm_add_pd(xB, yB);
    zB     = _mm_macc_pd(corr2, fexp, zB);

    z      = _mm_blendv_pd( zB, zA, mask1 );

    return z;
}
static __m128d
gmx_mm_erf_pd(__m128d x)
{
    /* Coefficients for minimax approximation of erf(x)=x*(CAoffset + P(x^2)/Q(x^2)) in range [-0.75,0.75] */
    const __m128d CAP4      = _mm_set1_pd(-0.431780540597889301512e-4);
    const __m128d CAP3      = _mm_set1_pd(-0.00578562306260059236059);
    const __m128d CAP2      = _mm_set1_pd(-0.028593586920219752446);
    const __m128d CAP1      = _mm_set1_pd(-0.315924962948621698209);
    const __m128d CAP0      = _mm_set1_pd(0.14952975608477029151);

    const __m128d CAQ5      = _mm_set1_pd(-0.374089300177174709737e-5);
    const __m128d CAQ4      = _mm_set1_pd(0.00015126584532155383535);
    const __m128d CAQ3      = _mm_set1_pd(0.00536692680669480725423);
    const __m128d CAQ2      = _mm_set1_pd(0.0668686825594046122636);
    const __m128d CAQ1      = _mm_set1_pd(0.402604990869284362773);
    /* CAQ0 == 1.0 */
    const __m128d CAoffset  = _mm_set1_pd(0.9788494110107421875);

    /* Coefficients for minimax approximation of erfc(x)=exp(-x^2)*x*(P(x-1)/Q(x-1)) in range [1.0,4.5] */
    const __m128d CBP6      = _mm_set1_pd(2.49650423685462752497647637088e-10);
    const __m128d CBP5      = _mm_set1_pd(0.00119770193298159629350136085658);
    const __m128d CBP4      = _mm_set1_pd(0.0164944422378370965881008942733);
    const __m128d CBP3      = _mm_set1_pd(0.0984581468691775932063932439252);
    const __m128d CBP2      = _mm_set1_pd(0.317364595806937763843589437418);
    const __m128d CBP1      = _mm_set1_pd(0.554167062641455850932670067075);
    const __m128d CBP0      = _mm_set1_pd(0.427583576155807163756925301060);
    const __m128d CBQ7      = _mm_set1_pd(0.00212288829699830145976198384930);
    const __m128d CBQ6      = _mm_set1_pd(0.0334810979522685300554606393425);
    const __m128d CBQ5      = _mm_set1_pd(0.2361713785181450957579508850717);
    const __m128d CBQ4      = _mm_set1_pd(0.955364736493055670530981883072);
    const __m128d CBQ3      = _mm_set1_pd(2.36815675631420037315349279199);
    const __m128d CBQ2      = _mm_set1_pd(3.55261649184083035537184223542);
    const __m128d CBQ1      = _mm_set1_pd(2.93501136050160872574376997993);
    /* CBQ0 == 1.0 */

    /* Coefficients for minimax approximation of erfc(x)=exp(-x^2)/x*(P(1/x)/Q(1/x)) in range [4.5,inf] */
    const __m128d CCP6      = _mm_set1_pd(-2.8175401114513378771);
    const __m128d CCP5      = _mm_set1_pd(-3.22729451764143718517);
    const __m128d CCP4      = _mm_set1_pd(-2.5518551727311523996);
    const __m128d CCP3      = _mm_set1_pd(-0.687717681153649930619);
    const __m128d CCP2      = _mm_set1_pd(-0.212652252872804219852);
    const __m128d CCP1      = _mm_set1_pd(0.0175389834052493308818);
    const __m128d CCP0      = _mm_set1_pd(0.00628057170626964891937);

    const __m128d CCQ6      = _mm_set1_pd(5.48409182238641741584);
    const __m128d CCQ5      = _mm_set1_pd(13.5064170191802889145);
    const __m128d CCQ4      = _mm_set1_pd(22.9367376522880577224);
    const __m128d CCQ3      = _mm_set1_pd(15.930646027911794143);
    const __m128d CCQ2      = _mm_set1_pd(11.0567237927800161565);
    const __m128d CCQ1      = _mm_set1_pd(2.79257750980575282228);
    /* CCQ0 == 1.0 */
    const __m128d CCoffset  = _mm_set1_pd(0.5579090118408203125);

    const __m128d one       = _mm_set1_pd(1.0);
    const __m128d two       = _mm_set1_pd(2.0);

    const __m128d signbit   = gmx_mm_castsi128_pd( _mm_set_epi32(0x80000000, 0x00000000, 0x80000000, 0x00000000) );

    __m128d       xabs, x2, x4, t, t2, w, w2;
    __m128d       PolyAP0, PolyAP1, PolyAQ0, PolyAQ1;
    __m128d       PolyBP0, PolyBP1, PolyBQ0, PolyBQ1;
    __m128d       PolyCP0, PolyCP1, PolyCQ0, PolyCQ1;
    __m128d       res_erf, res_erfcB, res_erfcC, res_erfc, res;
    __m128d       mask, expmx2;

    /* Calculate erf() */
    xabs     = gmx_mm_abs_pd(x);
    x2       = _mm_mul_pd(x, x);
    x4       = _mm_mul_pd(x2, x2);

    PolyAP0  = _mm_macc_pd(CAP4, x4, CAP2);
    PolyAP1  = _mm_macc_pd(CAP3, x4, CAP1);
    PolyAP0  = _mm_macc_pd(PolyAP0, x4, CAP0);
    PolyAP0  = _mm_macc_pd(PolyAP1, x2, PolyAP0);

    PolyAQ1  = _mm_macc_pd(CAQ5, x4, CAQ3);
    PolyAQ0  = _mm_macc_pd(CAQ4, x4, CAQ2);
    PolyAQ1  = _mm_macc_pd(PolyAQ1, x4, CAQ1);
    PolyAQ0  = _mm_macc_pd(PolyAQ0, x4, one);
    PolyAQ0  = _mm_macc_pd(PolyAQ1, x2, PolyAQ0);

    res_erf  = _mm_macc_pd(PolyAP0, gmx_mm_inv_pd(PolyAQ0), CAoffset);
    res_erf  = _mm_mul_pd(x, res_erf);

    /* Calculate erfc() in range [1,4.5] */
    t        = _mm_sub_pd(xabs, one);
    t2       = _mm_mul_pd(t, t);

    PolyBP0  = _mm_macc_pd(CBP6, t2, CBP4);
    PolyBP1  = _mm_macc_pd(CBP5, t2, CBP3);
    PolyBP0  = _mm_macc_pd(PolyBP0, t2, CBP2);
    PolyBP1  = _mm_macc_pd(PolyBP1, t2, CBP1);
    PolyBP0  = _mm_macc_pd(PolyBP0, t2, CBP0);
    PolyBP0  = _mm_macc_pd(PolyBP1, t, PolyBP0);

    PolyBQ1  = _mm_macc_pd(CBQ7, t2, CBQ5);
    PolyBQ0  = _mm_macc_pd(CBQ6, t2, CBQ4);
    PolyBQ1  = _mm_macc_pd(PolyBQ1, t2, CBQ3);
    PolyBQ0  = _mm_macc_pd(PolyBQ0, t2, CBQ2);
    PolyBQ1  = _mm_macc_pd(PolyBQ1, t2, CBQ1);
    PolyBQ0  = _mm_macc_pd(PolyBQ0, t2, one);
    PolyBQ0  = _mm_macc_pd(PolyBQ1, t, PolyBQ0);

    res_erfcB = _mm_mul_pd(PolyBP0, gmx_mm_inv_pd(PolyBQ0));

    res_erfcB = _mm_mul_pd(res_erfcB, xabs);

    /* Calculate erfc() in range [4.5,inf] */
    w        = gmx_mm_inv_pd(xabs);
    w2       = _mm_mul_pd(w, w);

    PolyCP0  = _mm_macc_pd(CCP6, w2, CCP4);
    PolyCP1  = _mm_macc_pd(CCP5, w2, CCP3);
    PolyCP0  = _mm_macc_pd(PolyCP0, w2, CCP2);
    PolyCP1  = _mm_macc_pd(PolyCP1, w2, CCP1);
    PolyCP0  = _mm_macc_pd(PolyCP0, w2, CCP0);
    PolyCP0  = _mm_macc_pd(PolyCP1, w, PolyCP0);

    PolyCQ0  = _mm_macc_pd(CCQ6, w2, CCQ4);
    PolyCQ1  = _mm_macc_pd(CCQ5, w2, CCQ3);
    PolyCQ0  = _mm_macc_pd(PolyCQ0, w2, CCQ2);
    PolyCQ1  = _mm_macc_pd(PolyCQ1, w2, CCQ1);
    PolyCQ0  = _mm_macc_pd(PolyCQ0, w2, one);
    PolyCQ0  = _mm_macc_pd(PolyCQ1, w, PolyCQ0);

    expmx2   = gmx_mm_exp_pd( _mm_or_pd(signbit, x2) );

    res_erfcC = _mm_macc_pd(PolyCP0, gmx_mm_inv_pd(PolyCQ0), CCoffset);
    res_erfcC = _mm_mul_pd(res_erfcC, w);

    mask     = _mm_cmpgt_pd(xabs, _mm_set1_pd(4.5));
    res_erfc = _mm_blendv_pd(res_erfcB, res_erfcC, mask);

    res_erfc = _mm_mul_pd(res_erfc, expmx2);

    /* erfc(x<0) = 2-erfc(|x|) */
    mask     = _mm_cmplt_pd(x, _mm_setzero_pd());
    res_erfc = _mm_blendv_pd(res_erfc, _mm_sub_pd(two, res_erfc), mask);

    /* Select erf() or erfc() */
    mask = _mm_cmplt_pd(xabs, one);
    res  = _mm_blendv_pd(_mm_sub_pd(one, res_erfc), res_erf, mask);

    return res;
}
static __m128d
gmx_mm_erfc_pd(__m128d x)
{
    /* Coefficients for minimax approximation of erf(x)=x*(CAoffset + P(x^2)/Q(x^2)) in range [-0.75,0.75] */
    const __m128d CAP4      = _mm_set1_pd(-0.431780540597889301512e-4);
    const __m128d CAP3      = _mm_set1_pd(-0.00578562306260059236059);
    const __m128d CAP2      = _mm_set1_pd(-0.028593586920219752446);
    const __m128d CAP1      = _mm_set1_pd(-0.315924962948621698209);
    const __m128d CAP0      = _mm_set1_pd(0.14952975608477029151);

    const __m128d CAQ5      = _mm_set1_pd(-0.374089300177174709737e-5);
    const __m128d CAQ4      = _mm_set1_pd(0.00015126584532155383535);
    const __m128d CAQ3      = _mm_set1_pd(0.00536692680669480725423);
    const __m128d CAQ2      = _mm_set1_pd(0.0668686825594046122636);
    const __m128d CAQ1      = _mm_set1_pd(0.402604990869284362773);
    /* CAQ0 == 1.0 */
    const __m128d CAoffset  = _mm_set1_pd(0.9788494110107421875);

    /* Coefficients for minimax approximation of erfc(x)=exp(-x^2)*x*(P(x-1)/Q(x-1)) in range [1.0,4.5] */
    const __m128d CBP6      = _mm_set1_pd(2.49650423685462752497647637088e-10);
    const __m128d CBP5      = _mm_set1_pd(0.00119770193298159629350136085658);
    const __m128d CBP4      = _mm_set1_pd(0.0164944422378370965881008942733);
    const __m128d CBP3      = _mm_set1_pd(0.0984581468691775932063932439252);
    const __m128d CBP2      = _mm_set1_pd(0.317364595806937763843589437418);
    const __m128d CBP1      = _mm_set1_pd(0.554167062641455850932670067075);
    const __m128d CBP0      = _mm_set1_pd(0.427583576155807163756925301060);
    const __m128d CBQ7      = _mm_set1_pd(0.00212288829699830145976198384930);
    const __m128d CBQ6      = _mm_set1_pd(0.0334810979522685300554606393425);
    const __m128d CBQ5      = _mm_set1_pd(0.2361713785181450957579508850717);
    const __m128d CBQ4      = _mm_set1_pd(0.955364736493055670530981883072);
    const __m128d CBQ3      = _mm_set1_pd(2.36815675631420037315349279199);
    const __m128d CBQ2      = _mm_set1_pd(3.55261649184083035537184223542);
    const __m128d CBQ1      = _mm_set1_pd(2.93501136050160872574376997993);
    /* CBQ0 == 1.0 */

    /* Coefficients for minimax approximation of erfc(x)=exp(-x^2)/x*(P(1/x)/Q(1/x)) in range [4.5,inf] */
    const __m128d CCP6      = _mm_set1_pd(-2.8175401114513378771);
    const __m128d CCP5      = _mm_set1_pd(-3.22729451764143718517);
    const __m128d CCP4      = _mm_set1_pd(-2.5518551727311523996);
    const __m128d CCP3      = _mm_set1_pd(-0.687717681153649930619);
    const __m128d CCP2      = _mm_set1_pd(-0.212652252872804219852);
    const __m128d CCP1      = _mm_set1_pd(0.0175389834052493308818);
    const __m128d CCP0      = _mm_set1_pd(0.00628057170626964891937);

    const __m128d CCQ6      = _mm_set1_pd(5.48409182238641741584);
    const __m128d CCQ5      = _mm_set1_pd(13.5064170191802889145);
    const __m128d CCQ4      = _mm_set1_pd(22.9367376522880577224);
    const __m128d CCQ3      = _mm_set1_pd(15.930646027911794143);
    const __m128d CCQ2      = _mm_set1_pd(11.0567237927800161565);
    const __m128d CCQ1      = _mm_set1_pd(2.79257750980575282228);
    /* CCQ0 == 1.0 */
    const __m128d CCoffset  = _mm_set1_pd(0.5579090118408203125);

    const __m128d one       = _mm_set1_pd(1.0);
    const __m128d two       = _mm_set1_pd(2.0);

    const __m128d signbit   = gmx_mm_castsi128_pd( _mm_set_epi32(0x80000000, 0x00000000, 0x80000000, 0x00000000) );

    __m128d       xabs, x2, x4, t, t2, w, w2;
    __m128d       PolyAP0, PolyAP1, PolyAQ0, PolyAQ1;
    __m128d       PolyBP0, PolyBP1, PolyBQ0, PolyBQ1;
    __m128d       PolyCP0, PolyCP1, PolyCQ0, PolyCQ1;
    __m128d       res_erf, res_erfcB, res_erfcC, res_erfc, res;
    __m128d       mask, expmx2;

    /* Calculate erf() */
    xabs     = gmx_mm_abs_pd(x);
    x2       = _mm_mul_pd(x, x);
    x4       = _mm_mul_pd(x2, x2);

    PolyAP0  = _mm_macc_pd(CAP4, x4, CAP2);
    PolyAP1  = _mm_macc_pd(CAP3, x4, CAP1);
    PolyAP0  = _mm_macc_pd(PolyAP0, x4, CAP0);
    PolyAP0  = _mm_macc_pd(PolyAP1, x2, PolyAP0);

    PolyAQ1  = _mm_macc_pd(CAQ5, x4, CAQ3);
    PolyAQ0  = _mm_macc_pd(CAQ4, x4, CAQ2);
    PolyAQ1  = _mm_macc_pd(PolyAQ1, x4, CAQ1);
    PolyAQ0  = _mm_macc_pd(PolyAQ0, x4, one);
    PolyAQ0  = _mm_macc_pd(PolyAQ1, x2, PolyAQ0);

    res_erf  = _mm_macc_pd(PolyAP0, gmx_mm_inv_pd(PolyAQ0), CAoffset);
    res_erf  = _mm_mul_pd(x, res_erf);

    /* Calculate erfc() in range [1,4.5] */
    t        = _mm_sub_pd(xabs, one);
    t2       = _mm_mul_pd(t, t);

    PolyBP0  = _mm_macc_pd(CBP6, t2, CBP4);
    PolyBP1  = _mm_macc_pd(CBP5, t2, CBP3);
    PolyBP0  = _mm_macc_pd(PolyBP0, t2, CBP2);
    PolyBP1  = _mm_macc_pd(PolyBP1, t2, CBP1);
    PolyBP0  = _mm_macc_pd(PolyBP0, t2, CBP0);
    PolyBP0  = _mm_macc_pd(PolyBP1, t, PolyBP0);

    PolyBQ1  = _mm_macc_pd(CBQ7, t2, CBQ5);
    PolyBQ0  = _mm_macc_pd(CBQ6, t2, CBQ4);
    PolyBQ1  = _mm_macc_pd(PolyBQ1, t2, CBQ3);
    PolyBQ0  = _mm_macc_pd(PolyBQ0, t2, CBQ2);
    PolyBQ1  = _mm_macc_pd(PolyBQ1, t2, CBQ1);
    PolyBQ0  = _mm_macc_pd(PolyBQ0, t2, one);
    PolyBQ0  = _mm_macc_pd(PolyBQ1, t, PolyBQ0);

    res_erfcB = _mm_mul_pd(PolyBP0, gmx_mm_inv_pd(PolyBQ0));

    res_erfcB = _mm_mul_pd(res_erfcB, xabs);

    /* Calculate erfc() in range [4.5,inf] */
    w        = gmx_mm_inv_pd(xabs);
    w2       = _mm_mul_pd(w, w);

    PolyCP0  = _mm_macc_pd(CCP6, w2, CCP4);
    PolyCP1  = _mm_macc_pd(CCP5, w2, CCP3);
    PolyCP0  = _mm_macc_pd(PolyCP0, w2, CCP2);
    PolyCP1  = _mm_macc_pd(PolyCP1, w2, CCP1);
    PolyCP0  = _mm_macc_pd(PolyCP0, w2, CCP0);
    PolyCP0  = _mm_macc_pd(PolyCP1, w, PolyCP0);

    PolyCQ0  = _mm_macc_pd(CCQ6, w2, CCQ4);
    PolyCQ1  = _mm_macc_pd(CCQ5, w2, CCQ3);
    PolyCQ0  = _mm_macc_pd(PolyCQ0, w2, CCQ2);
    PolyCQ1  = _mm_macc_pd(PolyCQ1, w2, CCQ1);
    PolyCQ0  = _mm_macc_pd(PolyCQ0, w2, one);
    PolyCQ0  = _mm_macc_pd(PolyCQ1, w, PolyCQ0);

    expmx2   = gmx_mm_exp_pd( _mm_or_pd(signbit, x2) );

    res_erfcC = _mm_macc_pd(PolyCP0, gmx_mm_inv_pd(PolyCQ0), CCoffset);
    res_erfcC = _mm_mul_pd(res_erfcC, w);

    mask     = _mm_cmpgt_pd(xabs, _mm_set1_pd(4.5));
    res_erfc = _mm_blendv_pd(res_erfcB, res_erfcC, mask);

    res_erfc = _mm_mul_pd(res_erfc, expmx2);

    /* erfc(x<0) = 2-erfc(|x|) */
    mask     = _mm_cmplt_pd(x, _mm_setzero_pd());
    res_erfc = _mm_blendv_pd(res_erfc, _mm_sub_pd(two, res_erfc), mask);

    /* Select erf() or erfc() */
    mask = _mm_cmplt_pd(xabs, one);
    res  = _mm_blendv_pd(res_erfc, _mm_sub_pd(one, res_erf), mask);

    return res;
}
/* Calculate the force correction due to PME analytically.
 *
 * This routine is meant to enable analytical evaluation of the
 * direct-space PME electrostatic force to avoid tables.
 *
 * The direct-space potential should be Erfc(beta*r)/r, but there
 * are some problems evaluating that:
 *
 * First, the error function is difficult (read: expensive) to
 * approximate accurately for intermediate to large arguments, and
 * this happens already in ranges of beta*r that occur in simulations.
 * Second, we now try to avoid calculating potentials in Gromacs and
 * instead use forces directly.
 *
 * We can simplify things slightly by noting that the PME part is really
 * a correction to the normal Coulomb force since Erfc(z)=1-Erf(z), i.e.
 *
 *     V = 1/r - Erf(beta*r)/r
 *
 * The first term we already have from the inverse square root, so
 * that we can leave out of this routine.
 *
 * For PME tolerances of 1e-3 to 1e-8 and cutoffs of 0.5nm to 1.8nm,
 * the argument beta*r will be in the range 0.15 to ~4. Use your
 * favorite plotting program to realize how well-behaved Erf(z)/z is
 * in this range!
 *
 * We approximate f(z)=erf(z)/z with a rational minimax polynomial.
 * However, it turns out it is more efficient to approximate f(z)/z and
 * then only use even powers. This is another minor optimization, since
 * we actually WANT f(z)/z, because it is going to be multiplied by
 * the vector between the two atoms to get the vectorial force. The
 * fastest flops are the ones we can avoid calculating!
 *
 * So, here's how it should be used:
 *
 * 1. Calculate r^2.
 * 2. Multiply by beta^2, so you get z^2=beta^2*r^2.
 * 3. Evaluate this routine with z^2 as the argument.
 * 4. The return value is the expression:
 *
 *        2*exp(-z^2)     erf(z)
 *        ------------ -  --------
 *        sqrt(Pi)*z^2      z^3
 *
 * 5. Multiply the entire expression by beta^3. This will get you
 *
 *        beta^3*2*exp(-z^2)     beta^3*erf(z)
 *        ------------------  -  -------------
 *           sqrt(Pi)*z^2             z^3
 *
 *    or, switching back to r (z=r*beta):
 *
 *        2*beta*exp(-r^2*beta^2)     erf(r*beta)
 *        -----------------------  -  -----------
 *             sqrt(Pi)*r^2               r^3
 *
 *    With a bit of math exercise you should be able to confirm that
 *    this is exactly D[Erf[beta*r]/r,r] divided by r another time.
 *
 * 6. Add the result to 1/r^3, multiply by the product of the charges,
 *    and you have your force (divided by r). A final multiplication
 *    with the vector connecting the two particles and you have your
 *    vectorial force to add to the particles.
 *
 * A usage sketch of this recipe follows the function below.
 */
static __m128d
gmx_mm_pmecorrF_pd(__m128d z2)
{
    const __m128d  FN10     = _mm_set1_pd(-8.0072854618360083154e-14);
    const __m128d  FN9      = _mm_set1_pd(1.1859116242260148027e-11);
    const __m128d  FN8      = _mm_set1_pd(-8.1490406329798423616e-10);
    const __m128d  FN7      = _mm_set1_pd(3.4404793543907847655e-8);
    const __m128d  FN6      = _mm_set1_pd(-9.9471420832602741006e-7);
    const __m128d  FN5      = _mm_set1_pd(0.000020740315999115847456);
    const __m128d  FN4      = _mm_set1_pd(-0.00031991745139313364005);
    const __m128d  FN3      = _mm_set1_pd(0.0035074449373659008203);
    const __m128d  FN2      = _mm_set1_pd(-0.031750380176100813405);
    const __m128d  FN1      = _mm_set1_pd(0.13884101728898463426);
    const __m128d  FN0      = _mm_set1_pd(-0.75225277815249618847);

    const __m128d  FD5      = _mm_set1_pd(0.000016009278224355026701);
    const __m128d  FD4      = _mm_set1_pd(0.00051055686934806966046);
    const __m128d  FD3      = _mm_set1_pd(0.0081803507497974289008);
    const __m128d  FD2      = _mm_set1_pd(0.077181146026670287235);
    const __m128d  FD1      = _mm_set1_pd(0.41543303143712535988);
    const __m128d  FD0      = _mm_set1_pd(1.0);

    __m128d        z4;
    __m128d        polyFN0, polyFN1, polyFD0, polyFD1;

    z4        = _mm_mul_pd(z2, z2);

    polyFD1   = _mm_macc_pd(FD5, z4, FD3);
    polyFD1   = _mm_macc_pd(polyFD1, z4, FD1);
    polyFD1   = _mm_mul_pd(polyFD1, z2);
    polyFD0   = _mm_macc_pd(FD4, z4, FD2);
    polyFD0   = _mm_macc_pd(polyFD0, z4, FD0);
    polyFD0   = _mm_add_pd(polyFD0, polyFD1);

    polyFD0   = gmx_mm_inv_pd(polyFD0);

    polyFN0   = _mm_macc_pd(FN10, z4, FN8);
    polyFN0   = _mm_macc_pd(polyFN0, z4, FN6);
    polyFN0   = _mm_macc_pd(polyFN0, z4, FN4);
    polyFN0   = _mm_macc_pd(polyFN0, z4, FN2);
    polyFN0   = _mm_macc_pd(polyFN0, z4, FN0);
    polyFN1   = _mm_macc_pd(FN9, z4, FN7);
    polyFN1   = _mm_macc_pd(polyFN1, z4, FN5);
    polyFN1   = _mm_macc_pd(polyFN1, z4, FN3);
    polyFN1   = _mm_macc_pd(polyFN1, z4, FN1);
    polyFN0   = _mm_macc_pd(polyFN1, z2, polyFN0);

    return _mm_mul_pd(polyFN0, polyFD0);
}
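/* Illustrative usage sketch (not part of the original header) of the recipe in
 * the comment above, computing the scalar force/r for two interactions at once.
 * The function name, the qq charge-product argument, and beta are hypothetical
 * example parameters.
 */
static gmx_inline __m128d
gmx_mm_example_pme_force(__m128d r2, __m128d qq, double beta)
{
    const __m128d beta2 = _mm_set1_pd(beta*beta);
    const __m128d beta3 = _mm_set1_pd(beta*beta*beta);
    __m128d       rinv, rinv3, z2, corr;

    rinv  = gmx_mm_invsqrt_pd(r2);                      /* 1/r */
    rinv3 = _mm_mul_pd(_mm_mul_pd(rinv, rinv), rinv);   /* 1/r^3 */

    z2    = _mm_mul_pd(beta2, r2);                      /* steps 1-2: z^2 = beta^2*r^2 */
    corr  = _mm_mul_pd(beta3, gmx_mm_pmecorrF_pd(z2));  /* steps 3-5 */

    /* step 6: add to 1/r^3 and scale by the charge product -> force divided by r */
    return _mm_mul_pd(qq, _mm_add_pd(rinv3, corr));
}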
/* Calculate the potential correction due to PME analytically.
 *
 * This routine calculates Erf(z)/z, although you should provide z^2
 * as the input argument.
 *
 * Here's how it should be used:
 *
 * 1. Calculate r^2.
 * 2. Multiply by beta^2, so you get z^2=beta^2*r^2.
 * 3. Evaluate this routine with z^2 as the argument.
 * 4. The return value is the expression:
 *
 *        erf(z)
 *        ------
 *          z
 *
 * 5. Multiply the entire expression by beta and switch back to r (z=r*beta):
 *
 *        erf(r*beta)
 *        -----------
 *             r
 *
 * 6. Subtract the result from 1/r, multiply by the product of the charges,
 *    and you have your potential.
 *
 * A usage sketch of this recipe follows the function below.
 */
static __m128d
gmx_mm_pmecorrV_pd(__m128d z2)
{
    const __m128d  VN9      = _mm_set1_pd(-9.3723776169321855475e-13);
    const __m128d  VN8      = _mm_set1_pd(1.2280156762674215741e-10);
    const __m128d  VN7      = _mm_set1_pd(-7.3562157912251309487e-9);
    const __m128d  VN6      = _mm_set1_pd(2.6215886208032517509e-7);
    const __m128d  VN5      = _mm_set1_pd(-4.9532491651265819499e-6);
    const __m128d  VN4      = _mm_set1_pd(0.00025907400778966060389);
    const __m128d  VN3      = _mm_set1_pd(0.0010585044856156469792);
    const __m128d  VN2      = _mm_set1_pd(0.045247661136833092885);
    const __m128d  VN1      = _mm_set1_pd(0.11643931522926034421);
    const __m128d  VN0      = _mm_set1_pd(1.1283791671726767970);

    const __m128d  VD5      = _mm_set1_pd(0.000021784709867336150342);
    const __m128d  VD4      = _mm_set1_pd(0.00064293662010911388448);
    const __m128d  VD3      = _mm_set1_pd(0.0096311444822588683504);
    const __m128d  VD2      = _mm_set1_pd(0.085608012351550627051);
    const __m128d  VD1      = _mm_set1_pd(0.43652499166614811084);
    const __m128d  VD0      = _mm_set1_pd(1.0);

    __m128d        z4;
    __m128d        polyVN0, polyVN1, polyVD0, polyVD1;

    z4        = _mm_mul_pd(z2, z2);

    polyVD1   = _mm_macc_pd(VD5, z4, VD3);
    polyVD0   = _mm_macc_pd(VD4, z4, VD2);
    polyVD1   = _mm_macc_pd(polyVD1, z4, VD1);
    polyVD0   = _mm_macc_pd(polyVD0, z4, VD0);
    polyVD0   = _mm_macc_pd(polyVD1, z2, polyVD0);

    polyVD0   = gmx_mm_inv_pd(polyVD0);

    polyVN1   = _mm_macc_pd(VN9, z4, VN7);
    polyVN0   = _mm_macc_pd(VN8, z4, VN6);
    polyVN1   = _mm_macc_pd(polyVN1, z4, VN5);
    polyVN0   = _mm_macc_pd(polyVN0, z4, VN4);
    polyVN1   = _mm_macc_pd(polyVN1, z4, VN3);
    polyVN0   = _mm_macc_pd(polyVN0, z4, VN2);
    polyVN1   = _mm_macc_pd(polyVN1, z4, VN1);
    polyVN0   = _mm_macc_pd(polyVN0, z4, VN0);
    polyVN0   = _mm_macc_pd(polyVN1, z2, polyVN0);

    return _mm_mul_pd(polyVN0, polyVD0);
}
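/* Illustrative usage sketch (not part of the original header) following the
 * steps above for the potential; the function name, the qq charge-product
 * argument, and beta are hypothetical example parameters.
 */
static gmx_inline __m128d
gmx_mm_example_pme_potential(__m128d r2, __m128d qq, double beta)
{
    const __m128d beta2 = _mm_set1_pd(beta*beta);
    const __m128d vbeta = _mm_set1_pd(beta);
    __m128d       rinv, z2, corr;

    rinv = gmx_mm_invsqrt_pd(r2);                       /* 1/r */
    z2   = _mm_mul_pd(beta2, r2);                       /* steps 1-2: z^2 = beta^2*r^2 */
    corr = _mm_mul_pd(vbeta, gmx_mm_pmecorrV_pd(z2));   /* steps 3-5: erf(beta*r)/r */

    /* step 6: subtract from 1/r and scale by the charge product */
    return _mm_mul_pd(qq, _mm_sub_pd(rinv, corr));
}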
static int
gmx_mm_sincos_pd(__m128d  x,
                 __m128d *sinval,
                 __m128d *cosval)
{
#ifdef _MSC_VER
    __declspec(align(16))
    const double sintable[34] =
    {
        1.00000000000000000e+00, 0.00000000000000000e+00,
        9.95184726672196929e-01, 9.80171403295606036e-02,
        9.80785280403230431e-01, 1.95090322016128248e-01,
        9.56940335732208824e-01, 2.90284677254462331e-01,
        9.23879532511286738e-01, 3.82683432365089782e-01,
        8.81921264348355050e-01, 4.71396736825997642e-01,
        8.31469612302545236e-01, 5.55570233019602178e-01,
        7.73010453362736993e-01, 6.34393284163645488e-01,
        7.07106781186547573e-01, 7.07106781186547462e-01,
        6.34393284163645599e-01, 7.73010453362736882e-01,
        5.55570233019602289e-01, 8.31469612302545125e-01,
        4.71396736825997809e-01, 8.81921264348354939e-01,
        3.82683432365089837e-01, 9.23879532511286738e-01,
        2.90284677254462276e-01, 9.56940335732208935e-01,
        1.95090322016128304e-01, 9.80785280403230431e-01,
        9.80171403295607702e-02, 9.95184726672196818e-01,
        0.0, 1.00000000000000000e+00
    };
#else
    const __m128d sintable[17] =
    {
        _mm_set_pd( 0.0, 1.0 ),
        _mm_set_pd( sin(  1.0 * (M_PI/2.0) / 16.0), cos(  1.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin(  2.0 * (M_PI/2.0) / 16.0), cos(  2.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin(  3.0 * (M_PI/2.0) / 16.0), cos(  3.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin(  4.0 * (M_PI/2.0) / 16.0), cos(  4.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin(  5.0 * (M_PI/2.0) / 16.0), cos(  5.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin(  6.0 * (M_PI/2.0) / 16.0), cos(  6.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin(  7.0 * (M_PI/2.0) / 16.0), cos(  7.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin(  8.0 * (M_PI/2.0) / 16.0), cos(  8.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin(  9.0 * (M_PI/2.0) / 16.0), cos(  9.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin( 10.0 * (M_PI/2.0) / 16.0), cos( 10.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin( 11.0 * (M_PI/2.0) / 16.0), cos( 11.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin( 12.0 * (M_PI/2.0) / 16.0), cos( 12.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin( 13.0 * (M_PI/2.0) / 16.0), cos( 13.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin( 14.0 * (M_PI/2.0) / 16.0), cos( 14.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin( 15.0 * (M_PI/2.0) / 16.0), cos( 15.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( 1.0, 0.0 )
    };
#endif

    const __m128d signmask      = gmx_mm_castsi128_pd( _mm_set_epi32(0x7FFFFFFF, 0xFFFFFFFF, 0x7FFFFFFF, 0xFFFFFFFF) );
    const __m128i signbit_epi32 = _mm_set1_epi32(0x80000000);

    const __m128d tabscale      = _mm_set1_pd(32.0/M_PI);
    const __m128d invtabscale0  = _mm_set1_pd(9.81747508049011230469e-02);
    const __m128d invtabscale1  = _mm_set1_pd(1.96197799156550576057e-08);
    const __m128i ione          = _mm_set1_epi32(1);
    const __m128i i32           = _mm_set1_epi32(32);
    const __m128i i16           = _mm_set1_epi32(16);
    const __m128i tabmask       = _mm_set1_epi32(0x3F);
    const __m128d sinP7         = _mm_set1_pd(-1.0/5040.0);
    const __m128d sinP5         = _mm_set1_pd(1.0/120.0);
    const __m128d sinP3         = _mm_set1_pd(-1.0/6.0);
    const __m128d sinP1         = _mm_set1_pd(1.0);

    const __m128d cosP6         = _mm_set1_pd(-1.0/720.0);
    const __m128d cosP4         = _mm_set1_pd(1.0/24.0);
    const __m128d cosP2         = _mm_set1_pd(-1.0/2.0);
    const __m128d cosP0         = _mm_set1_pd(1.0);

    __m128d       scalex;
    __m128i       tabidx, corridx;
    __m128d       xabs, z, z2, polySin, polyCos;
    __m128d       xpoint;
    __m128d       ypoint0, ypoint1;

    __m128d       sinpoint, cospoint;
    __m128d       xsign, ssign, csign;
    __m128i       imask, sswapsign, cswapsign;
    __m128d       minusone;

    xsign    = _mm_andnot_pd(signmask, x);
    xabs     = _mm_and_pd(x, signmask);

    scalex   = _mm_mul_pd(tabscale, xabs);
    tabidx   = _mm_cvtpd_epi32(scalex);

    xpoint   = _mm_round_pd(scalex, _MM_FROUND_TO_NEAREST_INT);

    /* Extended precision arithmetic */
    z        = _mm_nmacc_pd(invtabscale0, xpoint, xabs);
    z        = _mm_nmacc_pd(invtabscale1, xpoint, z);

    /* Range reduction to 0..2*Pi */
    tabidx   = _mm_and_si128(tabidx, tabmask);

    /* tabidx is now in range [0,..,63] */
    imask     = _mm_cmpgt_epi32(tabidx, i32);
    sswapsign = imask;
    cswapsign = imask;
    corridx   = _mm_and_si128(imask, i32);
    tabidx    = _mm_sub_epi32(tabidx, corridx);

    /* tabidx is now in range [0..32] */
    imask     = _mm_cmpgt_epi32(tabidx, i16);
    cswapsign = _mm_xor_si128(cswapsign, imask);
    corridx   = _mm_sub_epi32(i32, tabidx);
    tabidx    = _mm_blendv_epi8(tabidx, corridx, imask);
    /* tabidx is now in range [0..16] */
    ssign     = _mm_cvtepi32_pd( _mm_or_si128( sswapsign, ione ) );
    csign     = _mm_cvtepi32_pd( _mm_or_si128( cswapsign, ione ) );

#ifdef _MSC_VER
    ypoint0  = _mm_load_pd(sintable + 2*_mm_extract_epi32(tabidx, 0));
    ypoint1  = _mm_load_pd(sintable + 2*_mm_extract_epi32(tabidx, 1));
#else
    ypoint0  = sintable[_mm_extract_epi32(tabidx, 0)];
    ypoint1  = sintable[_mm_extract_epi32(tabidx, 1)];
#endif
    sinpoint = _mm_unpackhi_pd(ypoint0, ypoint1);
    cospoint = _mm_unpacklo_pd(ypoint0, ypoint1);

    sinpoint = _mm_mul_pd(sinpoint, ssign);
    cospoint = _mm_mul_pd(cospoint, csign);

    z2       = _mm_mul_pd(z, z);

    polySin  = _mm_macc_pd(sinP7, z2, sinP5);
    polySin  = _mm_macc_pd(polySin, z2, sinP3);
    polySin  = _mm_macc_pd(polySin, z2, sinP1);
    polySin  = _mm_mul_pd(polySin, z);

    polyCos  = _mm_macc_pd(cosP6, z2, cosP4);
    polyCos  = _mm_macc_pd(polyCos, z2, cosP2);
    polyCos  = _mm_macc_pd(polyCos, z2, cosP0);

    *sinval  = _mm_xor_pd(_mm_add_pd( _mm_mul_pd(sinpoint, polyCos), _mm_mul_pd(cospoint, polySin) ), xsign);
    *cosval  = _mm_sub_pd( _mm_mul_pd(cospoint, polyCos), _mm_mul_pd(sinpoint, polySin) );

    return 0;
}
/*
 * IMPORTANT: Do NOT call both sin & cos if you need both results, since each of them
 * will then call the sincos() routine and waste a factor 2 in performance!
 */
static __m128d
gmx_mm_sin_pd(__m128d x)
{
    __m128d s, c;
    gmx_mm_sincos_pd(x, &s, &c);
    return s;
}
/*
 * IMPORTANT: Do NOT call both sin & cos if you need both results, since each of them
 * will then call the sincos() routine and waste a factor 2 in performance!
 * (See the usage sketch below.)
 */
static __m128d
gmx_mm_cos_pd(__m128d x)
{
    __m128d s, c;
    gmx_mm_sincos_pd(x, &s, &c);
    return c;
}
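/* Illustrative usage sketch (not part of the original header): when both values
 * are needed, call gmx_mm_sincos_pd() once rather than gmx_mm_sin_pd() plus
 * gmx_mm_cos_pd(), which would do the range reduction and table lookup twice.
 * The function name and array interface are hypothetical example code.
 */
static gmx_inline void
gmx_mm_example_sincos_usage(const double phi[2], double s[2], double c[2])
{
    __m128d x, sv, cv;

    x = _mm_loadu_pd(phi);
    gmx_mm_sincos_pd(x, &sv, &cv); /* one call yields both results */
    _mm_storeu_pd(s, sv);
    _mm_storeu_pd(c, cv);
}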
static __m128d
gmx_mm_tan_pd(__m128d x)
{
    __m128d sinval, cosval;
    __m128d tanval;

    gmx_mm_sincos_pd(x, &sinval, &cosval);

    tanval = _mm_mul_pd(sinval, gmx_mm_inv_pd(cosval));

    return tanval;
}
static __m128d
gmx_mm_asin_pd(__m128d x)
{
    /* Same algorithm as cephes library */
    const __m128d signmask  = gmx_mm_castsi128_pd( _mm_set_epi32(0x7FFFFFFF, 0xFFFFFFFF, 0x7FFFFFFF, 0xFFFFFFFF) );
    const __m128d limit1    = _mm_set1_pd(0.625);
    const __m128d limit2    = _mm_set1_pd(1e-8);
    const __m128d one       = _mm_set1_pd(1.0);
    const __m128d halfpi    = _mm_set1_pd(M_PI/2.0);
    const __m128d quarterpi = _mm_set1_pd(M_PI/4.0);
    const __m128d morebits  = _mm_set1_pd(6.123233995736765886130e-17);

    const __m128d P5        = _mm_set1_pd(4.253011369004428248960e-3);
    const __m128d P4        = _mm_set1_pd(-6.019598008014123785661e-1);
    const __m128d P3        = _mm_set1_pd(5.444622390564711410273e0);
    const __m128d P2        = _mm_set1_pd(-1.626247967210700244449e1);
    const __m128d P1        = _mm_set1_pd(1.956261983317594739197e1);
    const __m128d P0        = _mm_set1_pd(-8.198089802484824371615e0);

    const __m128d Q4        = _mm_set1_pd(-1.474091372988853791896e1);
    const __m128d Q3        = _mm_set1_pd(7.049610280856842141659e1);
    const __m128d Q2        = _mm_set1_pd(-1.471791292232726029859e2);
    const __m128d Q1        = _mm_set1_pd(1.395105614657485689735e2);
    const __m128d Q0        = _mm_set1_pd(-4.918853881490881290097e1);

    const __m128d R4        = _mm_set1_pd(2.967721961301243206100e-3);
    const __m128d R3        = _mm_set1_pd(-5.634242780008963776856e-1);
    const __m128d R2        = _mm_set1_pd(6.968710824104713396794e0);
    const __m128d R1        = _mm_set1_pd(-2.556901049652824852289e1);
    const __m128d R0        = _mm_set1_pd(2.853665548261061424989e1);

    const __m128d S3        = _mm_set1_pd(-2.194779531642920639778e1);
    const __m128d S2        = _mm_set1_pd(1.470656354026814941758e2);
    const __m128d S1        = _mm_set1_pd(-3.838770957603691357202e2);
    const __m128d S0        = _mm_set1_pd(3.424398657913078477438e2);

    __m128d       sign;
    __m128d       mask;
    __m128d       xabs;
    __m128d       zz, ww, z, q, w, y, zz2, ww2;
    __m128d       PA, PB;
    __m128d       QA, QB;
    __m128d       RA, RB;
    __m128d       SA, SB;
    __m128d       nom, denom;

    sign  = _mm_andnot_pd(signmask, x);
    xabs  = _mm_and_pd(x, signmask);

    mask  = _mm_cmpgt_pd(xabs, limit1);

    zz    = _mm_sub_pd(one, xabs);
    ww    = _mm_mul_pd(xabs, xabs);
    zz2   = _mm_mul_pd(zz, zz);
    ww2   = _mm_mul_pd(ww, ww);

    /* R */
    RA    = _mm_macc_pd(R4, zz2, R2);
    RB    = _mm_macc_pd(R3, zz2, R1);
    RA    = _mm_macc_pd(RA, zz2, R0);
    RA    = _mm_macc_pd(RB, zz, RA);

    /* S, SA = zz2 */
    SB    = _mm_macc_pd(S3, zz2, S1);
    SA    = _mm_add_pd(zz2, S2);
    SA    = _mm_macc_pd(SA, zz2, S0);
    SA    = _mm_macc_pd(SB, zz, SA);

    /* P */
    PA    = _mm_macc_pd(P5, ww2, P3);
    PB    = _mm_macc_pd(P4, ww2, P2);
    PA    = _mm_macc_pd(PA, ww2, P1);
    PB    = _mm_macc_pd(PB, ww2, P0);
    PA    = _mm_macc_pd(PA, ww, PB);

    /* Q, QA = ww2 */
    QB    = _mm_macc_pd(Q4, ww2, Q2);
    QA    = _mm_add_pd(ww2, Q3);
    QA    = _mm_macc_pd(QA, ww2, Q1);
    QB    = _mm_macc_pd(QB, ww2, Q0);
    QA    = _mm_macc_pd(QA, ww, QB);

    RA    = _mm_mul_pd(RA, zz);
    PA    = _mm_mul_pd(PA, ww);

    nom   = _mm_blendv_pd( PA, RA, mask );
    denom = _mm_blendv_pd( QA, SA, mask );

    q     = _mm_mul_pd( nom, gmx_mm_inv_pd(denom) );

    zz    = _mm_add_pd(zz, zz);
    zz    = gmx_mm_sqrt_pd(zz);
    z     = _mm_sub_pd(quarterpi, zz);
    zz    = _mm_mul_pd(zz, q);
    zz    = _mm_sub_pd(zz, morebits);
    z     = _mm_sub_pd(z, zz);
    z     = _mm_add_pd(z, quarterpi);

    w     = _mm_macc_pd(xabs, q, xabs);

    z     = _mm_blendv_pd( w, z, mask );

    mask  = _mm_cmpgt_pd(xabs, limit2);
    z     = _mm_blendv_pd( xabs, z, mask );

    z     = _mm_xor_pd(z, sign);

    return z;
}
static __m128d
gmx_mm_acos_pd(__m128d x)
{
    const __m128d signmask   = gmx_mm_castsi128_pd( _mm_set_epi32(0x7FFFFFFF, 0xFFFFFFFF, 0x7FFFFFFF, 0xFFFFFFFF) );
    const __m128d one        = _mm_set1_pd(1.0);
    const __m128d half       = _mm_set1_pd(0.5);
    const __m128d pi         = _mm_set1_pd(M_PI);
    const __m128d quarterpi0 = _mm_set1_pd(7.85398163397448309616e-1);
    const __m128d quarterpi1 = _mm_set1_pd(6.123233995736765886130e-17);

    __m128d       mask1;

    __m128d       z, z1, z2;

    mask1 = _mm_cmpgt_pd(x, half);
    z1    = _mm_mul_pd(half, _mm_sub_pd(one, x));
    z1    = gmx_mm_sqrt_pd(z1);
    z     = _mm_blendv_pd( x, z1, mask1 );

    z     = gmx_mm_asin_pd(z);

    z1    = _mm_add_pd(z, z);

    z2    = _mm_sub_pd(quarterpi0, z);
    z2    = _mm_add_pd(z2, quarterpi1);
    z2    = _mm_add_pd(z2, quarterpi0);

    z     = _mm_blendv_pd(z2, z1, mask1);

    return z;
}
static __m128d
gmx_mm_atan_pd(__m128d x)
{
    /* Same algorithm as cephes library */
    const __m128d signmask  = gmx_mm_castsi128_pd( _mm_set_epi32(0x7FFFFFFF, 0xFFFFFFFF, 0x7FFFFFFF, 0xFFFFFFFF) );
    const __m128d limit1    = _mm_set1_pd(0.66);
    const __m128d limit2    = _mm_set1_pd(2.41421356237309504880);
    const __m128d quarterpi = _mm_set1_pd(M_PI/4.0);
    const __m128d halfpi    = _mm_set1_pd(M_PI/2.0);
    const __m128d mone      = _mm_set1_pd(-1.0);
    const __m128d morebits1 = _mm_set1_pd(0.5*6.123233995736765886130E-17);
    const __m128d morebits2 = _mm_set1_pd(6.123233995736765886130E-17);

    const __m128d P4        = _mm_set1_pd(-8.750608600031904122785E-1);
    const __m128d P3        = _mm_set1_pd(-1.615753718733365076637E1);
    const __m128d P2        = _mm_set1_pd(-7.500855792314704667340E1);
    const __m128d P1        = _mm_set1_pd(-1.228866684490136173410E2);
    const __m128d P0        = _mm_set1_pd(-6.485021904942025371773E1);

    const __m128d Q4        = _mm_set1_pd(2.485846490142306297962E1);
    const __m128d Q3        = _mm_set1_pd(1.650270098316988542046E2);
    const __m128d Q2        = _mm_set1_pd(4.328810604912902668951E2);
    const __m128d Q1        = _mm_set1_pd(4.853903996359136964868E2);
    const __m128d Q0        = _mm_set1_pd(1.945506571482613964425E2);

    __m128d       sign;
    __m128d       mask1, mask2;
    __m128d       y, t1, t2;
    __m128d       z, z2;
    __m128d       P_A, P_B, Q_A, Q_B;

    sign  = _mm_andnot_pd(signmask, x);
    x     = _mm_and_pd(x, signmask);

    mask1 = _mm_cmpgt_pd(x, limit1);
    mask2 = _mm_cmpgt_pd(x, limit2);

    t1    = _mm_mul_pd(_mm_add_pd(x, mone), gmx_mm_inv_pd(_mm_sub_pd(x, mone)));
    t2    = _mm_mul_pd(mone, gmx_mm_inv_pd(x));

    y     = _mm_and_pd(mask1, quarterpi);
    y     = _mm_or_pd( _mm_and_pd(mask2, halfpi), _mm_andnot_pd(mask2, y) );

    x     = _mm_or_pd( _mm_and_pd(mask1, t1), _mm_andnot_pd(mask1, x) );
    x     = _mm_or_pd( _mm_and_pd(mask2, t2), _mm_andnot_pd(mask2, x) );

    z     = _mm_mul_pd(x, x);
    z2    = _mm_mul_pd(z, z);

    P_A   = _mm_macc_pd(P4, z2, P2);
    P_B   = _mm_macc_pd(P3, z2, P1);
    P_A   = _mm_macc_pd(P_A, z2, P0);
    P_A   = _mm_macc_pd(P_B, z, P_A);

    /* Q_A = z2 */
    Q_B   = _mm_macc_pd(Q4, z2, Q2);
    Q_A   = _mm_add_pd(z2, Q3);
    Q_A   = _mm_macc_pd(Q_A, z2, Q1);
    Q_B   = _mm_macc_pd(Q_B, z2, Q0);
    Q_A   = _mm_macc_pd(Q_A, z, Q_B);

    z     = _mm_mul_pd(z, P_A);
    z     = _mm_mul_pd(z, gmx_mm_inv_pd(Q_A));
    z     = _mm_macc_pd(z, x, x);

    t1    = _mm_and_pd(mask1, morebits1);
    t1    = _mm_or_pd( _mm_and_pd(mask2, morebits2), _mm_andnot_pd(mask2, t1) );

    z     = _mm_add_pd(z, t1);
    y     = _mm_add_pd(y, z);

    y     = _mm_xor_pd(y, sign);

    return y;
}
static __m128d
gmx_mm_atan2_pd(__m128d y, __m128d x)
{
    const __m128d pi          = _mm_set1_pd(M_PI);
    const __m128d minuspi     = _mm_set1_pd(-M_PI);
    const __m128d halfpi      = _mm_set1_pd(M_PI/2.0);
    const __m128d minushalfpi = _mm_set1_pd(-M_PI/2.0);

    __m128d       z, z1, z3, z4;
    __m128d       w;
    __m128d       maskx_lt, maskx_eq;
    __m128d       masky_lt, masky_eq;
    __m128d       mask1, mask2, mask3, mask4, maskall;

    maskx_lt  = _mm_cmplt_pd(x, _mm_setzero_pd());
    masky_lt  = _mm_cmplt_pd(y, _mm_setzero_pd());
    maskx_eq  = _mm_cmpeq_pd(x, _mm_setzero_pd());
    masky_eq  = _mm_cmpeq_pd(y, _mm_setzero_pd());

    z         = _mm_mul_pd(y, gmx_mm_inv_pd(x));
    z         = gmx_mm_atan_pd(z);

    mask1     = _mm_and_pd(maskx_eq, masky_lt);
    mask2     = _mm_andnot_pd(maskx_lt, masky_eq);
    mask3     = _mm_andnot_pd( _mm_or_pd(masky_lt, masky_eq), maskx_eq);
    mask4     = _mm_and_pd(masky_eq, maskx_lt);

    maskall   = _mm_or_pd( _mm_or_pd(mask1, mask2), _mm_or_pd(mask3, mask4) );

    z         = _mm_andnot_pd(maskall, z);
    z1        = _mm_and_pd(mask1, minushalfpi);
    z3        = _mm_and_pd(mask3, halfpi);
    z4        = _mm_and_pd(mask4, pi);

    z         = _mm_or_pd( _mm_or_pd(z, z1), _mm_or_pd(z3, z4) );

    w         = _mm_blendv_pd(pi, minuspi, masky_lt);
    w         = _mm_and_pd(w, maskx_lt);

    w         = _mm_andnot_pd(maskall, w);

    z         = _mm_add_pd(z, w);

    return z;
}
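/* Illustrative usage sketch (not part of the original header): the angles of
 * two 2D vectors (vx[i], vy[i]) via gmx_mm_atan2_pd(). The function name and
 * array interface are hypothetical example code.
 */
static gmx_inline void
gmx_mm_example_atan2_usage(const double vx[2], const double vy[2], double angle[2])
{
    __m128d x = _mm_loadu_pd(vx);
    __m128d y = _mm_loadu_pd(vy);

    _mm_storeu_pd(angle, gmx_mm_atan2_pd(y, x)); /* note the argument order: (y, x) */
}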
#endif /* _gmx_math_x86_avx_128_fma_double_h_ */