/*
 * This source code is part of
 *
 * GROningen MAchine for Chemical Simulations
 *
 * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team,
 * check out http://www.gromacs.org for more information.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * If you want to redistribute modifications, please consider that
 * scientific software is very special. Version control is crucial -
 * bugs must be traceable. We will be happy to consider code for
 * inclusion in the official distribution, but derived work must not
 * be called official GROMACS. Details are found in the README & COPYING
 * files - if they are missing, get the official version at www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the papers on the package - you can find them in the top README file.
 *
 * For more info, check our website at http://www.gromacs.org
 *
 * Gromacs Runs On Most of All Computer Systems
 */
#define gmx_inline inline
#define gmx_inline __inline
/*
  collection of in-line ready operations:

  lookup-table optimized scalar operations:
  real gmx_invsqrt(real x)
  void vecinvsqrt(real in[],real out[],int n)
  void vecrecip(real in[],real out[],int n)

  void rvec_add(const rvec a,const rvec b,rvec c)      c = a + b
  void dvec_add(const dvec a,const dvec b,dvec c)      c = a + b
  void ivec_add(const ivec a,const ivec b,ivec c)      c = a + b
  void rvec_inc(rvec a,const rvec b)                   a += b
  void dvec_inc(dvec a,const dvec b)                   a += b
  void ivec_inc(ivec a,const ivec b)                   a += b
  void rvec_sub(const rvec a,const rvec b,rvec c)      c = a - b
  void dvec_sub(const dvec a,const dvec b,dvec c)      c = a - b
  void rvec_dec(rvec a,const rvec b)                   a -= b
  void copy_rvec(const rvec a,rvec b)                  b = a (reals)
  void copy_dvec(const dvec a,dvec b)                  b = a (reals)
  void copy_ivec(const ivec a,ivec b)                  b = a (integers)
  void ivec_sub(const ivec a,const ivec b,ivec c)      c = a - b
  void svmul(real a,rvec v1,rvec v2)                   v2 = a * v1
  void dsvmul(double a,dvec v1,dvec v2)                v2 = a * v1
  void clear_rvec(rvec a)                              a = 0
  void clear_dvec(dvec a)                              a = 0
  void clear_ivec(ivec a)                              a = 0
  void clear_rvecs(int n,rvec v[])
  real iprod(rvec a,rvec b)                            = a . b (inner product)
  double diprod(dvec a,dvec b)                         = a . b (inner product)
  int iiprod(ivec a,ivec b)                            = a . b (integers)
  real norm2(rvec a)                                   = | a |^2 ( = x*x + y*y + z*z )
  double dnorm2(dvec a)                                = | a |^2 ( = x*x + y*y + z*z )
  real norm(rvec a)                                    = | a |
  double dnorm(dvec a)                                 = | a |
  void cprod(rvec a,rvec b,rvec c)                     c = a x b (cross product)
  void dcprod(dvec a,dvec b,dvec c)                    c = a x b (cross product)
  void dprod(rvec a,rvec b,rvec c)                     c = a * b (direct product)
  real cos_angle(rvec a,rvec b)
  real cos_angle_no_table(rvec a,rvec b)
  real distance2(rvec v1, rvec v2)                     = | v2 - v1 |^2
  void unitv(rvec src,rvec dest)                       dest = src / |src|
  void unitv_no_table(rvec src,rvec dest)              dest = src / |src|
  matrix (3x3) operations:
    ! indicates that dest should not be the same as a, b or src.
    The _ur0 varieties work on matrices that have only zeros
    in the upper-right part, such as box matrices; these varieties
    may produce fewer rounding errors, not because of the operations
    themselves, but because the compiler can recombine the operations
    more easily.
  void copy_mat(matrix a,matrix b)                     b = a
  void clear_mat(matrix a)                             a = 0
  void mmul(matrix a,matrix b,matrix dest)          !  dest = a . b
  void mmul_ur0(matrix a,matrix b,matrix dest)         dest = a . b
  void transpose(matrix src,matrix dest)            !  dest = src*
  void tmmul(matrix a,matrix b,matrix dest)         !  dest = a* . b
  void mtmul(matrix a,matrix b,matrix dest)         !  dest = a . b*
  real det(matrix a)                                   = det(a)
  void m_add(matrix a,matrix b,matrix dest)            dest = a + b
  void m_sub(matrix a,matrix b,matrix dest)            dest = a - b
  void msmul(matrix m1,real r1,matrix dest)            dest = r1 * m1
  void m_inv_ur0(matrix src,matrix dest)               dest = src^-1
  void m_inv(matrix src,matrix dest)                !  dest = src^-1
  void mvmul(matrix a,rvec src,rvec dest)           !  dest = a . src
  void mvmul_ur0(matrix a,rvec src,rvec dest)          dest = a . src
  void tmvmul_ur0(matrix a,rvec src,rvec dest)         dest = a* . src
  real trace(matrix m)                                 = trace(m)
*/
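/* Example usage of the operations listed above (an illustrative sketch, not
 * part of this header; the variables are assumed to be set by the caller):
 *
 *   rvec   a, b, sum, cross;
 *   matrix R, S, RS;
 *
 *   rvec_add(a, b, sum);        sum   = a + b
 *   cprod(a, b, cross);         cross = a x b
 *   mmul(R, S, RS);             RS    = R . S  (RS must differ from R and S)
 *   x = norm(a);                |a|, using the table-optimized 1/sqrt
 */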
#include <math.h>

#include "types/simple.h"
#include "typedefs.h"
#include "sysstuff.h"
#include "gmx_fatal.h"
#include "mpelogging.h"
#if 0
} /* avoid screwing up indentation */
#endif
/* Bit-field layout of an IEEE-754 single-precision number, used to
 * index the software invsqrt lookup tables. */
#define EXP_LSB         0x00800000
#define EXP_MASK        0x7f800000
#define EXP_SHIFT       23
#define FRACT_MASK      0x007fffff
#define FRACT_SIZE      11              /* significant part of fraction */
#define FRACT_SHIFT     (EXP_SHIFT-FRACT_SIZE)
#define EXP_ADDR(val)   (((val)&EXP_MASK)>>EXP_SHIFT)
#define FRACT_ADDR(val) (((val)&(FRACT_MASK|EXP_LSB))>>FRACT_SHIFT)

#define PR_VEC(a)       a[XX],a[YY],a[ZZ]
#ifdef GMX_SOFTWARE_INVSQRT
extern const unsigned int *  gmx_invsqrt_exptab;
extern const unsigned int *  gmx_invsqrt_fracttab;
#endif

/* Gives access to the bit pattern of a single-precision float,
 * as needed by the table-driven invsqrt below. */
typedef union
{
  unsigned int bval;
  float        fval;
} t_convert;
#ifdef GMX_SOFTWARE_INVSQRT
static real gmx_invsqrt(real x)
{
  const real   half=0.5;
  const real   three=3.0;
  t_convert    result,bit_pattern;
  unsigned int exp,fract;
  real         lu;
  real         y;
#ifdef GMX_DOUBLE
  real         y2;
#endif

  /* Look up an initial estimate of 1/sqrt(x) from the exponent and
   * fraction of the bit pattern, then refine it with Newton-Raphson. */
  bit_pattern.fval=x;
  exp   = EXP_ADDR(bit_pattern.bval);
  fract = FRACT_ADDR(bit_pattern.bval);
  result.bval=gmx_invsqrt_exptab[exp] | gmx_invsqrt_fracttab[fract];
  lu    = result.fval;

  y=(half*lu*(three-((x*lu)*lu)));
#ifdef GMX_DOUBLE
  y2=(half*y*(three-((x*y)*y)));

  return y2;                    /* 10 Flops */
#else
  return y;                     /* 5  Flops */
#endif
}
#endif /* gmx_invsqrt */
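/* For reference, the refinement step used above is the Newton-Raphson
 * iteration for f(y) = 1/y^2 - x, whose root is y = 1/sqrt(x):
 *
 *   y_{n+1} = y_n*(3 - x*y_n*y_n)/2
 *
 * Each iteration roughly doubles the number of correct bits, which is why
 * single precision takes one step after the table lookup and double
 * precision takes two (the "5 Flops" / "10 Flops" branches). */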
#ifdef GMX_POWERPC_SQRT
static real gmx_invsqrt(real x)
{
  const real   half=0.5;
  const real   three=3.0;
  t_convert    result,bit_pattern;
  unsigned int exp,fract;
  real         lu;
  real         y;
#ifdef GMX_DOUBLE
  real         y2;
#endif

  /* Hardware estimate of 1/sqrt(x), refined with Newton-Raphson */
  lu = __frsqrte((double)x);

  y=(half*lu*(three-((x*lu)*lu)));

#if (GMX_POWERPC_SQRT==2)
  /* Extra iteration required */
  y=(half*y*(three-((x*y)*y)));
#endif

#ifdef GMX_DOUBLE
  y2=(half*y*(three-((x*y)*y)));

  return y2;                    /* 10 Flops */
#else
  return y;                     /* 5  Flops */
#endif
}
#endif /* powerpc_invsqrt */
/* Plain scalar version of 1/sqrt(x) */
#define gmx_invsqrt(x) (1.0f/sqrt(x))
static real sqr(real x)
{
  return (x*x);
}

static inline double dsqr(double x)
{
  return (x*x);
}
/* Maclaurin series for sinh(x)/x, useful for NH chains and MTTK pressure control.
 * Here we compute it to 10th order, which might be overkill: 8th is probably
 * enough, but it is not very much more expensive. */

static inline real series_sinhx(real x)
{
  real x2 = x*x;

  return (1 + (x2/6.0)*(1 + (x2/20.0)*(1 + (x2/42.0)*(1 + (x2/72.0)*(1 + (x2/110.0))))));
}
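/* The nested form above is the Horner factorization of the Maclaurin series
 *
 *   sinh(x)/x = 1 + x^2/3! + x^4/5! + x^6/7! + x^8/9! + x^10/11! + ...
 *
 * where each nesting factor is x^2/(2k*(2k+1)), giving the denominators
 * 6, 20, 42, 72 and 110. */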
extern void vecinvsqrt(real in[],real out[],int n);
/* Perform out[i]=1.0/sqrt(in[i]) for n elements */

extern void vecrecip(real in[],real out[],int n);
/* Perform out[i]=1.0/(in[i]) for n elements */
/* Note: If you need a fast version of vecinvsqrt
 * and/or vecrecip, call detectcpu() and run the SSE/3DNow/SSE2/Altivec
 * versions if your hardware supports it.
 *
 * To use those routines, your memory HAS TO BE CACHE-ALIGNED.
 * Start by allocating 31 bytes more than you need, put
 * this in a temp variable (e.g. _buf, so you can free it later), and
 * create your aligned array buf with
 *
 *   buf=(real *) ( ( (unsigned long int)_buf + 31 ) & (~0x1f) );
 */
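/* A minimal sketch of the alignment idiom described above (illustrative only;
 * the length n and the _in/_out/in/out names are placeholders):
 *
 *   real *_in  = malloc(n*sizeof(real)+31);
 *   real *_out = malloc(n*sizeof(real)+31);
 *   real *in   = (real *) ( ( (unsigned long int)_in  + 31 ) & (~0x1f) );
 *   real *out  = (real *) ( ( (unsigned long int)_out + 31 ) & (~0x1f) );
 *
 *   ... fill in[0..n-1] ...
 *   vecinvsqrt(in,out,n);
 *
 *   free(_in);  free(_out);       free the original, unaligned pointers
 */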
static inline void rvec_add(const rvec a,const rvec b,rvec c)
{
  c[XX]=a[XX]+b[XX];
  c[YY]=a[YY]+b[YY];
  c[ZZ]=a[ZZ]+b[ZZ];
}

static inline void dvec_add(const dvec a,const dvec b,dvec c)
{
  c[XX]=a[XX]+b[XX];
  c[YY]=a[YY]+b[YY];
  c[ZZ]=a[ZZ]+b[ZZ];
}

static inline void ivec_add(const ivec a,const ivec b,ivec c)
{
  c[XX]=a[XX]+b[XX];
  c[YY]=a[YY]+b[YY];
  c[ZZ]=a[ZZ]+b[ZZ];
}

static inline void rvec_inc(rvec a,const rvec b)
{
  a[XX]+=b[XX];
  a[YY]+=b[YY];
  a[ZZ]+=b[ZZ];
}

static inline void dvec_inc(dvec a,const dvec b)
{
  a[XX]+=b[XX];
  a[YY]+=b[YY];
  a[ZZ]+=b[ZZ];
}

static inline void rvec_sub(const rvec a,const rvec b,rvec c)
{
  c[XX]=a[XX]-b[XX];
  c[YY]=a[YY]-b[YY];
  c[ZZ]=a[ZZ]-b[ZZ];
}

static inline void dvec_sub(const dvec a,const dvec b,dvec c)
{
  c[XX]=a[XX]-b[XX];
  c[YY]=a[YY]-b[YY];
  c[ZZ]=a[ZZ]-b[ZZ];
}

static inline void rvec_dec(rvec a,const rvec b)
{
  a[XX]-=b[XX];
  a[YY]-=b[YY];
  a[ZZ]-=b[ZZ];
}

static inline void copy_rvec(const rvec a,rvec b)
{
  b[XX]=a[XX];
  b[YY]=a[YY];
  b[ZZ]=a[ZZ];
}

static inline void copy_rvecn(rvec *a,rvec *b,int startn,int endn)
{
  int i;

  for (i=startn;i<endn;i++) {
    copy_rvec(a[i],b[i]);
  }
}

static inline void copy_dvec(const dvec a,dvec b)
{
  b[XX]=a[XX];
  b[YY]=a[YY];
  b[ZZ]=a[ZZ];
}

static inline void copy_ivec(const ivec a,ivec b)
{
  b[XX]=a[XX];
  b[YY]=a[YY];
  b[ZZ]=a[ZZ];
}

static inline void ivec_sub(const ivec a,const ivec b,ivec c)
{
  c[XX]=a[XX]-b[XX];
  c[YY]=a[YY]-b[YY];
  c[ZZ]=a[ZZ]-b[ZZ];
}
static inline void copy_mat(matrix a,matrix b)
{
  copy_rvec(a[XX],b[XX]);
  copy_rvec(a[YY],b[YY]);
  copy_rvec(a[ZZ],b[ZZ]);
}
static inline void svmul(real a,const rvec v1,rvec v2)
{
  v2[XX]=a*v1[XX];
  v2[YY]=a*v1[YY];
  v2[ZZ]=a*v1[ZZ];
}

static inline void dsvmul(double a,const dvec v1,dvec v2)
{
  v2[XX]=a*v1[XX];
  v2[YY]=a*v1[YY];
  v2[ZZ]=a*v1[ZZ];
}
static inline real distance2(const rvec v1,const rvec v2)
{
  return sqr(v2[XX]-v1[XX]) + sqr(v2[YY]-v1[YY]) + sqr(v2[ZZ]-v1[ZZ]);
}
static inline void clear_rvec(rvec a)
{
  /* The ibm compiler has problems with inlining this
   * when we use a const real variable
   */
  a[XX]=0.0;
  a[YY]=0.0;
  a[ZZ]=0.0;
}

static inline void clear_dvec(dvec a)
{
  /* The ibm compiler has problems with inlining this
   * when we use a const real variable
   */
  a[XX]=0.0;
  a[YY]=0.0;
  a[ZZ]=0.0;
}

static inline void clear_ivec(ivec a)
{
  a[XX]=0;
  a[YY]=0;
  a[ZZ]=0;
}

static inline void clear_rvecs(int n,rvec v[])
{
/*  memset(v[0],0,DIM*n*sizeof(v[0][0])); */
  int i;

  GMX_MPE_LOG(ev_clear_rvecs_start);

  for(i=0; (i<n); i++) {
    clear_rvec(v[i]);
  }

  GMX_MPE_LOG(ev_clear_rvecs_finish);
}

static inline void clear_mat(matrix a)
{
/*  memset(a[0],0,DIM*DIM*sizeof(a[0][0])); */

  const real nul=0.0;

  a[XX][XX]=a[XX][YY]=a[XX][ZZ]=nul;
  a[YY][XX]=a[YY][YY]=a[YY][ZZ]=nul;
  a[ZZ][XX]=a[ZZ][YY]=a[ZZ][ZZ]=nul;
}
static inline real iprod(const rvec a,const rvec b)
{
  return (a[XX]*b[XX]+a[YY]*b[YY]+a[ZZ]*b[ZZ]);
}

static inline double diprod(const dvec a,const dvec b)
{
  return (a[XX]*b[XX]+a[YY]*b[YY]+a[ZZ]*b[ZZ]);
}

static inline int iiprod(const ivec a,const ivec b)
{
  return (a[XX]*b[XX]+a[YY]*b[YY]+a[ZZ]*b[ZZ]);
}

static inline real norm2(const rvec a)
{
  return a[XX]*a[XX]+a[YY]*a[YY]+a[ZZ]*a[ZZ];
}

static inline double dnorm2(const dvec a)
{
  return a[XX]*a[XX]+a[YY]*a[YY]+a[ZZ]*a[ZZ];
}

static inline real norm(const rvec a)
{
  return (real)sqrt(a[XX]*a[XX]+a[YY]*a[YY]+a[ZZ]*a[ZZ]);
}

static inline double dnorm(const dvec a)
{
  return sqrt(a[XX]*a[XX]+a[YY]*a[YY]+a[ZZ]*a[ZZ]);
}
/*
 * Do _not_ use these routines to calculate the angle between two vectors
 * as acos(cos_angle(u,v)). While it might seem obvious, the acos function
 * is very flat close to -1 and 1, which will lead to accuracy loss.
 * Instead, use the new gmx_angle() function directly.
 */
static inline real cos_angle(const rvec a,const rvec b)
{
  /*
   *                  ax*bx + ay*by + az*bz
   * cos-vec (a,b) =  ---------------------
   *                      ||a|| * ||b||
   */
  real   cosval;
  int    m;
  double aa,bb,ip,ipa,ipb,ipab; /* For accuracy these must be double! */

  ip=ipa=ipb=0.0;
  for(m=0; (m<DIM); m++) {              /* 18 */
    aa   = a[m];
    bb   = b[m];
    ip  += aa*bb;
    ipa += aa*aa;
    ipb += bb*bb;
  }
  ipab = ipa*ipb;
  if (ipab > 0) {
    cosval = ip*gmx_invsqrt(ipab);      /*  7 */
  } else {
    cosval = 1;
  }
  /* Clamp against rounding pushing |cosval| just above 1 */
  if (cosval > 1.0) {
    return 1.0;
  }
  if (cosval < -1.0) {
    return -1.0;
  }

  return cosval;
}
/*
 * Do _not_ use these routines to calculate the angle between two vectors
 * as acos(cos_angle(u,v)). While it might seem obvious, the acos function
 * is very flat close to -1 and 1, which will lead to accuracy loss.
 * Instead, use the new gmx_angle() function directly.
 */
static inline real cos_angle_no_table(const rvec a,const rvec b)
{
  /* This version does not need the invsqrt lookup table */
  real   cosval;
  int    m;
  double aa,bb,ip,ipa,ipb; /* For accuracy these must be double! */

  ip=ipa=ipb=0.0;
  for(m=0; (m<DIM); m++) {              /* 18 */
    aa   = a[m];
    bb   = b[m];
    ip  += aa*bb;
    ipa += aa*aa;
    ipb += bb*bb;
  }
  cosval=ip/sqrt(ipa*ipb);              /* 12 */
  /* Clamp against rounding pushing |cosval| just above 1 */
  if (cosval > 1.0) {
    return 1.0;
  }
  if (cosval < -1.0) {
    return -1.0;
  }

  return cosval;
}
static inline void cprod(const rvec a,const rvec b,rvec c)
{
  c[XX]=a[YY]*b[ZZ]-a[ZZ]*b[YY];
  c[YY]=a[ZZ]*b[XX]-a[XX]*b[ZZ];
  c[ZZ]=a[XX]*b[YY]-a[YY]*b[XX];
}

static inline void dcprod(const dvec a,const dvec b,dvec c)
{
  c[XX]=a[YY]*b[ZZ]-a[ZZ]*b[YY];
  c[YY]=a[ZZ]*b[XX]-a[XX]*b[ZZ];
  c[ZZ]=a[XX]*b[YY]-a[YY]*b[XX];
}
/* This routine calculates the angle between a & b without any loss of accuracy close to 0/PI.
 * If you only need cos(theta), use the cos_angle() routines to save a few cycles.
 * This routine is faster than it might appear, since atan2 is accelerated on many CPUs (e.g. x86).
 */
static inline real gmx_angle(const rvec a, const rvec b)
{
  rvec w;
  real wlen,s;

  cprod(a,b,w);

  wlen = norm(w);
  s    = iprod(a,b);

  return atan2(wlen,s);
}
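/* Illustrative comparison of the two interfaces above (a sketch only):
 *
 *   real c     = cos_angle(u, v);     fine when only cos(theta) is needed
 *   real theta = gmx_angle(u, v);     accurate even for theta near 0 or PI
 *
 * acos(cos_angle(u,v)) loses precision near theta = 0 and theta = PI because
 * acos is very flat close to +/-1; gmx_angle() avoids this by applying atan2
 * to the cross and inner products. */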
static inline void mmul_ur0(matrix a,matrix b,matrix dest)
{
  dest[XX][XX]=a[XX][XX]*b[XX][XX];
  dest[XX][YY]=0.0;
  dest[XX][ZZ]=0.0;
  dest[YY][XX]=a[YY][XX]*b[XX][XX]+a[YY][YY]*b[YY][XX];
  dest[YY][YY]=                    a[YY][YY]*b[YY][YY];
  dest[YY][ZZ]=0.0;
  dest[ZZ][XX]=a[ZZ][XX]*b[XX][XX]+a[ZZ][YY]*b[YY][XX]+a[ZZ][ZZ]*b[ZZ][XX];
  dest[ZZ][YY]=                    a[ZZ][YY]*b[YY][YY]+a[ZZ][ZZ]*b[ZZ][YY];
  dest[ZZ][ZZ]=                                        a[ZZ][ZZ]*b[ZZ][ZZ];
}
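/* For reference, a GROMACS box matrix (the typical argument of the _ur0
 * routines) stores the box vectors as rows and has only zeros in the
 * upper-right part:
 *
 *         ( ax   0    0  )
 *   box = ( bx   by   0  )
 *         ( cx   cy   cz )
 *
 * mmul_ur0() exploits this sparsity to skip the terms that are always zero. */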
static inline void mmul(matrix a,matrix b,matrix dest)
{
  dest[XX][XX]=a[XX][XX]*b[XX][XX]+a[XX][YY]*b[YY][XX]+a[XX][ZZ]*b[ZZ][XX];
  dest[YY][XX]=a[YY][XX]*b[XX][XX]+a[YY][YY]*b[YY][XX]+a[YY][ZZ]*b[ZZ][XX];
  dest[ZZ][XX]=a[ZZ][XX]*b[XX][XX]+a[ZZ][YY]*b[YY][XX]+a[ZZ][ZZ]*b[ZZ][XX];
  dest[XX][YY]=a[XX][XX]*b[XX][YY]+a[XX][YY]*b[YY][YY]+a[XX][ZZ]*b[ZZ][YY];
  dest[YY][YY]=a[YY][XX]*b[XX][YY]+a[YY][YY]*b[YY][YY]+a[YY][ZZ]*b[ZZ][YY];
  dest[ZZ][YY]=a[ZZ][XX]*b[XX][YY]+a[ZZ][YY]*b[YY][YY]+a[ZZ][ZZ]*b[ZZ][YY];
  dest[XX][ZZ]=a[XX][XX]*b[XX][ZZ]+a[XX][YY]*b[YY][ZZ]+a[XX][ZZ]*b[ZZ][ZZ];
  dest[YY][ZZ]=a[YY][XX]*b[XX][ZZ]+a[YY][YY]*b[YY][ZZ]+a[YY][ZZ]*b[ZZ][ZZ];
  dest[ZZ][ZZ]=a[ZZ][XX]*b[XX][ZZ]+a[ZZ][YY]*b[YY][ZZ]+a[ZZ][ZZ]*b[ZZ][ZZ];
}
static inline void transpose(matrix src,matrix dest)
{
  dest[XX][XX]=src[XX][XX];
  dest[YY][XX]=src[XX][YY];
  dest[ZZ][XX]=src[XX][ZZ];
  dest[XX][YY]=src[YY][XX];
  dest[YY][YY]=src[YY][YY];
  dest[ZZ][YY]=src[YY][ZZ];
  dest[XX][ZZ]=src[ZZ][XX];
  dest[YY][ZZ]=src[ZZ][YY];
  dest[ZZ][ZZ]=src[ZZ][ZZ];
}
static inline void tmmul(matrix a,matrix b,matrix dest)
{
  /* Computes dest=mmul(transpose(a),b,dest) - used in do_pr_pcoupl */
  dest[XX][XX]=a[XX][XX]*b[XX][XX]+a[YY][XX]*b[YY][XX]+a[ZZ][XX]*b[ZZ][XX];
  dest[XX][YY]=a[XX][XX]*b[XX][YY]+a[YY][XX]*b[YY][YY]+a[ZZ][XX]*b[ZZ][YY];
  dest[XX][ZZ]=a[XX][XX]*b[XX][ZZ]+a[YY][XX]*b[YY][ZZ]+a[ZZ][XX]*b[ZZ][ZZ];
  dest[YY][XX]=a[XX][YY]*b[XX][XX]+a[YY][YY]*b[YY][XX]+a[ZZ][YY]*b[ZZ][XX];
  dest[YY][YY]=a[XX][YY]*b[XX][YY]+a[YY][YY]*b[YY][YY]+a[ZZ][YY]*b[ZZ][YY];
  dest[YY][ZZ]=a[XX][YY]*b[XX][ZZ]+a[YY][YY]*b[YY][ZZ]+a[ZZ][YY]*b[ZZ][ZZ];
  dest[ZZ][XX]=a[XX][ZZ]*b[XX][XX]+a[YY][ZZ]*b[YY][XX]+a[ZZ][ZZ]*b[ZZ][XX];
  dest[ZZ][YY]=a[XX][ZZ]*b[XX][YY]+a[YY][ZZ]*b[YY][YY]+a[ZZ][ZZ]*b[ZZ][YY];
  dest[ZZ][ZZ]=a[XX][ZZ]*b[XX][ZZ]+a[YY][ZZ]*b[YY][ZZ]+a[ZZ][ZZ]*b[ZZ][ZZ];
}
static inline void mtmul(matrix a,matrix b,matrix dest)
{
  /* Computes dest=mmul(a,transpose(b),dest) - used in do_pr_pcoupl */
  dest[XX][XX]=a[XX][XX]*b[XX][XX]+a[XX][YY]*b[XX][YY]+a[XX][ZZ]*b[XX][ZZ];
  dest[XX][YY]=a[XX][XX]*b[YY][XX]+a[XX][YY]*b[YY][YY]+a[XX][ZZ]*b[YY][ZZ];
  dest[XX][ZZ]=a[XX][XX]*b[ZZ][XX]+a[XX][YY]*b[ZZ][YY]+a[XX][ZZ]*b[ZZ][ZZ];
  dest[YY][XX]=a[YY][XX]*b[XX][XX]+a[YY][YY]*b[XX][YY]+a[YY][ZZ]*b[XX][ZZ];
  dest[YY][YY]=a[YY][XX]*b[YY][XX]+a[YY][YY]*b[YY][YY]+a[YY][ZZ]*b[YY][ZZ];
  dest[YY][ZZ]=a[YY][XX]*b[ZZ][XX]+a[YY][YY]*b[ZZ][YY]+a[YY][ZZ]*b[ZZ][ZZ];
  dest[ZZ][XX]=a[ZZ][XX]*b[XX][XX]+a[ZZ][YY]*b[XX][YY]+a[ZZ][ZZ]*b[XX][ZZ];
  dest[ZZ][YY]=a[ZZ][XX]*b[YY][XX]+a[ZZ][YY]*b[YY][YY]+a[ZZ][ZZ]*b[YY][ZZ];
  dest[ZZ][ZZ]=a[ZZ][XX]*b[ZZ][XX]+a[ZZ][YY]*b[ZZ][YY]+a[ZZ][ZZ]*b[ZZ][ZZ];
}
static inline real det(matrix a)
{
  return ( a[XX][XX]*(a[YY][YY]*a[ZZ][ZZ]-a[ZZ][YY]*a[YY][ZZ])
          -a[YY][XX]*(a[XX][YY]*a[ZZ][ZZ]-a[ZZ][YY]*a[XX][ZZ])
          +a[ZZ][XX]*(a[XX][YY]*a[YY][ZZ]-a[YY][YY]*a[XX][ZZ]));
}
static inline void m_add(matrix a,matrix b,matrix dest)
{
  dest[XX][XX]=a[XX][XX]+b[XX][XX];
  dest[XX][YY]=a[XX][YY]+b[XX][YY];
  dest[XX][ZZ]=a[XX][ZZ]+b[XX][ZZ];
  dest[YY][XX]=a[YY][XX]+b[YY][XX];
  dest[YY][YY]=a[YY][YY]+b[YY][YY];
  dest[YY][ZZ]=a[YY][ZZ]+b[YY][ZZ];
  dest[ZZ][XX]=a[ZZ][XX]+b[ZZ][XX];
  dest[ZZ][YY]=a[ZZ][YY]+b[ZZ][YY];
  dest[ZZ][ZZ]=a[ZZ][ZZ]+b[ZZ][ZZ];
}

static inline void m_sub(matrix a,matrix b,matrix dest)
{
  dest[XX][XX]=a[XX][XX]-b[XX][XX];
  dest[XX][YY]=a[XX][YY]-b[XX][YY];
  dest[XX][ZZ]=a[XX][ZZ]-b[XX][ZZ];
  dest[YY][XX]=a[YY][XX]-b[YY][XX];
  dest[YY][YY]=a[YY][YY]-b[YY][YY];
  dest[YY][ZZ]=a[YY][ZZ]-b[YY][ZZ];
  dest[ZZ][XX]=a[ZZ][XX]-b[ZZ][XX];
  dest[ZZ][YY]=a[ZZ][YY]-b[ZZ][YY];
  dest[ZZ][ZZ]=a[ZZ][ZZ]-b[ZZ][ZZ];
}

static inline void msmul(matrix m1,real r1,matrix dest)
{
  dest[XX][XX]=r1*m1[XX][XX];
  dest[XX][YY]=r1*m1[XX][YY];
  dest[XX][ZZ]=r1*m1[XX][ZZ];
  dest[YY][XX]=r1*m1[YY][XX];
  dest[YY][YY]=r1*m1[YY][YY];
  dest[YY][ZZ]=r1*m1[YY][ZZ];
  dest[ZZ][XX]=r1*m1[ZZ][XX];
  dest[ZZ][YY]=r1*m1[ZZ][YY];
  dest[ZZ][ZZ]=r1*m1[ZZ][ZZ];
}
static inline void m_inv_ur0(matrix src,matrix dest)
{
  double tmp = src[XX][XX]*src[YY][YY]*src[ZZ][ZZ];
  if (fabs(tmp) <= 100*GMX_REAL_MIN)
    gmx_fatal(FARGS,"Can not invert matrix, determinant is zero");

  dest[XX][XX] = 1/src[XX][XX];
  dest[YY][YY] = 1/src[YY][YY];
  dest[ZZ][ZZ] = 1/src[ZZ][ZZ];
  dest[ZZ][XX] = (src[YY][XX]*src[ZZ][YY]*dest[YY][YY]
                  - src[ZZ][XX])*dest[XX][XX]*dest[ZZ][ZZ];
  dest[YY][XX] = -src[YY][XX]*dest[XX][XX]*dest[YY][YY];
  dest[ZZ][YY] = -src[ZZ][YY]*dest[YY][YY]*dest[ZZ][ZZ];
  dest[XX][YY] = 0.0;
  dest[XX][ZZ] = 0.0;
  dest[YY][ZZ] = 0.0;
}
static inline void m_inv(matrix src,matrix dest)
{
  const real smallreal = (real)1.0e-24;
  const real largereal = (real)1.0e24;
  real       deter,c,fc;

  /* dest = adjugate(src)/det(src) */
  deter = det(src);
  c     = (real)1.0/deter;
  fc    = (real)fabs(c);

  if ((fc <= smallreal) || (fc >= largereal))
    gmx_fatal(FARGS,"Can not invert matrix, determinant = %e",deter);

  dest[XX][XX]= c*(src[YY][YY]*src[ZZ][ZZ]-src[ZZ][YY]*src[YY][ZZ]);
  dest[XX][YY]=-c*(src[XX][YY]*src[ZZ][ZZ]-src[ZZ][YY]*src[XX][ZZ]);
  dest[XX][ZZ]= c*(src[XX][YY]*src[YY][ZZ]-src[YY][YY]*src[XX][ZZ]);
  dest[YY][XX]=-c*(src[YY][XX]*src[ZZ][ZZ]-src[ZZ][XX]*src[YY][ZZ]);
  dest[YY][YY]= c*(src[XX][XX]*src[ZZ][ZZ]-src[ZZ][XX]*src[XX][ZZ]);
  dest[YY][ZZ]=-c*(src[XX][XX]*src[YY][ZZ]-src[YY][XX]*src[XX][ZZ]);
  dest[ZZ][XX]= c*(src[YY][XX]*src[ZZ][YY]-src[ZZ][XX]*src[YY][YY]);
  dest[ZZ][YY]=-c*(src[XX][XX]*src[ZZ][YY]-src[ZZ][XX]*src[XX][YY]);
  dest[ZZ][ZZ]= c*(src[XX][XX]*src[YY][YY]-src[YY][XX]*src[XX][YY]);
}
static inline void mvmul(matrix a,const rvec src,rvec dest)
{
  dest[XX]=a[XX][XX]*src[XX]+a[XX][YY]*src[YY]+a[XX][ZZ]*src[ZZ];
  dest[YY]=a[YY][XX]*src[XX]+a[YY][YY]*src[YY]+a[YY][ZZ]*src[ZZ];
  dest[ZZ]=a[ZZ][XX]*src[XX]+a[ZZ][YY]*src[YY]+a[ZZ][ZZ]*src[ZZ];
}
static inline void mvmul_ur0(matrix a,const rvec src,rvec dest)
{
  dest[ZZ]=a[ZZ][XX]*src[XX]+a[ZZ][YY]*src[YY]+a[ZZ][ZZ]*src[ZZ];
  dest[YY]=a[YY][XX]*src[XX]+a[YY][YY]*src[YY];
  dest[XX]=a[XX][XX]*src[XX];
}
static inline void tmvmul_ur0(matrix a,const rvec src,rvec dest)
{
  dest[XX]=a[XX][XX]*src[XX]+a[YY][XX]*src[YY]+a[ZZ][XX]*src[ZZ];
  dest[YY]=                  a[YY][YY]*src[YY]+a[ZZ][YY]*src[ZZ];
  dest[ZZ]=                                    a[ZZ][ZZ]*src[ZZ];
}
static inline void unitv(const rvec src,rvec dest)
{
  real linv;

  linv=gmx_invsqrt(norm2(src));
  dest[XX]=linv*src[XX];
  dest[YY]=linv*src[YY];
  dest[ZZ]=linv*src[ZZ];
}
static inline void unitv_no_table(const rvec src,rvec dest)
{
  real linv;

  linv=1.0/sqrt(norm2(src));
  dest[XX]=linv*src[XX];
  dest[YY]=linv*src[YY];
  dest[ZZ]=linv*src[ZZ];
}
static void calc_lll(rvec box,rvec lll)
{
  lll[XX] = 2.0*M_PI/box[XX];
  lll[YY] = 2.0*M_PI/box[YY];
  lll[ZZ] = 2.0*M_PI/box[ZZ];
}

static inline real trace(matrix m)
{
  return (m[XX][XX]+m[YY][YY]+m[ZZ][ZZ]);
}
static inline real _divide(real a,real b,const char *file,int line)
{
  if (fabs(b) <= GMX_REAL_MIN)
    gmx_fatal(FARGS,"Dividing by zero, file %s, line %d",file,line);

  return a/b;
}
static inline int _mod(int a,int b,char *file,int line)
{
  if (b == 0)
    gmx_fatal(FARGS,"Modulo zero, file %s, line %d",file,line);

  return a % b;
}
/* Operations on multidimensional rvecs, used e.g. in edsam.c */
static void m_rveccopy(int dim, rvec *a, rvec *b)
{
  int i;

  for (i=0; i<dim; i++)
    copy_rvec(a[i],b[i]);
}
/* Compute matrix vectors from base vectors and angles */
static void matrix_convert(matrix box, rvec vec, rvec angle)
{
  /* Convert the angles from degrees to radians (in place) */
  svmul(DEG2RAD,angle,angle);
  box[XX][XX] = vec[XX];
  box[YY][XX] = vec[YY]*cos(angle[ZZ]);
  box[YY][YY] = vec[YY]*sin(angle[ZZ]);
  box[ZZ][XX] = vec[ZZ]*cos(angle[YY]);
  box[ZZ][YY] = vec[ZZ]
    *(cos(angle[XX])-cos(angle[YY])*cos(angle[ZZ]))/sin(angle[ZZ]);
  box[ZZ][ZZ] = sqrt(sqr(vec[ZZ])
                     -box[ZZ][XX]*box[ZZ][XX]-box[ZZ][YY]*box[ZZ][YY]);
}
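/* The construction above follows the standard crystallographic convention:
 * vec = (|a|,|b|,|c|) are the box-vector lengths and angle = (alpha,beta,gamma)
 * the angles between (b,c), (a,c) and (a,b), so that
 *
 *   a = (|a|, 0, 0)
 *   b = (|b| cos(gamma), |b| sin(gamma), 0)
 *   c = (|c| cos(beta),  |c| (cos(alpha) - cos(beta) cos(gamma))/sin(gamma), cz)
 *
 * with cz chosen so that |c| is reproduced. */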
#define divide(a,b) _divide((a),(b),__FILE__,__LINE__)
#define mod(a,b)    _mod((a),(b),__FILE__,__LINE__)
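/* Example usage of the checked wrappers (illustrative only):
 *
 *   real ratio = divide(num, den);    fatal error naming __FILE__/__LINE__
 *                                     if den is (numerically) zero
 *   int  idx   = mod(step, nsteps);   fatal error if nsteps == 0
 */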