#include "symmetries.h"
#include "tiny_inlines.h"
#include "indexing.h"
#include "quaternions.h"
#include "qpms_error.h"
#include <math.h>    // fabs, M_PI
#include <complex.h> // complex, I, cexp, creal, cimag

// TODO at some point, maybe support also other norms.
// (perhaps just use qpms_normalisation_t_factor() at the right places)
static inline void check_norm_compat(const qpms_vswf_set_spec_t *s)
{
  switch (s->norm & QPMS_NORMALISATION_NORM_BITS) {
    case QPMS_NORMALISATION_NORM_POWER:
      break;
    case QPMS_NORMALISATION_NORM_SPHARM:
      break;
    default:
      QPMS_NOT_IMPLEMENTED("At the moment, only the power and spherical harmonics normalisations are implemented.");
  }
}

static inline void ONLY_EIMF_IMPLEMENTED(const qpms_normalisation_t norm)
{
  if (norm & QPMS_NORMALISATION_SPHARM_REAL)
    QPMS_NOT_IMPLEMENTED("Support for real spherical harmonics not implemented yet.");
}

// Used in the functions below to ensure memory allocation and to check bspec validity.
static inline complex double *ensure_alloc(complex double *target,
    const qpms_vswf_set_spec_t *bspec) {
  check_norm_compat(bspec);
  const size_t n = bspec->n;
  if (target == NULL)
    QPMS_CRASHING_MALLOC(target, n * n * sizeof(complex double));
  return target;
}

// Dense matrix representation of the z-flip (reflection z -> -z) operation.
complex double *qpms_zflip_uvswi_dense(
    complex double *target,
    const qpms_vswf_set_spec_t *bspec)
{
  check_norm_compat(bspec);
  target = ensure_alloc(target, bspec);
  const size_t n = bspec->n;

  for (size_t row = 0; row < n; row++) {
    qpms_vswf_type_t rt;
    qpms_l_t rl;
    qpms_m_t rm;
    qpms_uvswfi2tmn(bspec->ilist[row], &rt, &rm, &rl);
    for (size_t col = 0; col < n; col++) {
      qpms_vswf_type_t ct;
      qpms_l_t cl;
      qpms_m_t cm;
      QPMS_ENSURE_SUCCESS(qpms_uvswfi2tmn(bspec->ilist[col], &ct, &cm, &cl));
      if (rl == cl && rm == cm && rt == ct)
        switch(rt) {
          case QPMS_VSWF_ELECTRIC:
          case QPMS_VSWF_LONGITUDINAL:
            target[n*row + col] = min1pow(cm + cl);
            break;
          case QPMS_VSWF_MAGNETIC:
            target[n*row + col] = -min1pow(cm + cl);
            break;
          default:
            QPMS_INVALID_ENUM(rt);
        }
      else target[n*row + col] = 0;
    }
  }
  return target;
}
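
/* Usage sketch (illustrative, not part of the library build): given a basis
 * specification `bspec` constructed elsewhere (assumed available here),
 * passing NULL as `target` makes the function allocate the n*n matrix itself;
 * the caller then owns the buffer (free() assuming QPMS_CRASHING_MALLOC wraps
 * plain malloc). The same pattern applies to qpms_yflip_uvswi_dense() and
 * qpms_xflip_uvswi_dense() below.
 *
 *   complex double *M = qpms_zflip_uvswi_dense(NULL, bspec);
 *   // ... use M[n*row + col] as the (row, col) element ...
 *   free(M);
 */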

// Dense matrix representation of the y-flip (reflection y -> -y) operation.
complex double *qpms_yflip_uvswi_dense(
    complex double *target,
    const qpms_vswf_set_spec_t *bspec)
{
  check_norm_compat(bspec);
  ONLY_EIMF_IMPLEMENTED(bspec->norm);
  target = ensure_alloc(target, bspec);
  const size_t n = bspec->n;

  for (size_t row = 0; row < n; row++) {
    qpms_vswf_type_t rt;
    qpms_l_t rl;
    qpms_m_t rm;
    qpms_uvswfi2tmn(bspec->ilist[row], &rt, &rm, &rl);
    for (size_t col = 0; col < n; col++) {
      qpms_vswf_type_t ct;
      qpms_l_t cl;
      qpms_m_t cm;
      QPMS_ENSURE_SUCCESS(qpms_uvswfi2tmn(bspec->ilist[col], &ct, &cm, &cl));
      if (rl == cl && rm == -cm && rt == ct)
        switch(rt) {
          case QPMS_VSWF_ELECTRIC:
          case QPMS_VSWF_LONGITUDINAL:
            target[n*row + col] = min1pow(rm);
            break;
          case QPMS_VSWF_MAGNETIC:
            target[n*row + col] = -min1pow(rm);
            break;
          default:
            QPMS_INVALID_ENUM(rt);
        }
      else target[n*row + col] = 0;
    }
  }
  return target;
}

// Dense matrix representation of the x-flip (reflection x -> -x) operation.
complex double *qpms_xflip_uvswi_dense(
    complex double *target,
    const qpms_vswf_set_spec_t *bspec)
{
  check_norm_compat(bspec);
  ONLY_EIMF_IMPLEMENTED(bspec->norm);
  target = ensure_alloc(target, bspec);
  const size_t n = bspec->n;

  for (size_t row = 0; row < n; row++) {
    qpms_vswf_type_t rt;
    qpms_l_t rl;
    qpms_m_t rm;
    qpms_uvswfi2tmn(bspec->ilist[row], &rt, &rm, &rl);
    for (size_t col = 0; col < n; col++) {
      qpms_vswf_type_t ct;
      qpms_l_t cl;
      qpms_m_t cm;
      QPMS_ENSURE_SUCCESS(qpms_uvswfi2tmn(bspec->ilist[col], &ct, &cm, &cl));
      if (rl == cl && rm == -cm && rt == ct)
        switch(rt) {
          case QPMS_VSWF_ELECTRIC:
          case QPMS_VSWF_LONGITUDINAL:
            target[n*row + col] = 1;
            break;
          case QPMS_VSWF_MAGNETIC:
            target[n*row + col] = -1;
            break;
          default:
            QPMS_INVALID_ENUM(rt);
        }
      else target[n*row + col] = 0;
    }
  }
  return target;
}

// Dense matrix representation of a rotation around the z-axis
complex double *qpms_zrot_uvswi_dense(
    complex double *target, ///< If NULL, a new array is allocated.
    const qpms_vswf_set_spec_t *bspec,
    double phi ///< Rotation angle
    )
{
  QPMS_UNTESTED; // Not sure about the Condon-Shortley phase; don't forget to document it as well.
  check_norm_compat(bspec);
  ONLY_EIMF_IMPLEMENTED(bspec->norm);
  target = ensure_alloc(target, bspec);
  const size_t n = bspec->n;

  for (size_t row = 0; row < n; row++) {
    qpms_vswf_type_t rt;
    qpms_l_t rl;
    qpms_m_t rm;
    qpms_uvswfi2tmn(bspec->ilist[row], &rt, &rm, &rl);
    for (size_t col = 0; col < n; col++) {
      qpms_vswf_type_t ct;
      qpms_l_t cl;
      qpms_m_t cm;
      QPMS_ENSURE_SUCCESS(qpms_uvswfi2tmn(bspec->ilist[col], &ct, &cm, &cl));
      if (rl == cl && rm == cm && rt == ct) // TODO COMPARE WITH PYTHON
        target[n*row + col] = cexp(/* - ? */ I * rm * phi);
      else target[n*row + col] = 0;
    }
  }
  return target;
}

// Dense matrix representation of a "rational" rotation around the z-axis
/* Just for convenience. Corresponds to the angle \f$ \phi = 2\pi w/N \f$. */
complex double *qpms_zrot_rational_uvswi_dense(
    complex double *target, ///< If NULL, a new array is allocated.
    const qpms_vswf_set_spec_t *bspec,
    int N,
    int w
    )
{
  double phi = 2 * M_PI * w / N;
  return qpms_zrot_uvswi_dense(target, bspec, phi);
}
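
/* Example sketch (bspec assumed available, as above): a quarter turn about
 * the z axis corresponds to w = 1, N = 4, i.e. phi = pi/2:
 *
 *   complex double *R = qpms_zrot_rational_uvswi_dense(NULL, bspec, 4, 1);
 *   // ...
 *   free(R);
 */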

// Dense matrix representation of a general (possibly improper) rotation given as a qpms_irot3_t element.
complex double *qpms_irot3_uvswfi_dense(
    complex double *target,
    const qpms_vswf_set_spec_t *bspec,
    const qpms_irot3_t t)
{
  QPMS_UNTESTED; // Not sure about the Condon-Shortley phase; don't forget to document it as well.
  check_norm_compat(bspec);
  ONLY_EIMF_IMPLEMENTED(bspec->norm);
  target = ensure_alloc(target, bspec);
  const size_t n = bspec->n;

  for (size_t row = 0; row < n; row++) {
    qpms_vswf_type_t rt;
    qpms_l_t rl;
    qpms_m_t rm;
    qpms_uvswfi2tmn(bspec->ilist[row], &rt, &rm, &rl);
    for (size_t col = 0; col < n; col++) {
      qpms_vswf_type_t ct;
      qpms_l_t cl;
      qpms_m_t cm;
      QPMS_ENSURE_SUCCESS(qpms_uvswfi2tmn(bspec->ilist[col], &ct, &cm, &cl));
      if (rl == cl && rt == ct)
        // TODO qpms_vswf_irot_elem_from_irot3 might be slow and not too accurate for large l
        target[n*row + col] = // CHECKME rm and cm order
          qpms_vswf_irot_elem_from_irot3(t,
              rl, rm /* CHECKME here */, cm /* and here */,
              rt == QPMS_VSWF_MAGNETIC);
      else target[n*row + col] = 0;
    }
  }
  return target;
}
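
/* Usage sketch: with an (improper) rotation element `op` of type
 * qpms_irot3_t (built using the quaternions.h API; its construction is not
 * shown here), the corresponding dense matrix is obtained the same way:
 *
 *   complex double *U = qpms_irot3_uvswfi_dense(NULL, bspec, op);
 *   // ...
 *   free(U);
 */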

/// Zeroes out doubles whose absolute value is at most atol; returns the number of elements found below the tolerance.
size_t qpms_zero_roundoff_clean(double *arr, size_t nmemb, double atol) {
  size_t changed = 0;
  for(size_t i = 0; i < nmemb; ++i)
    if(fabs(arr[i]) <= atol) {
      arr[i] = 0;
      ++changed;
    }
  return changed;
}

/// Zeroes out real and imaginary parts of complex doubles whose absolute value is at most atol.
size_t qpms_czero_roundoff_clean(complex double *arr, size_t nmemb, double atol) {
  size_t changed = 0;
  for(size_t i = 0; i < nmemb; ++i) {
    if(fabs(creal(arr[i])) <= atol) {
      arr[i] = I*cimag(arr[i]);
      ++changed;
    }
    if(fabs(cimag(arr[i])) <= atol) {
      arr[i] = creal(arr[i]);
      ++changed;
    }
  }
  return changed;
}
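
/* Example sketch: suppress numerical roundoff noise in a freshly computed
 * n*n matrix M, zeroing real and imaginary parts with magnitude below a
 * chosen tolerance (1e-14 here is an arbitrary illustrative value):
 *
 *   qpms_czero_roundoff_clean(M, n*n, 1e-14);
 */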