1 #include "qpms_specfunc.h"
2 #include "qpms_types.h"
3 #include <gsl/gsl_sf_legendre.h>
4 #include <gsl/gsl_math.h>
8 #include "qpms_error.h"
10 // Legendre functions also for negative m, see DLMF 14.9.3
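// Fills target (and target_deriv) with the Legendre functions (and their
// derivatives with respect to the argument) evaluated at x, for l = 1 .. lMax
// and m = -l .. l, indexed by y = qpms_mn2y(m, l). lnorm selects the GSL
// normalisation convention and csphase (+-1) the Condon-Shortley phase; the
// negative-m extension below assumes a normalised convention (cf. DLMF 14.9.3).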
qpms_errno_t qpms_legendre_deriv_y_fill(double *target, double *target_deriv,
        double x, qpms_l_t lMax, gsl_sf_legendre_t lnorm, double csphase)
{
    const size_t n = gsl_sf_legendre_array_n(lMax);
    double *legendre_tmp, *legendre_deriv_tmp;
    QPMS_CRASHING_MALLOC(legendre_tmp, n * sizeof(double));
    QPMS_CRASHING_MALLOC(legendre_deriv_tmp, n * sizeof(double));
    int gsl_errno = gsl_sf_legendre_deriv_array_e(
            lnorm, (size_t)lMax, x, csphase, legendre_tmp, legendre_deriv_tmp);
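    // Reshuffle GSL's (l,m) packing (gsl_sf_legendre_array_index) into the
    // qpms y-index packing (qpms_mn2y), first for the non-negative orders m.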
    for (qpms_l_t l = 1; l <= lMax; ++l)
        for (qpms_m_t m = 0; m <= l; ++m) {
            qpms_y_t y = qpms_mn2y(m, l);
            size_t i = gsl_sf_legendre_array_index(l, m);
            target[y] = legendre_tmp[i];
            target_deriv[y] = legendre_deriv_tmp[i];
        }
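    // Now the negative orders. DLMF 14.9.3 gives
    // P_l^{-m} = (-1)^m (l-m)!/(l+m)! P_l^m; for the normalised functions the
    // factorial ratio is absorbed into the normalisation, so only the parity
    // factor (-1)^m remains (the `factor` below).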
    for (qpms_l_t l = 1; l <= lMax; ++l)
        for (qpms_m_t m = 1; m <= l; ++m) {
            qpms_y_t y = qpms_mn2y(-m, l);
            size_t i = gsl_sf_legendre_array_index(l, m);
            // cf. DLMF 14.9.3, but we're normalised.
            double factor = ((m%2) ? -1 : 1);
            target[y] = factor * legendre_tmp[i];
            target_deriv[y] = factor * legendre_deriv_tmp[i];
        }
    free(legendre_tmp);
    free(legendre_deriv_tmp);
    return gsl_errno; // GSL_SUCCESS == 0 on success
}

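// Convenience wrapper around qpms_legendre_deriv_y_fill(): allocates the two
// output arrays (qpms_lMax2nelem(lMax) doubles each) into *target and
// *dtarget; the caller is responsible for free()ing them.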
qpms_errno_t qpms_legendre_deriv_y_get(double **target, double **dtarget,
        double x, qpms_l_t lMax, gsl_sf_legendre_t lnorm, double csphase)
{
    const qpms_y_t nelem = qpms_lMax2nelem(lMax);
    QPMS_CRASHING_MALLOC(*target, nelem * sizeof(double));
    QPMS_CRASHING_MALLOC(*dtarget, nelem * sizeof(double));
    return qpms_legendre_deriv_y_fill(*target, *dtarget, x, lMax, lnorm, csphase);
}

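// Allocates a qpms_pitau_t and fills its leg, pi and tau arrays for the given
// polar angle theta; release it with qpms_pitau_free().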
qpms_pitau_t qpms_pitau_get(double theta, qpms_l_t lMax, const double csphase)
{
    qpms_pitau_t res;
    const qpms_y_t nelem = qpms_lMax2nelem(lMax);
    QPMS_CRASHING_MALLOC(res.leg, nelem * sizeof(double));
    QPMS_CRASHING_MALLOC(res.pi, nelem * sizeof(double));
    QPMS_CRASHING_MALLOC(res.tau, nelem * sizeof(double));
    qpms_pitau_fill(res.leg, res.pi, res.tau, theta, lMax, csphase);
    res.lMax = lMax;
    return res;
}

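// A minimal usage sketch (error handling omitted; member names as used in
// this file):
//
//     qpms_pitau_t pt = qpms_pitau_get(theta, lMax, -1.);
//     double tau11 = pt.tau[qpms_mn2y(1, 1)];  // entry for m = 1, l = 1
//     qpms_pitau_free(pt);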
qpms_errno_t qpms_pitau_fill(double *target_leg, double *pi, double *tau,
        double theta, qpms_l_t lMax, double csphase)
{
    QPMS_ENSURE(fabs(csphase) == 1, "The csphase argument must be either 1 or -1, not %g.", csphase);
    const qpms_y_t nelem = qpms_lMax2nelem(lMax);

    double ct = cos(theta), st = sin(theta);
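    // At the poles (|cos(theta)| == 1) the generic formulae below would divide
    // by sin(theta) == 0, so the limit values from DLMF 14.8.2 are used
    // instead; there only the m == 0 Legendre entries and the m == +-1
    // pi/tau entries are nonzero.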
    if (1 == fabs(ct)) { // singular case, use DLMF 14.8.2
        if (pi) memset(pi, 0, nelem * sizeof(double));
        if (tau) memset(tau, 0, nelem * sizeof(double));
        if (target_leg) memset(target_leg, 0, nelem * sizeof(double));
        for (qpms_l_t l = 1; l <= lMax; ++l) {
            if (target_leg)
                target_leg[qpms_mn2y(0, l)] = ((l%2) ? ct : 1.) * sqrt((2*l+1) / (4*M_PI*l*(l+1)));
            double fl = 0.25 * sqrt((2*l+1) * M_1_PI);
            int lpar = (l%2) ? -1 : 1;
            if (pi) {
                pi[qpms_mn2y(+1, l)] = -((ct > 0) ? -1 : lpar) * fl * csphase;
                pi[qpms_mn2y(-1, l)] = -((ct > 0) ? -1 : lpar) * fl * csphase;
            }
            if (tau) {
                tau[qpms_mn2y(+1, l)] = ((ct > 0) ? +1 : lpar) * fl * csphase;
                tau[qpms_mn2y(-1, l)] = -((ct > 0) ? +1 : lpar) * fl * csphase;
            }
        }
    }
    else { // cos(theta) in (-1,1), use normal calculation
        double *leg, *legder;
        QPMS_CRASHING_MALLOC(leg, nelem * sizeof(double));
        QPMS_CRASHING_MALLOC(legder, nelem * sizeof(double));
        QPMS_ENSURE_SUCCESS(qpms_legendre_deriv_y_fill(leg, legder, ct, lMax,
                GSL_SF_LEGENDRE_SPHARM, csphase));
        // Multiply by the "power normalisation" factor
        for (qpms_l_t l = 1; l <= lMax; ++l) {
            double prefac = 1. / sqrt(l*(l+1));
            for (qpms_m_t m = -l; m <= l; ++m) {
                leg[qpms_mn2y(m, l)] *= prefac;
                legder[qpms_mn2y(m, l)] *= prefac;
            }
        }
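        // pi_{lm} = m P_{lm}(cos theta) / sin theta, and
        // tau_{lm} = dP_{lm}(cos theta)/dtheta = -sin theta * dP_{lm}/d(cos theta),
        // hence the -st factor applied to the derivative array below.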
        for (qpms_l_t l = 1; l <= lMax; ++l) {
            for (qpms_m_t m = -l; m <= l; ++m) {
                if (pi) pi[qpms_mn2y(m, l)] = m / st * leg[qpms_mn2y(m, l)];
                if (tau) tau[qpms_mn2y(m, l)] = -st * legder[qpms_mn2y(m, l)];
            }
        }
        if (target_leg) memcpy(target_leg, leg, nelem * sizeof(double));
        free(leg);
        free(legder);
    }
    return QPMS_SUCCESS;
}

void qpms_pitau_free(qpms_pitau_t x) {
    free(x.leg);
    free(x.pi);
    free(x.tau);
}