diff --git a/qpms/legendre.c b/qpms/legendre.c
index aa547c5..9d3f19a 100644
--- a/qpms/legendre.c
+++ b/qpms/legendre.c
@@ -5,14 +5,16 @@
 #include
 #include "indexing.h"
 #include
+#include "qpms_error.h"
 
 // Legendre functions also for negative m, see DLMF 14.9.3
 
 qpms_errno_t qpms_legendre_deriv_y_fill(double *target, double *target_deriv, double x, qpms_l_t lMax,
 		gsl_sf_legendre_t lnorm, double csphase) {
-	size_t n = gsl_sf_legendre_array_n(lMax);
-	double *legendre_tmp = malloc(n * sizeof(double));
-	double *legendre_deriv_tmp = malloc(n * sizeof(double));
+	const size_t n = gsl_sf_legendre_array_n(lMax);
+	double *legendre_tmp, *legendre_deriv_tmp;
+	QPMS_CRASHING_MALLOC(legendre_tmp, n * sizeof(double));
+	QPMS_CRASHING_MALLOC(legendre_deriv_tmp, n * sizeof(double));
 	int gsl_errno = gsl_sf_legendre_deriv_array_e(
 			lnorm, (size_t)lMax, x, csphase, legendre_tmp,legendre_deriv_tmp);
 	for (qpms_l_t l = 1; l <= lMax; ++l)
@@ -22,153 +24,69 @@ qpms_errno_t qpms_legendre_deriv_y_fill(double *target, double *target_deriv, do
 			target[y] = legendre_tmp[i];
 			target_deriv[y] = legendre_deriv_tmp[i];
 		}
-	switch(lnorm) {
-		case GSL_SF_LEGENDRE_NONE:
-			for (qpms_l_t l = 1; l <= lMax; ++l)
-				for (qpms_m_t m = 1; m <= l; ++m) {
-					qpms_y_t y = qpms_mn2y(-m,l);
-					size_t i = gsl_sf_legendre_array_index(l,m);
-					// viz DLMF 14.9.3, čert ví, jak je to s cs fasí.
-					double factor = exp(lgamma(l-m+1)-lgamma(l+m+1))*((m%2)?-1:1);
-					target[y] = factor * legendre_tmp[i];
-					target_deriv[y] = factor * legendre_deriv_tmp[i];
-				}
-			break;
-		case GSL_SF_LEGENDRE_SCHMIDT:
-		case GSL_SF_LEGENDRE_SPHARM:
-		case GSL_SF_LEGENDRE_FULL:
-			for (qpms_l_t l = 1; l <= lMax; ++l)
-				for (qpms_m_t m = 1; m <= l; ++m) {
-					qpms_y_t y = qpms_mn2y(-m,l);
-					size_t i = gsl_sf_legendre_array_index(l,m);
-					// viz DLMF 14.9.3, čert ví, jak je to s cs fasí.
-					double factor = ((m%2)?-1:1); // this is the difference from the unnormalised case
-					target[y] = factor * legendre_tmp[i];
-					target_deriv[y] = factor * legendre_deriv_tmp[i];
-				}
-			break;
-		default:
-			abort(); //NI
-			break;
-	}
+
+	// Fill negative m's.
+	for (qpms_l_t l = 1; l <= lMax; ++l)
+		for (qpms_m_t m = 1; m <= l; ++m) {
+			qpms_y_t y = qpms_mn2y(-m,l);
+			size_t i = gsl_sf_legendre_array_index(l,m);
+			// cf. DLMF 14.9.3, but we're normalised.
+			double factor = ((m%2)?-1:1);
+			target[y] = factor * legendre_tmp[i];
+			target_deriv[y] = factor * legendre_deriv_tmp[i];
+		}
+
 	free(legendre_tmp);
 	free(legendre_deriv_tmp);
-	return QPMS_SUCCESS;
+	return gsl_errno;
 }
 
 qpms_errno_t qpms_legendre_deriv_y_get(double **target, double **dtarget, double x, qpms_l_t lMax,
 		gsl_sf_legendre_t lnorm, double csphase) {
-	*target = malloc(sizeof(double)*qpms_lMax2nelem(lMax));
-	*dtarget = malloc(sizeof(double)*qpms_lMax2nelem(lMax));
+	const qpms_y_t nelem = qpms_lMax2nelem(lMax);
+	QPMS_CRASHING_MALLOC(*target, nelem * sizeof(double));
+	QPMS_CRASHING_MALLOC(*dtarget, nelem * sizeof(double));
 	return qpms_legendre_deriv_y_fill(*target, *dtarget, x, lMax, lnorm, csphase);
 }
 
-qpms_pitau_t qpms_pitau_get(double theta, qpms_l_t lMax, qpms_normalisation_t norm)
+qpms_pitau_t qpms_pitau_get(double theta, qpms_l_t lMax, const double csphase)
 {
-	const double csphase = qpms_normalisation_t_csphase(norm);
-	norm = qpms_normalisation_t_normonly(norm);
+	QPMS_ENSURE(fabs(csphase) == 1, "The csphase argument must be either 1 or -1, not %g.", csphase);
 	qpms_pitau_t res;
-	qpms_y_t nelem = qpms_lMax2nelem(lMax);
-	res.pi = malloc(nelem * sizeof(double));
-	res.tau = malloc(nelem * sizeof(double));
+	const qpms_y_t nelem = qpms_lMax2nelem(lMax);
+	QPMS_CRASHING_MALLOC(res.leg, nelem * sizeof(double));
+	QPMS_CRASHING_MALLOC(res.pi, nelem * sizeof(double));
+	QPMS_CRASHING_MALLOC(res.tau, nelem * sizeof(double));
 	double ct = cos(theta), st = sin(theta);
 	if (1 == fabs(ct)) { // singular case, use DLMF 14.8.2
-		memset(res.pi, 0, nelem*sizeof(double));
-		memset(res.tau, 0, nelem*sizeof(double));
-		res.leg = calloc(nelem, sizeof(double));
-		switch(norm) {
-			/* FIXME get rid of multiplicating the five lines */
-			case QPMS_NORMALISATION_NONE:
-				for (qpms_l_t l = 1; l <= lMax; ++l) {
-					res.leg[qpms_mn2y(0, l)] = (l%2)?ct:1.;
-					double p = l*(l+1)/2;
-					const double n = 0.5;
-					int lpar = (l%2)?-1:1;
-					res.pi [qpms_mn2y(+1, l)] = -((ct>0) ? -1 : lpar) * p * csphase;
-					res.pi [qpms_mn2y(-1, l)] = -((ct>0) ? -1 : lpar) * n * csphase;
-					res.tau[qpms_mn2y(+1, l)] = ((ct>0) ? +1 : lpar) * p * csphase;
-					res.tau[qpms_mn2y(-1, l)] = -((ct>0) ? +1 : lpar) * n * csphase;
-				}
-				break;
-#ifdef USE_XU_ANTINORMALISATION // broken
-			case QPMS_NORMALISATION_XU: // Rather useless except for testing.
-				for (qpms_l_t l = 1; l <= lMax; ++l) {
-					res.leg[qpms_mn2y(0, l)] = ((l%2)?ct:1.)*sqrt(0.25*M_1_PI *(2*l+1)/(l*(l+1)));
-					double p = l*(l+1)/2 /* as in _NONE */
-						* sqrt(0.25 * M_1_PI * (2*l + 1));
-					double n = 0.5 /* as in _NONE */
-						* sqrt(0.25 * M_1_PI * (2*l + 1)) / (l * (l+1));
-					int lpar = (l%2)?-1:1;
-					res.pi [qpms_mn2y(+1, l)] = -((ct>0) ? -1 : lpar) * p * csphase;
-					res.pi [qpms_mn2y(-1, l)] = -((ct>0) ? -1 : lpar) * n * csphase;
-					res.tau[qpms_mn2y(+1, l)] = ((ct>0) ? +1 : lpar) * p * csphase;
-					res.tau[qpms_mn2y(-1, l)] = -((ct>0) ? +1 : lpar) * n * csphase;
-				}
-				break;
-#endif
-			case QPMS_NORMALISATION_TAYLOR:
-				for (qpms_l_t l = 1; l <= lMax; ++l) {
-					res.leg[qpms_mn2y(0, l)] = ((l%2)?ct:1.)*sqrt((2*l+1)*0.25*M_1_PI);
-					double fl = 0.25 * sqrt((2*l+1)*l*(l+1)*M_1_PI);
-					int lpar = (l%2)?-1:1;
-					res.pi [qpms_mn2y(+1, l)] = -((ct>0) ? -1 : lpar) * fl * csphase;
-					res.pi [qpms_mn2y(-1, l)] = -((ct>0) ? -1 : lpar) * fl * csphase;
-					res.tau[qpms_mn2y(+1, l)] = ((ct>0) ? +1 : lpar) * fl * csphase;
-					res.tau[qpms_mn2y(-1, l)] = -((ct>0) ? +1 : lpar) * fl * csphase;
-				}
-				break;
-			case QPMS_NORMALISATION_POWER:
-				for (qpms_l_t l = 1; l <= lMax; ++l) {
-					res.leg[qpms_mn2y(0, l)] = ((l%2)?ct:1.)*sqrt((2*l+1)/(4*M_PI *l*(l+1)));
-					double fl = 0.25 * sqrt((2*l+1)*M_1_PI);
-					int lpar = (l%2)?-1:1;
-					res.pi [qpms_mn2y(+1, l)] = -((ct>0) ? -1 : lpar) * fl * csphase;
-					res.pi [qpms_mn2y(-1, l)] = -((ct>0) ? -1 : lpar) * fl * csphase;
-					res.tau[qpms_mn2y(+1, l)] = ((ct>0) ? +1 : lpar) * fl * csphase;
-					res.tau[qpms_mn2y(-1, l)] = -((ct>0) ? +1 : lpar) * fl * csphase;
-				}
-				break;
-			default:
-				abort();
-		}
+		memset(res.pi, 0, nelem * sizeof(double));
+		memset(res.tau, 0, nelem * sizeof(double));
+		memset(res.leg, 0, nelem * sizeof(double));
+		for (qpms_l_t l = 1; l <= lMax; ++l) {
+			res.leg[qpms_mn2y(0, l)] = ((l%2)?ct:1.)*sqrt((2*l+1)/(4*M_PI *l*(l+1)));
+			double fl = 0.25 * sqrt((2*l+1)*M_1_PI);
+			int lpar = (l%2)?-1:1;
+			res.pi [qpms_mn2y(+1, l)] = -((ct>0) ? -1 : lpar) * fl * csphase;
+			res.pi [qpms_mn2y(-1, l)] = -((ct>0) ? -1 : lpar) * fl * csphase;
+			res.tau[qpms_mn2y(+1, l)] = ((ct>0) ? +1 : lpar) * fl * csphase;
+			res.tau[qpms_mn2y(-1, l)] = -((ct>0) ? +1 : lpar) * fl * csphase;
+		}
 	} else { // cos(theta) in (-1,1), use normal calculation
-		double *legder = malloc(sizeof(double)*qpms_lMax2nelem(lMax));
-		res.leg = malloc(sizeof(double)*qpms_lMax2nelem(lMax));
-		if (qpms_legendre_deriv_y_fill(res.leg, legder, ct, lMax,
-					norm == QPMS_NORMALISATION_NONE ? GSL_SF_LEGENDRE_NONE
-					: GSL_SF_LEGENDRE_SPHARM, csphase))
-			abort();
-		if (norm == QPMS_NORMALISATION_POWER)
-			/* for None (=non-normalized) and Taylor (=sph. harm. normalized)
-			 * the correct normalisation is already obtained from gsl_sf_legendre_deriv_array_e().
-			 * However, Kristensson ("power") normalisation differs from Taylor
-			 * by 1/sqrt(l*(l+1)) factor.
-			 */
-			for (qpms_l_t l = 1; l <= lMax; ++l) {
-				double prefac = 1./sqrt(l*(l+1));
-				for (qpms_m_t m = -l; m <= l; ++m) {
-					res.leg[qpms_mn2y(m,l)] *= prefac;
-					legder[qpms_mn2y(m,l)] *= prefac;
-				}
+		double *legder;
+		QPMS_CRASHING_MALLOC(legder, nelem * sizeof(double));
+		QPMS_ENSURE_SUCCESS(qpms_legendre_deriv_y_fill(res.leg, legder, ct, lMax,
+				GSL_SF_LEGENDRE_SPHARM, csphase));
+		// Multiply by the "power normalisation" factor
+		for (qpms_l_t l = 1; l <= lMax; ++l) {
+			double prefac = 1./sqrt(l*(l+1));
+			for (qpms_m_t m = -l; m <= l; ++m) {
+				res.leg[qpms_mn2y(m,l)] *= prefac;
+				legder[qpms_mn2y(m,l)] *= prefac;
 			}
-#ifdef USE_XU_ANTINORMALISATION
-		else if (norm == QPMS_NORMALISATION_XU)
-			/* for Xu (anti-normalized), we start from spharm-normalized Legendre functions
-			 * Do not use this normalisation except for testing
-			 */
-			// FIXME PROBABLY BROKEN HERE
-			for (qpms_l_t l = 1; l <= lMax; ++l) {
-				double prefac = (2*l + 1) / sqrt(4*M_PI / (l*(l+1)));
-				for (qpms_m_t m = -l; m <= l; ++m) {
-					double fac = prefac * exp(lgamma(l+m+1) - lgamma(l-m+1));
-					res.leg[qpms_mn2y(m,l)] *= fac;
-					legder[qpms_mn2y(m,l)] *= fac;
-				}
-			}
-#endif
+		}
 		for (qpms_l_t l = 1; l <= lMax; ++l) {
 			for (qpms_m_t m = -l; m <= l; ++m) {
 				res.pi [qpms_mn2y(m,l)] = m / st * res.leg[qpms_mn2y(m,l)];
diff --git a/qpms/qpms_specfunc.h b/qpms/qpms_specfunc.h
index fe2fd55..2ca9ad6 100644
--- a/qpms/qpms_specfunc.h
+++ b/qpms/qpms_specfunc.h
@@ -52,17 +52,40 @@ double *qpms_legendre_minus1d_y_get(qpms_l_t lMax, qpms_normalisation_t norm); /
 
 
-// array of Legendre and pi, tau auxillary functions (see [1,(37)])
-// This should handle correct evaluation for theta -> 0 and theta -> pi
+/// Array of Legendre and auxiliary \f$\pi_{lm}, \tau_{lm} \f$ functions.
+/**
+ * The leg, pi, tau arrays are indexed using the standard qpms_mn2y() VSWF indexing.
+ */
 typedef struct {
 	//qpms_normalisation_t norm;
 	qpms_l_t lMax;
 	//qpms_y_t nelem;
 	double *leg, *pi, *tau;
 } qpms_pitau_t;
-qpms_pitau_t qpms_pitau_get(double theta, qpms_l_t lMax, qpms_normalisation_t norm);
-void qpms_pitau_free(qpms_pitau_t);//NI
-void qpms_pitau_pfree(qpms_pitau_t*);//NI
+
+/// Returns an array of normalised Legendre and auxiliary \f$\pi_{lm}, \tau_{lm} \f$ functions.
+/**
+ * The normalised Legendre function here is defined as
+ * \f[
+ *	\Fer[norm.]{l}{m} = \csphase^{-1}
+ *	\sqrt{\frac{1}{l(l+1)}\frac{(l-m)!(2l+1)}{4\pi(l+m)!}} \Fer{l}{m},
+ * \f] i.e. obtained using `gsl_sf_legendre_array_e()` with
+ * `norm = GSL_SF_LEGENDRE_SPHARM` and divided by \f$ \sqrt{l(l+1)} \f$.
+ *
+ * The auxiliary functions are defined as
+ * \f[
+ *	\pi_{lm}(\cos \theta) = \frac{m}{\sin \theta} \Fer[norm.]{l}{m}(\cos\theta),\\
+ *	\tau_{lm}(\cos \theta) = \frac{\ud}{\ud \theta} \Fer[norm.]{l}{m}(\cos\theta)
+ * \f]
+ * with the appropriate limit expressions used if \f$ \abs{\cos\theta} = 1 \f$.
+ *
+ * When done, don't forget to deallocate the memory using qpms_pitau_free().
+ *
+ */
+qpms_pitau_t qpms_pitau_get(double theta, qpms_l_t lMax, double csphase);
+/// Frees the dynamically allocated arrays from qpms_pitau_t.
+void qpms_pitau_free(qpms_pitau_t);
+//void qpms_pitau_pfree(qpms_pitau_t*);//NI
 
 // Associated Legendre polynomial at zero argument (DLMF 14.5.1) DEPRECATED?
 double qpms_legendre0(int m, int n);
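
Note (not part of the patch): a minimal usage sketch of the new csphase-based qpms_pitau_get() interface, showing how a caller would migrate from the old qpms_normalisation_t argument. The include paths and the printf-based inspection are illustrative assumptions only, not taken from this diff.

#include <stdio.h>
#include <math.h>
#include "qpms_specfunc.h"  // qpms_pitau_t, qpms_pitau_get(), qpms_pitau_free()
#include "indexing.h"       // qpms_mn2y()

int main(void) {
	const qpms_l_t lMax = 3;
	const double theta = 0.3 * M_PI;
	// The Condon-Shortley phase is now passed directly as +1. or -1.
	// instead of being encoded in a qpms_normalisation_t value.
	qpms_pitau_t pt = qpms_pitau_get(theta, lMax, -1.);
	for (qpms_l_t l = 1; l <= lMax; ++l)
		for (qpms_m_t m = -l; m <= l; ++m) {
			const qpms_y_t y = qpms_mn2y(m, l);
			printf("l=%d m=%d leg=%g pi=%g tau=%g\n", (int) l, (int) m,
					pt.leg[y], pt.pi[y], pt.tau[y]);
		}
	qpms_pitau_free(pt);  // deallocates pt.leg, pt.pi and pt.tau
	return 0;
}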
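
Note (not part of the patch): qpms_error.h is not included in this diff, so the exact definition of QPMS_CRASHING_MALLOC is not shown. The sketch below only illustrates the semantics the new code appears to rely on (assign the allocation to the given pointer and abort with a diagnostic on failure); the real macro may differ in detail.

/* Assumed semantics of QPMS_CRASHING_MALLOC -- illustrative only,
 * not the actual definition from qpms_error.h. */
#include <stdio.h>
#include <stdlib.h>

#define QPMS_CRASHING_MALLOC(pointer, size) \
	do { \
		(pointer) = malloc(size); \
		if (!(pointer) && (size)) { \
			fprintf(stderr, "%s:%d: failed to allocate %zu bytes, aborting.\n", \
					__FILE__, __LINE__, (size_t) (size)); \
			abort(); \
		} \
	} while (0)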