diff --git a/qpms/bessel.c b/qpms/bessel.c index 78097a7..e0600cf 100644 --- a/qpms/bessel.c +++ b/qpms/bessel.c @@ -12,110 +12,110 @@ #endif static inline complex double ipow(int x) { - return cpow(I,x); + return cpow(I,x); } // There is a big issue with gsl's precision of spherical bessel function; these have to be implemented differently qpms_errno_t qpms_sph_bessel_fill(qpms_bessel_t typ, qpms_l_t lmax, double x, complex double *result_array) { - int retval; - double tmparr[lmax+1]; - switch(typ) { - case QPMS_BESSEL_REGULAR: - retval = gsl_sf_bessel_jl_steed_array(lmax, x, tmparr); - for (int l = 0; l <= lmax; ++l) result_array[l] = tmparr[l]; - return retval; - break; - case QPMS_BESSEL_SINGULAR: //FIXME: is this precise enough? Would it be better to do it one-by-one? - retval = gsl_sf_bessel_yl_array(lmax,x,tmparr); - for (int l = 0; l <= lmax; ++l) result_array[l] = tmparr[l]; - return retval; - break; - case QPMS_HANKEL_PLUS: - case QPMS_HANKEL_MINUS: - retval = gsl_sf_bessel_jl_steed_array(lmax, x, tmparr); - for (int l = 0; l <= lmax; ++l) result_array[l] = tmparr[l]; - if(retval) return retval; - retval = gsl_sf_bessel_yl_array(lmax, x, tmparr); - if (typ==QPMS_HANKEL_PLUS) - for (int l = 0; l <= lmax; ++l) result_array[l] += I * tmparr[l]; - else - for (int l = 0; l <= lmax; ++l) result_array[l] +=-I * tmparr[l]; - return retval; - break; - default: - abort(); - //return GSL_EDOM; - } - assert(0); + int retval; + double tmparr[lmax+1]; + switch(typ) { + case QPMS_BESSEL_REGULAR: + retval = gsl_sf_bessel_jl_steed_array(lmax, x, tmparr); + for (int l = 0; l <= lmax; ++l) result_array[l] = tmparr[l]; + return retval; + break; + case QPMS_BESSEL_SINGULAR: //FIXME: is this precise enough? Would it be better to do it one-by-one? + retval = gsl_sf_bessel_yl_array(lmax,x,tmparr); + for (int l = 0; l <= lmax; ++l) result_array[l] = tmparr[l]; + return retval; + break; + case QPMS_HANKEL_PLUS: + case QPMS_HANKEL_MINUS: + retval = gsl_sf_bessel_jl_steed_array(lmax, x, tmparr); + for (int l = 0; l <= lmax; ++l) result_array[l] = tmparr[l]; + if(retval) return retval; + retval = gsl_sf_bessel_yl_array(lmax, x, tmparr); + if (typ==QPMS_HANKEL_PLUS) + for (int l = 0; l <= lmax; ++l) result_array[l] += I * tmparr[l]; + else + for (int l = 0; l <= lmax; ++l) result_array[l] +=-I * tmparr[l]; + return retval; + break; + default: + abort(); + //return GSL_EDOM; + } + assert(0); } static inline ptrdiff_t akn_index(qpms_l_t n, qpms_l_t k) { - assert(k <= n); - return ((ptrdiff_t) n + 1) * n / 2 + k; + assert(k <= n); + return ((ptrdiff_t) n + 1) * n / 2 + k; } static inline ptrdiff_t bkn_index(qpms_l_t n, qpms_l_t k) { - assert(k <= n+1); - return ((ptrdiff_t) n + 2) * (n + 1) / 2 - 1 + k; + assert(k <= n+1); + return ((ptrdiff_t) n + 2) * (n + 1) / 2 - 1 + k; } static inline qpms_errno_t qpms_sbessel_calculator_ensure_lMax(qpms_sbessel_calculator_t *c, qpms_l_t lMax) { - if (lMax <= c->lMax) - return QPMS_SUCCESS; - else { - if ( NULL == (c->akn = realloc(c->akn, sizeof(double) * akn_index(lMax + 2, 0)))) - abort(); - //if ( NULL == (c->bkn = realloc(c->bkn, sizeof(complex double) * bkn_index(lMax + 1, 0)))) - // abort(); - for(qpms_l_t n = c->lMax+1; n <= lMax + 1; ++n) - for(qpms_l_t k = 0; k <= n; ++k) - c->akn[akn_index(n,k)] = exp(lgamma(n + k + 1) - k*M_LN2 - lgamma(k + 1) - lgamma(n - k + 1)); - // ... 
TODO derivace - c->lMax = lMax; - return QPMS_SUCCESS; - } + if (lMax <= c->lMax) + return QPMS_SUCCESS; + else { + if ( NULL == (c->akn = realloc(c->akn, sizeof(double) * akn_index(lMax + 2, 0)))) + abort(); + //if ( NULL == (c->bkn = realloc(c->bkn, sizeof(complex double) * bkn_index(lMax + 1, 0)))) + // abort(); + for(qpms_l_t n = c->lMax+1; n <= lMax + 1; ++n) + for(qpms_l_t k = 0; k <= n; ++k) + c->akn[akn_index(n,k)] = exp(lgamma(n + k + 1) - k*M_LN2 - lgamma(k + 1) - lgamma(n - k + 1)); + // ... TODO derivace + c->lMax = lMax; + return QPMS_SUCCESS; + } } complex double qpms_sbessel_calc_h1(qpms_sbessel_calculator_t *c, qpms_l_t n, double x) { - if(QPMS_SUCCESS != qpms_sbessel_calculator_ensure_lMax(c, n)) - abort(); - complex double z = I/x; // FIXME this should be imaginary double, but gcc is broken? - complex double result = 0; - for (qpms_l_t k = n; k >= 0; --k) - // can we use fma for complex? - //result = fma(result, z, c->akn(n, k)); - result = result * z + c->akn[akn_index(n,k)]; - result *= z * ipow(-n-2) * cexp(I * x); - return result; + if(QPMS_SUCCESS != qpms_sbessel_calculator_ensure_lMax(c, n)) + abort(); + complex double z = I/x; // FIXME this should be imaginary double, but gcc is broken? + complex double result = 0; + for (qpms_l_t k = n; k >= 0; --k) + // can we use fma for complex? + //result = fma(result, z, c->akn(n, k)); + result = result * z + c->akn[akn_index(n,k)]; + result *= z * ipow(-n-2) * cexp(I * x); + return result; } qpms_errno_t qpms_sbessel_calc_h1_fill(qpms_sbessel_calculator_t * const c, - const qpms_l_t lMax, const double x, complex double * const target) { - if(QPMS_SUCCESS != qpms_sbessel_calculator_ensure_lMax(c, lMax)) - abort(); - memset(target, 0, sizeof(complex double) * lMax); - complex double kahancomp[lMax]; - memset(kahancomp, 0, sizeof(complex double) * lMax); - for(qpms_l_t k = 0; k <= lMax; ++k){ - double xp = pow(x, -k-1); - for(qpms_l_t l = k; l <= lMax; ++l) - ckahanadd(target + l, kahancomp + l, c->akn[akn_index(l,k)] * xp * ipow(k-l-1)); - } - complex double eix = cexp(I * x); - for (qpms_l_t l = 0; l <= lMax; ++l) - target[l] *= eix; - return QPMS_SUCCESS; + const qpms_l_t lMax, const double x, complex double * const target) { + if(QPMS_SUCCESS != qpms_sbessel_calculator_ensure_lMax(c, lMax)) + abort(); + memset(target, 0, sizeof(complex double) * lMax); + complex double kahancomp[lMax]; + memset(kahancomp, 0, sizeof(complex double) * lMax); + for(qpms_l_t k = 0; k <= lMax; ++k){ + double xp = pow(x, -k-1); + for(qpms_l_t l = k; l <= lMax; ++l) + ckahanadd(target + l, kahancomp + l, c->akn[akn_index(l,k)] * xp * ipow(k-l-1)); + } + complex double eix = cexp(I * x); + for (qpms_l_t l = 0; l <= lMax; ++l) + target[l] *= eix; + return QPMS_SUCCESS; } qpms_sbessel_calculator_t *qpms_sbessel_calculator_init() { - qpms_sbessel_calculator_t *c = malloc(sizeof(qpms_sbessel_calculator_t)); - c->akn = NULL; - //c->bkn = NULL; - c->lMax = -1; - return c; + qpms_sbessel_calculator_t *c = malloc(sizeof(qpms_sbessel_calculator_t)); + c->akn = NULL; + //c->bkn = NULL; + c->lMax = -1; + return c; } void qpms_sbessel_calculator_pfree(qpms_sbessel_calculator_t *c) { - if(c->akn) free(c->akn); - //if(c->bkn) free(c->bkn); - free(c); + if(c->akn) free(c->akn); + //if(c->bkn) free(c->bkn); + free(c); } diff --git a/qpms/legendre.c b/qpms/legendre.c index 71e2ae5..27f79cf 100644 --- a/qpms/legendre.c +++ b/qpms/legendre.c @@ -8,151 +8,151 @@ // Legendre functions also for negative m, see DLMF 14.9.3 qpms_errno_t qpms_legendre_deriv_y_fill(double 
*target, double *target_deriv, double x, qpms_l_t lMax, - gsl_sf_legendre_t lnorm, double csphase) + gsl_sf_legendre_t lnorm, double csphase) { - size_t n = gsl_sf_legendre_array_n(lMax); - double *legendre_tmp = malloc(n * sizeof(double)); - double *legendre_deriv_tmp = malloc(n * sizeof(double)); - int gsl_errno = gsl_sf_legendre_deriv_array_e( - lnorm, (size_t)lMax, x, csphase, legendre_tmp,legendre_deriv_tmp); - for (qpms_l_t l = 1; l <= lMax; ++l) - for (qpms_m_t m = 0; m <= l; ++m) { - qpms_y_t y = qpms_mn2y(m,l); - size_t i = gsl_sf_legendre_array_index(l,m); - target[y] = legendre_tmp[i]; - target_deriv[y] = legendre_deriv_tmp[i]; - } - switch(lnorm) { - case GSL_SF_LEGENDRE_NONE: - for (qpms_l_t l = 1; l <= lMax; ++l) - for (qpms_m_t m = 1; m <= l; ++m) { - qpms_y_t y = qpms_mn2y(-m,l); - size_t i = gsl_sf_legendre_array_index(l,m); - // viz DLMF 14.9.3, čert ví, jak je to s cs fasí. - double factor = exp(lgamma(l-m+1)-lgamma(l+m+1))*((m%2)?-1:1); - target[y] = factor * legendre_tmp[i]; - target_deriv[y] = factor * legendre_deriv_tmp[i]; - } - break; - case GSL_SF_LEGENDRE_SCHMIDT: - case GSL_SF_LEGENDRE_SPHARM: - case GSL_SF_LEGENDRE_FULL: - for (qpms_l_t l = 1; l <= lMax; ++l) - for (qpms_m_t m = 1; m <= l; ++m) { - qpms_y_t y = qpms_mn2y(-m,l); - size_t i = gsl_sf_legendre_array_index(l,m); - // viz DLMF 14.9.3, čert ví, jak je to s cs fasí. - double factor = ((m%2)?-1:1); // this is the difference from the unnormalised case - target[y] = factor * legendre_tmp[i]; - target_deriv[y] = factor * legendre_deriv_tmp[i]; - } - break; - default: - abort(); //NI - break; + size_t n = gsl_sf_legendre_array_n(lMax); + double *legendre_tmp = malloc(n * sizeof(double)); + double *legendre_deriv_tmp = malloc(n * sizeof(double)); + int gsl_errno = gsl_sf_legendre_deriv_array_e( + lnorm, (size_t)lMax, x, csphase, legendre_tmp,legendre_deriv_tmp); + for (qpms_l_t l = 1; l <= lMax; ++l) + for (qpms_m_t m = 0; m <= l; ++m) { + qpms_y_t y = qpms_mn2y(m,l); + size_t i = gsl_sf_legendre_array_index(l,m); + target[y] = legendre_tmp[i]; + target_deriv[y] = legendre_deriv_tmp[i]; + } + switch(lnorm) { + case GSL_SF_LEGENDRE_NONE: + for (qpms_l_t l = 1; l <= lMax; ++l) + for (qpms_m_t m = 1; m <= l; ++m) { + qpms_y_t y = qpms_mn2y(-m,l); + size_t i = gsl_sf_legendre_array_index(l,m); + // viz DLMF 14.9.3, čert ví, jak je to s cs fasí. + double factor = exp(lgamma(l-m+1)-lgamma(l+m+1))*((m%2)?-1:1); + target[y] = factor * legendre_tmp[i]; + target_deriv[y] = factor * legendre_deriv_tmp[i]; } - free(legendre_tmp); - free(legendre_deriv_tmp); - return QPMS_SUCCESS; + break; + case GSL_SF_LEGENDRE_SCHMIDT: + case GSL_SF_LEGENDRE_SPHARM: + case GSL_SF_LEGENDRE_FULL: + for (qpms_l_t l = 1; l <= lMax; ++l) + for (qpms_m_t m = 1; m <= l; ++m) { + qpms_y_t y = qpms_mn2y(-m,l); + size_t i = gsl_sf_legendre_array_index(l,m); + // viz DLMF 14.9.3, čert ví, jak je to s cs fasí. 
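(Editorial sketch, not part of the patch.) The Czech comment above translates to "see DLMF 14.9.3; who knows how the Condon-Shortley phase enters here". For integer order, DLMF 14.9.3 reduces to P_l^{-m}(x) = (-1)^m (l-m)!/(l+m)! P_l^m(x), and the relation keeps the same form in either Condon-Shortley convention, since (-1)^{-m} = (-1)^m for integer m. In the Schmidt/spharm/full branches the factorial ratio is already absorbed by the normalisation, which is why only the (-1)^m parity survives as the factor assigned just below; the unnormalised branch earlier in this hunk keeps the full ratio. A minimal standalone restatement of that ratio, with a hypothetical helper name:

#include <math.h>

/* P_l^{-m} / P_l^m for unnormalised associated Legendre functions,
 * DLMF 14.9.3 with integer m: (-1)^m (l-m)!/(l+m)!, evaluated through
 * lgamma to avoid factorial overflow -- the same expression as the
 * "factor" used in the GSL_SF_LEGENDRE_NONE branch of this hunk. */
static double legendre_negm_ratio(int l, int m)
{
	return ((m % 2) ? -1. : 1.) * exp(lgamma(l - m + 1) - lgamma(l + m + 1));
}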
+ double factor = ((m%2)?-1:1); // this is the difference from the unnormalised case + target[y] = factor * legendre_tmp[i]; + target_deriv[y] = factor * legendre_deriv_tmp[i]; + } + break; + default: + abort(); //NI + break; + } + free(legendre_tmp); + free(legendre_deriv_tmp); + return QPMS_SUCCESS; } qpms_errno_t qpms_legendre_deriv_y_get(double **target, double **dtarget, double x, qpms_l_t lMax, gsl_sf_legendre_t lnorm, - double csphase) + double csphase) { - *target = malloc(sizeof(double)*qpms_lMax2nelem(lMax)); - *dtarget = malloc(sizeof(double)*qpms_lMax2nelem(lMax)); - return qpms_legendre_deriv_y_fill(*target, *dtarget, x, lMax, lnorm, csphase); + *target = malloc(sizeof(double)*qpms_lMax2nelem(lMax)); + *dtarget = malloc(sizeof(double)*qpms_lMax2nelem(lMax)); + return qpms_legendre_deriv_y_fill(*target, *dtarget, x, lMax, lnorm, csphase); } qpms_pitau_t qpms_pitau_get(double theta, qpms_l_t lMax, qpms_normalisation_t norm) { - const double csphase = qpms_normalisation_t_csphase(norm); - norm = qpms_normalisation_t_normonly(norm); - qpms_pitau_t res; - qpms_y_t nelem = qpms_lMax2nelem(lMax); - res.pi = malloc(nelem * sizeof(double)); - res.tau = malloc(nelem * sizeof(double)); - double ct = cos(theta), st = sin(theta); - if (1 == fabs(ct)) { // singular case, use DLMF 14.8.2 - memset(res.pi, 0, nelem*sizeof(double)); - memset(res.tau, 0, nelem*sizeof(double)); - res.leg = calloc(nelem, sizeof(double)); - switch(norm) { - case QPMS_NORMALISATION_XU: - for (qpms_l_t l = 1; l <= lMax; ++l) { - res.leg[qpms_mn2y(0, l)] = (l%2)?ct:1.; - double p = l*(l+1)/2; - const double n = 0.5; - int lpar = (l%2)?-1:1; - res.pi [qpms_mn2y(+1, l)] = -((ct>0) ? -1 : lpar) * p * csphase; - res.pi [qpms_mn2y(-1, l)] = -((ct>0) ? -1 : lpar) * n * csphase; - res.tau[qpms_mn2y(+1, l)] = ((ct>0) ? +1 : lpar) * p * csphase; - res.tau[qpms_mn2y(-1, l)] = -((ct>0) ? +1 : lpar) * n * csphase; - } - break; - case QPMS_NORMALISATION_TAYLOR: - for (qpms_l_t l = 1; l <= lMax; ++l) { - res.leg[qpms_mn2y(0, l)] = ((l%2)?ct:1.)*sqrt((2*l+1)*0.25*M_1_PI); - int lpar = (l%2)?-1:1; - double fl = 0.25 * sqrt((2*l+1)*l*(l+1)*M_1_PI); - res.pi [qpms_mn2y(+1, l)] = -((ct>0) ? -1 : lpar) * fl * csphase; - res.pi [qpms_mn2y(-1, l)] = -((ct>0) ? -1 : lpar) * fl * csphase; - res.tau[qpms_mn2y(+1, l)] = ((ct>0) ? +1 : lpar) * fl * csphase; - res.tau[qpms_mn2y(-1, l)] = -((ct>0) ? +1 : lpar) * fl * csphase; - } - break; - case QPMS_NORMALISATION_POWER: - for (qpms_l_t l = 1; l <= lMax; ++l) { - res.leg[qpms_mn2y(0, l)] = ((l%2)?ct:1.)*sqrt((2*l+1)/(4*M_PI *l*(l+1))); - int lpar = (l%2)?-1:1; - double fl = 0.25 * sqrt((2*l+1)*M_1_PI); - res.pi [qpms_mn2y(+1, l)] = -((ct>0) ? -1 : lpar) * fl * csphase; - res.pi [qpms_mn2y(-1, l)] = -((ct>0) ? -1 : lpar) * fl * csphase; - res.tau[qpms_mn2y(+1, l)] = ((ct>0) ? +1 : lpar) * fl * csphase; - res.tau[qpms_mn2y(-1, l)] = -((ct>0) ? 
+1 : lpar) * fl * csphase; + const double csphase = qpms_normalisation_t_csphase(norm); + norm = qpms_normalisation_t_normonly(norm); + qpms_pitau_t res; + qpms_y_t nelem = qpms_lMax2nelem(lMax); + res.pi = malloc(nelem * sizeof(double)); + res.tau = malloc(nelem * sizeof(double)); + double ct = cos(theta), st = sin(theta); + if (1 == fabs(ct)) { // singular case, use DLMF 14.8.2 + memset(res.pi, 0, nelem*sizeof(double)); + memset(res.tau, 0, nelem*sizeof(double)); + res.leg = calloc(nelem, sizeof(double)); + switch(norm) { + case QPMS_NORMALISATION_XU: + for (qpms_l_t l = 1; l <= lMax; ++l) { + res.leg[qpms_mn2y(0, l)] = (l%2)?ct:1.; + double p = l*(l+1)/2; + const double n = 0.5; + int lpar = (l%2)?-1:1; + res.pi [qpms_mn2y(+1, l)] = -((ct>0) ? -1 : lpar) * p * csphase; + res.pi [qpms_mn2y(-1, l)] = -((ct>0) ? -1 : lpar) * n * csphase; + res.tau[qpms_mn2y(+1, l)] = ((ct>0) ? +1 : lpar) * p * csphase; + res.tau[qpms_mn2y(-1, l)] = -((ct>0) ? +1 : lpar) * n * csphase; + } + break; + case QPMS_NORMALISATION_TAYLOR: + for (qpms_l_t l = 1; l <= lMax; ++l) { + res.leg[qpms_mn2y(0, l)] = ((l%2)?ct:1.)*sqrt((2*l+1)*0.25*M_1_PI); + int lpar = (l%2)?-1:1; + double fl = 0.25 * sqrt((2*l+1)*l*(l+1)*M_1_PI); + res.pi [qpms_mn2y(+1, l)] = -((ct>0) ? -1 : lpar) * fl * csphase; + res.pi [qpms_mn2y(-1, l)] = -((ct>0) ? -1 : lpar) * fl * csphase; + res.tau[qpms_mn2y(+1, l)] = ((ct>0) ? +1 : lpar) * fl * csphase; + res.tau[qpms_mn2y(-1, l)] = -((ct>0) ? +1 : lpar) * fl * csphase; + } + break; + case QPMS_NORMALISATION_POWER: + for (qpms_l_t l = 1; l <= lMax; ++l) { + res.leg[qpms_mn2y(0, l)] = ((l%2)?ct:1.)*sqrt((2*l+1)/(4*M_PI *l*(l+1))); + int lpar = (l%2)?-1:1; + double fl = 0.25 * sqrt((2*l+1)*M_1_PI); + res.pi [qpms_mn2y(+1, l)] = -((ct>0) ? -1 : lpar) * fl * csphase; + res.pi [qpms_mn2y(-1, l)] = -((ct>0) ? -1 : lpar) * fl * csphase; + res.tau[qpms_mn2y(+1, l)] = ((ct>0) ? +1 : lpar) * fl * csphase; + res.tau[qpms_mn2y(-1, l)] = -((ct>0) ? +1 : lpar) * fl * csphase; - } - break; - default: - abort(); - } } - else { // cos(theta) in (-1,1), use normal calculation - double *legder = malloc(sizeof(double)*qpms_lMax2nelem(lMax)); - res.leg = malloc(sizeof(double)*qpms_lMax2nelem(lMax)); - if (qpms_legendre_deriv_y_fill(res.leg, legder, ct, lMax, - norm == QPMS_NORMALISATION_XU ? GSL_SF_LEGENDRE_NONE - : GSL_SF_LEGENDRE_SPHARM, csphase)) - abort(); - if (norm == QPMS_NORMALISATION_POWER) - /* for Xu (=non-normalized) and Taylor (=sph. harm. normalized) - * the correct normalisation is already obtained from gsl_sf_legendre_deriv_array_e(). - * However, Kristensson ("power") normalisation differs from Taylor - * by 1/sqrt(l*(l+1)) factor. - */ - for (qpms_l_t l = 1; l <= lMax; ++l) { - double prefac = 1./sqrt(l*(l+1)); - for (qpms_m_t m = -l; m <= l; ++m) { - res.leg[qpms_mn2y(m,l)] *= prefac; - legder[qpms_mn2y(m,l)] *= prefac; - } - } - for (qpms_l_t l = 1; l <= lMax; ++l) { - for (qpms_m_t m = -l; m <= l; ++m) { - res.pi [qpms_mn2y(m,l)] = m / st * res.leg[qpms_mn2y(m,l)]; - res.tau[qpms_mn2y(m,l)] = - st * legder[qpms_mn2y(m,l)]; - } - } - free(legder); + break; + default: + abort(); + } + } + else { // cos(theta) in (-1,1), use normal calculation + double *legder = malloc(sizeof(double)*qpms_lMax2nelem(lMax)); + res.leg = malloc(sizeof(double)*qpms_lMax2nelem(lMax)); + if (qpms_legendre_deriv_y_fill(res.leg, legder, ct, lMax, + norm == QPMS_NORMALISATION_XU ? 
GSL_SF_LEGENDRE_NONE + : GSL_SF_LEGENDRE_SPHARM, csphase)) + abort(); + if (norm == QPMS_NORMALISATION_POWER) + /* for Xu (=non-normalized) and Taylor (=sph. harm. normalized) + * the correct normalisation is already obtained from gsl_sf_legendre_deriv_array_e(). + * However, Kristensson ("power") normalisation differs from Taylor + * by 1/sqrt(l*(l+1)) factor. + */ + for (qpms_l_t l = 1; l <= lMax; ++l) { + double prefac = 1./sqrt(l*(l+1)); + for (qpms_m_t m = -l; m <= l; ++m) { + res.leg[qpms_mn2y(m,l)] *= prefac; + legder[qpms_mn2y(m,l)] *= prefac; } - res.lMax = lMax; - return res; + } + for (qpms_l_t l = 1; l <= lMax; ++l) { + for (qpms_m_t m = -l; m <= l; ++m) { + res.pi [qpms_mn2y(m,l)] = m / st * res.leg[qpms_mn2y(m,l)]; + res.tau[qpms_mn2y(m,l)] = - st * legder[qpms_mn2y(m,l)]; + } + } + free(legder); + } + res.lMax = lMax; + return res; } void qpms_pitau_free(qpms_pitau_t x) { - free(x.leg); - free(x.pi); - free(x.tau); + free(x.leg); + free(x.pi); + free(x.tau); } diff --git a/qpms/qpms_types.h b/qpms/qpms_types.h index 921e80a..44b29ac 100644 --- a/qpms/qpms_types.h +++ b/qpms/qpms_types.h @@ -105,10 +105,10 @@ static inline double qpms_normalisation_t_factor_abssquare(qpms_normalisation_t typedef enum { - QPMS_BESSEL_REGULAR = 1, // regular function j - QPMS_BESSEL_SINGULAR = 2, // singular function y - QPMS_HANKEL_PLUS = 3, // hankel function h1 = j + I*y - QPMS_HANKEL_MINUS = 4, // hankel function h2 = j - I*y + QPMS_BESSEL_REGULAR = 1, // regular function j + QPMS_BESSEL_SINGULAR = 2, // singular function y + QPMS_HANKEL_PLUS = 3, // hankel function h1 = j + I*y + QPMS_HANKEL_MINUS = 4, // hankel function h2 = j - I*y QPMS_BESSEL_UNDEF = 0 } qpms_bessel_t; @@ -131,7 +131,7 @@ typedef struct { typedef struct { // Do I really need this??? complex double r; - double theta, phi; + double theta, phi; } csph_t; // complex vector components in local spherical basis diff --git a/qpms/translations.c b/qpms/translations.c index c2dd817..ce64eaf 100644 --- a/qpms/translations.c +++ b/qpms/translations.c @@ -20,7 +20,8 @@ * error/inconsintency in Xu's paper or something else) * Anyway, the zeroes give the correct _numerical_ values according to Xu's * paper tables (without Xu's typos, of course), while - * the predefined macros give the correct translations of the VSWFs. + * the predefined macros give the correct translations of the VSWFs for the + * QPMS_NORMALIZATION_TAYLOR_CS norm. */ #if !(defined AN0 || defined AN1 || defined AN2 || defined AN3) #pragma message "using AN1 macro as default" @@ -65,410 +66,342 @@ static const double sqrtpi = 1.7724538509055160272981674833411451827975494561223 // Associated Legendre polynomial at zero argument (DLMF 14.5.1) double qpms_legendre0(int m, int n) { - return pow(2,m) * sqrtpi / tgamma(.5*n - .5*m + .5) / tgamma(.5*n-.5*m); +TAYLOR: + return -0.5*(lgamma(n+m+1)-lgamma(n-m+1)+lgamma(nu-mu+1)-lgamma(nu+mu+1)); + break; + case QPMS_NORMALISATION_XU: + return 0; + break; + default: + abort(); } - -static inline int min1pow(int x) { - return (x % 2) ? -1 : 1; -} - -static inline complex double ipow(int x) { - return cpow(I, x); -} - -// Derivative of associated Legendre polynomial at zero argument (DLMF 14.5.2) -double qpms_legendreD0(int m, int n) { - return -2 * qpms_legendre0(m, n); -} - - -static inline int imin(int x, int y) { - return x > y ? y : x; -} - -// The uppermost value of q index for the B coefficient terms from [Xu](60). -// N.B. this is different from [Xu_old](79) due to the n vs. n+1 difference. 
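(Editorial sketch, not part of the patch.) The Czech aside "nemá tu být +m?" means "shouldn't this be +m?", and the emphatic "N.B. -m" answer records that the Gaunt series here is deliberately that of a(-m, n, mu, nu, p), which is why the Legendre order used below is Pp_order = mu - m. Assuming gaunt_q_max implements the usual truncation of the Gaunt series (for arguments (m1, n, m2, nu), q runs while p = n + nu - 2q >= |m1 + m2|), the bound it returns is exactly what keeps the assert(p >= abs(Pp_order)) in the q-loop from firing; the recurring Czech FIXME "raději nastav lépe meze" next to the commented-out continue means "rather, set the bounds properly". A sketch of that truncation under this assumption, with a hypothetical name:

#include <stdlib.h>

/* Usual Gaunt-series truncation: q <= min(n, nu, (n + nu - |m1 + m2|) / 2).
 * Called as (-m, n, mu, nu), the last term becomes (n + nu - |mu - m|) / 2,
 * so p = n + nu - 2*q never drops below |mu - m| = |Pp_order|. */
static int gaunt_q_max_sketch(int m1, int n, int m2, int nu)
{
	int cap = (n + nu - abs(m1 + m2)) / 2;
	int q = (n < nu) ? n : nu;
	return (q < cap) ? q : cap;
}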
-// However, the trailing terms in [Xu_old] are analytically zero (although -// the numerical values will carry some non-zero rounding error). -static inline int gauntB_Q_max(int M, int n, int mu, int nu) { - return imin(n, imin(nu, (n+nu+1-abs(M+mu))/2)); -} - -int qpms_sph_bessel_fill(qpms_bessel_t typ, int lmax, double x, complex double *result_array) { - int retval; - double tmparr[lmax+1]; - switch(typ) { - case QPMS_BESSEL_REGULAR: - retval = gsl_sf_bessel_jl_steed_array(lmax, x, tmparr); - for (int l = 0; l <= lmax; ++l) result_array[l] = tmparr[l]; - return retval; - break; - case QPMS_BESSEL_SINGULAR: //FIXME: is this precise enough? Would it be better to do it one-by-one? - retval = gsl_sf_bessel_yl_array(lmax,x,tmparr); - for (int l = 0; l <= lmax; ++l) result_array[l] = tmparr[l]; - return retval; - break; - case QPMS_HANKEL_PLUS: - case QPMS_HANKEL_MINUS: - retval = gsl_sf_bessel_jl_steed_array(lmax, x, tmparr); - for (int l = 0; l <= lmax; ++l) result_array[l] = tmparr[l]; - if(retval) return retval; - retval = gsl_sf_bessel_yl_array(lmax, x, tmparr); - if (typ==QPMS_HANKEL_PLUS) - for (int l = 0; l <= lmax; ++l) result_array[l] += I * tmparr[l]; - else - for (int l = 0; l <= lmax; ++l) result_array[l] +=-I * tmparr[l]; - return retval; - break; - default: - abort(); - //return GSL_EDOM; - } - assert(0); -} - -static inline double qpms_trans_normlogfac(qpms_normalisation_t norm, - int m, int n, int mu, int nu) { - //int csphase = qpms_normalisation_t csphase(norm); // probably not needed here - norm = qpms_normalisation_t_normonly(norm); - switch(norm) { - case QPMS_NORMALISATION_KRISTENSSON: - case QPMS_NORMALISATION_TAYLOR: - return -0.5*(lgamma(n+m+1)-lgamma(n-m+1)+lgamma(nu-mu+1)-lgamma(nu+mu+1)); - break; - case QPMS_NORMALISATION_XU: - return 0; - break; - default: - abort(); - } } static inline double qpms_trans_normfac(qpms_normalisation_t norm, - int m, int n, int mu, int nu) { - int csphase = qpms_normalisation_t_csphase(norm); // FIXME USEME TODO - norm = qpms_normalisation_t_normonly(norm); - double normfac = 1.; - switch(norm) { - case QPMS_NORMALISATION_KRISTENSSON: - normfac *= sqrt((nu*(nu+1.))/(n*(n+1.))); - case QPMS_NORMALISATION_TAYLOR: - normfac *= sqrt((2.*n+1)/(2.*nu+1)); - break; - case QPMS_NORMALISATION_XU: - break; - default: - abort(); - } + /*dest*/int m, int n, /*src*/int mu, int nu) { + int csphase = qpms_normalisation_t_csphase(norm); // FIXME USEME TODO + norm = qpms_normalisation_t_normonly(norm); + double normfac = 1.; + switch(norm) { + case QPMS_NORMALISATION_KRISTENSSON: + normfac *= sqrt((nu*(nu+1.))/(n*(n+1.))); + case QPMS_NORMALISATION_TAYLOR: + normfac *= sqrt((2.*n+1)/(2.*nu+1)); + break; + case QPMS_NORMALISATION_XU: + break; + default: + abort(); + } - return normfac; + return normfac; } complex double qpms_trans_single_A(qpms_normalisation_t norm, - int m, int n, int mu, int nu, sph_t kdlj, - bool r_ge_d, qpms_bessel_t J) { - if(r_ge_d) J = QPMS_BESSEL_REGULAR; + int m, int n, int mu, int nu, sph_t kdlj, + bool r_ge_d, qpms_bessel_t J) { + if(r_ge_d) J = QPMS_BESSEL_REGULAR; - double costheta = cos(kdlj.theta); + double costheta = cos(kdlj.theta); - int qmax = gaunt_q_max(-m,n,mu,nu); // nemá tu být +m? - // N.B. -m !!!!!! - double a1q[qmax+1]; - int err; - gaunt_xu(-m,n,mu,nu,qmax,a1q,&err); - double a1q0 = a1q[0]; - if (err) abort(); - int csphase = qpms_normalisation_t_csphase(norm); //FIXME EITHER TO NORMFAC OR USE HERE + int qmax = gaunt_q_max(-m,n,mu,nu); // nemá tu být +m? + // N.B. -m !!!!!! 
+ double a1q[qmax+1]; + int err; + gaunt_xu(-m,n,mu,nu,qmax,a1q,&err); + double a1q0 = a1q[0]; + if (err) abort(); + int csphase = qpms_normalisation_t_csphase(norm); //FIXME EITHER TO NORMFAC OR USE HERE - double leg[gsl_sf_legendre_array_n(n+nu)]; - if (gsl_sf_legendre_array_e(GSL_SF_LEGENDRE_NONE,n+nu,costheta,csphase,leg)) abort(); - complex double bes[n+nu+1]; - if (qpms_sph_bessel_fill(J, n+nu, kdlj.r, bes)) abort(); - complex double sum = 0; - for(int q = 0; q <= qmax; ++q) { - int p = n+nu-2*q; - int Pp_order = mu-m; - //if(p < abs(Pp_order)) continue; // FIXME raději nastav lépe meze - assert(p >= abs(Pp_order)); - double a1q_n = a1q[q] / a1q0; - double Pp = leg[gsl_sf_legendre_array_index(p, abs(Pp_order))]; - if (Pp_order < 0) Pp *= min1pow(mu-m) * exp(lgamma(1+p+Pp_order)-lgamma(1+p-Pp_order)); - complex double zp = bes[p]; - complex double summandq = (n*(n+1) + nu*(nu+1) - p*(p+1)) * min1pow(q) * a1q_n * zp * Pp; - sum += summandq; // TODO KAHAN - } + double leg[gsl_sf_legendre_array_n(n+nu)]; + if (gsl_sf_legendre_array_e(GSL_SF_LEGENDRE_NONE,n+nu,costheta,csphase,leg)) abort(); + complex double bes[n+nu+1]; + if (qpms_sph_bessel_fill(J, n+nu, kdlj.r, bes)) abort(); + complex double sum = 0; + for(int q = 0; q <= qmax; ++q) { + int p = n+nu-2*q; + int Pp_order = mu-m; + //if(p < abs(Pp_order)) continue; // FIXME raději nastav lépe meze + assert(p >= abs(Pp_order)); + double a1q_n = a1q[q] / a1q0; + double Pp = leg[gsl_sf_legendre_array_index(p, abs(Pp_order))]; + if (Pp_order < 0) Pp *= min1pow(mu-m) * exp(lgamma(1+p+Pp_order)-lgamma(1+p-Pp_order)); + complex double zp = bes[p]; + complex double summandq = (n*(n+1) + nu*(nu+1) - p*(p+1)) * min1pow(q) * a1q_n * zp * Pp; + sum += summandq; // TODO KAHAN + } - double exponent=(lgamma(2*n+1)-lgamma(n+2)+lgamma(2*nu+3)-lgamma(nu+2) - +lgamma(n+nu+m-mu+1)-lgamma(n-m+1)-lgamma(nu+mu+1) - +lgamma(n+nu+1) - lgamma(2*(n+nu)+1)); - complex double presum = exp(exponent); - presum *= cexp(I*(mu-m)*kdlj.phi) * min1pow(m) * ipow(nu+n) / (4*n); + double exponent=(lgamma(2*n+1)-lgamma(n+2)+lgamma(2*nu+3)-lgamma(nu+2) + +lgamma(n+nu+m-mu+1)-lgamma(n-m+1)-lgamma(nu+mu+1) + +lgamma(n+nu+1) - lgamma(2*(n+nu)+1)); + complex double presum = exp(exponent); + presum *= cexp(I*(mu-m)*kdlj.phi) * min1pow(m) * ipow(nu+n) / (4*n); - double normlogfac = qpms_trans_normlogfac(norm,m,n,mu,nu); - double normfac = qpms_trans_normfac(norm,m,n,mu,nu); + double normlogfac = qpms_trans_normlogfac(norm,m,n,mu,nu); + double normfac = qpms_trans_normfac(norm,m,n,mu,nu); - // ipow(n-nu) is the difference from the Taylor formula! - presum *= /*ipow(n-nu) * */(normfac * exp(normlogfac)); - return presum * sum; + // ipow(n-nu) is the difference from the Taylor formula! + presum *= /*ipow(n-nu) * */(normfac * exp(normlogfac)); + return presum * sum; } complex double qpms_trans_single_A_Taylor(int m, int n, int mu, int nu, sph_t kdlj, - bool r_ge_d, qpms_bessel_t J) { - if(r_ge_d) J = QPMS_BESSEL_REGULAR; + bool r_ge_d, qpms_bessel_t J) { + if(r_ge_d) J = QPMS_BESSEL_REGULAR; - double costheta = cos(kdlj.theta); + double costheta = cos(kdlj.theta); - int qmax = gaunt_q_max(-m,n,mu,nu); // nemá tu být +m? - // N.B. -m !!!!!! - double a1q[qmax+1]; - int err; - gaunt_xu(-m,n,mu,nu,qmax,a1q,&err); - double a1q0 = a1q[0]; - if (err) abort(); + int qmax = gaunt_q_max(-m,n,mu,nu); // nemá tu být +m? + // N.B. -m !!!!!! 
+ double a1q[qmax+1]; + int err; + gaunt_xu(-m,n,mu,nu,qmax,a1q,&err); + double a1q0 = a1q[0]; + if (err) abort(); - double leg[gsl_sf_legendre_array_n(n+nu)]; - if (gsl_sf_legendre_array_e(GSL_SF_LEGENDRE_NONE,n+nu,costheta,-1,leg)) abort(); - complex double bes[n+nu+1]; - if (qpms_sph_bessel_fill(J, n+nu, kdlj.r, bes)) abort(); - complex double sum = 0; - for(int q = 0; q <= qmax; ++q) { - int p = n+nu-2*q; - int Pp_order = mu-m; - //if(p < abs(Pp_order)) continue; // FIXME raději nastav lépe meze - assert(p >= abs(Pp_order)); - double a1q_n = a1q[q] / a1q0; - double Pp = leg[gsl_sf_legendre_array_index(p, abs(Pp_order))]; - if (Pp_order < 0) Pp *= min1pow(mu-m) * exp(lgamma(1+p+Pp_order)-lgamma(1+p-Pp_order)); - complex double zp = bes[p]; - complex double summandq = (n*(n+1) + nu*(nu+1) - p*(p+1)) * min1pow(q) * a1q_n * zp * Pp; - sum += summandq; // TODO KAHAN - } + double leg[gsl_sf_legendre_array_n(n+nu)]; + if (gsl_sf_legendre_array_e(GSL_SF_LEGENDRE_NONE,n+nu,costheta,-1,leg)) abort(); + complex double bes[n+nu+1]; + if (qpms_sph_bessel_fill(J, n+nu, kdlj.r, bes)) abort(); + complex double sum = 0; + for(int q = 0; q <= qmax; ++q) { + int p = n+nu-2*q; + int Pp_order = mu-m; + //if(p < abs(Pp_order)) continue; // FIXME raději nastav lépe meze + assert(p >= abs(Pp_order)); + double a1q_n = a1q[q] / a1q0; + double Pp = leg[gsl_sf_legendre_array_index(p, abs(Pp_order))]; + if (Pp_order < 0) Pp *= min1pow(mu-m) * exp(lgamma(1+p+Pp_order)-lgamma(1+p-Pp_order)); + complex double zp = bes[p]; + complex double summandq = (n*(n+1) + nu*(nu+1) - p*(p+1)) * min1pow(q) * a1q_n * zp * Pp; + sum += summandq; // TODO KAHAN + } - double exponent=(lgamma(2*n+1)-lgamma(n+2)+lgamma(2*nu+3)-lgamma(nu+2) - +lgamma(n+nu+m-mu+1)-lgamma(n-m+1)-lgamma(nu+mu+1) - +lgamma(n+nu+1) - lgamma(2*(n+nu)+1)); - complex double presum = exp(exponent); - presum *= cexp(I*(mu-m)*kdlj.phi) * min1pow(m) * ipow(nu+n) / (4*n); + double exponent=(lgamma(2*n+1)-lgamma(n+2)+lgamma(2*nu+3)-lgamma(nu+2) + +lgamma(n+nu+m-mu+1)-lgamma(n-m+1)-lgamma(nu+mu+1) + +lgamma(n+nu+1) - lgamma(2*(n+nu)+1)); + complex double presum = exp(exponent); + presum *= cexp(I*(mu-m)*kdlj.phi) * min1pow(m) * ipow(nu+n) / (4*n); - // N.B. ipow(nu-n) is different from the general formula! - complex double prenormratio = ipow(nu-n) * sqrt(((2.*nu+1)/(2.*n+1))* exp( - lgamma(n+m+1)-lgamma(n-m+1)+lgamma(nu-mu+1)-lgamma(nu+mu+1))); - return (presum / prenormratio) * sum; + // N.B. ipow(nu-n) is different from the general formula! + complex double prenormratio = ipow(nu-n) * sqrt(((2.*nu+1)/(2.*n+1))* exp( + lgamma(n+m+1)-lgamma(n-m+1)+lgamma(nu-mu+1)-lgamma(nu+mu+1))); + return (presum / prenormratio) * sum; } // [Xu_old], eq. (83) complex double qpms_trans_single_B_Xu(int m, int n, int mu, int nu, sph_t kdlj, - bool r_ge_d, qpms_bessel_t J) { - assert(0); // FIXME probably gives wrong values, do not use. - if(r_ge_d) J = QPMS_BESSEL_REGULAR; - double costheta = cos(kdlj.theta); + bool r_ge_d, qpms_bessel_t J) { + assert(0); // FIXME probably gives wrong values, do not use. + if(r_ge_d) J = QPMS_BESSEL_REGULAR; + double costheta = cos(kdlj.theta); - // TODO Qmax cleanup: can I replace Qmax with realQmax??? 
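(Editorial sketch, not part of the patch.) On the "TODO Qmax cleanup" question above: per the comment attached to gauntB_Q_max on the removed side of this diff, Qmax = gaunt_q_max(-m, n+1, mu, nu) and realQmax = gauntB_Q_max(-m, n, mu, nu) differ because of the n vs. n+1 shift between [Xu_old](79) and [Xu](60), and the trailing [Xu_old] terms are analytically zero (they contribute only rounding noise). Summing only up to realQmax, as the loops below already do, is therefore justified; whether the gaunt_xu calls themselves may be given the smaller bound is left open here (see the "is it safe to replace q2max with Qmax in gaunt_xu" FIXME further down), so the a2q/a3q buffers stay sized by q2max/Qmax. For reference, the removed helper reads as follows (copied from this diff, standalone with hypothetical names):

#include <stdlib.h>

static inline int imin_sketch(int x, int y) { return x > y ? y : x; }

/* Uppermost q index for the B-coefficient terms, [Xu](60); note n + nu + 1
 * rather than the n + nu appearing in the plain Gaunt truncation. */
static inline int gauntB_Q_max_sketch(int M, int n, int mu, int nu)
{
	return imin_sketch(n, imin_sketch(nu, (n + nu + 1 - abs(M + mu)) / 2));
}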
- int q2max = gaunt_q_max(-m-1,n+1,mu+1,nu); - int Qmax = gaunt_q_max(-m,n+1,mu,nu); - int realQmax = gauntB_Q_max(-m, n, mu, nu); - double a2q[q2max+1], a3q[Qmax+1], a2q0, a3q0; - int err; - if (mu == nu) { - for (int q = 0; q <= q2max; ++q) - a2q[q] = 0; - a2q0 = 1; - } - else { - gaunt_xu(-m-1,n+1,mu+1,nu,q2max,a2q,&err); if (err) abort(); - a2q0 = a2q[0]; - } - gaunt_xu(-m,n+1,mu,nu,Qmax,a3q,&err); if (err) abort(); - a3q0 = a3q[0]; + // TODO Qmax cleanup: can I replace Qmax with realQmax??? + int q2max = gaunt_q_max(-m-1,n+1,mu+1,nu); + int Qmax = gaunt_q_max(-m,n+1,mu,nu); + int realQmax = gauntB_Q_max(-m, n, mu, nu); + double a2q[q2max+1], a3q[Qmax+1], a2q0, a3q0; + int err; + if (mu == nu) { + for (int q = 0; q <= q2max; ++q) + a2q[q] = 0; + a2q0 = 1; + } + else { + gaunt_xu(-m-1,n+1,mu+1,nu,q2max,a2q,&err); if (err) abort(); + a2q0 = a2q[0]; + } + gaunt_xu(-m,n+1,mu,nu,Qmax,a3q,&err); if (err) abort(); + a3q0 = a3q[0]; - double leg[gsl_sf_legendre_array_n(n+nu+1)]; - if (gsl_sf_legendre_array_e(GSL_SF_LEGENDRE_NONE,n+nu+1,costheta,-1,leg)) abort(); - complex double bes[n+nu+2]; - if (qpms_sph_bessel_fill(J, n+nu+1, kdlj.r, bes)) abort(); + double leg[gsl_sf_legendre_array_n(n+nu+1)]; + if (gsl_sf_legendre_array_e(GSL_SF_LEGENDRE_NONE,n+nu+1,costheta,-1,leg)) abort(); + complex double bes[n+nu+2]; + if (qpms_sph_bessel_fill(J, n+nu+1, kdlj.r, bes)) abort(); - complex double sum = 0; - for (int q = 0; q <= realQmax; ++q) { - int p = n+nu-2*q; - double a2q_n = a2q[q]/a2q0; - double a3q_n = a3q[q]/a3q0; - complex double zp_ = bes[p+1]; - int Pp_order_ = mu-m; - //if(p+1 < abs(Pp_order_)) continue; // FIXME raději nastav lépe meze - assert(p+1 >= abs(Pp_order_)); - double Pp_ = leg[gsl_sf_legendre_array_index(p+1, abs(Pp_order_))]; - if (Pp_order_ < 0) Pp_ *= min1pow(mu-m) * exp(lgamma(1+1+p+Pp_order_)-lgamma(1+1+p-Pp_order_)); - complex double summandq = ((2*(n+1)*(nu-mu)*a2q_n - -(-nu*(nu+1) - n*(n+3) - 2*mu*(n+1)+p*(p+3))* a3q_n) - *min1pow(q) * zp_ * Pp_); - sum += summandq; // TODO KAHAN - } + complex double sum = 0; + for (int q = 0; q <= realQmax; ++q) { + int p = n+nu-2*q; + double a2q_n = a2q[q]/a2q0; + double a3q_n = a3q[q]/a3q0; + complex double zp_ = bes[p+1]; + int Pp_order_ = mu-m; + //if(p+1 < abs(Pp_order_)) continue; // FIXME raději nastav lépe meze + assert(p+1 >= abs(Pp_order_)); + double Pp_ = leg[gsl_sf_legendre_array_index(p+1, abs(Pp_order_))]; + if (Pp_order_ < 0) Pp_ *= min1pow(mu-m) * exp(lgamma(1+1+p+Pp_order_)-lgamma(1+1+p-Pp_order_)); + complex double summandq = ((2*(n+1)*(nu-mu)*a2q_n + -(-nu*(nu+1) - n*(n+3) - 2*mu*(n+1)+p*(p+3))* a3q_n) + *min1pow(q) * zp_ * Pp_); + sum += summandq; // TODO KAHAN + } - double exponent=(lgamma(2*n+3)-lgamma(n+2)+lgamma(2*nu+3)-lgamma(nu+2) - +lgamma(n+nu+m-mu+2)-lgamma(n-m+1)-lgamma(nu+mu+1) - +lgamma(n+nu+2) - lgamma(2*(n+nu)+3)); - complex double presum = exp(exponent); - presum *= cexp(I*(mu-m)*kdlj.phi) * min1pow(m) * ipow(nu+n+1) / ( - (4*n)*(n+1)*(n+m+1)); + double exponent=(lgamma(2*n+3)-lgamma(n+2)+lgamma(2*nu+3)-lgamma(nu+2) + +lgamma(n+nu+m-mu+2)-lgamma(n-m+1)-lgamma(nu+mu+1) + +lgamma(n+nu+2) - lgamma(2*(n+nu)+3)); + complex double presum = exp(exponent); + presum *= cexp(I*(mu-m)*kdlj.phi) * min1pow(m) * ipow(nu+n+1) / ( + (4*n)*(n+1)*(n+m+1)); - // Taylor normalisation v2, proven to be equivalent - complex double prenormratio = ipow(nu-n); + // Taylor normalisation v2, proven to be equivalent + complex double prenormratio = ipow(nu-n); - return (presum / prenormratio) * sum; + return (presum / prenormratio) * 
sum; } complex double qpms_trans_single_B(qpms_normalisation_t norm, - int m, int n, int mu, int nu, sph_t kdlj, - bool r_ge_d, qpms_bessel_t J) { + int m, int n, int mu, int nu, sph_t kdlj, + bool r_ge_d, qpms_bessel_t J) { #ifndef USE_BROKEN_SINGLETC - assert(0); // FIXME probably gives wrong values, do not use. + assert(0); // FIXME probably gives wrong values, do not use. #endif - if(r_ge_d) J = QPMS_BESSEL_REGULAR; - double costheta = cos(kdlj.theta); + if(r_ge_d) J = QPMS_BESSEL_REGULAR; + double costheta = cos(kdlj.theta); - int q2max = gaunt_q_max(-m-1,n+1,mu+1,nu); - int Qmax = gaunt_q_max(-m,n+1,mu,nu); - int realQmax = gauntB_Q_max(-m,n,mu,nu); - double a2q[q2max+1], a3q[Qmax+1], a2q0, a3q0; - int err; - if (mu == nu) { - for (int q = 0; q <= q2max; ++q) - a2q[q] = 0; - a2q0 = 1; - } - else { - gaunt_xu(-m-1,n+1,mu+1,nu,q2max,a2q,&err); if (err) abort(); - a2q0 = a2q[0]; - } - gaunt_xu(-m,n+1,mu,nu,Qmax,a3q,&err); if (err) abort(); - a3q0 = a3q[0]; + int q2max = gaunt_q_max(-m-1,n+1,mu+1,nu); + int Qmax = gaunt_q_max(-m,n+1,mu,nu); + int realQmax = gauntB_Q_max(-m,n,mu,nu); + double a2q[q2max+1], a3q[Qmax+1], a2q0, a3q0; + int err; + if (mu == nu) { + for (int q = 0; q <= q2max; ++q) + a2q[q] = 0; + a2q0 = 1; + } + else { + gaunt_xu(-m-1,n+1,mu+1,nu,q2max,a2q,&err); if (err) abort(); + a2q0 = a2q[0]; + } + gaunt_xu(-m,n+1,mu,nu,Qmax,a3q,&err); if (err) abort(); + a3q0 = a3q[0]; - double leg[gsl_sf_legendre_array_n(n+nu+1)]; - int csphase = qpms_normalisation_t_csphase(norm);// FIXME EITHER TO NORMFAC OR USE HERE - if (gsl_sf_legendre_array_e(GSL_SF_LEGENDRE_NONE,n+nu+1,costheta,csphase,leg)) abort(); - complex double bes[n+nu+2]; - if (qpms_sph_bessel_fill(J, n+nu+1, kdlj.r, bes)) abort(); + double leg[gsl_sf_legendre_array_n(n+nu+1)]; + int csphase = qpms_normalisation_t_csphase(norm);// FIXME EITHER TO NORMFAC OR USE HERE + if (gsl_sf_legendre_array_e(GSL_SF_LEGENDRE_NONE,n+nu+1,costheta,csphase,leg)) abort(); + complex double bes[n+nu+2]; + if (qpms_sph_bessel_fill(J, n+nu+1, kdlj.r, bes)) abort(); - complex double sum = 0; - for (int q = 0; q <= realQmax; ++q) { - int p = n+nu-2*q; - double a2q_n = a2q[q]/a2q0; - double a3q_n = a3q[q]/a3q0; - complex double zp_ = bes[p+1]; - int Pp_order_ = mu-m; - //if(p+1 < abs(Pp_order_)) continue; // FIXME raději nastav lépe meze - assert(p+1 >= abs(Pp_order_)); - double Pp_ = leg[gsl_sf_legendre_array_index(p+1, abs(Pp_order_))]; - if (Pp_order_ < 0) Pp_ *= min1pow(mu-m) * exp(lgamma(1+1+p+Pp_order_)-lgamma(1+1+p-Pp_order_)); - complex double summandq = ((2*(n+1)*(nu-mu)*a2q_n - -(-nu*(nu+1) - n*(n+3) - 2*mu*(n+1)+p*(p+3))* a3q_n) - *min1pow(q) * zp_ * Pp_); - sum += summandq; //TODO KAHAN - } + complex double sum = 0; + for (int q = 0; q <= realQmax; ++q) { + int p = n+nu-2*q; + double a2q_n = a2q[q]/a2q0; + double a3q_n = a3q[q]/a3q0; + complex double zp_ = bes[p+1]; + int Pp_order_ = mu-m; + //if(p+1 < abs(Pp_order_)) continue; // FIXME raději nastav lépe meze + assert(p+1 >= abs(Pp_order_)); + double Pp_ = leg[gsl_sf_legendre_array_index(p+1, abs(Pp_order_))]; + if (Pp_order_ < 0) Pp_ *= min1pow(mu-m) * exp(lgamma(1+1+p+Pp_order_)-lgamma(1+1+p-Pp_order_)); + complex double summandq = ((2*(n+1)*(nu-mu)*a2q_n + -(-nu*(nu+1) - n*(n+3) - 2*mu*(n+1)+p*(p+3))* a3q_n) + *min1pow(q) * zp_ * Pp_); + sum += summandq; //TODO KAHAN + } - double exponent=(lgamma(2*n+3)-lgamma(n+2)+lgamma(2*nu+3)-lgamma(nu+2) - +lgamma(n+nu+m-mu+2)-lgamma(n-m+1)-lgamma(nu+mu+1) - +lgamma(n+nu+2) - lgamma(2*(n+nu)+3)); - complex double presum = exp(exponent); - 
presum *= cexp(I*(mu-m)*kdlj.phi) * min1pow(m) * ipow(nu+n+1) / ( - (4*n)*(n+1)*(n+m+1)); + double exponent=(lgamma(2*n+3)-lgamma(n+2)+lgamma(2*nu+3)-lgamma(nu+2) + +lgamma(n+nu+m-mu+2)-lgamma(n-m+1)-lgamma(nu+mu+1) + +lgamma(n+nu+2) - lgamma(2*(n+nu)+3)); + complex double presum = exp(exponent); + presum *= cexp(I*(mu-m)*kdlj.phi) * min1pow(m) * ipow(nu+n+1) / ( + (4*n)*(n+1)*(n+m+1)); - double normlogfac = qpms_trans_normlogfac(norm,m,n,mu,nu); - double normfac = qpms_trans_normfac(norm,m,n,mu,nu); + double normlogfac = qpms_trans_normlogfac(norm,m,n,mu,nu); + double normfac = qpms_trans_normfac(norm,m,n,mu,nu); - // ipow(n-nu) is the difference from the "old Taylor" formula - presum *= /*ipow(n-nu) * */(exp(normlogfac) * normfac); + // ipow(n-nu) is the difference from the "old Taylor" formula + presum *= /*ipow(n-nu) * */(exp(normlogfac) * normfac); - return presum * sum; + return presum * sum; } complex double qpms_trans_single_B_Taylor(int m, int n, int mu, int nu, sph_t kdlj, - bool r_ge_d, qpms_bessel_t J) { - assert(0); // FIXME probably gives wrong values, do not use. - if(r_ge_d) J = QPMS_BESSEL_REGULAR; - double costheta = cos(kdlj.theta); + bool r_ge_d, qpms_bessel_t J) { + assert(0); // FIXME probably gives wrong values, do not use. + if(r_ge_d) J = QPMS_BESSEL_REGULAR; + double costheta = cos(kdlj.theta); - int q2max = gaunt_q_max(-m-1,n+1,mu+1,nu); - int Qmax = gaunt_q_max(-m,n+1,mu,nu); - int realQmax = gauntB_Q_max(-m,n,mu,nu); - double a2q[q2max+1], a3q[Qmax+1], a2q0, a3q0; - int err; - if (mu == nu) { - for (int q = 0; q <= q2max; ++q) - a2q[q] = 0; - a2q0 = 1; - } - else { - gaunt_xu(-m-1,n+1,mu+1,nu,q2max,a2q,&err); if (err) abort(); - a2q0 = a2q[0]; - } - gaunt_xu(-m,n+1,mu,nu,Qmax,a3q,&err); if (err) abort(); - a3q0 = a3q[0]; + int q2max = gaunt_q_max(-m-1,n+1,mu+1,nu); + int Qmax = gaunt_q_max(-m,n+1,mu,nu); + int realQmax = gauntB_Q_max(-m,n,mu,nu); + double a2q[q2max+1], a3q[Qmax+1], a2q0, a3q0; + int err; + if (mu == nu) { + for (int q = 0; q <= q2max; ++q) + a2q[q] = 0; + a2q0 = 1; + } + else { + gaunt_xu(-m-1,n+1,mu+1,nu,q2max,a2q,&err); if (err) abort(); + a2q0 = a2q[0]; + } + gaunt_xu(-m,n+1,mu,nu,Qmax,a3q,&err); if (err) abort(); + a3q0 = a3q[0]; - double leg[gsl_sf_legendre_array_n(n+nu+1)]; - if (gsl_sf_legendre_array_e(GSL_SF_LEGENDRE_NONE,n+nu+1,costheta,-1,leg)) abort(); - complex double bes[n+nu+2]; - if (qpms_sph_bessel_fill(J, n+nu+1, kdlj.r, bes)) abort(); + double leg[gsl_sf_legendre_array_n(n+nu+1)]; + if (gsl_sf_legendre_array_e(GSL_SF_LEGENDRE_NONE,n+nu+1,costheta,-1,leg)) abort(); + complex double bes[n+nu+2]; + if (qpms_sph_bessel_fill(J, n+nu+1, kdlj.r, bes)) abort(); - complex double sum = 0; - for (int q = 0; q <= realQmax; ++q) { - int p = n+nu-2*q; - double a2q_n = a2q[q]/a2q0; - double a3q_n = a3q[q]/a3q0; - complex double zp_ = bes[p+1]; - int Pp_order_ = mu-m; - //if(p+1 < abs(Pp_order_)) continue; // FIXME raději nastav lépe meze - assert(p+1 >= abs(Pp_order_)); - double Pp_ = leg[gsl_sf_legendre_array_index(p+1, abs(Pp_order_))]; - if (Pp_order_ < 0) Pp_ *= min1pow(mu-m) * exp(lgamma(1+1+p+Pp_order_)-lgamma(1+1+p-Pp_order_)); - complex double summandq = ((2*(n+1)*(nu-mu)*a2q_n - -(-nu*(nu+1) - n*(n+3) - 2*mu*(n+1)+p*(p+3))* a3q_n) - *min1pow(q) * zp_ * Pp_); - sum += summandq; //TODO KAHAN - } + complex double sum = 0; + for (int q = 0; q <= realQmax; ++q) { + int p = n+nu-2*q; + double a2q_n = a2q[q]/a2q0; + double a3q_n = a3q[q]/a3q0; + complex double zp_ = bes[p+1]; + int Pp_order_ = mu-m; + //if(p+1 < abs(Pp_order_)) continue; 
// FIXME raději nastav lépe meze + assert(p+1 >= abs(Pp_order_)); + double Pp_ = leg[gsl_sf_legendre_array_index(p+1, abs(Pp_order_))]; + if (Pp_order_ < 0) Pp_ *= min1pow(mu-m) * exp(lgamma(1+1+p+Pp_order_)-lgamma(1+1+p-Pp_order_)); + complex double summandq = ((2*(n+1)*(nu-mu)*a2q_n + -(-nu*(nu+1) - n*(n+3) - 2*mu*(n+1)+p*(p+3))* a3q_n) + *min1pow(q) * zp_ * Pp_); + sum += summandq; //TODO KAHAN + } - double exponent=(lgamma(2*n+3)-lgamma(n+2)+lgamma(2*nu+3)-lgamma(nu+2) - +lgamma(n+nu+m-mu+2)-lgamma(n-m+1)-lgamma(nu+mu+1) - +lgamma(n+nu+2) - lgamma(2*(n+nu)+3)); - complex double presum = exp(exponent); - presum *= cexp(I*(mu-m)*kdlj.phi) * min1pow(m) * ipow(nu+n+1) / ( - (4*n)*(n+1)*(n+m+1)); + double exponent=(lgamma(2*n+3)-lgamma(n+2)+lgamma(2*nu+3)-lgamma(nu+2) + +lgamma(n+nu+m-mu+2)-lgamma(n-m+1)-lgamma(nu+mu+1) + +lgamma(n+nu+2) - lgamma(2*(n+nu)+3)); + complex double presum = exp(exponent); + presum *= cexp(I*(mu-m)*kdlj.phi) * min1pow(m) * ipow(nu+n+1) / ( + (4*n)*(n+1)*(n+m+1)); - // Taylor normalisation v2, proven to be equivalent - // ipow(nu-n) is different from the new general formula!!! - complex double prenormratio = ipow(nu-n) * sqrt(((2.*nu+1)/(2.*n+1))* exp( - lgamma(n+m+1)-lgamma(n-m+1)+lgamma(nu-mu+1)-lgamma(nu+mu+1))); + // Taylor normalisation v2, proven to be equivalent + // ipow(nu-n) is different from the new general formula!!! + complex double prenormratio = ipow(nu-n) * sqrt(((2.*nu+1)/(2.*n+1))* exp( + lgamma(n+m+1)-lgamma(n-m+1)+lgamma(nu-mu+1)-lgamma(nu+mu+1))); - return (presum / prenormratio) * sum; + return (presum / prenormratio) * sum; } complex double qpms_trans_single_A_Taylor_ext(int m, int n, int mu, int nu, - double kdlj_r, double kdlj_theta, double kdlj_phi, int r_ge_d, int J) { - sph_t kdlj = {kdlj_r, kdlj_theta, kdlj_phi}; - return qpms_trans_single_A_Taylor(m,n,mu,nu,kdlj,r_ge_d,J); + double kdlj_r, double kdlj_theta, double kdlj_phi, int r_ge_d, int J) { + sph_t kdlj = {kdlj_r, kdlj_theta, kdlj_phi}; + return qpms_trans_single_A_Taylor(m,n,mu,nu,kdlj,r_ge_d,J); } complex double qpms_trans_single_B_Taylor_ext(int m, int n, int mu, int nu, - double kdlj_r, double kdlj_theta, double kdlj_phi, int r_ge_d, int J) { - sph_t kdlj = {kdlj_r, kdlj_theta, kdlj_phi}; - return qpms_trans_single_B_Taylor(m,n,mu,nu,kdlj,r_ge_d,J); + double kdlj_r, double kdlj_theta, double kdlj_phi, int r_ge_d, int J) { + sph_t kdlj = {kdlj_r, kdlj_theta, kdlj_phi}; + return qpms_trans_single_B_Taylor(m,n,mu,nu,kdlj,r_ge_d,J); } void qpms_trans_calculator_free(qpms_trans_calculator *c) { - free(c->A_multipliers[0]); - free(c->A_multipliers); - free(c->B_multipliers[0]); - free(c->B_multipliers); - free(c); + free(c->A_multipliers[0]); + free(c->A_multipliers); + free(c->B_multipliers[0]); + free(c->B_multipliers); + free(c); } static inline size_t qpms_trans_calculator_index_mnmunu(const qpms_trans_calculator *c, - int m, int n, int mu, int nu){ - return c->nelem * qpms_mn2y(m,n) + qpms_mn2y(mu,nu); + int m, int n, int mu, int nu){ + return c->nelem * qpms_mn2y(m,n) + qpms_mn2y(mu,nu); } static inline size_t qpms_trans_calculator_index_yyu(const qpms_trans_calculator *c, - size_t y, size_t yu) { - return c->nelem * y + yu; + size_t y, size_t yu) { + return c->nelem * y + yu; } @@ -478,657 +411,657 @@ static inline int isq(int x) { return x * x; } static inline double fsq(double x) {return x * x; } static void qpms_trans_calculator_multipliers_A_general( - qpms_normalisation_t norm, - complex double *dest, int m, int n, int mu, int nu, int qmax) { - assert(qmax == 
gaunt_q_max(-m,n,mu,nu)); - double a1q[qmax+1]; - int err; - gaunt_xu(-m,n,mu,nu,qmax,a1q,&err); - if (err) abort(); - double a1q0 = a1q[0]; + qpms_normalisation_t norm, + complex double *dest, int m, int n, int mu, int nu, int qmax) { + assert(qmax == gaunt_q_max(-m,n,mu,nu)); + double a1q[qmax+1]; + int err; + gaunt_xu(-m,n,mu,nu,qmax,a1q,&err); + if (err) abort(); + double a1q0 = a1q[0]; - int csphase = qpms_normalisation_t_csphase(norm); //TODO FIXME use this - norm = qpms_normalisation_t_normonly(norm); - double normlogfac = qpms_trans_normlogfac(norm,m,n,mu,nu); - double normfac = qpms_trans_normfac(norm,m,n,mu,nu); + int csphase = qpms_normalisation_t_csphase(norm); //TODO FIXME use this + norm = qpms_normalisation_t_normonly(norm); + double normlogfac = qpms_trans_normlogfac(norm,m,n,mu,nu); + double normfac = qpms_trans_normfac(norm,m,n,mu,nu); - // TODO use csphase to modify normfac here!!!! - // normfac = xxx ? -normfac : normfac; - normfac *= min1pow(m); //different from old Taylor - double exponent=(lgamma(2*n+1)-lgamma(n+2)+lgamma(2*nu+3)-lgamma(nu+2) - +lgamma(n+nu+m-mu+1)-lgamma(n-m+1)-lgamma(nu+mu+1) - +lgamma(n+nu+1) - lgamma(2*(n+nu)+1)) - + normlogfac; - complex double presum = exp(exponent); - presum *= normfac / (4.*n); - presum *= ipow(n+nu); // different from old Taylor + // TODO use csphase to modify normfac here!!!! + // normfac = xxx ? -normfac : normfac; + normfac *= min1pow(m); //different from old Taylor + double exponent=(lgamma(2*n+1)-lgamma(n+2)+lgamma(2*nu+3)-lgamma(nu+2) + +lgamma(n+nu+m-mu+1)-lgamma(n-m+1)-lgamma(nu+mu+1) + +lgamma(n+nu+1) - lgamma(2*(n+nu)+1)) + + normlogfac; + complex double presum = exp(exponent); + presum *= normfac / (4.*n); + presum *= ipow(n+nu); // different from old Taylor - for(int q = 0; q <= qmax; q++) { - int p = n+nu-2*q; - int Pp_order = mu - m; - assert(p >= abs(Pp_order)); - double a1q_n = a1q[q] / a1q0; - // Assuming non_normalized legendre polynomials (normalisation done here by hand)! - double Ppfac = (Pp_order >= 0) ? 1 : - min1pow(mu-m) * exp(lgamma(1+p+Pp_order)-lgamma(1+p-Pp_order)); - double summandfac = (n*(n+1) + nu*(nu+1) - p*(p+1)) * min1pow(q) * a1q_n; - dest[q] = presum * summandfac * Ppfac + for(int q = 0; q <= qmax; q++) { + int p = n+nu-2*q; + int Pp_order = mu - m; + assert(p >= abs(Pp_order)); + double a1q_n = a1q[q] / a1q0; + // Assuming non_normalized legendre polynomials (normalisation done here by hand)! + double Ppfac = (Pp_order >= 0) ? 
1 : + min1pow(mu-m) * exp(lgamma(1+p+Pp_order)-lgamma(1+p-Pp_order)); + double summandfac = (n*(n+1) + nu*(nu+1) - p*(p+1)) * min1pow(q) * a1q_n; + dest[q] = presum * summandfac * Ppfac #ifdef AN1 - * ipow(n-nu) + * ipow(n-nu) #elif defined AN2 - * min1pow(-n+nu) + * min1pow(-n+nu) #elif defined AN3 - * ipow (nu - n) + * ipow (nu - n) #endif #ifdef AM2 - * min1pow(-m+mu) + * min1pow(-m+mu) #endif //NNU - ; - // FIXME I might not need complex here - } + ; + // FIXME I might not need complex here + } } // as in [Xu](61) double cruzan_bfactor(int M, int n, int mu, int nu, int p) { - double logprefac = lgamma(n+M+1) - lgamma(n-M+1) + lgamma(nu+mu+1) - lgamma(nu-mu+1) - + lgamma(p-M-mu+2) - lgamma(p+M+mu+2); - logprefac *= 0.5; - return min1pow(mu+M) * (2*p+3) * exp(logprefac) - * gsl_sf_coupling_3j(2*n, 2*nu, 2*(p+1), 2*M, 2*mu, 2*(-M-mu)) - * gsl_sf_coupling_3j(2*n, 2*nu, 2*p, 0, 0, 0); + double logprefac = lgamma(n+M+1) - lgamma(n-M+1) + lgamma(nu+mu+1) - lgamma(nu-mu+1) + + lgamma(p-M-mu+2) - lgamma(p+M+mu+2); + logprefac *= 0.5; + return min1pow(mu+M) * (2*p+3) * exp(logprefac) + * gsl_sf_coupling_3j(2*n, 2*nu, 2*(p+1), 2*M, 2*mu, 2*(-M-mu)) + * gsl_sf_coupling_3j(2*n, 2*nu, 2*p, 0, 0, 0); } void qpms_trans_calculator_multipliers_B_general( - qpms_normalisation_t norm, - complex double *dest, int m, int n, int mu, int nu, int Qmax){ - // This is according to the Cruzan-type formula [Xu](59) - assert(Qmax == gauntB_Q_max(-m,n,mu,nu)); + qpms_normalisation_t norm, + complex double *dest, int m, int n, int mu, int nu, int Qmax){ + // This is according to the Cruzan-type formula [Xu](59) + assert(Qmax == gauntB_Q_max(-m,n,mu,nu)); - - - int csphase = qpms_normalisation_t_csphase(norm); //TODO FIXME use this - norm = qpms_normalisation_t_normonly(norm); - double normlogfac= qpms_trans_normlogfac(norm,m,n,mu,nu); - double normfac = qpms_trans_normfac(norm,m,n,mu,nu); - // TODO use csphase to modify normfac here!!!! - // normfac = xxx ? -normfac : normfac; - //normfac *= min1pow(m);//different from old taylor - double presum = min1pow(1-m) * (2*nu+1)/(2.*(n*(n+1))) - * exp(lgamma(n+m+1) - lgamma(n-m+1) + lgamma(nu-mu+1) - lgamma(nu+mu+1) - + normlogfac) - * normfac; - - for(int q = BQ_OFFSET; q <= Qmax; ++q) { - int p = n+nu-2*q; + + int csphase = qpms_normalisation_t_csphase(norm); //TODO FIXME use this + norm = qpms_normalisation_t_normonly(norm); + double normlogfac= qpms_trans_normlogfac(norm,m,n,mu,nu); + double normfac = qpms_trans_normfac(norm,m,n,mu,nu); + // TODO use csphase to modify normfac here!!!! + // normfac = xxx ? -normfac : normfac; + //normfac *= min1pow(m);//different from old taylor + + double presum = min1pow(1-m) * (2*nu+1)/(2.*(n*(n+1))) + * exp(lgamma(n+m+1) - lgamma(n-m+1) + lgamma(nu-mu+1) - lgamma(nu+mu+1) + + normlogfac) + * normfac; + + for(int q = BQ_OFFSET; q <= Qmax; ++q) { + int p = n+nu-2*q; int Pp_order = mu - m; // Assuming non-normalised Legendre polynomials, normalise here by hand. // Ppfac_ differs from Ppfac in the A-case by the substitution p->p+1 double Ppfac_ = (Pp_order >= 0)? 
1 : min1pow(mu-m) * exp(lgamma(1+1+p+Pp_order)-lgamma(1+1+p-Pp_order)); - double t = sqrt( - (isq(p+1)-isq(n-nu)) - * (isq(n+nu+1)-isq(p+1)) - ); - dest[q-BQ_OFFSET] = presum * t * Ppfac_ - * cruzan_bfactor(-m,n,mu,nu,p) * ipow(p+1) + double t = sqrt( + (isq(p+1)-isq(n-nu)) + * (isq(n+nu+1)-isq(p+1)) + ); + dest[q-BQ_OFFSET] = presum * t * Ppfac_ + * cruzan_bfactor(-m,n,mu,nu,p) * ipow(p+1) #ifdef BN1 - * ipow(n-nu) + * ipow(n-nu) #elif defined BN2 - * min1pow(-n+nu) + * min1pow(-n+nu) #elif defined BN3 - * ipow (nu - n) + * ipow (nu - n) #endif #ifdef BM2 - * min1pow(-m+mu) + * min1pow(-m+mu) #endif #ifdef BF1 - * I + * I #elif defined BF2 - * (-1) + * (-1) #elif defined BF3 - * (-I) + * (-I) #endif - ;// NNU - } + ;// NNU + } } /*static*/ void qpms_trans_calculator_multipliers_B_general_oldXu( - qpms_normalisation_t norm, - complex double *dest, int m, int n, int mu, int nu, int Qmax) { - assert(0); // FIXME probably gives wrong values, do not use. - assert(Qmax == gauntB_Q_max(-m,n,mu,nu)); - int q2max = gaunt_q_max(-m-1,n+1,mu+1,nu); - // assert(Qmax == q2max); - // FIXME is it safe to replace q2max with Qmax in gaunt_xu?? - double a2q[q2max+1], a3q[Qmax+1], a2q0, a3q0; - int err; - if (mu == nu) { - for (int q = 0; q <= q2max; ++q) - a2q[q] = 0; - a2q0 = 1; - } - else { - gaunt_xu(-m-1,n+1,mu+1,nu,q2max,a2q,&err); if (err) abort(); - a2q0 = a2q[0]; - } - gaunt_xu(-m,n+1,mu,nu,q2max,a3q,&err); if (err) abort(); // FIXME this should probably go away - a3q0 = a3q[0]; + qpms_normalisation_t norm, + complex double *dest, int m, int n, int mu, int nu, int Qmax) { + assert(0); // FIXME probably gives wrong values, do not use. + assert(Qmax == gauntB_Q_max(-m,n,mu,nu)); + int q2max = gaunt_q_max(-m-1,n+1,mu+1,nu); + // assert(Qmax == q2max); + // FIXME is it safe to replace q2max with Qmax in gaunt_xu?? + double a2q[q2max+1], a3q[Qmax+1], a2q0, a3q0; + int err; + if (mu == nu) { + for (int q = 0; q <= q2max; ++q) + a2q[q] = 0; + a2q0 = 1; + } + else { + gaunt_xu(-m-1,n+1,mu+1,nu,q2max,a2q,&err); if (err) abort(); + a2q0 = a2q[0]; + } + gaunt_xu(-m,n+1,mu,nu,q2max,a3q,&err); if (err) abort(); // FIXME this should probably go away + a3q0 = a3q[0]; - int csphase = qpms_normalisation_t_csphase(norm); //TODO FIXME use this - norm = qpms_normalisation_t_normonly(norm); - double normlogfac= qpms_trans_normlogfac(norm,m,n,mu,nu); - double normfac = qpms_trans_normfac(norm,m,n,mu,nu); - // TODO use csphase to modify normfac here!!!! - // normfac = xxx ? -normfac : normfac; - normfac *= min1pow(m);//different from old taylor + int csphase = qpms_normalisation_t_csphase(norm); //TODO FIXME use this + norm = qpms_normalisation_t_normonly(norm); + double normlogfac= qpms_trans_normlogfac(norm,m,n,mu,nu); + double normfac = qpms_trans_normfac(norm,m,n,mu,nu); + // TODO use csphase to modify normfac here!!!! + // normfac = xxx ? 
-normfac : normfac; + normfac *= min1pow(m);//different from old taylor - double exponent=(lgamma(2*n+3)-lgamma(n+2)+lgamma(2*nu+3)-lgamma(nu+2) - +lgamma(n+nu+m-mu+2)-lgamma(n-m+1)-lgamma(nu+mu+1) - +lgamma(n+nu+2) - lgamma(2*(n+nu)+3)) - +normlogfac; - complex double presum = exp(exponent); - presum *= I * ipow(nu+n) /*different from old Taylor */ * normfac / ( - (4*n)*(n+1)*(n+m+1)); + double exponent=(lgamma(2*n+3)-lgamma(n+2)+lgamma(2*nu+3)-lgamma(nu+2) + +lgamma(n+nu+m-mu+2)-lgamma(n-m+1)-lgamma(nu+mu+1) + +lgamma(n+nu+2) - lgamma(2*(n+nu)+3)) + +normlogfac; + complex double presum = exp(exponent); + presum *= I * ipow(nu+n) /*different from old Taylor */ * normfac / ( + (4*n)*(n+1)*(n+m+1)); - for (int q = BQ_OFFSET; q <= Qmax; ++q) { - int p = n+nu-2*q; - double a2q_n = a2q[q]/a2q0; - double a3q_n = a3q[q]/a3q0; - int Pp_order_ = mu-m; - //if(p+1 < abs(Pp_order_)) continue; // FIXME raději nastav lépe meze - assert(p+1 >= abs(Pp_order_)); - double Ppfac = (Pp_order_ >= 0) ? 1 : + for (int q = BQ_OFFSET; q <= Qmax; ++q) { + int p = n+nu-2*q; + double a2q_n = a2q[q]/a2q0; + double a3q_n = a3q[q]/a3q0; + int Pp_order_ = mu-m; + //if(p+1 < abs(Pp_order_)) continue; // FIXME raději nastav lépe meze + assert(p+1 >= abs(Pp_order_)); + double Ppfac = (Pp_order_ >= 0) ? 1 : - min1pow(mu-m) * exp(lgamma(1+1+p+Pp_order_)-lgamma(1+1+p-Pp_order_)); - double summandq = ((2*(n+1)*(nu-mu)*a2q_n - -(-nu*(nu+1) - n*(n+3) - 2*mu*(n+1)+p*(p+3))* a3q_n) - *min1pow(q)); - dest[q-BQ_OFFSET] = Ppfac * summandq * presum; - } + min1pow(mu-m) * exp(lgamma(1+1+p+Pp_order_)-lgamma(1+1+p-Pp_order_)); + double summandq = ((2*(n+1)*(nu-mu)*a2q_n + -(-nu*(nu+1) - n*(n+3) - 2*mu*(n+1)+p*(p+3))* a3q_n) + *min1pow(q)); + dest[q-BQ_OFFSET] = Ppfac * summandq * presum; + } } //#if 0 static void qpms_trans_calculator_multipliers_A_Taylor( - complex double *dest, int m, int n, int mu, int nu, int qmax) { - assert(qmax == gaunt_q_max(-m,n,mu,nu)); - double a1q[qmax+1]; - int err; - gaunt_xu(-m,n,mu,nu,qmax,a1q,&err); - if (err) abort(); - double a1q0 = a1q[0]; + complex double *dest, int m, int n, int mu, int nu, int qmax) { + assert(qmax == gaunt_q_max(-m,n,mu,nu)); + double a1q[qmax+1]; + int err; + gaunt_xu(-m,n,mu,nu,qmax,a1q,&err); + if (err) abort(); + double a1q0 = a1q[0]; - double exponent=(lgamma(2*n+1)-lgamma(n+2)+lgamma(2*nu+3)-lgamma(nu+2) - +lgamma(n+nu+m-mu+1)-lgamma(n-m+1)-lgamma(nu+mu+1) - +lgamma(n+nu+1) - lgamma(2*(n+nu)+1)) - 0.5*( // ex-prenormratio - lgamma(n+m+1)-lgamma(n-m+1)+lgamma(nu-mu+1)-lgamma(nu+mu+1)); - double presum = exp(exponent); - presum *= min1pow(m+n) * sqrt((2.*n+1)/(2.*nu+1)) / (4*n); + double exponent=(lgamma(2*n+1)-lgamma(n+2)+lgamma(2*nu+3)-lgamma(nu+2) + +lgamma(n+nu+m-mu+1)-lgamma(n-m+1)-lgamma(nu+mu+1) + +lgamma(n+nu+1) - lgamma(2*(n+nu)+1)) - 0.5*( // ex-prenormratio + lgamma(n+m+1)-lgamma(n-m+1)+lgamma(nu-mu+1)-lgamma(nu+mu+1)); + double presum = exp(exponent); + presum *= min1pow(m+n) * sqrt((2.*n+1)/(2.*nu+1)) / (4*n); - for(int q = 0; q <= qmax; q++) { - int p = n+nu-2*q; - int Pp_order = mu - m; - assert(p >= abs(Pp_order)); - double a1q_n = a1q[q] / a1q0; - // Assuming non_normalized legendre polynomials! - double Ppfac = (Pp_order >= 0) ? 
1 : - min1pow(mu-m) * exp(lgamma(1+p+Pp_order)-lgamma(1+p-Pp_order)); - double summandfac = (n*(n+1) + nu*(nu+1) - p*(p+1)) * min1pow(q) * a1q_n; - dest[q] = presum * summandfac * Ppfac; - // FIXME I might not need complex here - } + for(int q = 0; q <= qmax; q++) { + int p = n+nu-2*q; + int Pp_order = mu - m; + assert(p >= abs(Pp_order)); + double a1q_n = a1q[q] / a1q0; + // Assuming non_normalized legendre polynomials! + double Ppfac = (Pp_order >= 0) ? 1 : + min1pow(mu-m) * exp(lgamma(1+p+Pp_order)-lgamma(1+p-Pp_order)); + double summandfac = (n*(n+1) + nu*(nu+1) - p*(p+1)) * min1pow(q) * a1q_n; + dest[q] = presum * summandfac * Ppfac; + // FIXME I might not need complex here + } } //#endif #if 0 static void qpms_trans_calculator_multipliers_A_Taylor( - complex double *dest, int m, int n, int mu, int nu, int qmax) { - assert(qmax == gaunt_q_max(-m,n,mu,nu)); - double a1q[qmax+1]; - int err; - gaunt_xu(-m,n,mu,nu,qmax,a1q,&err); - if (err) abort(); - double a1q0 = a1q[0]; - for(int q = 0; q <= qmax; ++q) { - int p = n+nu-2*q; - int Pp_order = mu-m; - //if(p < abs(Pp_order)) continue; // FIXME raději nastav lépe meze - assert(p >= abs(Pp_order)); - double a1q_n = a1q[q] / a1q0; - //double Pp = leg[gsl_sf_legendre_array_index(p, abs(Pp_order))]; - //complex double zp = bes[p]; - dest[q] = (n*(n+1) + nu*(nu+1) - p*(p+1)) * min1pow(q) * a1q_n /* * zp * Pp*/; - if (Pp_order < 0) dest[q] *= min1pow(mu-m) * exp(lgamma(1+p+Pp_order)-lgamma(1+p-Pp_order)); - //sum += summandq; - } + complex double *dest, int m, int n, int mu, int nu, int qmax) { + assert(qmax == gaunt_q_max(-m,n,mu,nu)); + double a1q[qmax+1]; + int err; + gaunt_xu(-m,n,mu,nu,qmax,a1q,&err); + if (err) abort(); + double a1q0 = a1q[0]; + for(int q = 0; q <= qmax; ++q) { + int p = n+nu-2*q; + int Pp_order = mu-m; + //if(p < abs(Pp_order)) continue; // FIXME raději nastav lépe meze + assert(p >= abs(Pp_order)); + double a1q_n = a1q[q] / a1q0; + //double Pp = leg[gsl_sf_legendre_array_index(p, abs(Pp_order))]; + //complex double zp = bes[p]; + dest[q] = (n*(n+1) + nu*(nu+1) - p*(p+1)) * min1pow(q) * a1q_n /* * zp * Pp*/; + if (Pp_order < 0) dest[q] *= min1pow(mu-m) * exp(lgamma(1+p+Pp_order)-lgamma(1+p-Pp_order)); + //sum += summandq; + } - double exponent=(lgamma(2*n+1)-lgamma(n+2)+lgamma(2*nu+3)-lgamma(nu+2) - +lgamma(n+nu+m-mu+1)-lgamma(n-m+1)-lgamma(nu+mu+1) - +lgamma(n+nu+1) - lgamma(2*(n+nu)+1)); - complex double presum = exp(exponent); - presum *=/* cexp(I*(mu-m)*kdlj.phi) * */ min1pow(m) * ipow(nu+n) / (4*n); + double exponent=(lgamma(2*n+1)-lgamma(n+2)+lgamma(2*nu+3)-lgamma(nu+2) + +lgamma(n+nu+m-mu+1)-lgamma(n-m+1)-lgamma(nu+mu+1) + +lgamma(n+nu+1) - lgamma(2*(n+nu)+1)); + complex double presum = exp(exponent); + presum *=/* cexp(I*(mu-m)*kdlj.phi) * */ min1pow(m) * ipow(nu+n) / (4*n); - complex double prenormratio = ipow(nu-n) * sqrt(((2.*nu+1)/(2.*n+1))* exp( - lgamma(n+m+1)-lgamma(n-m+1)+lgamma(nu-mu+1)-lgamma(nu+mu+1))); - //return (presum / prenormratio) * sum; - for(int q=0;q<=qmax;++q) dest[q] *= presum / prenormratio; + complex double prenormratio = ipow(nu-n) * sqrt(((2.*nu+1)/(2.*n+1))* exp( + lgamma(n+m+1)-lgamma(n-m+1)+lgamma(nu-mu+1)-lgamma(nu+mu+1))); + //return (presum / prenormratio) * sum; + for(int q=0;q<=qmax;++q) dest[q] *= presum / prenormratio; } #endif static void qpms_trans_calculator_multipliers_B_Taylor( - complex double *dest, int m, int n, int mu, int nu, int Qmax) { - assert(0); // FIXME probably gives wrong values, do not use. 
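(Editorial sketch, not part of the patch.) On the AN1/AN2/AN3 and BN1/BN2/BN3 alternatives used in the general multiplier routines above: the three candidate factors satisfy i^(n-nu) = (-1)^(n-nu) * i^(nu-n), so the AN1 and AN3 choices differ from each other by exactly an AN2-type parity factor. Which one is correct is a phase-convention question; the header comment of this file only pins down that the zero macros reproduce Xu's tabulated numbers while the predefined nonzero macros give the correct VSWF translations for the QPMS_NORMALIZATION_TAYLOR_CS norm. A tiny self-contained check of the identity:

#include <assert.h>
#include <complex.h>
#include <math.h>

int main(void)
{
	/* i^d equals (-1)^d * i^(-d) for every integer d. */
	for (int d = -8; d <= 8; ++d) {
		complex double lhs = cpow(I, d);
		complex double rhs = ((d % 2) ? -1. : 1.) * cpow(I, -d);
		assert(cabs(lhs - rhs) < 1e-12);
	}
	return 0;
}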
- assert(Qmax == gauntB_Q_max(-m,n,mu,nu)); - int q2max = gaunt_q_max(-m-1,n+1,mu+1,nu); - //assert(Qmax == q2max); - // FIXME remove the q2max variable altogether, as it is probably equal - // to Qmax - double a2q[q2max+1], a3q[q2max+1], a2q0, a3q0; - int err; - if (mu == nu) { - for (int q = 0; q <= q2max; ++q) - a2q[q] = 0; - a2q0 = 1; - } - else { - gaunt_xu(-m-1,n+1,mu+1,nu,q2max,a2q,&err); if (err) abort(); - a2q0 = a2q[0]; - } - gaunt_xu(-m,n+1,mu,nu,q2max,a3q,&err); if (err) abort(); - a3q0 = a3q[0]; + complex double *dest, int m, int n, int mu, int nu, int Qmax) { + assert(0); // FIXME probably gives wrong values, do not use. + assert(Qmax == gauntB_Q_max(-m,n,mu,nu)); + int q2max = gaunt_q_max(-m-1,n+1,mu+1,nu); + //assert(Qmax == q2max); + // FIXME remove the q2max variable altogether, as it is probably equal + // to Qmax + double a2q[q2max+1], a3q[q2max+1], a2q0, a3q0; + int err; + if (mu == nu) { + for (int q = 0; q <= q2max; ++q) + a2q[q] = 0; + a2q0 = 1; + } + else { + gaunt_xu(-m-1,n+1,mu+1,nu,q2max,a2q,&err); if (err) abort(); + a2q0 = a2q[0]; + } + gaunt_xu(-m,n+1,mu,nu,q2max,a3q,&err); if (err) abort(); + a3q0 = a3q[0]; - double exponent=(lgamma(2*n+3)-lgamma(n+2)+lgamma(2*nu+3)-lgamma(nu+2) - +lgamma(n+nu+m-mu+2)-lgamma(n-m+1)-lgamma(nu+mu+1) - +lgamma(n+nu+2) - lgamma(2*(n+nu)+3)) - 0.5 * ( - lgamma(n+m+1)-lgamma(n-m+1)+lgamma(nu-mu+1) - -lgamma(nu+mu+1)); - complex double presum = exp(exponent); - presum *= I * (min1pow(m+n) *sqrt((2.*n+1)/(2.*nu+1)) / ( - (4*n)*(n+1)*(n+m+1))); + double exponent=(lgamma(2*n+3)-lgamma(n+2)+lgamma(2*nu+3)-lgamma(nu+2) + +lgamma(n+nu+m-mu+2)-lgamma(n-m+1)-lgamma(nu+mu+1) + +lgamma(n+nu+2) - lgamma(2*(n+nu)+3)) - 0.5 * ( + lgamma(n+m+1)-lgamma(n-m+1)+lgamma(nu-mu+1) + -lgamma(nu+mu+1)); + complex double presum = exp(exponent); + presum *= I * (min1pow(m+n) *sqrt((2.*n+1)/(2.*nu+1)) / ( + (4*n)*(n+1)*(n+m+1))); - for (int q = BQ_OFFSET; q <= Qmax; ++q) { - int p = n+nu-2*q; - double a2q_n = a2q[q]/a2q0; - double a3q_n = a3q[q]/a3q0; - int Pp_order_ = mu-m; - //if(p+1 < abs(Pp_order_)) continue; // FIXME raději nastav lépe meze - assert(p+1 >= abs(Pp_order_)); - double Ppfac = (Pp_order_ >= 0) ? 1 : + for (int q = BQ_OFFSET; q <= Qmax; ++q) { + int p = n+nu-2*q; + double a2q_n = a2q[q]/a2q0; + double a3q_n = a3q[q]/a3q0; + int Pp_order_ = mu-m; + //if(p+1 < abs(Pp_order_)) continue; // FIXME raději nastav lépe meze + assert(p+1 >= abs(Pp_order_)); + double Ppfac = (Pp_order_ >= 0) ? 
1 : - min1pow(mu-m) * exp(lgamma(1+1+p+Pp_order_)-lgamma(1+1+p-Pp_order_)); - double summandq = ((2*(n+1)*(nu-mu)*a2q_n - -(-nu*(nu+1) - n*(n+3) - 2*mu*(n+1)+p*(p+3))* a3q_n) - *min1pow(q)); - dest[q-BQ_OFFSET] = Ppfac * summandq * presum; - } + min1pow(mu-m) * exp(lgamma(1+1+p+Pp_order_)-lgamma(1+1+p-Pp_order_)); + double summandq = ((2*(n+1)*(nu-mu)*a2q_n + -(-nu*(nu+1) - n*(n+3) - 2*mu*(n+1)+p*(p+3))* a3q_n) + *min1pow(q)); + dest[q-BQ_OFFSET] = Ppfac * summandq * presum; + } } int qpms_trans_calculator_multipliers_A(qpms_normalisation_t norm, complex double *dest, int m, int n, int mu, int nu, int qmax) { - switch (qpms_normalisation_t_normonly(norm)) { - case QPMS_NORMALISATION_TAYLOR: + switch (qpms_normalisation_t_normonly(norm)) { + case QPMS_NORMALISATION_TAYLOR: #ifdef USE_SEPARATE_TAYLOR - qpms_trans_calculator_multipliers_A_Taylor(dest,m,n,mu,nu,qmax); - return 0; - break; + qpms_trans_calculator_multipliers_A_Taylor(dest,m,n,mu,nu,qmax); + return 0; + break; #endif - case QPMS_NORMALISATION_XU: - case QPMS_NORMALISATION_KRISTENSSON: - qpms_trans_calculator_multipliers_A_general(norm, dest, m, n, mu, nu, qmax); - return 0; - break; - default: - abort(); - } - assert(0); + case QPMS_NORMALISATION_XU: + case QPMS_NORMALISATION_KRISTENSSON: + qpms_trans_calculator_multipliers_A_general(norm, dest, m, n, mu, nu, qmax); + return 0; + break; + default: + abort(); + } + assert(0); } int qpms_trans_calculator_multipliers_B(qpms_normalisation_t norm, complex double *dest, int m, int n, int mu, int nu, int Qmax) { - switch (qpms_normalisation_t_normonly(norm)) { - case QPMS_NORMALISATION_TAYLOR: + switch (qpms_normalisation_t_normonly(norm)) { + case QPMS_NORMALISATION_TAYLOR: #ifdef USE_SEPARATE_TAYLOR - qpms_trans_calculator_multipliers_B_Taylor(dest,m,n,mu,nu,Qmax); - return 0; - break; + qpms_trans_calculator_multipliers_B_Taylor(dest,m,n,mu,nu,Qmax); + return 0; + break; #endif - case QPMS_NORMALISATION_XU: - case QPMS_NORMALISATION_KRISTENSSON: - qpms_trans_calculator_multipliers_B_general(norm, dest, m, n, mu, nu, Qmax); - return 0; - break; - default: - abort(); - } - assert(0); + case QPMS_NORMALISATION_XU: + case QPMS_NORMALISATION_KRISTENSSON: + qpms_trans_calculator_multipliers_B_general(norm, dest, m, n, mu, nu, Qmax); + return 0; + break; + default: + abort(); + } + assert(0); } qpms_trans_calculator *qpms_trans_calculator_init (int lMax, qpms_normalisation_t normalisation) { - assert(lMax > 0); - qpms_trans_calculator *c = malloc(sizeof(qpms_trans_calculator)); - c->lMax = lMax; - c->nelem = lMax * (lMax+2); - c->A_multipliers = malloc((1+SQ(c->nelem)) * sizeof(complex double *)); - c->B_multipliers = malloc((1+SQ(c->nelem)) * sizeof(complex double *)); - c->normalisation = normalisation; - size_t *qmaxes = malloc(SQ(c->nelem) * sizeof(size_t)); - size_t qmaxsum = 0; - for(size_t y = 0; y < c->nelem; y++) - for(size_t yu = 0; yu < c->nelem; yu++) { - int m,n, mu, nu; - qpms_y2mn_p(y,&m,&n); - qpms_y2mn_p(yu,&mu,&nu); - qmaxsum += 1 + ( - qmaxes[qpms_trans_calculator_index_yyu(c,y,yu)] - = gaunt_q_max(-m,n,mu,nu)); - } - c->A_multipliers[0] = malloc(qmaxsum * sizeof(complex double)); - // calculate multiplier beginnings - for(size_t i = 0; i < SQ(c->nelem); ++i) - c->A_multipliers[i+1] = c->A_multipliers[i] + qmaxes[i] + 1; - // calculate the multipliers - for(size_t y = 0; y < c->nelem; ++y) - for(size_t yu = 0; yu < c->nelem; ++yu) { - size_t i = y * c->nelem + yu; - int m, n, mu, nu; - qpms_y2mn_p(y, &m, &n); - qpms_y2mn_p(yu, &mu, &nu); - 
qpms_trans_calculator_multipliers_A(normalisation, - c->A_multipliers[i], m, n, mu, nu, qmaxes[i]); - } + assert(lMax > 0); + qpms_trans_calculator *c = malloc(sizeof(qpms_trans_calculator)); + c->lMax = lMax; + c->nelem = lMax * (lMax+2); + c->A_multipliers = malloc((1+SQ(c->nelem)) * sizeof(complex double *)); + c->B_multipliers = malloc((1+SQ(c->nelem)) * sizeof(complex double *)); + c->normalisation = normalisation; + size_t *qmaxes = malloc(SQ(c->nelem) * sizeof(size_t)); + size_t qmaxsum = 0; + for(size_t y = 0; y < c->nelem; y++) + for(size_t yu = 0; yu < c->nelem; yu++) { + int m,n, mu, nu; + qpms_y2mn_p(y,&m,&n); + qpms_y2mn_p(yu,&mu,&nu); + qmaxsum += 1 + ( + qmaxes[qpms_trans_calculator_index_yyu(c,y,yu)] + = gaunt_q_max(-m,n,mu,nu)); + } + c->A_multipliers[0] = malloc(qmaxsum * sizeof(complex double)); + // calculate multiplier beginnings + for(size_t i = 0; i < SQ(c->nelem); ++i) + c->A_multipliers[i+1] = c->A_multipliers[i] + qmaxes[i] + 1; + // calculate the multipliers + for(size_t y = 0; y < c->nelem; ++y) + for(size_t yu = 0; yu < c->nelem; ++yu) { + size_t i = y * c->nelem + yu; + int m, n, mu, nu; + qpms_y2mn_p(y, &m, &n); + qpms_y2mn_p(yu, &mu, &nu); + qpms_trans_calculator_multipliers_A(normalisation, + c->A_multipliers[i], m, n, mu, nu, qmaxes[i]); + } - qmaxsum = 0; - for(size_t y=0; y < c->nelem; y++) - for(size_t yu = 0; yu < c->nelem; yu++) { - int m, n, mu, nu; - qpms_y2mn_p(y,&m,&n); - qpms_y2mn_p(yu,&mu,&nu); - qmaxsum += (1 - BQ_OFFSET) + ( - qmaxes[qpms_trans_calculator_index_yyu(c,y,yu)] - = gauntB_Q_max(-m,n,mu,nu)); - } - c->B_multipliers[0] = malloc(qmaxsum * sizeof(complex double)); - // calculate multiplier beginnings - for(size_t i = 0; i < SQ(c->nelem); ++i) - c->B_multipliers[i+1] = c->B_multipliers[i] + qmaxes[i] + (1 - BQ_OFFSET); - // calculate the multipliers - for(size_t y = 0; y < c->nelem; ++y) - for(size_t yu = 0; yu < c->nelem; ++yu) { - size_t i = y * c->nelem + yu; - int m, n, mu, nu; - qpms_y2mn_p(y, &m, &n); - qpms_y2mn_p(yu, &mu, &nu); - qpms_trans_calculator_multipliers_B(normalisation, - c->B_multipliers[i], m, n, mu, nu, qmaxes[i]); - } + qmaxsum = 0; + for(size_t y=0; y < c->nelem; y++) + for(size_t yu = 0; yu < c->nelem; yu++) { + int m, n, mu, nu; + qpms_y2mn_p(y,&m,&n); + qpms_y2mn_p(yu,&mu,&nu); + qmaxsum += (1 - BQ_OFFSET) + ( + qmaxes[qpms_trans_calculator_index_yyu(c,y,yu)] + = gauntB_Q_max(-m,n,mu,nu)); + } + c->B_multipliers[0] = malloc(qmaxsum * sizeof(complex double)); + // calculate multiplier beginnings + for(size_t i = 0; i < SQ(c->nelem); ++i) + c->B_multipliers[i+1] = c->B_multipliers[i] + qmaxes[i] + (1 - BQ_OFFSET); + // calculate the multipliers + for(size_t y = 0; y < c->nelem; ++y) + for(size_t yu = 0; yu < c->nelem; ++yu) { + size_t i = y * c->nelem + yu; + int m, n, mu, nu; + qpms_y2mn_p(y, &m, &n); + qpms_y2mn_p(yu, &mu, &nu); + qpms_trans_calculator_multipliers_B(normalisation, + c->B_multipliers[i], m, n, mu, nu, qmaxes[i]); + } - free(qmaxes); - return c; + free(qmaxes); + return c; } static inline complex double qpms_trans_calculator_get_A_precalcbuf(const qpms_trans_calculator *c, - int m, int n, int mu, int nu, sph_t kdlj, - bool r_ge_d, qpms_bessel_t J, - const complex double *bessel_buf, const double *legendre_buf) { - size_t i = qpms_trans_calculator_index_mnmunu(c, m, n, mu, nu); - size_t qmax = c->A_multipliers[i+1] - c->A_multipliers[i] - 1; - assert(qmax == gaunt_q_max(-m,n,mu,nu)); - complex double sum, kahanc; - ckahaninit(&sum, &kahanc); - for(size_t q = 0; q <= qmax; ++q) { - int p = 
n+nu-2*q; - double Pp = legendre_buf[gsl_sf_legendre_array_index(p, abs(mu-m))]; - complex double zp = bessel_buf[p]; - complex double multiplier = c->A_multipliers[i][q]; - ckahanadd(&sum, &kahanc, Pp * zp * multiplier); - } - complex double eimf = cexp(I*(mu-m)*kdlj.phi); - return sum * eimf; + int m, int n, int mu, int nu, sph_t kdlj, + bool r_ge_d, qpms_bessel_t J, + const complex double *bessel_buf, const double *legendre_buf) { + size_t i = qpms_trans_calculator_index_mnmunu(c, m, n, mu, nu); + size_t qmax = c->A_multipliers[i+1] - c->A_multipliers[i] - 1; + assert(qmax == gaunt_q_max(-m,n,mu,nu)); + complex double sum, kahanc; + ckahaninit(&sum, &kahanc); + for(size_t q = 0; q <= qmax; ++q) { + int p = n+nu-2*q; + double Pp = legendre_buf[gsl_sf_legendre_array_index(p, abs(mu-m))]; + complex double zp = bessel_buf[p]; + complex double multiplier = c->A_multipliers[i][q]; + ckahanadd(&sum, &kahanc, Pp * zp * multiplier); + } + complex double eimf = cexp(I*(mu-m)*kdlj.phi); + return sum * eimf; } complex double qpms_trans_calculator_get_A_buf(const qpms_trans_calculator *c, - int m, int n, int mu, int nu, sph_t kdlj, - bool r_ge_d, qpms_bessel_t J, - complex double *bessel_buf, double *legendre_buf) { - // This functions gets preallocated memory for bessel and legendre functions, but computes them itself - if (r_ge_d) J = QPMS_BESSEL_REGULAR; - if (0 == kdlj.r && J != QPMS_BESSEL_REGULAR) - // TODO warn? - return NAN+I*NAN; - int csphase = qpms_normalisation_t_csphase(c->normalisation); - switch(c->normalisation) { - // TODO use normalised legendre functions for Taylor and Kristensson - case QPMS_NORMALISATION_TAYLOR: - case QPMS_NORMALISATION_KRISTENSSON: - case QPMS_NORMALISATION_XU: - { - double costheta = cos(kdlj.theta); - if (gsl_sf_legendre_array_e(GSL_SF_LEGENDRE_NONE,n+nu, - costheta,csphase,legendre_buf)) abort(); - if (qpms_sph_bessel_fill(J, n+nu+1, kdlj.r, bessel_buf)) abort(); - return qpms_trans_calculator_get_A_precalcbuf(c,m,n,mu,nu, - kdlj,r_ge_d,J,bessel_buf,legendre_buf); - } - break; - default: - abort(); - } - assert(0); + int m, int n, int mu, int nu, sph_t kdlj, + bool r_ge_d, qpms_bessel_t J, + complex double *bessel_buf, double *legendre_buf) { + // This functions gets preallocated memory for bessel and legendre functions, but computes them itself + if (r_ge_d) J = QPMS_BESSEL_REGULAR; + if (0 == kdlj.r && J != QPMS_BESSEL_REGULAR) + // TODO warn? 
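
/* The q-loop in qpms_trans_calculator_get_A_precalcbuf() above accumulates its sum
 * with compensated (Kahan) summation through ckahaninit()/ckahanadd(), whose
 * definitions are not part of this patch. A minimal sketch consistent with the way
 * they are called here (an assumption, not the library's actual code); complex
 * addition is componentwise, so the usual Kahan algebra carries over directly:
 */
#include <complex.h>

static inline void ckahaninit_sketch(complex double *sum, complex double *c) {
    *sum = 0;
    *c = 0;                              /* running compensation term */
}

static inline void ckahanadd_sketch(complex double *sum, complex double *c,
                                    complex double input) {
    complex double y = input - *c;       /* apply the stored compensation */
    complex double t = *sum + y;         /* low-order bits of y may be lost here */
    *c = (t - *sum) - y;                 /* recover what was lost ...            */
    *sum = t;                            /* ... and carry it to the next addition */
}
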
+ return NAN+I*NAN; + int csphase = qpms_normalisation_t_csphase(c->normalisation); + switch(c->normalisation) { + // TODO use normalised legendre functions for Taylor and Kristensson + case QPMS_NORMALISATION_TAYLOR: + case QPMS_NORMALISATION_KRISTENSSON: + case QPMS_NORMALISATION_XU: + { + double costheta = cos(kdlj.theta); + if (gsl_sf_legendre_array_e(GSL_SF_LEGENDRE_NONE,n+nu, + costheta,csphase,legendre_buf)) abort(); + if (qpms_sph_bessel_fill(J, n+nu+1, kdlj.r, bessel_buf)) abort(); + return qpms_trans_calculator_get_A_precalcbuf(c,m,n,mu,nu, + kdlj,r_ge_d,J,bessel_buf,legendre_buf); + } + break; + default: + abort(); + } + assert(0); } static inline complex double qpms_trans_calculator_get_B_precalcbuf(const qpms_trans_calculator *c, - int m, int n, int mu, int nu, sph_t kdlj, - bool r_ge_d, qpms_bessel_t J, - const complex double *bessel_buf, const double *legendre_buf) { - size_t i = qpms_trans_calculator_index_mnmunu(c, m, n, mu, nu); - size_t qmax = c->B_multipliers[i+1] - c->B_multipliers[i] - (1 - BQ_OFFSET); - assert(qmax == gauntB_Q_max(-m,n,mu,nu)); - complex double sum, kahanc; - ckahaninit(&sum, &kahanc); - for(int q = BQ_OFFSET; q <= qmax; ++q) { - int p = n+nu-2*q; - double Pp_ = legendre_buf[gsl_sf_legendre_array_index(p+1, abs(mu-m))]; - complex double zp_ = bessel_buf[p+1]; - complex double multiplier = c->B_multipliers[i][q-BQ_OFFSET]; - ckahanadd(&sum, &kahanc, Pp_ * zp_ * multiplier); - } - complex double eimf = cexp(I*(mu-m)*kdlj.phi); - return sum * eimf; + int m, int n, int mu, int nu, sph_t kdlj, + bool r_ge_d, qpms_bessel_t J, + const complex double *bessel_buf, const double *legendre_buf) { + size_t i = qpms_trans_calculator_index_mnmunu(c, m, n, mu, nu); + size_t qmax = c->B_multipliers[i+1] - c->B_multipliers[i] - (1 - BQ_OFFSET); + assert(qmax == gauntB_Q_max(-m,n,mu,nu)); + complex double sum, kahanc; + ckahaninit(&sum, &kahanc); + for(int q = BQ_OFFSET; q <= qmax; ++q) { + int p = n+nu-2*q; + double Pp_ = legendre_buf[gsl_sf_legendre_array_index(p+1, abs(mu-m))]; + complex double zp_ = bessel_buf[p+1]; + complex double multiplier = c->B_multipliers[i][q-BQ_OFFSET]; + ckahanadd(&sum, &kahanc, Pp_ * zp_ * multiplier); + } + complex double eimf = cexp(I*(mu-m)*kdlj.phi); + return sum * eimf; } complex double qpms_trans_calculator_get_B_buf(const qpms_trans_calculator *c, - int m, int n, int mu, int nu, sph_t kdlj, - bool r_ge_d, qpms_bessel_t J, - complex double *bessel_buf, double *legendre_buf) { - // This functions gets preallocated memory for bessel and legendre functions, but computes them itself - if (r_ge_d) J = QPMS_BESSEL_REGULAR; - if (0 == kdlj.r && J != QPMS_BESSEL_REGULAR) - // TODO warn? 
- return NAN+I*NAN; - int csphase = qpms_normalisation_t_csphase(c->normalisation); - switch(c->normalisation) { - case QPMS_NORMALISATION_TAYLOR: - case QPMS_NORMALISATION_KRISTENSSON: - case QPMS_NORMALISATION_XU: - { - double costheta = cos(kdlj.theta); - if (gsl_sf_legendre_array_e(GSL_SF_LEGENDRE_NONE,n+nu+1, - costheta,csphase,legendre_buf)) abort(); - if (qpms_sph_bessel_fill(J, n+nu+2, kdlj.r, bessel_buf)) abort(); - return qpms_trans_calculator_get_B_precalcbuf(c,m,n,mu,nu, - kdlj,r_ge_d,J,bessel_buf,legendre_buf); - } - break; - default: - abort(); - } - assert(0); + int m, int n, int mu, int nu, sph_t kdlj, + bool r_ge_d, qpms_bessel_t J, + complex double *bessel_buf, double *legendre_buf) { + // This functions gets preallocated memory for bessel and legendre functions, but computes them itself + if (r_ge_d) J = QPMS_BESSEL_REGULAR; + if (0 == kdlj.r && J != QPMS_BESSEL_REGULAR) + // TODO warn? + return NAN+I*NAN; + int csphase = qpms_normalisation_t_csphase(c->normalisation); + switch(c->normalisation) { + case QPMS_NORMALISATION_TAYLOR: + case QPMS_NORMALISATION_KRISTENSSON: + case QPMS_NORMALISATION_XU: + { + double costheta = cos(kdlj.theta); + if (gsl_sf_legendre_array_e(GSL_SF_LEGENDRE_NONE,n+nu+1, + costheta,csphase,legendre_buf)) abort(); + if (qpms_sph_bessel_fill(J, n+nu+2, kdlj.r, bessel_buf)) abort(); + return qpms_trans_calculator_get_B_precalcbuf(c,m,n,mu,nu, + kdlj,r_ge_d,J,bessel_buf,legendre_buf); + } + break; + default: + abort(); + } + assert(0); } int qpms_trans_calculator_get_AB_buf_p(const qpms_trans_calculator *c, - complex double *Adest, complex double *Bdest, - int m, int n, int mu, int nu, sph_t kdlj, - bool r_ge_d, qpms_bessel_t J, - complex double *bessel_buf, double *legendre_buf) { - if (r_ge_d) J = QPMS_BESSEL_REGULAR; - if (0 == kdlj.r && J != QPMS_BESSEL_REGULAR) { - *Adest = NAN+I*NAN; - *Bdest = NAN+I*NAN; - // TODO warn? different return value? - return 0; - } - switch(qpms_normalisation_t_normonly(c->normalisation)) { - case QPMS_NORMALISATION_TAYLOR: - case QPMS_NORMALISATION_KRISTENSSON: - case QPMS_NORMALISATION_XU: - { - double costheta = cos(kdlj.theta); - if (gsl_sf_legendre_array_e(GSL_SF_LEGENDRE_NONE,n+nu+1, - costheta,-1,legendre_buf)) abort(); - if (qpms_sph_bessel_fill(J, n+nu+2, kdlj.r, bessel_buf)) abort(); - *Adest = qpms_trans_calculator_get_A_precalcbuf(c,m,n,mu,nu, - kdlj,r_ge_d,J,bessel_buf,legendre_buf); - *Bdest = qpms_trans_calculator_get_B_precalcbuf(c,m,n,mu,nu, - kdlj,r_ge_d,J,bessel_buf,legendre_buf); - return 0; - } - break; - default: - abort(); - } - assert(0); + complex double *Adest, complex double *Bdest, + int m, int n, int mu, int nu, sph_t kdlj, + bool r_ge_d, qpms_bessel_t J, + complex double *bessel_buf, double *legendre_buf) { + if (r_ge_d) J = QPMS_BESSEL_REGULAR; + if (0 == kdlj.r && J != QPMS_BESSEL_REGULAR) { + *Adest = NAN+I*NAN; + *Bdest = NAN+I*NAN; + // TODO warn? different return value? 
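
/* Note that qpms_trans_calculator_get_AB_buf_p() signals the kdlj.r == 0 /
 * singular-wave case by writing NAN+I*NAN into *Adest and *Bdest while still
 * returning 0, so the NaN has to be detected on the caller's side.
 * An illustrative check (not part of the library):
 */
#include <complex.h>
#include <math.h>
#include <stdbool.h>

static inline bool qpms_AB_valid_sketch(complex double A, complex double B) {
    return !(isnan(creal(A)) || isnan(cimag(A))
          || isnan(creal(B)) || isnan(cimag(B)));
}
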
+ return 0; + } + switch(qpms_normalisation_t_normonly(c->normalisation)) { + case QPMS_NORMALISATION_TAYLOR: + case QPMS_NORMALISATION_KRISTENSSON: + case QPMS_NORMALISATION_XU: + { + double costheta = cos(kdlj.theta); + if (gsl_sf_legendre_array_e(GSL_SF_LEGENDRE_NONE,n+nu+1, + costheta,-1,legendre_buf)) abort(); + if (qpms_sph_bessel_fill(J, n+nu+2, kdlj.r, bessel_buf)) abort(); + *Adest = qpms_trans_calculator_get_A_precalcbuf(c,m,n,mu,nu, + kdlj,r_ge_d,J,bessel_buf,legendre_buf); + *Bdest = qpms_trans_calculator_get_B_precalcbuf(c,m,n,mu,nu, + kdlj,r_ge_d,J,bessel_buf,legendre_buf); + return 0; + } + break; + default: + abort(); + } + assert(0); } int qpms_trans_calculator_get_AB_arrays_buf(const qpms_trans_calculator *c, - complex double *Adest, complex double *Bdest, - size_t deststride, size_t srcstride, - sph_t kdlj, bool r_ge_d, qpms_bessel_t J, - complex double *bessel_buf, double *legendre_buf) { - if (r_ge_d) J = QPMS_BESSEL_REGULAR; - if (0 == kdlj.r && J != QPMS_BESSEL_REGULAR) { - for (size_t i = 0; i < c->nelem; ++i) - for (size_t j = 0; j < c->nelem; ++j) { - *(Adest + i*srcstride + j*deststride) = NAN+I*NAN; - *(Bdest + i*srcstride + j*deststride) = NAN+I*NAN; - } - // TODO warn? different return value? - return 0; - } - switch(c->normalisation) { - case QPMS_NORMALISATION_TAYLOR: - { - double costheta = cos(kdlj.theta); - if (gsl_sf_legendre_array_e(GSL_SF_LEGENDRE_NONE,2*c->lMax+1, - costheta,-1,legendre_buf)) abort(); - if (qpms_sph_bessel_fill(J, 2*c->lMax+2, kdlj.r, bessel_buf)) abort(); - size_t desti = 0, srci = 0; - for (int n = 1; n <= c->lMax; ++n) for (int m = -n; m <= n; ++m) { - for (int nu = 1; nu <= c->lMax; ++nu) for (int mu = -nu; mu <= nu; ++mu) { - size_t assertindex = qpms_trans_calculator_index_mnmunu(c,m,n,mu,nu); - assert(assertindex == desti*c->nelem + srci); - *(Adest + deststride * desti + srcstride * srci) = - qpms_trans_calculator_get_A_precalcbuf(c,m,n,mu,nu, - kdlj,r_ge_d,J,bessel_buf,legendre_buf); - *(Bdest + deststride * desti + srcstride * srci) = - qpms_trans_calculator_get_B_precalcbuf(c,m,n,mu,nu, - kdlj,r_ge_d,J,bessel_buf,legendre_buf); - ++srci; - } - ++desti; - srci = 0; - } - return 0; - } - break; - default: - abort(); - } - assert(0); + complex double *Adest, complex double *Bdest, + size_t deststride, size_t srcstride, + sph_t kdlj, bool r_ge_d, qpms_bessel_t J, + complex double *bessel_buf, double *legendre_buf) { + if (r_ge_d) J = QPMS_BESSEL_REGULAR; + if (0 == kdlj.r && J != QPMS_BESSEL_REGULAR) { + for (size_t i = 0; i < c->nelem; ++i) + for (size_t j = 0; j < c->nelem; ++j) { + *(Adest + i*srcstride + j*deststride) = NAN+I*NAN; + *(Bdest + i*srcstride + j*deststride) = NAN+I*NAN; + } + // TODO warn? different return value? 
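
/* The desti/srci bookkeeping in the loops below (and the assertindex check against
 * qpms_trans_calculator_index_mnmunu()) relies on the flattened multipole index y,
 * with nelem = lMax*(lMax+2) and (n,m) enumerated as n = 1..lMax, m = -n..n.
 * The conversion, written out for reference as inferred from that iteration order
 * (the canonical qpms_mn2y/qpms_y2mn_p definitions are not part of this patch):
 */
#include <math.h>
#include <stddef.h>

static inline size_t mn2y_sketch(int m, int n) {
    return (size_t)(n * (n + 1) + m - 1);     /* y = 0 for (m,n) = (-1,1) */
}

static inline void y2mn_sketch(size_t y, int *m, int *n) {
    *n = (int)floor(sqrt((double)(y + 1)));   /* n*n <= y+1 < (n+1)*(n+1) */
    *m = (int)(y + 1) - *n * (*n + 1);
}
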
+ return 0; + } + switch(c->normalisation) { + case QPMS_NORMALISATION_TAYLOR: + { + double costheta = cos(kdlj.theta); + if (gsl_sf_legendre_array_e(GSL_SF_LEGENDRE_NONE,2*c->lMax+1, + costheta,-1,legendre_buf)) abort(); + if (qpms_sph_bessel_fill(J, 2*c->lMax+2, kdlj.r, bessel_buf)) abort(); + size_t desti = 0, srci = 0; + for (int n = 1; n <= c->lMax; ++n) for (int m = -n; m <= n; ++m) { + for (int nu = 1; nu <= c->lMax; ++nu) for (int mu = -nu; mu <= nu; ++mu) { + size_t assertindex = qpms_trans_calculator_index_mnmunu(c,m,n,mu,nu); + assert(assertindex == desti*c->nelem + srci); + *(Adest + deststride * desti + srcstride * srci) = + qpms_trans_calculator_get_A_precalcbuf(c,m,n,mu,nu, + kdlj,r_ge_d,J,bessel_buf,legendre_buf); + *(Bdest + deststride * desti + srcstride * srci) = + qpms_trans_calculator_get_B_precalcbuf(c,m,n,mu,nu, + kdlj,r_ge_d,J,bessel_buf,legendre_buf); + ++srci; + } + ++desti; + srci = 0; + } + return 0; + } + break; + default: + abort(); + } + assert(0); } complex double qpms_trans_calculator_get_A(const qpms_trans_calculator *c, - int m, int n, int mu, int nu, sph_t kdlj, - bool r_ge_d, qpms_bessel_t J) { - double leg[gsl_sf_legendre_array_n(n+nu)]; - complex double bes[n+nu+1]; - return qpms_trans_calculator_get_A_buf(c,m,n,mu,nu,kdlj,r_ge_d,J, - bes,leg); + int m, int n, int mu, int nu, sph_t kdlj, + bool r_ge_d, qpms_bessel_t J) { + double leg[gsl_sf_legendre_array_n(n+nu)]; + complex double bes[n+nu+1]; + return qpms_trans_calculator_get_A_buf(c,m,n,mu,nu,kdlj,r_ge_d,J, + bes,leg); } complex double qpms_trans_calculator_get_B(const qpms_trans_calculator *c, - int m, int n, int mu, int nu, sph_t kdlj, - bool r_ge_d, qpms_bessel_t J) { - double leg[gsl_sf_legendre_array_n(n+nu+1)]; - complex double bes[n+nu+2]; - return qpms_trans_calculator_get_B_buf(c,m,n,mu,nu,kdlj,r_ge_d,J, - bes,leg); + int m, int n, int mu, int nu, sph_t kdlj, + bool r_ge_d, qpms_bessel_t J) { + double leg[gsl_sf_legendre_array_n(n+nu+1)]; + complex double bes[n+nu+2]; + return qpms_trans_calculator_get_B_buf(c,m,n,mu,nu,kdlj,r_ge_d,J, + bes,leg); } int qpms_trans_calculator_get_AB_p(const qpms_trans_calculator *c, - complex double *Adest, complex double *Bdest, - int m, int n, int mu, int nu, sph_t kdlj, - bool r_ge_d, qpms_bessel_t J) { - double leg[gsl_sf_legendre_array_n(2*c->lMax+1)]; - complex double bes[2*c->lMax+2]; - return qpms_trans_calculator_get_AB_buf_p(c,Adest, Bdest,m,n,mu,nu,kdlj,r_ge_d,J, - bes,leg); + complex double *Adest, complex double *Bdest, + int m, int n, int mu, int nu, sph_t kdlj, + bool r_ge_d, qpms_bessel_t J) { + double leg[gsl_sf_legendre_array_n(2*c->lMax+1)]; + complex double bes[2*c->lMax+2]; + return qpms_trans_calculator_get_AB_buf_p(c,Adest, Bdest,m,n,mu,nu,kdlj,r_ge_d,J, + bes,leg); } int qpms_trans_calculator_get_AB_arrays(const qpms_trans_calculator *c, - complex double *Adest, complex double *Bdest, - size_t deststride, size_t srcstride, - sph_t kdlj, bool r_ge_d, qpms_bessel_t J) { - double leg[gsl_sf_legendre_array_n(c->lMax+c->lMax+1)]; - complex double bes[c->lMax+c->lMax+2]; - return qpms_trans_calculator_get_AB_arrays_buf(c, - Adest, Bdest, deststride, srcstride, - kdlj, r_ge_d, J, - bes, leg); + complex double *Adest, complex double *Bdest, + size_t deststride, size_t srcstride, + sph_t kdlj, bool r_ge_d, qpms_bessel_t J) { + double leg[gsl_sf_legendre_array_n(c->lMax+c->lMax+1)]; + complex double bes[c->lMax+c->lMax+2]; + return qpms_trans_calculator_get_AB_arrays_buf(c, + Adest, Bdest, deststride, srcstride, + kdlj, r_ge_d, J, + bes, leg); 
} complex double qpms_trans_calculator_get_A_ext(const qpms_trans_calculator *c, - int m, int n, int mu, int nu, - double kdlj_r, double kdlj_theta, double kdlj_phi, - int r_ge_d, int J) { - sph_t kdlj = {kdlj_r, kdlj_theta, kdlj_phi}; - return qpms_trans_calculator_get_A(c,m,n,mu,nu,kdlj,r_ge_d,J); + int m, int n, int mu, int nu, + double kdlj_r, double kdlj_theta, double kdlj_phi, + int r_ge_d, int J) { + sph_t kdlj = {kdlj_r, kdlj_theta, kdlj_phi}; + return qpms_trans_calculator_get_A(c,m,n,mu,nu,kdlj,r_ge_d,J); } complex double qpms_trans_calculator_get_B_ext(const qpms_trans_calculator *c, - int m, int n, int mu, int nu, - double kdlj_r, double kdlj_theta, double kdlj_phi, - int r_ge_d, int J) { - sph_t kdlj = {kdlj_r, kdlj_theta, kdlj_phi}; - return qpms_trans_calculator_get_B(c,m,n,mu,nu,kdlj,r_ge_d,J); + int m, int n, int mu, int nu, + double kdlj_r, double kdlj_theta, double kdlj_phi, + int r_ge_d, int J) { + sph_t kdlj = {kdlj_r, kdlj_theta, kdlj_phi}; + return qpms_trans_calculator_get_B(c,m,n,mu,nu,kdlj,r_ge_d,J); } int qpms_trans_calculator_get_AB_p_ext(const qpms_trans_calculator *c, - complex double *Adest, complex double *Bdest, - int m, int n, int mu, int nu, - double kdlj_r, double kdlj_theta, double kdlj_phi, - int r_ge_d, int J) { - sph_t kdlj = {kdlj_r, kdlj_theta, kdlj_phi}; - return qpms_trans_calculator_get_AB_p(c,Adest,Bdest,m,n,mu,nu,kdlj,r_ge_d,J); + complex double *Adest, complex double *Bdest, + int m, int n, int mu, int nu, + double kdlj_r, double kdlj_theta, double kdlj_phi, + int r_ge_d, int J) { + sph_t kdlj = {kdlj_r, kdlj_theta, kdlj_phi}; + return qpms_trans_calculator_get_AB_p(c,Adest,Bdest,m,n,mu,nu,kdlj,r_ge_d,J); } int qpms_trans_calculator_get_AB_arrays_ext(const qpms_trans_calculator *c, - complex double *Adest, complex double *Bdest, - size_t deststride, size_t srcstride, - double kdlj_r, double kdlj_theta, double kdlj_phi, - int r_ge_d, int J) { - sph_t kdlj = {kdlj_r, kdlj_theta, kdlj_phi}; - return qpms_trans_calculator_get_AB_arrays(c,Adest,Bdest,deststride,srcstride, - kdlj, r_ge_d, J); + complex double *Adest, complex double *Bdest, + size_t deststride, size_t srcstride, + double kdlj_r, double kdlj_theta, double kdlj_phi, + int r_ge_d, int J) { + sph_t kdlj = {kdlj_r, kdlj_theta, kdlj_phi}; + return qpms_trans_calculator_get_AB_arrays(c,Adest,Bdest,deststride,srcstride, + kdlj, r_ge_d, J); } #ifdef QPMS_COMPILE_PYTHON_EXTENSIONS #include @@ -1138,111 +1071,111 @@ int qpms_trans_calculator_get_AB_arrays_ext(const qpms_trans_calculator *c, #endif int qpms_cython_trans_calculator_get_AB_arrays_loop( - const qpms_trans_calculator *c, const qpms_bessel_t J, const int resnd, - const int daxis, const int saxis, - char *A_data, const npy_intp *A_shape, const npy_intp *A_strides, - char *B_data, const npy_intp *B_shape, const npy_intp *B_strides, - const char *r_data, const npy_intp *r_shape, const npy_intp *r_strides, - const char *theta_data, const npy_intp *theta_shape, const npy_intp *theta_strides, - const char *phi_data, const npy_intp *phi_shape, const npy_intp *phi_strides, - const char *r_ge_d_data, const npy_intp *r_ge_d_shape, const npy_intp *r_ge_d_strides){ - assert(daxis != saxis); - assert(resnd >= 2); - int longest_axis = 0; - int longestshape = 1; - const npy_intp *resultshape = A_shape, *resultstrides = A_strides; - // TODO put some restrict's everywhere? 
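
/* In the axis-selection loop below, longestshape is reset to 1 instead of being
 * updated to A_shape[ax], so longest_axis appears to end up as the *last*
 * non-source, non-destination axis with extent > 1 rather than the longest one
 * (probably only a load-balancing concern for the planned parallelisation).
 * The presumably intended selection, sketched with ptrdiff_t standing in for
 * npy_intp:
 */
#include <stddef.h>

static int pick_longest_axis_sketch(const ptrdiff_t *shape, int nd,
                                    int daxis, int saxis) {
    int longest_axis = 0;
    ptrdiff_t longestshape = 1;
    for (int ax = 0; ax < nd; ++ax) {
        if (ax == daxis || ax == saxis) continue;
        if (shape[ax] > longestshape) {
            longest_axis = ax;
            longestshape = shape[ax];   /* keep the extent instead of resetting to 1 */
        }
    }
    return longest_axis;
}
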
- for (int ax = 0; ax < resnd; ++ax){ - assert(A_shape[ax] == B_shape[ax]); - assert(A_strides[ax] == B_strides[ax]); - if (daxis == ax || saxis == ax) continue; - if (A_shape[ax] > longestshape) { - longest_axis = ax; - longestshape = 1; - } - } - const npy_intp longlen = resultshape[longest_axis]; + const qpms_trans_calculator *c, const qpms_bessel_t J, const int resnd, + const int daxis, const int saxis, + char *A_data, const npy_intp *A_shape, const npy_intp *A_strides, + char *B_data, const npy_intp *B_shape, const npy_intp *B_strides, + const char *r_data, const npy_intp *r_shape, const npy_intp *r_strides, + const char *theta_data, const npy_intp *theta_shape, const npy_intp *theta_strides, + const char *phi_data, const npy_intp *phi_shape, const npy_intp *phi_strides, + const char *r_ge_d_data, const npy_intp *r_ge_d_shape, const npy_intp *r_ge_d_strides){ + assert(daxis != saxis); + assert(resnd >= 2); + int longest_axis = 0; + int longestshape = 1; + const npy_intp *resultshape = A_shape, *resultstrides = A_strides; + // TODO put some restrict's everywhere? + for (int ax = 0; ax < resnd; ++ax){ + assert(A_shape[ax] == B_shape[ax]); + assert(A_strides[ax] == B_strides[ax]); + if (daxis == ax || saxis == ax) continue; + if (A_shape[ax] > longestshape) { + longest_axis = ax; + longestshape = 1; + } + } + const npy_intp longlen = resultshape[longest_axis]; - npy_intp innerloop_shape[resnd]; - for (int ax = 0; ax < resnd; ++ax) { - innerloop_shape[ax] = resultshape[ax]; - } - /* longest axis will be iterated in the outer (parallelized) loop. - * Therefore, longest axis, together with saxis and daxis, - * will not be iterated in the inner loop: - */ - innerloop_shape[longest_axis] = 1; - innerloop_shape[daxis] = 1; - innerloop_shape[saxis] = 1; + npy_intp innerloop_shape[resnd]; + for (int ax = 0; ax < resnd; ++ax) { + innerloop_shape[ax] = resultshape[ax]; + } + /* longest axis will be iterated in the outer (parallelized) loop. + * Therefore, longest axis, together with saxis and daxis, + * will not be iterated in the inner loop: + */ + innerloop_shape[longest_axis] = 1; + innerloop_shape[daxis] = 1; + innerloop_shape[saxis] = 1; - // these are the 'strides' passed to the qpms_trans_calculator_get_AB_arrays_ext - // function, which expects 'const double *' strides, not 'char *' ones. - const npy_intp dstride = resultstrides[daxis] / sizeof(complex double); - const npy_intp sstride = resultstrides[saxis] / sizeof(complex double); + // these are the 'strides' passed to the qpms_trans_calculator_get_AB_arrays_ext + // function, which expects 'const double *' strides, not 'char *' ones. + const npy_intp dstride = resultstrides[daxis] / sizeof(complex double); + const npy_intp sstride = resultstrides[saxis] / sizeof(complex double); - int errval = 0; - // TODO here start parallelisation -//#pragma omp parallel - { - npy_intp local_indices[resnd]; - memset(local_indices, 0, sizeof(local_indices)); - int errval_local = 0; - size_t longi; -//#pragma omp for - for(longi = 0; longi < longlen; ++longi) { - // this might be done also in the inverse order, but this is more - // 'c-contiguous' way of incrementing the indices - int ax = resnd - 1; - while(ax >= 0) { - /* calculate the correct index/pointer for each array used. 
- * This can be further optimized from O(resnd * total size of - * the result array) to O(total size of the result array), but - * fick that now - */ - const char *r_p = r_data + r_strides[longest_axis] * longi; - const char *theta_p = theta_data + theta_strides[longest_axis] * longi; - const char *phi_p = phi_data + phi_strides[longest_axis] * longi; - const char *r_ge_d_p = r_ge_d_data + r_ge_d_strides[longest_axis] * longi; - char *A_p = A_data + A_strides[longest_axis] * longi; - char *B_p = B_data + B_strides[longest_axis] * longi; - for(int i = 0; i < resnd; ++i) { - // following two lines are probably not needed, as innerloop_shape is there 1 anyway - // so if i == daxis, saxis, or longest_axis, local_indices[i] is zero. - if (i == longest_axis) continue; - if (daxis == i || saxis == i) continue; - r_p += r_strides[i] * local_indices[i]; - theta_p += theta_strides[i] * local_indices[i]; - phi_p += phi_strides[i] * local_indices[i]; - A_p += A_strides[i] * local_indices[i]; - B_p += B_strides[i] * local_indices[i]; - } + int errval = 0; + // TODO here start parallelisation + //#pragma omp parallel + { + npy_intp local_indices[resnd]; + memset(local_indices, 0, sizeof(local_indices)); + int errval_local = 0; + size_t longi; + //#pragma omp for + for(longi = 0; longi < longlen; ++longi) { + // this might be done also in the inverse order, but this is more + // 'c-contiguous' way of incrementing the indices + int ax = resnd - 1; + while(ax >= 0) { + /* calculate the correct index/pointer for each array used. + * This can be further optimized from O(resnd * total size of + * the result array) to O(total size of the result array), but + * fick that now + */ + const char *r_p = r_data + r_strides[longest_axis] * longi; + const char *theta_p = theta_data + theta_strides[longest_axis] * longi; + const char *phi_p = phi_data + phi_strides[longest_axis] * longi; + const char *r_ge_d_p = r_ge_d_data + r_ge_d_strides[longest_axis] * longi; + char *A_p = A_data + A_strides[longest_axis] * longi; + char *B_p = B_data + B_strides[longest_axis] * longi; + for(int i = 0; i < resnd; ++i) { + // following two lines are probably not needed, as innerloop_shape is there 1 anyway + // so if i == daxis, saxis, or longest_axis, local_indices[i] is zero. + if (i == longest_axis) continue; + if (daxis == i || saxis == i) continue; + r_p += r_strides[i] * local_indices[i]; + theta_p += theta_strides[i] * local_indices[i]; + phi_p += phi_strides[i] * local_indices[i]; + A_p += A_strides[i] * local_indices[i]; + B_p += B_strides[i] * local_indices[i]; + } - // perform the actual task here - errval_local |= qpms_trans_calculator_get_AB_arrays_ext(c, (complex double *)A_p, - (complex double *)B_p, - dstride, sstride, - // FIXME change all the _ext function types to npy_... so that - // these casts are not needed - *((double *) r_p), *((double *) theta_p), *((double *)phi_p), - (int)(*((npy_bool *) r_ge_d_p)), J); - if (errval_local) abort(); + // perform the actual task here + errval_local |= qpms_trans_calculator_get_AB_arrays_ext(c, (complex double *)A_p, + (complex double *)B_p, + dstride, sstride, + // FIXME change all the _ext function types to npy_... 
so that + // these casts are not needed + *((double *) r_p), *((double *) theta_p), *((double *)phi_p), + (int)(*((npy_bool *) r_ge_d_p)), J); + if (errval_local) abort(); - // increment the last index 'digit' (ax is now resnd-1; we don't have do-while loop in python) - ++local_indices[ax]; - while(local_indices[ax] == innerloop_shape[ax] && ax >= 0) { - // overflow to the next digit but stop when reached below the last one - local_indices[ax] = 0; - local_indices[--ax]++; - } - if (ax >= 0) // did not overflow, get back to the lowest index - ax = resnd - 1; - } - } - errval |= errval_local; - } - // FIXME when parallelizing - // TODO Here end parallelisation - return errval; + // increment the last index 'digit' (ax is now resnd-1; we don't have do-while loop in python) + ++local_indices[ax]; + while(local_indices[ax] == innerloop_shape[ax] && ax >= 0) { + // overflow to the next digit but stop when reached below the last one + local_indices[ax] = 0; + local_indices[--ax]++; + } + if (ax >= 0) // did not overflow, get back to the lowest index + ax = resnd - 1; + } + } + errval |= errval_local; + } + // FIXME when parallelizing + // TODO Here end parallelisation + return errval; } diff --git a/qpms/translations.h b/qpms/translations.h index b63593d..2b8a772 100644 --- a/qpms/translations.h +++ b/qpms/translations.h @@ -9,10 +9,10 @@ // TODO replace the xplicit "Taylor" functions with general, // taking qpms_normalisation_t argument. complex double qpms_trans_single_A_Taylor(qpms_m_t m, qpms_l_t n, qpms_m_t mu, qpms_l_t nu, sph_t kdlj, - bool r_ge_d, qpms_bessel_t J); + bool r_ge_d, qpms_bessel_t J); complex double qpms_trans_single_B_Taylor(qpms_m_t m, qpms_l_t n, qpms_m_t mu, qpms_l_t nu, sph_t kdlj, - bool r_ge_d, qpms_bessel_t J); + bool r_ge_d, qpms_bessel_t J); complex double qpms_trans_single_A_Taylor_ext(qpms_m_t m, qpms_l_t n, qpms_m_t mu, qpms_l_t nu, double kdlj_r, double kdlj_th, double kdlj_phi, int r_ge_d, int J); @@ -21,17 +21,17 @@ complex double qpms_trans_single_B_Taylor_ext(qpms_m_t m, qpms_l_t n, qpms_m_t m double kdlj_th, double kdlj_phi, int r_ge_d, int J); complex double qpms_trans_single_A(qpms_normalisation_t norm, qpms_m_t m, qpms_l_t n, qpms_m_t mu, qpms_l_t nu, sph_t kdlj, - bool r_ge_d, qpms_bessel_t J); + bool r_ge_d, qpms_bessel_t J); complex double qpms_trans_single_B(qpms_normalisation_t norm, qpms_m_t m, qpms_l_t n, qpms_m_t mu, qpms_l_t nu, sph_t kdlj, - bool r_ge_d, qpms_bessel_t J); + bool r_ge_d, qpms_bessel_t J); typedef struct qpms_trans_calculator { - qpms_normalisation_t normalisation; - qpms_l_t lMax; - qpms_y_t nelem; - complex double **A_multipliers; - complex double **B_multipliers; + qpms_normalisation_t normalisation; + qpms_l_t lMax; + qpms_y_t nelem; + complex double **A_multipliers; + complex double **B_multipliers; #if 0 // Normalised values of the Legendre functions and derivatives // for θ == π/2, i.e. for the 2D case. 
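
/* The inner index bookkeeping in qpms_cython_trans_calculator_get_AB_arrays_loop()
 * above advances local_indices like an odometer: increment the last "digit" and
 * carry leftwards on overflow. Note that its carry loop tests local_indices[ax]
 * before ax >= 0 and decrements ax to -1 inside the body, so on the final overflow
 * it appears to touch local_indices[-1]. A self-contained, bounds-checked sketch of
 * the same idea:
 */
#include <stdbool.h>
#include <stddef.h>

/* Returns false once the whole index space has been exhausted. */
static bool odometer_step_sketch(ptrdiff_t *idx, const ptrdiff_t *shape, int nd) {
    for (int ax = nd - 1; ax >= 0; --ax) {
        if (++idx[ax] < shape[ax])
            return true;      /* no carry needed */
        idx[ax] = 0;          /* overflow: reset this digit and carry left */
    }
    return false;             /* carried past the most significant digit */
}
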
@@ -86,14 +86,14 @@ int qpms_trans_calculator_get_AB_arrays_ext(const qpms_trans_calculator *c, #include #include int qpms_cython_trans_calculator_get_AB_arrays_loop( - const qpms_trans_calculator *c, qpms_bessel_t J, const int resnd, - int daxis, int saxis, - char *A_data, const npy_intp *A_shape, const npy_intp *A_strides, - char *B_data, const npy_intp *B_shape, const npy_intp *B_strides, - const char *r_data, const npy_intp *r_shape, const npy_intp *r_strides, - const char *theta_data, const npy_intp *theta_shape, const npy_intp *theta_strides, - const char *phi_data, const npy_intp *phi_shape, const npy_intp *phi_strides, - const char *r_ge_d_data, const npy_intp *r_ge_d_shape, const npy_intp *r_ge_d_strides); + const qpms_trans_calculator *c, qpms_bessel_t J, const int resnd, + int daxis, int saxis, + char *A_data, const npy_intp *A_shape, const npy_intp *A_strides, + char *B_data, const npy_intp *B_shape, const npy_intp *B_strides, + const char *r_data, const npy_intp *r_shape, const npy_intp *r_strides, + const char *theta_data, const npy_intp *theta_shape, const npy_intp *theta_strides, + const char *phi_data, const npy_intp *phi_shape, const npy_intp *phi_strides, + const char *r_ge_d_data, const npy_intp *r_ge_d_shape, const npy_intp *r_ge_d_strides); #endif //QPMS_COMPILE_PYTHON_EXTENSIONS diff --git a/qpms/vecprint.c b/qpms/vecprint.c index 8b33506..96599fc 100644 --- a/qpms/vecprint.c +++ b/qpms/vecprint.c @@ -5,29 +5,29 @@ void print_csphvec(csphvec_t v) { - printf("(%g+%gj)r̂ + (%g+%gj)θ̂ + (%g+%gj)φ̂", - creal(v.rc), cimag(v.rc), - creal(v.thetac), cimag(v.thetac), - creal(v.phic), cimag(v.phic) - ); + printf("(%g+%gj)r̂ + (%g+%gj)θ̂ + (%g+%gj)φ̂", + creal(v.rc), cimag(v.rc), + creal(v.thetac), cimag(v.thetac), + creal(v.phic), cimag(v.phic) + ); } void print_cart3(cart3_t v) { - printf("%gx̂ + %gŷ + %gẑ", v.x, v.y, v.z); + printf("%gx̂ + %gŷ + %gẑ", v.x, v.y, v.z); } void print_ccart3(ccart3_t v) { - printf("(%g+%gj)x̂ + (%g+%gj)ŷ + (%g+%gj)ẑ", - creal(v.x), cimag(v.x), - creal(v.y), cimag(v.y), - creal(v.z), cimag(v.z) - ); + printf("(%g+%gj)x̂ + (%g+%gj)ŷ + (%g+%gj)ẑ", + creal(v.x), cimag(v.x), + creal(v.y), cimag(v.y), + creal(v.z), cimag(v.z) + ); } void print_sph(sph_t r) { - printf("(r=%g, θ=%g, φ=%g)", r.r, r.theta, r.phi); + printf("(r=%g, θ=%g, φ=%g)", r.r, r.theta, r.phi); } diff --git a/qpms/vswf.c b/qpms/vswf.c index 75e9d6a..a219a73 100644 --- a/qpms/vswf.c +++ b/qpms/vswf.c @@ -9,314 +9,314 @@ #include csphvec_t qpms_vswf_single_el(qpms_m_t m, qpms_l_t l, sph_t kdlj, - qpms_bessel_t btyp, qpms_normalisation_t norm) { - lmcheck(l,m); - csphvec_t N; - complex double *bessel = malloc((l+1)*sizeof(complex double)); - if(qpms_sph_bessel_fill(btyp, l, kdlj.r, bessel)) abort(); - qpms_pitau_t pt = qpms_pitau_get(kdlj.theta, l, norm); - complex double eimf = cexp(m * kdlj.phi * I); - qpms_y_t y = qpms_mn2y(m,l); + qpms_bessel_t btyp, qpms_normalisation_t norm) { + lmcheck(l,m); + csphvec_t N; + complex double *bessel = malloc((l+1)*sizeof(complex double)); + if(qpms_sph_bessel_fill(btyp, l, kdlj.r, bessel)) abort(); + qpms_pitau_t pt = qpms_pitau_get(kdlj.theta, l, norm); + complex double eimf = cexp(m * kdlj.phi * I); + qpms_y_t y = qpms_mn2y(m,l); - N.rc = l*(l+1) * pt.leg[y] * bessel[l] / kdlj.r * eimf; - complex double besselfac = bessel[l-1] - l * bessel[l] / kdlj.r; - N.thetac = pt.tau[y] * besselfac * eimf; - N.phic = pt.pi[y] * besselfac * I * eimf; + N.rc = l*(l+1) * pt.leg[y] * bessel[l] / kdlj.r * eimf; + complex double besselfac = bessel[l-1] - l * 
bessel[l] / kdlj.r; + N.thetac = pt.tau[y] * besselfac * eimf; + N.phic = pt.pi[y] * besselfac * I * eimf; - qpms_pitau_free(pt); - free(bessel); - return N; + qpms_pitau_free(pt); + free(bessel); + return N; } csphvec_t qpms_vswf_single_mg(qpms_m_t m, qpms_l_t l, sph_t kdlj, - qpms_bessel_t btyp, qpms_normalisation_t norm) { - lmcheck(l,m); - csphvec_t M; - complex double *bessel = malloc((l+1)*sizeof(complex double)); - if(qpms_sph_bessel_fill(btyp, l, kdlj.r, bessel)) abort(); - qpms_pitau_t pt = qpms_pitau_get(kdlj.theta, l, norm); - complex double eimf = cexp(m * kdlj.phi * I); - qpms_y_t y = qpms_mn2y(m,l); + qpms_bessel_t btyp, qpms_normalisation_t norm) { + lmcheck(l,m); + csphvec_t M; + complex double *bessel = malloc((l+1)*sizeof(complex double)); + if(qpms_sph_bessel_fill(btyp, l, kdlj.r, bessel)) abort(); + qpms_pitau_t pt = qpms_pitau_get(kdlj.theta, l, norm); + complex double eimf = cexp(m * kdlj.phi * I); + qpms_y_t y = qpms_mn2y(m,l); - M.rc = 0.; - M.thetac = pt.pi[y] * bessel[l] * I * eimf; - M.phic = -pt.tau[y] * bessel[l] * eimf; + M.rc = 0.; + M.thetac = pt.pi[y] * bessel[l] * I * eimf; + M.phic = -pt.tau[y] * bessel[l] * eimf; - qpms_pitau_free(pt); - free(bessel); - return M; + qpms_pitau_free(pt); + free(bessel); + return M; } qpms_vswfset_sph_t *qpms_vswfset_make(qpms_l_t lMax, sph_t kdlj, - qpms_bessel_t btyp, qpms_normalisation_t norm) { - qpms_vswfset_sph_t *res = malloc(sizeof(qpms_vswfset_sph_t)); - res->lMax = lMax; - qpms_y_t nelem = qpms_lMax2nelem(lMax); - res->el = malloc(sizeof(csphvec_t)*nelem); - res->mg = malloc(sizeof(csphvec_t)*nelem); - if(QPMS_SUCCESS != qpms_vswf_fill(NULL, res->mg, res->el, lMax, kdlj, btyp, norm)) - abort(); // or return NULL? or rather assert? - return res; + qpms_bessel_t btyp, qpms_normalisation_t norm) { + qpms_vswfset_sph_t *res = malloc(sizeof(qpms_vswfset_sph_t)); + res->lMax = lMax; + qpms_y_t nelem = qpms_lMax2nelem(lMax); + res->el = malloc(sizeof(csphvec_t)*nelem); + res->mg = malloc(sizeof(csphvec_t)*nelem); + if(QPMS_SUCCESS != qpms_vswf_fill(NULL, res->mg, res->el, lMax, kdlj, btyp, norm)) + abort(); // or return NULL? or rather assert? + return res; } void qpms_vswfset_sph_pfree(qpms_vswfset_sph_t *w) { - assert(NULL != w && NULL != w->el && NULL != w->mg); - free(w->el); - free(w->mg); - free(w); + assert(NULL != w && NULL != w->el && NULL != w->mg); + free(w->el); + free(w->mg); + free(w); } qpms_errno_t qpms_vswf_fill(csphvec_t *const longtarget, csphvec_t * const mgtarget, csphvec_t * const eltarget, - qpms_l_t lMax, sph_t kr, - qpms_bessel_t btyp, qpms_normalisation_t norm) { - assert(lMax >= 1); - complex double *bessel = malloc((lMax+1)*sizeof(complex double)); - if(qpms_sph_bessel_fill(btyp, lMax, kr.r, bessel)) abort(); - qpms_pitau_t pt = qpms_pitau_get(kr.theta, lMax, norm); - complex double const *pbes = bessel + 1; // starting from l = 1 - double const *pleg = pt.leg; - double const *ppi = pt.pi; - double const *ptau = pt.tau; - csphvec_t *plong = longtarget, *pmg = mgtarget, *pel = eltarget; - for(qpms_l_t l = 1; l <= lMax; ++l) { - complex double besfac; - complex double besderfac; - if (kr.r) { - besfac = *pbes / kr.r; - } else { - besfac = (1 == l) ? 1/3. 
: 0; - } - besderfac = *(pbes-1) - l * besfac; - for(qpms_m_t m = -l; m <= l; ++m) { - complex double eimf = cexp(m * kr.phi * I); - if (longtarget) { - complex double longfac = sqrt(l*(l+1)) * eimf; - plong->rc = // FATAL FIXME: I get wrong result here for plane wave re-expansion - // whenever kr.r > 0 (for waves with longitudinal component, ofcoz) - /*(*(pbes-1) - (l+1)/kr.r* *pbes)*/ - (besderfac-besfac) - * (*pleg) * longfac; - plong->thetac = *ptau * besfac * longfac; - plong->phic = *ppi * I * besfac * longfac; - ++plong; - } - if (eltarget) { - pel->rc = l*(l+1) * (*pleg) * besfac * eimf; - pel->thetac = *ptau * besderfac * eimf; - pel->phic = *ppi * besderfac * I * eimf; - ++pel; - } - if (mgtarget) { - pmg->rc = 0.; - pmg->thetac = *ppi * (*pbes) * I * eimf; - pmg->phic = - *ptau * (*pbes) * eimf; - ++pmg; - } - ++pleg; ++ppi; ++ptau; - } - ++pbes; - } - free(bessel); - qpms_pitau_free(pt); - return QPMS_SUCCESS; + qpms_l_t lMax, sph_t kr, + qpms_bessel_t btyp, qpms_normalisation_t norm) { + assert(lMax >= 1); + complex double *bessel = malloc((lMax+1)*sizeof(complex double)); + if(qpms_sph_bessel_fill(btyp, lMax, kr.r, bessel)) abort(); + qpms_pitau_t pt = qpms_pitau_get(kr.theta, lMax, norm); + complex double const *pbes = bessel + 1; // starting from l = 1 + double const *pleg = pt.leg; + double const *ppi = pt.pi; + double const *ptau = pt.tau; + csphvec_t *plong = longtarget, *pmg = mgtarget, *pel = eltarget; + for(qpms_l_t l = 1; l <= lMax; ++l) { + complex double besfac; + complex double besderfac; + if (kr.r) { + besfac = *pbes / kr.r; + } else { + besfac = (1 == l) ? 1/3. : 0; + } + besderfac = *(pbes-1) - l * besfac; + for(qpms_m_t m = -l; m <= l; ++m) { + complex double eimf = cexp(m * kr.phi * I); + if (longtarget) { + complex double longfac = sqrt(l*(l+1)) * eimf; + plong->rc = // FATAL FIXME: I get wrong result here for plane wave re-expansion + // whenever kr.r > 0 (for waves with longitudinal component, ofcoz) + /*(*(pbes-1) - (l+1)/kr.r* *pbes)*/ + (besderfac-besfac) + * (*pleg) * longfac; + plong->thetac = *ptau * besfac * longfac; + plong->phic = *ppi * I * besfac * longfac; + ++plong; + } + if (eltarget) { + pel->rc = l*(l+1) * (*pleg) * besfac * eimf; + pel->thetac = *ptau * besderfac * eimf; + pel->phic = *ppi * besderfac * I * eimf; + ++pel; + } + if (mgtarget) { + pmg->rc = 0.; + pmg->thetac = *ppi * (*pbes) * I * eimf; + pmg->phic = - *ptau * (*pbes) * eimf; + ++pmg; + } + ++pleg; ++ppi; ++ptau; + } + ++pbes; + } + free(bessel); + qpms_pitau_free(pt); + return QPMS_SUCCESS; } // consistency check: this should give the same results as the above function (up to rounding errors) qpms_errno_t qpms_vswf_fill_alternative(csphvec_t *const longtarget, csphvec_t * const mgtarget, csphvec_t * const eltarget, - qpms_l_t lMax, sph_t kr, - qpms_bessel_t btyp, qpms_normalisation_t norm) { - assert(lMax >= 1); - complex double *bessel = malloc((lMax+1)*sizeof(complex double)); - if(qpms_sph_bessel_fill(btyp, lMax, kr.r, bessel)) abort(); - complex double const *pbes = bessel + 1; // starting from l = 1 - - qpms_y_t nelem = qpms_lMax2nelem(lMax); - csphvec_t * const a1 = malloc(3*nelem*sizeof(csphvec_t)), * const a2 = a1 + nelem, * const a3 = a2 + nelem; - if(qpms_vecspharm_fill(a1, a2, a3, lMax, kr, norm)) abort(); - const csphvec_t *p1 = a1; - const csphvec_t *p2 = a2; - const csphvec_t *p3 = a3; + qpms_l_t lMax, sph_t kr, + qpms_bessel_t btyp, qpms_normalisation_t norm) { + assert(lMax >= 1); + complex double *bessel = malloc((lMax+1)*sizeof(complex double)); + 
if(qpms_sph_bessel_fill(btyp, lMax, kr.r, bessel)) abort(); + complex double const *pbes = bessel + 1; // starting from l = 1 - csphvec_t *plong = longtarget, *pmg = mgtarget, *pel = eltarget; - for(qpms_l_t l = 1; l <= lMax; ++l) { - complex double besfac = *pbes / kr.r; - complex double besderfac = *(pbes-1) - l * besfac; - double sqrtlfac = sqrt(l*(l+1)); - for(qpms_m_t m = -l; m <= l; ++m) { - complex double eimf = cexp(m * kr.phi * I); - if (longtarget) { - *plong = csphvec_add(csphvec_scale(besderfac-besfac, *p3), - csphvec_scale(sqrtlfac * besfac, *p2)); - ++plong; - } - if (eltarget) { - *pel = csphvec_add(csphvec_scale(besderfac, *p2), - csphvec_scale(sqrtlfac * besfac, *p3)); - ++pel; - } - if (mgtarget) { - *pmg = csphvec_scale(*pbes, *p1); - ++pmg; - } - ++p1; ++p2; ++p3; - } - ++pbes; - } - free(a1); - free(bessel); - return QPMS_SUCCESS; + qpms_y_t nelem = qpms_lMax2nelem(lMax); + csphvec_t * const a1 = malloc(3*nelem*sizeof(csphvec_t)), * const a2 = a1 + nelem, * const a3 = a2 + nelem; + if(qpms_vecspharm_fill(a1, a2, a3, lMax, kr, norm)) abort(); + const csphvec_t *p1 = a1; + const csphvec_t *p2 = a2; + const csphvec_t *p3 = a3; + + csphvec_t *plong = longtarget, *pmg = mgtarget, *pel = eltarget; + for(qpms_l_t l = 1; l <= lMax; ++l) { + complex double besfac = *pbes / kr.r; + complex double besderfac = *(pbes-1) - l * besfac; + double sqrtlfac = sqrt(l*(l+1)); + for(qpms_m_t m = -l; m <= l; ++m) { + complex double eimf = cexp(m * kr.phi * I); + if (longtarget) { + *plong = csphvec_add(csphvec_scale(besderfac-besfac, *p3), + csphvec_scale(sqrtlfac * besfac, *p2)); + ++plong; + } + if (eltarget) { + *pel = csphvec_add(csphvec_scale(besderfac, *p2), + csphvec_scale(sqrtlfac * besfac, *p3)); + ++pel; + } + if (mgtarget) { + *pmg = csphvec_scale(*pbes, *p1); + ++pmg; + } + ++p1; ++p2; ++p3; + } + ++pbes; + } + free(a1); + free(bessel); + return QPMS_SUCCESS; } qpms_errno_t qpms_vecspharm_fill(csphvec_t *const a1target, csphvec_t *const a2target, csphvec_t *const a3target, - qpms_l_t lMax, sph_t dir, qpms_normalisation_t norm) { - assert(lMax >= 1); - qpms_pitau_t pt = qpms_pitau_get(dir.theta, lMax, norm); - double const *pleg = pt.leg; - double const *ppi = pt.pi; - double const *ptau = pt.tau; - csphvec_t *p1 = a1target, *p2 = a2target, *p3 = a3target; - for (qpms_l_t l = 1; l <= lMax; ++l) { - for(qpms_m_t m = -l; m <= l; ++m) { - complex double eimf = cexp(m * dir.phi * I); - if (a1target) { - p1->rc = 0; - p1->thetac = *ppi * I * eimf; - p1->phic = -*ptau * eimf; - ++p1; - } - if (a2target) { - p2->rc = 0; - p2->thetac = *ptau * eimf; - p2->phic = *ppi * I * eimf; - ++p2; - } - if (a3target) { - p3->rc = sqrt(l*(l+1)) * (*pleg) * eimf; - p3->thetac = 0; - p3->phic = 0; - ++p3; - } - } - ++pleg; ++ppi; ++ptau; - } - qpms_pitau_free(pt); - return QPMS_SUCCESS; + qpms_l_t lMax, sph_t dir, qpms_normalisation_t norm) { + assert(lMax >= 1); + qpms_pitau_t pt = qpms_pitau_get(dir.theta, lMax, norm); + double const *pleg = pt.leg; + double const *ppi = pt.pi; + double const *ptau = pt.tau; + csphvec_t *p1 = a1target, *p2 = a2target, *p3 = a3target; + for (qpms_l_t l = 1; l <= lMax; ++l) { + for(qpms_m_t m = -l; m <= l; ++m) { + complex double eimf = cexp(m * dir.phi * I); + if (a1target) { + p1->rc = 0; + p1->thetac = *ppi * I * eimf; + p1->phic = -*ptau * eimf; + ++p1; + } + if (a2target) { + p2->rc = 0; + p2->thetac = *ptau * eimf; + p2->phic = *ppi * I * eimf; + ++p2; + } + if (a3target) { + p3->rc = sqrt(l*(l+1)) * (*pleg) * eimf; + p3->thetac = 0; + p3->phic = 0; + ++p3; + } 
+ } + ++pleg; ++ppi; ++ptau; + } + qpms_pitau_free(pt); + return QPMS_SUCCESS; } qpms_errno_t qpms_vecspharm_dual_fill(csphvec_t *const a1target, csphvec_t *const a2target, csphvec_t *const a3target, - qpms_l_t lMax, sph_t dir, qpms_normalisation_t norm) { - assert(lMax >= 1); - qpms_pitau_t pt = qpms_pitau_get(dir.theta, lMax, norm); - double const *pleg = pt.leg; - double const *ppi = pt.pi; - double const *ptau = pt.tau; - csphvec_t *p1 = a1target, *p2 = a2target, *p3 = a3target; - for(qpms_l_t l = 1; l <= lMax; ++l) { - for(qpms_m_t m = -l; m <= l; ++m) { - double normfac = 1./qpms_normalisation_t_factor_abssquare(norm, l, m); // factor w.r.t. Kristensson - complex double eimf = cexp(m * dir.phi * I); - if (a1target) { - p1->rc = 0; - p1->thetac = conj(*ppi * normfac * I * eimf); - p1->phic = conj(-*ptau * normfac * eimf); - ++p1; - } - if (a2target) { - p2->rc = 0; - p2->thetac = conj(*ptau * normfac * eimf); - p2->phic = conj(*ppi * normfac * I * eimf); - ++p2; - } - if (a3target) { - p3->rc = conj(sqrt(l*(l+1)) * (*pleg) * normfac * eimf); - p3->thetac = 0; - p3->phic = 0; - ++p3; - } - ++pleg; ++ppi; ++ptau; - } - } - qpms_pitau_free(pt); - return QPMS_SUCCESS; + qpms_l_t lMax, sph_t dir, qpms_normalisation_t norm) { + assert(lMax >= 1); + qpms_pitau_t pt = qpms_pitau_get(dir.theta, lMax, norm); + double const *pleg = pt.leg; + double const *ppi = pt.pi; + double const *ptau = pt.tau; + csphvec_t *p1 = a1target, *p2 = a2target, *p3 = a3target; + for(qpms_l_t l = 1; l <= lMax; ++l) { + for(qpms_m_t m = -l; m <= l; ++m) { + double normfac = 1./qpms_normalisation_t_factor_abssquare(norm, l, m); // factor w.r.t. Kristensson + complex double eimf = cexp(m * dir.phi * I); + if (a1target) { + p1->rc = 0; + p1->thetac = conj(*ppi * normfac * I * eimf); + p1->phic = conj(-*ptau * normfac * eimf); + ++p1; + } + if (a2target) { + p2->rc = 0; + p2->thetac = conj(*ptau * normfac * eimf); + p2->phic = conj(*ppi * normfac * I * eimf); + ++p2; + } + if (a3target) { + p3->rc = conj(sqrt(l*(l+1)) * (*pleg) * normfac * eimf); + p3->thetac = 0; + p3->phic = 0; + ++p3; + } + ++pleg; ++ppi; ++ptau; + } + } + qpms_pitau_free(pt); + return QPMS_SUCCESS; } static inline complex double ipowl(qpms_l_t l) { - switch(l % 4) { - case 0: return 1; - break; - case 1: return I; - break; - case 2: return -1; - break; - case 3: return -I; - break; - default: abort(); - } - assert(0); + switch(l % 4) { + case 0: return 1; + break; + case 1: return I; + break; + case 2: return -1; + break; + case 3: return -I; + break; + default: abort(); + } + assert(0); } qpms_errno_t qpms_planewave2vswf_fill_sph(sph_t wavedir, csphvec_t amplitude, - complex double *target_longcoeff, complex double *target_mgcoeff, - complex double *target_elcoeff, qpms_l_t lMax, qpms_normalisation_t norm) { - qpms_y_t nelem = qpms_lMax2nelem(lMax); - csphvec_t * const dual_A1 = malloc(3*nelem*sizeof(csphvec_t)), *const dual_A2 = dual_A1 + nelem, - * const dual_A3 = dual_A2 + nelem; - if (QPMS_SUCCESS != qpms_vecspharm_dual_fill(dual_A1, dual_A2, dual_A3, lMax, wavedir, norm)) - abort(); - const csphvec_t *pA1 = dual_A1, *pA2 = dual_A2, *pA3 = dual_A3; - complex double *plong = target_longcoeff, *pmg = target_mgcoeff, *pel = target_elcoeff; - for (qpms_l_t l = 1; l <= lMax; ++l) { - complex double prefac1 = 4 * M_PI * ipowl(l); - complex double prefac23 = - 4 * M_PI * ipowl(l+1); - for (qpms_m_t m = -l; m <= l; ++m) { - *plong = prefac23 * csphvec_dotnc(*pA3, amplitude); - *pmg = prefac1 * csphvec_dotnc(*pA1, amplitude); - *pel = prefac23 * 
csphvec_dotnc(*pA2, amplitude); - ++pA1; ++pA2; ++pA3; ++plong; ++pmg; ++pel; - } + complex double *target_longcoeff, complex double *target_mgcoeff, + complex double *target_elcoeff, qpms_l_t lMax, qpms_normalisation_t norm) { + qpms_y_t nelem = qpms_lMax2nelem(lMax); + csphvec_t * const dual_A1 = malloc(3*nelem*sizeof(csphvec_t)), *const dual_A2 = dual_A1 + nelem, + * const dual_A3 = dual_A2 + nelem; + if (QPMS_SUCCESS != qpms_vecspharm_dual_fill(dual_A1, dual_A2, dual_A3, lMax, wavedir, norm)) + abort(); + const csphvec_t *pA1 = dual_A1, *pA2 = dual_A2, *pA3 = dual_A3; + complex double *plong = target_longcoeff, *pmg = target_mgcoeff, *pel = target_elcoeff; + for (qpms_l_t l = 1; l <= lMax; ++l) { + complex double prefac1 = 4 * M_PI * ipowl(l); + complex double prefac23 = - 4 * M_PI * ipowl(l+1); + for (qpms_m_t m = -l; m <= l; ++m) { + *plong = prefac23 * csphvec_dotnc(*pA3, amplitude); + *pmg = prefac1 * csphvec_dotnc(*pA1, amplitude); + *pel = prefac23 * csphvec_dotnc(*pA2, amplitude); + ++pA1; ++pA2; ++pA3; ++plong; ++pmg; ++pel; + } - } - free(dual_A1); - return QPMS_SUCCESS; + } + free(dual_A1); + return QPMS_SUCCESS; } qpms_errno_t qpms_planewave2vswf_fill_cart(cart3_t wavedir_cart /*allow complex k?*/, ccart3_t amplitude_cart, - complex double * const longcoeff, complex double * const mgcoeff, - complex double * const elcoeff, qpms_l_t lMax, qpms_normalisation_t norm) + complex double * const longcoeff, complex double * const mgcoeff, + complex double * const elcoeff, qpms_l_t lMax, qpms_normalisation_t norm) { - sph_t wavedir_sph = cart2sph(wavedir_cart); - csphvec_t amplitude_sphvec = ccart2csphvec(amplitude_cart, wavedir_sph); - return qpms_planewave2vswf_fill_sph(wavedir_sph, amplitude_sphvec, - longcoeff, mgcoeff, elcoeff, lMax, norm); + sph_t wavedir_sph = cart2sph(wavedir_cart); + csphvec_t amplitude_sphvec = ccart2csphvec(amplitude_cart, wavedir_sph); + return qpms_planewave2vswf_fill_sph(wavedir_sph, amplitude_sphvec, + longcoeff, mgcoeff, elcoeff, lMax, norm); } csphvec_t qpms_eval_vswf(sph_t kr, - complex double * const lc, complex double *const mc, complex double *const ec, - qpms_l_t lMax, qpms_bessel_t btyp, qpms_normalisation_t norm) + complex double * const lc, complex double *const mc, complex double *const ec, + qpms_l_t lMax, qpms_bessel_t btyp, qpms_normalisation_t norm) { - qpms_y_t nelem = qpms_lMax2nelem(lMax); - csphvec_t lsum, msum, esum, lcomp, mcomp, ecomp; - csphvec_kahaninit(&lsum, &lcomp); - csphvec_kahaninit(&msum, &mcomp); - csphvec_kahaninit(&esum, &ecomp); - csphvec_t *lset = NULL, *mset = NULL, *eset = NULL; - if(lc) lset = malloc(nelem * sizeof(csphvec_t)); - if(mc) mset = malloc(nelem * sizeof(csphvec_t)); - if(ec) eset = malloc(nelem * sizeof(csphvec_t)); - qpms_vswf_fill(lset, mset, eset, lMax, kr, btyp, norm); - if(lc) for(qpms_y_t y = 0; y < nelem; ++y) - csphvec_kahanadd(&lsum, &lcomp, csphvec_scale(lc[y], lset[y])); - if(mc) for(qpms_y_t y = 0; y < nelem; ++y) - csphvec_kahanadd(&msum, &mcomp, csphvec_scale(mc[y], mset[y])); - if(ec) for(qpms_y_t y = 0; y < nelem; ++y) - csphvec_kahanadd(&esum, &ecomp, csphvec_scale(ec[y], eset[y])); - if(lc) free(lset); - if(mc) free(mset); - if(ec) free(eset); - //return csphvec_add(esum, csphvec_add(msum, lsum)); - csphvec_kahanadd(&esum, &ecomp, msum); - csphvec_kahanadd(&esum, &ecomp, lsum); - return esum; + qpms_y_t nelem = qpms_lMax2nelem(lMax); + csphvec_t lsum, msum, esum, lcomp, mcomp, ecomp; + csphvec_kahaninit(&lsum, &lcomp); + csphvec_kahaninit(&msum, &mcomp); + csphvec_kahaninit(&esum, 
&ecomp); + csphvec_t *lset = NULL, *mset = NULL, *eset = NULL; + if(lc) lset = malloc(nelem * sizeof(csphvec_t)); + if(mc) mset = malloc(nelem * sizeof(csphvec_t)); + if(ec) eset = malloc(nelem * sizeof(csphvec_t)); + qpms_vswf_fill(lset, mset, eset, lMax, kr, btyp, norm); + if(lc) for(qpms_y_t y = 0; y < nelem; ++y) + csphvec_kahanadd(&lsum, &lcomp, csphvec_scale(lc[y], lset[y])); + if(mc) for(qpms_y_t y = 0; y < nelem; ++y) + csphvec_kahanadd(&msum, &mcomp, csphvec_scale(mc[y], mset[y])); + if(ec) for(qpms_y_t y = 0; y < nelem; ++y) + csphvec_kahanadd(&esum, &ecomp, csphvec_scale(ec[y], eset[y])); + if(lc) free(lset); + if(mc) free(mset); + if(ec) free(eset); + //return csphvec_add(esum, csphvec_add(msum, lsum)); + csphvec_kahanadd(&esum, &ecomp, msum); + csphvec_kahanadd(&esum, &ecomp, lsum); + return esum; } diff --git a/qpms/vswf.h b/qpms/vswf.h index 778a3d0..03edc76 100644 --- a/qpms/vswf.h +++ b/qpms/vswf.h @@ -5,10 +5,10 @@ // Electric wave N; NI csphvec_t qpms_vswf_single_el(int m, int n, sph_t kdlj, - qpms_bessel_t btyp, qpms_normalisation_t norm); + qpms_bessel_t btyp, qpms_normalisation_t norm); // Magnetic wave M; NI csphvec_t qpms_vswf_single_mg(int m, int n, sph_t kdlj, - qpms_bessel_t btyp, qpms_normalisation_t norm); + qpms_bessel_t btyp, qpms_normalisation_t norm); // Set of electric and magnetic VSWF in spherical coordinate basis typedef struct { @@ -39,9 +39,9 @@ qpms_errno_t qpms_vswf_fill_alternative(csphvec_t *resultL, csphvec_t *resultM, qpms_bessel_t btyp, qpms_normalisation_t norm); qpms_errno_t qpms_vecspharm_fill(csphvec_t *const a1target, csphvec_t *const a2target, csphvec_t *const a3target, - qpms_l_t lMax, sph_t dir, qpms_normalisation_t norm); + qpms_l_t lMax, sph_t dir, qpms_normalisation_t norm); qpms_errno_t qpms_vecspharm_dual_fill(csphvec_t *const a1target, csphvec_t *const a2target, csphvec_t *const a3target, - qpms_l_t lMax, sph_t dir, qpms_normalisation_t norm); + qpms_l_t lMax, sph_t dir, qpms_normalisation_t norm); qpms_errno_t qpms_planewave2vswf_fill_cart(cart3_t wavedir, ccart3_t amplitude, complex double *targt_longcoeff, complex double *target_mgcoeff, complex double *target_elcoeff, @@ -57,7 +57,7 @@ csphvec_t qpms_eval_vswf(sph_t where, qpms_vswfset_sph_t *qpms_vswfset_make(qpms_l_t lMax, sph_t kdlj, - qpms_bessel_t btyp, qpms_normalisation_t norm);//NI + qpms_bessel_t btyp, qpms_normalisation_t norm);//NI void qpms_vswfset_sph_pfree(qpms_vswfset_sph_t *);//NI #endif // QPMS_VSWF_H
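
/* An illustrative end-to-end use of the vswf.c routines touched above: expand an
 * x-polarised plane wave propagating along z into longitudinal/magnetic/electric
 * VSWF coefficients and evaluate the regular waves back at a point. The function
 * signatures are the ones appearing in this patch; the include set and the
 * normalisation choice are assumptions made for the sketch, not a prescribed recipe.
 */
#include <complex.h>
#include <stdlib.h>
#include "vswf.h"

int planewave_expansion_demo(qpms_l_t lMax) {
    qpms_y_t nelem = qpms_lMax2nelem(lMax);
    complex double *lc = malloc(nelem * sizeof(complex double));
    complex double *mc = malloc(nelem * sizeof(complex double));
    complex double *ec = malloc(nelem * sizeof(complex double));
    if (!lc || !mc || !ec) abort();

    cart3_t k   = {.x = 0, .y = 0, .z = 1};   /* propagation direction along z */
    ccart3_t E0 = {.x = 1, .y = 0, .z = 0};   /* x-polarised unit amplitude */
    if (QPMS_SUCCESS != qpms_planewave2vswf_fill_cart(k, E0, lc, mc, ec, lMax,
                QPMS_NORMALISATION_TAYLOR))
        abort();

    sph_t kr = {.r = 2.0, .theta = 0.3, .phi = 0.1};   /* point, in units of k*r */
    csphvec_t E = qpms_eval_vswf(kr, lc, mc, ec, lMax,
                QPMS_BESSEL_REGULAR, QPMS_NORMALISATION_TAYLOR);
    (void) E;   /* a real test would compare this against the analytic plane wave */

    free(lc); free(mc); free(ec);
    return 0;
}
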