From 2149a9ca7170e2aa346b95532a754ceb2e6fbd09 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20Ne=C4=8Dada?= Date: Wed, 3 May 2017 08:08:58 +0300 Subject: [PATCH] Axes bugfix Former-commit-id: 8c58c30104d142779e698646a46ca950520307c0 --- qpms/proftest.c | 44 ++++++++++++++++ qpms/translations.c | 119 ++++++++++++++++++++++++-------------------- 2 files changed, 110 insertions(+), 53 deletions(-) create mode 100644 qpms/proftest.c diff --git a/qpms/proftest.c b/qpms/proftest.c new file mode 100644 index 0000000..dd040d7 --- /dev/null +++ b/qpms/proftest.c @@ -0,0 +1,44 @@ + +#include "translations.h" +#include +//#include +#include + +typedef struct { + int m, n, mu, nu; + sph_t kdlj; + qpms_bessel_t J; + complex double result_A, result_B; +} testcase_single_trans_t; + +testcase_single_trans_t testcases_Taylor[] = { +#include "testcases_taylor" +}; + + +int main() { + int repete = 500; + int lMax = 3; + qpms_trans_calculator *c = qpms_trans_calculator_init(lMax, QPMS_NORMALIZATION_TAYLOR); + for( int rr = 0; rr < repete; rr++) + for(testcase_single_trans_t *tc = testcases_Taylor; tc->J != QPMS_BESSEL_UNDEF; tc++) { + //if (tc->n > 40 || tc->nu > 40 ) continue; + complex double A_array[c->nelem * c->nelem]; + complex double B_array[c->nelem * c->nelem]; + qpms_trans_calculator_get_AB_arrays(c, A_array, B_array, c->nelem, 1, tc->kdlj, true, tc->J); +#if 0 + complex double A = qpms_trans_single_A_Taylor(tc->m, tc->n, tc->mu, tc->nu, tc->kdlj, true, tc->J); + complex double B = qpms_trans_single_B_Taylor(tc->m, tc->n, tc->mu, tc->nu, tc->kdlj, true, tc->J); + printf("A = %.16f+%.16fj, relerr=%.16f, J=%d\n", + creal(A), cimag(A), (0 == cabs(tc->result_A - A)) ? 0 : + cabs(tc->result_A - A)/((cabs(A) < cabs(tc->result_A)) ? cabs(A) : cabs(tc->result_A)), + tc->J); + printf("B = %.16f+%.16fj, relerr=%.16f, J=%d\n", + creal(B), cimag(B), (0 == cabs(tc->result_B - B)) ? 0 : + cabs(tc->result_B - B)/((cabs(B) < cabs(tc->result_B)) ? 
cabs(B) : cabs(tc->result_B)), + tc->J); +#endif + } +} + + diff --git a/qpms/translations.c b/qpms/translations.c index 66d5ed7..82b95b4 100644 --- a/qpms/translations.c +++ b/qpms/translations.c @@ -665,6 +665,10 @@ int qpms_trans_calculator_get_AB_arrays_ext(const qpms_trans_calculator *c, #ifdef QPMS_COMPILE_PYTHON_EXTENSIONS #include +#ifdef QPMS_USE_OMP +#include <omp.h> +#endif + int qpms_cython_trans_calculator_get_AB_arrays_loop( const qpms_trans_calculator *c, const qpms_bessel_t J, const int resnd, const int daxis, const int saxis, @@ -677,13 +681,17 @@ int qpms_cython_trans_calculator_get_AB_arrays_loop( assert(daxis != saxis); assert(resnd >= 2); int longest_axis = 0; + int longestshape = 1; const npy_intp *resultshape = A_shape, *resultstrides = A_strides; // TODO put some restrict's everywhere? for (int ax = 0; ax < resnd; ++ax){ assert(A_shape[ax] == B_shape[ax]); assert(A_strides[ax] == B_strides[ax]); if (daxis == ax || saxis == ax) continue; - if (A_shape[ax] > A_shape[longest_axis]) longest_axis = ax; + if (A_shape[ax] > longestshape) { + longest_axis = ax; + longestshape = A_shape[ax]; + } } const npy_intp longlen = resultshape[longest_axis]; @@ -698,72 +706,77 @@ int qpms_cython_trans_calculator_get_AB_arrays_loop( innerloop_shape[longest_axis] = 1; innerloop_shape[daxis] = 1; innerloop_shape[saxis] = 1; - + // these are the 'strides' passed to the qpms_trans_calculator_get_AB_arrays_ext // function, which expects 'const double *' strides, not 'char *' ones. 
const npy_intp dstride = resultstrides[daxis] / sizeof(complex double); const npy_intp sstride = resultstrides[saxis] / sizeof(complex double); + int errval = 0; // TODO here start parallelisation - npy_intp local_indices[resnd]; - memset(local_indices, 0, sizeof(local_indices)); - int errval_local = 0; +//#pragma omp parallel + { + npy_intp local_indices[resnd]; + memset(local_indices, 0, sizeof(local_indices)); + int errval_local = 0; + size_t longi; +//#pragma omp for + for(longi = 0; longi < longlen; ++longi) { + // this might be done also in the inverse order, but this is more + // 'c-contiguous' way of incrementing the indices + int ax = resnd - 1; + while(ax >= 0) { + /* calculate the correct index/pointer for each array used. + * This can be further optimized from O(resnd * total size of + * the result array) to O(total size of the result array), but + * fick that now + */ + const char *r_p = r_data + r_strides[longest_axis] * longi; + const char *theta_p = theta_data + theta_strides[longest_axis] * longi; + const char *phi_p = phi_data + phi_strides[longest_axis] * longi; + const char *r_ge_d_p = r_ge_d_data + r_ge_d_strides[longest_axis] * longi; + char *A_p = A_data + A_strides[longest_axis] * longi; + char *B_p = B_data + B_strides[longest_axis] * longi; + for(int i = 0; i < resnd; ++i) { + // following two lines are probably not needed, as innerloop_shape is there 1 anyway + // so if i == daxis, saxis, or longest_axis, local_indices[i] is zero. 
+ if (i == longest_axis) continue; + if (daxis == i || saxis == i) continue; + r_p += r_strides[i] * local_indices[i]; + theta_p += theta_strides[i] * local_indices[i]; + phi_p += phi_strides[i] * local_indices[i]; + A_p += A_strides[i] * local_indices[i]; + B_p += B_strides[i] * local_indices[i]; + } - for(npy_intp longi = 0; longi < longlen; ++longi) { - // this might be done also in the inverse order, but this is more - // 'c-contiguous' way of incrementing the indices - int ax = resnd - 1; - while(ax >= 0) { - /* calculate the correct index/pointer for each array used. - * This can be further optimized from O(resnd * total size of - * the result array) to O(total size of the result array), but - * fick that now - */ - const char *r_p = r_data + r_strides[longest_axis] * longi; - const char *theta_p = theta_data + theta_strides[longest_axis] * longi; - const char *phi_p = phi_data + phi_strides[longest_axis] * longi; - const char *r_ge_d_p = r_ge_d_data + r_ge_d_strides[longest_axis] * longi; - char *A_p = A_data + A_strides[longest_axis] * longi; - char *B_p = B_data + B_strides[longest_axis] * longi; - for(int i = 0; i < resnd; ++i) { - // following two lines are probably not needed, as innerloop_shape is there 1 anyway - // so if i == daxis, saxis, or longest_axis, local_indices[i] is zero. - if (i == longest_axis) continue; - if (daxis == i || saxis == i) continue; - r_p += r_strides[i] * local_indices[i]; - theta_p += theta_strides[i] * local_indices[i]; - phi_p += phi_strides[i] * local_indices[i]; - A_p += A_strides[i] * local_indices[i]; - B_p += B_strides[i] * local_indices[i]; + // perform the actual task here + errval_local |= qpms_trans_calculator_get_AB_arrays_ext(c, (complex double *)A_p, + (complex double *)B_p, + dstride, sstride, + // FIXME change all the _ext function types to npy_... 
so that + // these casts are not needed + *((double *) r_p), *((double *) theta_p), *((double *)phi_p), + (int)(*((npy_bool *) r_ge_d_p)), J); + if (errval_local) abort(); + + // increment the last index 'digit' (ax is now resnd-1; we don't have do-while loop in python) + ++local_indices[ax]; + while(ax >= 0 && local_indices[ax] == innerloop_shape[ax]) { + // overflow to the next digit but stop when reached below the last one + local_indices[ax] = 0; + if (--ax >= 0) local_indices[ax]++; + } + if (ax >= 0) // did not overflow, get back to the lowest index + ax = resnd - 1; + } - - // perform the actual task here - errval_local |= qpms_trans_calculator_get_AB_arrays_ext(c, (complex double *)A_p, - (complex double *)B_p, - dstride, sstride, - // FIXME change all the _ext function types to npy_... so that - // these casts are not needed - *((double *) r_p), *((double *) theta_p), *((double *)phi_p), - (int)(*((npy_bool *) r_ge_d_p)), J); - if (errval_local) abort(); - - // increment the last index 'digit' (ax is now resnd-1; we don't have do-while loop in python) - ++local_indices[ax]; - while(local_indices[ax] == innerloop_shape[ax] && ax >= 0) { - // overflow to the next digit but stop when reached below the last one - local_indices[ax] = 0; - local_indices[--ax]++; - } - if (ax >= 0) // did not overflow, get back to the lowest index - ax = resnd - 1; - } + errval |= errval_local; } // FIXME when parallelizing - int errval = errval_local; // TODO Here end parallelisation return errval; } #endif // QPMS_COMPILE_PYTHON_EXTENSIONS - +