diff --git a/Src/Base/AMReX_Math.H b/Src/Base/AMReX_Math.H
index c4d8d524af..ed581a3997 100644
--- a/Src/Base/AMReX_Math.H
+++ b/Src/Base/AMReX_Math.H
@@ -5,10 +5,12 @@
 #include <AMReX_Extension.H>
 #include <AMReX_GpuQualifiers.H>
 #include <AMReX_INT.H>
+#include <cstdint>
 #include <cmath>
 #include <cstdlib>
 #include <type_traits>
 #include <utility>
+#include <limits>
 
 #ifdef AMREX_USE_SYCL
 #  include <sycl/sycl.hpp>
@@ -225,6 +227,70 @@ std::uint64_t umulhi (std::uint64_t a, std::uint64_t b)
 }
 #endif
 
+template <typename T>
+AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
+T comp_ellint_1 (T k)
+{
+    // Computing K(k) based on DLMF
+    // https://dlmf.nist.gov/19.8
+    T tol = std::numeric_limits<T>::epsilon();
+    T a0 = 1.0;
+    T g0 = std::sqrt(1.0 - k*k);
+
+    // Find the arithmetic-geometric mean of 1 and k' = sqrt(1 - k^2)
+    while (std::abs(a0 - g0) > tol) {
+        T const a = 0.5*(a0 + g0);
+        T const g = std::sqrt(a0 * g0);
+
+        a0 = a;
+        g0 = g;
+    }
+
+    return 0.5*pi<T>()/a0;
+}
+
+template <typename T>
+AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
+T comp_ellint_2 (T k)
+{
+    // Computing E(k) based on DLMF
+    // https://dlmf.nist.gov/19.8
+    T Kcomp = amrex::Math::comp_ellint_1(k);
+    T tol = std::numeric_limits<T>::epsilon();
+
+    // Step zero: a_0 = 1, g_0 = k' = sqrt(1 - k^2), c_0 = k
+    T a0 = 1.0;
+    T g0 = std::sqrt(1.0 - k*k);
+    T cn = std::sqrt(a0*a0 - g0*g0);
+
+    // Step one
+    int n = 1;
+    T a = 0.5 * (a0 + g0);
+    T g = std::sqrt(a0*g0);
+    cn = 0.25*cn*cn/a;
+
+    // The n = 0 and n = 1 terms of the sum are absorbed into a_1^2 (DLMF 19.8)
+    T sum_val = a*a;
+    a0 = a;
+    g0 = g;
+
+    while (std::abs(cn*cn) > tol) {
+        // Compute the coefficients for this iteration
+        a = 0.5 * (a0 + g0);
+        g = std::sqrt(a0*g0);
+        cn = 0.25*cn*cn/a;
+
+        n++;
+        sum_val -= std::pow(2,n-1)*cn*cn;
+
+        // Save a and g for the next iteration
+        a0 = a;
+        g0 = g;
+    }
+
+    return Kcomp*sum_val;
+}
+
 /***************************************************************************************************
  * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: BSD-3-Clause
diff --git a/Src/Base/Parser/AMReX_Parser_Y.H b/Src/Base/Parser/AMReX_Parser_Y.H
index 38f561ad78..1200d0cfe9 100644
--- a/Src/Base/Parser/AMReX_Parser_Y.H
+++ b/Src/Base/Parser/AMReX_Parser_Y.H
@@ -349,28 +349,18 @@
 AMREX_GPU_HOST_DEVICE AMREX_NO_INLINE
 T parser_math_atanh (T a) { return std::atanh(a); }
 
 template <typename T>
-AMREX_GPU_HOST_DEVICE AMREX_NO_INLINE
-T parser_math_comp_ellint_1 (T a)
+AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
+T parser_math_comp_ellint_1 (T k)
 {
-#if defined(__GNUC__) && !defined(__clang__) && !defined(__CUDA_ARCH__) && !defined(__NVCOMPILER)
-    return std::comp_ellint_1(a);
-#else
-    amrex::ignore_unused(a);
-    AMREX_ALWAYS_ASSERT_WITH_MESSAGE(false, "parser: comp_ellint_1 only supported with gcc and cpu");
-    return 0.0;
-#endif
+    // amrex::Math::comp_ellint_1 works on GPU and with all supported
+    // compilers, so the gcc-only branch and its fallback are no longer needed.
+    return amrex::Math::comp_ellint_1(k);
 }
 
 template <typename T>
-AMREX_GPU_HOST_DEVICE AMREX_NO_INLINE
-T parser_math_comp_ellint_2 (T a)
+AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
+T parser_math_comp_ellint_2 (T k)
 {
-#if defined(__GNUC__) && !defined(__clang__) && !defined(__CUDA_ARCH__) && !defined(__NVCOMPILER)
-    return std::comp_ellint_2(a);
-#else
-    amrex::ignore_unused(a);
-    AMREX_ALWAYS_ASSERT_WITH_MESSAGE(false, "parser: comp_ellint_2 only supported with gcc and cpu");
-    return 0.0;
-#endif
+    return amrex::Math::comp_ellint_2(k);
 }
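
As a sanity check on the AGM routines added above, the sketch below (hypothetical, not part of the patch; it assumes an AMReX build environment with AMReX_Math.H on the include path and C++17) compares amrex::Math::comp_ellint_1/2 against the closed forms K(0) = E(0) = pi/2 and, when compiled with GCC, against libstdc++'s std::comp_ellint_1/2, which use the same modulus-k argument convention:

    // check_comp_ellint.cpp -- hypothetical smoke test, not part of the patch.
    #include <AMReX_Math.H>

    #include <cmath>
    #include <cstdio>

    int main ()
    {
        constexpr double half_pi = 1.5707963267948966;

        // Closed forms: K(0) = E(0) = pi/2.
        std::printf("K(0)   = %.16f (expect %.16f)\n",
                    amrex::Math::comp_ellint_1(0.0), half_pi);
        std::printf("E(0)   = %.16f (expect %.16f)\n",
                    amrex::Math::comp_ellint_2(0.0), half_pi);

    #if defined(__GNUC__) && !defined(__clang__)
        // libstdc++ provides the C++17 mathematical special functions,
        // so the AGM results can be compared directly.
        std::printf("K(0.5) = %.16f (std: %.16f)\n",
                    amrex::Math::comp_ellint_1(0.5), std::comp_ellint_1(0.5));
        std::printf("E(0.5) = %.16f (std: %.16f)\n",
                    amrex::Math::comp_ellint_2(0.5), std::comp_ellint_2(0.5));
    #endif
        return 0;
    }

Agreement to roughly machine precision is expected: the AGM iteration converges quadratically and stops once the means agree to std::numeric_limits<T>::epsilon().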
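
On the parser side, the change means comp_ellint_1 and comp_ellint_2 are now available in runtime-parsed expressions with any supported compiler, including in GPU-compiled code, instead of asserting outside of gcc-on-CPU. A minimal usage sketch (hypothetical file name; assumes the standard amrex::Parser API with registerVariables and compile<N>):

    // parser_ellint.cpp -- hypothetical usage sketch.
    #include <AMReX.H>
    #include <AMReX_Parser.H>

    #include <cstdio>

    int main (int argc, char* argv[])
    {
        amrex::Initialize(argc, argv);
        {
            // Expression syntax follows the AMReX parser; "x" is the modulus k.
            amrex::Parser parser("comp_ellint_1(x) + comp_ellint_2(x)");
            parser.registerVariables({"x"});
            auto const exe = parser.compile<1>();   // executor with 1 variable

            std::printf("K(0.5) + E(0.5) = %.15f\n", exe(0.5));
        }
        amrex::Finalize();
        return 0;
    }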