diff --git a/src/probnum/randprocs/kernels/_exponentiated_quadratic.py b/src/probnum/randprocs/kernels/_exponentiated_quadratic.py
index 5a868d73a..9b61f44f2 100644
--- a/src/probnum/randprocs/kernels/_exponentiated_quadratic.py
+++ b/src/probnum/randprocs/kernels/_exponentiated_quadratic.py
@@ -1,11 +1,11 @@
 """Exponentiated quadratic kernel."""
 
+import functools
 from typing import Optional
 
 import numpy as np
 
-from probnum.typing import ScalarLike, ShapeLike
-import probnum.utils as _utils
+from probnum.typing import ArrayLike, ShapeLike
 
 from ._kernel import IsotropicMixin, Kernel
 
@@ -38,7 +38,7 @@ class ExpQuad(Kernel, IsotropicMixin):
     --------
     >>> import numpy as np
     >>> from probnum.randprocs.kernels import ExpQuad
-    >>> K = ExpQuad(input_shape=(), lengthscale=0.1)
+    >>> K = ExpQuad((), lengthscales=0.1)
     >>> xs = np.linspace(0, 1, 3)
     >>> K.matrix(xs)
     array([[1.00000000e+00, 3.72665317e-06, 1.92874985e-22],
@@ -46,10 +46,40 @@ class ExpQuad(Kernel, IsotropicMixin):
            [1.92874985e-22, 3.72665317e-06, 1.00000000e+00]])
     """
 
-    def __init__(self, input_shape: ShapeLike, lengthscale: ScalarLike = 1.0):
-        self.lengthscale = _utils.as_numpy_scalar(lengthscale)
+    def __init__(self, input_shape: ShapeLike, *, lengthscales: ArrayLike = 1.0):
         super().__init__(input_shape=input_shape)
 
+        # Input lengthscales
+        lengthscales = np.asarray(
+            lengthscales if lengthscales is not None else 1.0, dtype=np.double
+        )
+
+        if np.any(lengthscales <= 0):
+            raise ValueError(f"Lengthscales l={lengthscales} must be positive.")
+
+        np.broadcast_to(  # Check if the lengthscales broadcast to the input dimension
+            lengthscales, self.input_shape
+        )
+
+        self._lengthscales = lengthscales
+
+    @property
+    def lengthscales(self) -> np.ndarray:
+        r"""Input lengthscales along the different input dimensions."""
+        return self._lengthscales
+
+    @property
+    def lengthscale(self) -> np.ndarray:
+        """Deprecated."""
+        if self._lengthscales.shape == ():
+            return self._lengthscales
+
+        raise ValueError("There is more than one lengthscale.")
+
+    @functools.cached_property
+    def _scale_factors(self) -> np.ndarray:
+        return np.sqrt(0.5) / self._lengthscales
+
     def _evaluate(self, x0: np.ndarray, x1: Optional[np.ndarray] = None) -> np.ndarray:
         if x1 is None:
             return np.ones_like(  # pylint: disable=unexpected-keyword-arg
@@ -58,5 +88,7 @@ def _evaluate(self, x0: np.ndarray, x1: Optional[np.ndarray] = None) -> np.ndarr
             )
 
         return np.exp(
-            -self._squared_euclidean_distances(x0, x1) / (2.0 * self.lengthscale**2)
+            -self._squared_euclidean_distances(
+                x0, x1, scale_factors=self._scale_factors
+            )
         )
diff --git a/src/probnum/randprocs/kernels/_kernel.py b/src/probnum/randprocs/kernels/_kernel.py
index 62c440696..6da4539e3 100644
--- a/src/probnum/randprocs/kernels/_kernel.py
+++ b/src/probnum/randprocs/kernels/_kernel.py
@@ -3,6 +3,8 @@
 from __future__ import annotations
 
 import abc
+import functools
+import operator
 from typing import Optional, Union
 
 import numpy as np
@@ -159,6 +161,11 @@ def input_ndim(self) -> int:
         """Syntactic sugar for ``len(input_shape)``."""
         return self._input_ndim
 
+    @property
+    def input_size(self) -> int:
+        """Syntactic sugar for the product over the input shape."""
+        return functools.reduce(operator.mul, self.input_shape, 1)
+
     @property
     def output_shape(self) -> ShapeType:
         """Shape of single, i.e. non-batched, return values of the covariance function.
@@ -468,45 +475,59 @@ def __rmul__(self, other: BinaryOperandType) -> Kernel: class IsotropicMixin(abc.ABC): # pylint: disable=too-few-public-methods r"""Mixin for isotropic kernels. - An isotropic kernel is a kernel which only depends on the Euclidean norm of the - distance between the arguments, i.e. + An isotropic kernel is a kernel which only depends on the norm of the difference of + the arguments, i.e. .. math :: - k(x_0, x_1) = k(\lVert x_0 - x_1 \rVert_2). + k(x_0, x_1) = k(\lVert x_0 - x_1 \rVert). Hence, all isotropic kernels are stationary. """ def _squared_euclidean_distances( - self, x0: np.ndarray, x1: Optional[np.ndarray] + self, + x0: np.ndarray, + x1: Optional[np.ndarray], + *, + scale_factors: Optional[np.ndarray] = None, ) -> np.ndarray: - """Implementation of the squared Euclidean distance, which supports scalar - inputs and an optional second argument.""" + """Implementation of the squared (modified) Euclidean distance, which supports + scalar inputs, an optional second argument, and separate scale factors for each + input dimension.""" + if x1 is None: return np.zeros_like( # pylint: disable=unexpected-keyword-arg x0, shape=x0.shape[: x0.ndim - self._input_ndim], ) - sqdiffs = (x0 - x1) ** 2 + sqdiffs = x0 - x1 - if self.input_ndim == 0: - return sqdiffs + if scale_factors is not None: + sqdiffs *= scale_factors - assert self.input_ndim == 1 + sqdiffs *= sqdiffs - return np.sum(sqdiffs, axis=-1) + return np.sum(sqdiffs, axis=tuple(range(-self.input_ndim, 0))) def _euclidean_distances( - self, x0: np.ndarray, x1: Optional[np.ndarray] + self, + x0: np.ndarray, + x1: Optional[np.ndarray], + *, + scale_factors: Optional[np.ndarray] = None, ) -> np.ndarray: - """Implementation of the Euclidean distance, which supports scalar inputs and an - optional second argument.""" + """Implementation of the (modified) Euclidean distance, which supports scalar + inputs, an optional second argument, and separate scale factors for each input + dimension.""" + if x1 is None: return np.zeros_like( # pylint: disable=unexpected-keyword-arg x0, shape=x0.shape[: x0.ndim - self._input_ndim], ) - return np.sqrt(self._squared_euclidean_distances(x0, x1)) + return np.sqrt( + self._squared_euclidean_distances(x0, x1, scale_factors=scale_factors) + ) diff --git a/src/probnum/randprocs/kernels/_matern.py b/src/probnum/randprocs/kernels/_matern.py index 99e28dc03..f7b5449d6 100644 --- a/src/probnum/randprocs/kernels/_matern.py +++ b/src/probnum/randprocs/kernels/_matern.py @@ -1,19 +1,20 @@ -"""Matern kernel.""" +"""Matérn kernel.""" -from typing import Optional +import fractions +import functools +from typing import Optional, Tuple import numpy as np -import scipy.spatial.distance import scipy.special -from probnum.typing import ScalarLike, ShapeLike +from probnum.typing import ArrayLike, ScalarLike, ScalarType, ShapeLike import probnum.utils as _utils from ._kernel import IsotropicMixin, Kernel class Matern(Kernel, IsotropicMixin): - r"""Matern kernel. + r"""Matérn kernel. 
    Covariance function defined by

@@ -21,29 +22,53 @@ class Matern(Kernel, IsotropicMixin):
         :nowrap:
 
         \begin{equation}
-            k(x_0, x_1)
-            =
-            \frac{1}{\Gamma(\nu) 2^{\nu - 1}}
-            \left( \frac{\sqrt{2 \nu}}{l} \lVert x_0 - x_1 \rVert_2 \right)^\nu
-            K_\nu \left( \frac{\sqrt{2 \nu}}{l} \lVert x_0 - x_1 \rVert_2 \right),
+            k_\nu(x_0, x_1)
+            :=
+            \frac{2^{1 - \nu}}{\Gamma(\nu)}
+            \left( \sqrt{2 \nu} \lVert x_0 - x_1 \rVert_{\Lambda^{-1}} \right)^\nu
+            K_\nu \left( \sqrt{2 \nu} \lVert x_0 - x_1 \rVert_{\Lambda^{-1}} \right),
         \end{equation}
 
-    where :math:`K_\nu` is a modified Bessel function. The Matern kernel generalizes the
-    :class:`~probnum.randprocs.kernels.ExpQuad` kernel via its additional parameter
-    :math:`\nu` controlling the smoothness of the function. For :math:`\nu \rightarrow
-    \infty` the Matern kernel converges to the :class:`~probnum.randprocs.kernels.\
-    ExpQuad` kernel. A Gaussian process with Matern covariance function is :math:`\lceil
-    \nu \rceil - 1` times differentiable.
+    where :math:`K_\nu` is a modified Bessel function of the second kind and
+
+    .. math::
+        \lVert x_0 - x_1 \rVert_{\Lambda^{-1}}^2
+        :=
+        \sum_{i = 1}^d \frac{(x_{0,i} - x_{1,i})^2}{l_i^2}.
+
+    The Matérn kernel generalizes the :class:`~probnum.randprocs.kernels.ExpQuad` kernel
+    via its additional parameter :math:`\nu` controlling the smoothness of the functions
+    in the associated RKHS. For :math:`\nu \rightarrow \infty`, the Matérn kernel
+    converges to the :class:`~probnum.randprocs.kernels.ExpQuad` kernel. A Gaussian
+    process with Matérn covariance function is :math:`\lceil \nu \rceil - 1` times
+    differentiable.
+
+    If :math:`\nu` is a half-integer, i.e. :math:`\nu = p + \frac{1}{2}` for some
+    nonnegative integer :math:`p`, then the expression for the kernel function
+    simplifies to a product of an exponential and a polynomial
+
+    .. math::
+        :nowrap:
+
+        \begin{equation}
+            k_{\nu = p + \frac{1}{2}}(x_0, x_1)
+            =
+            \exp \left( -\sqrt{2 \nu} \lVert x_0 - x_1 \rVert_{\Lambda^{-1}} \right)
+            \frac{p!}{(2p)!}
+            \sum_{i = 0}^p \frac{(p + i)!}{i!(p - i)!} 2^{p - i}
+            \left( \sqrt{2 \nu} \lVert x_0 - x_1 \rVert_{\Lambda^{-1}} \right)^{p - i}.
+        \end{equation}
 
     Parameters
     ----------
     input_shape
-        Shape of the kernel's input.
-    lengthscale
-        Lengthscale :math:`l` of the kernel. Describes the input scale on which the
-        process varies.
+        Shape of the kernel's inputs.
     nu
         Hyperparameter :math:`\nu` controlling differentiability.
+    lengthscales
+        Lengthscales :math:`l_i` along the different input dimensions of the kernel.
+        Describes the input scales on which the process varies.
+        The lengthscales will be broadcast to the input shape of the kernel.
See Also -------- @@ -54,7 +79,7 @@ class Matern(Kernel, IsotropicMixin): -------- >>> import numpy as np >>> from probnum.randprocs.kernels import Matern - >>> K = Matern(input_shape=(), lengthscale=0.1, nu=2.5) + >>> K = Matern((), nu=2.5, lengthscales=0.1) >>> xs = np.linspace(0, 1, 3) >>> K.matrix(xs) array([[1.00000000e+00, 7.50933789e-04, 3.69569622e-08], @@ -65,53 +90,153 @@ class Matern(Kernel, IsotropicMixin): def __init__( self, input_shape: ShapeLike, - lengthscale: ScalarLike = 1.0, nu: ScalarLike = 1.5, + *, + lengthscales: Optional[ArrayLike] = None, ): - self.lengthscale = _utils.as_numpy_scalar(lengthscale) - if not self.lengthscale > 0: - raise ValueError(f"Lengthscale l={self.lengthscale} must be positive.") - self.nu = _utils.as_numpy_scalar(nu) - if not self.nu > 0: - raise ValueError(f"Hyperparameter nu={self.nu} must be positive.") - super().__init__(input_shape=input_shape) - def _evaluate(self, x0: np.ndarray, x1: Optional[np.ndarray] = None) -> np.ndarray: - distances = self._euclidean_distances(x0, x1) - - # Kernel matrix computation dependent on differentiability - if self.nu == 0.5: - return np.exp(-1.0 / self.lengthscale * distances) - - if self.nu == 1.5: - scaled_distances = np.sqrt(3) / self.lengthscale * distances - return (1.0 + scaled_distances) * np.exp(-scaled_distances) - - if self.nu == 2.5: - scaled_distances = np.sqrt(5) / self.lengthscale * distances - return (1.0 + scaled_distances + scaled_distances**2 / 3.0) * np.exp( - -scaled_distances - ) - if self.nu == 3.5: - scaled_distances = np.sqrt(7) / self.lengthscale * distances - # Using Horner's method speeds up computations substantially - return ( - 1.0 - + (1.0 + (2.0 / 5.0 + scaled_distances / 15.0) * scaled_distances) - * scaled_distances - ) * np.exp(-scaled_distances) - - if self.nu == np.inf: - return np.exp(-1.0 / (2.0 * self.lengthscale**2) * distances**2) - - # The modified Bessel function K_nu is not defined for z=0 - distances = np.maximum(distances, np.finfo(distances.dtype).eps) - - scaled_distances = np.sqrt(2 * self.nu) / self.lengthscale * distances - return ( - 2 ** (1.0 - self.nu) - / scipy.special.gamma(self.nu) - * scaled_distances**self.nu - * scipy.special.kv(self.nu, scaled_distances) + # Smoothness parameter ν + nu = _utils.as_numpy_scalar(nu) + + if nu <= 0: + raise ValueError(f"Hyperparameter nu={nu} must be positive.") + + self._nu = nu + + # Input lengthscales + lengthscales = np.asarray( + lengthscales if lengthscales is not None else 1.0, + dtype=np.double, ) + + if np.any(lengthscales <= 0): + raise ValueError(f"All lengthscales l={lengthscales} must be positive.") + + np.broadcast_to( # Check if the lengthscales broadcast to the input dimension + lengthscales, self.input_shape + ) + + self._lengthscales = lengthscales + + @property + def nu(self) -> ScalarType: + r"""Smoothness parameter :math:`\nu`.""" + return self._nu + + @functools.cached_property + def p(self) -> Optional[int]: + r"""Degree :math:`p` of the polynomial part of a Matérn kernel with half-integer + smoothness parameter :math:`\nu = p + \frac{1}{2}`. If :math:`\nu` is not a + half-integer, this is set to :data:`None`. 
+
+        Sample paths of a Gaussian process with this covariance function are
+        :math:`p`-times continuously differentiable."""
+        nu_minus_half = self._nu - 0.5
+
+        # Half-integer values can be represented exactly in IEEE 754 floating point
+        # numbers
+        if nu_minus_half == int(nu_minus_half):
+            return int(nu_minus_half)
+
+        return None
+
+    @property
+    def is_half_integer(self) -> bool:
+        r"""Indicates whether :math:`\nu` is a half-integer."""
+        return self.p is not None
+
+    @property
+    def lengthscales(self) -> np.ndarray:
+        r"""Input lengthscales along the different input dimensions."""
+        return self._lengthscales
+
+    @property
+    def lengthscale(self) -> np.ndarray:
+        """Deprecated."""
+        if self._lengthscales.shape == ():
+            return self._lengthscales
+
+        raise ValueError("There is more than one lengthscale.")
+
+    @functools.cached_property
+    def _scale_factors(self) -> np.ndarray:
+        return np.sqrt(2 * self._nu) / self._lengthscales
+
+    def _evaluate(self, x0: np.ndarray, x1: Optional[np.ndarray]) -> np.ndarray:
+        scaled_dists = self._euclidean_distances(
+            x0, x1, scale_factors=self._scale_factors
+        )
+
+        if self.is_half_integer:
+            # Evaluate the polynomial part using Horner's method
+            coeffs = Matern._half_integer_coefficients_floating(self.p)
+
+            res = coeffs[self.p]
+
+            for i in range(self.p - 1, -1, -1):
+                res *= scaled_dists
+                res += coeffs[i]
+
+            # Exponential part
+            res *= np.exp(-scaled_dists)
+
+            return res
+
+        return _matern_bessel(scaled_dists, nu=self._nu)
+
+    @staticmethod
+    @functools.lru_cache(maxsize=None)
+    def half_integer_coefficients(p: int) -> Tuple[fractions.Fraction, ...]:
+        r"""Computes the rational coefficients :math:`c_i` of the polynomial part of a
+        Matérn kernel with half-integer smoothness parameter :math:`\nu = p +
+        \frac{1}{2}`.
+
+        We leverage the recursion
+
+        .. math::
+            :nowrap:
+
+            \begin{align}
+                c_{p - i}
+                & := \frac{p!}{(2p)!} \frac{(p + i)!}{i!(p - i)!} 2^{p - i} \\
+                & = \frac{2 (i + 1)}{(p + i + 1) (p - i)} c_{p - (i + 1)},
+            \end{align}
+
+        where :math:`c_0 = c_{p - p} = 1`.
+
+        Parameters
+        ----------
+        p:
+            Degree :math:`p` of the polynomial part.
+
+        Returns
+        -------
+        coefficients:
+            A tuple containing the exact rational coefficients of the polynomial part,
+            where the entry at index :math:`i` contains the coefficient corresponding
+            to the monomial with degree :math:`i`.
+ """ + coeffs = [fractions.Fraction(1, 1)] + + for i in range(p - 1, -1, -1): + coeffs.append(coeffs[-1] * 2 * (i + 1) / (p + i + 1) / (p - i)) + + return tuple(coeffs) + + @staticmethod + @functools.lru_cache(maxsize=None) + def _half_integer_coefficients_floating(p: int) -> np.ndarray: + return np.asarray(Matern.half_integer_coefficients(p), dtype=np.float64) + + +def _matern_bessel(scaled_dists: np.ndarray, nu: ScalarType) -> np.ndarray: + # The modified Bessel function K_nu is not defined for z=0 + scaled_dists = np.maximum(scaled_dists, np.finfo(scaled_dists.dtype).eps) + + return ( + 2 ** (1.0 - nu) + / scipy.special.gamma(nu) + * scaled_dists**nu + * scipy.special.kv(nu, scaled_dists) + ) diff --git a/src/probnum/randprocs/kernels/_product_matern.py b/src/probnum/randprocs/kernels/_product_matern.py index e4f522d33..8b14ea26c 100644 --- a/src/probnum/randprocs/kernels/_product_matern.py +++ b/src/probnum/randprocs/kernels/_product_matern.py @@ -90,7 +90,7 @@ def expand_array(x, ndim): univariate_materns = [] for dim in range(input_dim): univariate_materns.append( - Matern(input_shape=(), lengthscale=lengthscales[dim], nu=nus[dim]) + Matern(input_shape=(), lengthscales=lengthscales[dim], nu=nus[dim]) ) self.univariate_materns = univariate_materns self.nus = nus diff --git a/tests/test_linalg/cases/matrices.py b/tests/test_linalg/cases/matrices.py index fe97c5e67..68c1f58e5 100644 --- a/tests/test_linalg/cases/matrices.py +++ b/tests/test_linalg/cases/matrices.py @@ -31,7 +31,7 @@ def case_kernel_matrix(n: int, rng: np.random.Generator) -> np.ndarray: """Kernel Gram matrix.""" x_min, x_max = (-4.0, 4.0) X = rng.uniform(x_min, x_max, (n, 1)) - kern = kernels.ExpQuad(input_shape=1, lengthscale=1) + kern = kernels.ExpQuad(input_shape=1) return kern(X) diff --git a/tests/test_quad/conftest.py b/tests/test_quad/conftest.py index 961002356..d20145984 100644 --- a/tests/test_quad/conftest.py +++ b/tests/test_quad/conftest.py @@ -127,7 +127,7 @@ def fixture_measure(measure_params) -> IntegrationMeasure: params=[ pytest.param(kerndef, id=kerndef[0].__name__) for kerndef in [ - (kernels.ExpQuad, {"lengthscale": 1.25}), + (kernels.ExpQuad, {"lengthscales": 1.25}), ] ], name="kernel", @@ -178,7 +178,7 @@ def fixture_matern_nu(request) -> int: @pytest.fixture( params=[ - pytest.param(matern_lengthscale, id=f"lengthscale={matern_lengthscale}") + pytest.param(matern_lengthscale, id=f"lengthscales={matern_lengthscale}") for matern_lengthscale in [0.8, 1.0, 1.25] ], name="matern_lengthscale", diff --git a/tests/test_randprocs/conftest.py b/tests/test_randprocs/conftest.py index a0f56efa0..d754c1c50 100644 --- a/tests/test_randprocs/conftest.py +++ b/tests/test_randprocs/conftest.py @@ -65,9 +65,9 @@ def fixture_mean(request, input_dim: int) -> Callable: pytest.param(kerndef, id=kerndef[0].__name__) for kerndef in [ (kernels.Polynomial, {"constant": 1.0, "exponent": 3}), - (kernels.ExpQuad, {"lengthscale": 1.5}), + (kernels.ExpQuad, {"lengthscales": 1.5}), (kernels.RatQuad, {"lengthscale": 0.5, "alpha": 2.0}), - (kernels.Matern, {"lengthscale": 0.5, "nu": 1.5}), + (kernels.Matern, {"lengthscales": 0.5, "nu": 1.5}), ] ], name="cov", diff --git a/tests/test_randprocs/test_kernels/conftest.py b/tests/test_randprocs/test_kernels/conftest.py index 60a3cd0d1..dbd8656f7 100644 --- a/tests/test_randprocs/test_kernels/conftest.py +++ b/tests/test_randprocs/test_kernels/conftest.py @@ -38,13 +38,12 @@ def fixture_input_shape(request) -> ShapeType: (pn.randprocs.kernels.Linear, {"constant": 1.0}), 
            (pn.randprocs.kernels.WhiteNoise, {"sigma_sq": 1.0}),
             (pn.randprocs.kernels.Polynomial, {"constant": 1.0, "exponent": 3}),
-            (pn.randprocs.kernels.ExpQuad, {"lengthscale": 1.5}),
+            (pn.randprocs.kernels.ExpQuad, {"lengthscales": 1.5}),
             (pn.randprocs.kernels.RatQuad, {"lengthscale": 0.5, "alpha": 2.0}),
-            (pn.randprocs.kernels.Matern, {"lengthscale": 0.5, "nu": 0.5}),
-            (pn.randprocs.kernels.Matern, {"lengthscale": 0.5, "nu": 1.5}),
-            (pn.randprocs.kernels.Matern, {"lengthscale": 1.5, "nu": 2.5}),
-            (pn.randprocs.kernels.Matern, {"lengthscale": 2.5, "nu": 7.0}),
-            (pn.randprocs.kernels.Matern, {"lengthscale": 3.0, "nu": np.inf}),
+            (pn.randprocs.kernels.Matern, {"lengthscales": 0.5, "nu": 0.5}),
+            (pn.randprocs.kernels.Matern, {"lengthscales": 0.5, "nu": 1.5}),
+            (pn.randprocs.kernels.Matern, {"lengthscales": 1.5, "nu": 2.5}),
+            (pn.randprocs.kernels.Matern, {"lengthscales": 2.5, "nu": 7.0}),
             (pn.randprocs.kernels.ProductMatern, {"lengthscales": 0.5, "nus": 0.5}),
         ]
     ],
diff --git a/tests/test_randprocs/test_kernels/test_exponentiated_quadratic.py b/tests/test_randprocs/test_kernels/test_exponentiated_quadratic.py
new file mode 100644
index 000000000..88f35c58f
--- /dev/null
+++ b/tests/test_randprocs/test_kernels/test_exponentiated_quadratic.py
@@ -0,0 +1,13 @@
+"""Test cases for the `ExpQuad` kernel."""
+
+import numpy as np
+import pytest
+
+from probnum.randprocs import kernels
+
+
+@pytest.mark.parametrize("lengthscales", (0.0, -1.0, (0.0, 1.0), (-0.2, 2.0)))
+def test_nonpositive_lengthscales_raises_exception(lengthscales):
+    """Check whether a non-positive `lengthscales` parameter raises a ValueError."""
+    with pytest.raises(ValueError):
+        kernels.ExpQuad(np.shape(lengthscales), lengthscales=lengthscales)
diff --git a/tests/test_randprocs/test_kernels/test_kernel.py b/tests/test_randprocs/test_kernels/test_kernel.py
new file mode 100644
index 000000000..b5837cf87
--- /dev/null
+++ b/tests/test_randprocs/test_kernels/test_kernel.py
@@ -0,0 +1,13 @@
+"""Test cases for `Kernel`"""
+
+import numpy as np
+
+from probnum.randprocs import kernels
+
+
+def test_input_ndim(kernel: kernels.Kernel):
+    assert kernel.input_ndim == np.empty(kernel.input_shape).ndim
+
+
+def test_input_size(kernel: kernels.Kernel):
+    assert kernel.input_size == np.empty(kernel.input_shape).size
diff --git a/tests/test_randprocs/test_kernels/test_matern.py b/tests/test_randprocs/test_kernels/test_matern.py
index b58dc75b5..8d94548f1 100644
--- a/tests/test_randprocs/test_kernels/test_matern.py
+++ b/tests/test_randprocs/test_kernels/test_matern.py
@@ -4,6 +4,7 @@
 import pytest
 
 from probnum.randprocs import kernels
+from probnum.randprocs.kernels._matern import _matern_bessel
 from probnum.typing import ShapeType
 
 
@@ -14,13 +15,44 @@ def test_nonpositive_nu_raises_exception(nu):
         kernels.Matern(input_shape=(), nu=nu)
 
 
+@pytest.mark.parametrize("lengthscales", (0.0, -1.0, (0.0, 1.0), (-0.2, 2.0)))
+def test_nonpositive_lengthscales_raises_exception(lengthscales):
+    """Check whether a non-positive `lengthscales` parameter raises a ValueError."""
+    with pytest.raises(ValueError):
+        kernels.Matern(np.shape(lengthscales), lengthscales=lengthscales)
+
+
+@pytest.mark.parametrize("p", range(4))
+def test_half_integer_impl_equals_naive_impl(
+    input_shape: ShapeType, p: int, x0: np.ndarray, x1: np.ndarray
+):
+    """Test whether the optimized half-integer implementation produces the same results
+    as the general implementation based on the Bessel function."""
+    rng = np.random.default_rng(537892325)
+
+    nu = p + 0.5
+    lengthscales = rng.gamma(1.0, size=input_shape) + 0.5
+
+    k = kernels.Matern(input_shape, nu=nu, lengthscales=lengthscales)
+
+    assert k.is_half_integer
+    assert k.p == p
+
+    assert np.all(lengthscales > 0)
+
+    # Compare against naive implementation
+    k_naive = NaiveMatern(input_shape, nu=nu, lengthscales=lengthscales)
+
+    np.testing.assert_allclose(k.matrix(x0, x1), k_naive.matrix(x0, x1))
+
+
 def test_nu_large_recovers_rbf_kernel(
     x0: np.ndarray, x1: np.ndarray, input_shape: ShapeType
 ):
     """Test whether a Matern kernel with nu large is close to an RBF kernel."""
     lengthscale = 1.25
-    rbf = kernels.ExpQuad(input_shape=input_shape, lengthscale=lengthscale)
-    matern = kernels.Matern(input_shape=input_shape, lengthscale=lengthscale, nu=15)
+    rbf = kernels.ExpQuad(input_shape=input_shape, lengthscales=lengthscale)
+    matern = kernels.Matern(input_shape=input_shape, lengthscales=lengthscale, nu=15)
 
     np.testing.assert_allclose(
         rbf.matrix(x0, x1),
@@ -29,3 +61,18 @@ def test_nu_large_recovers_rbf_kernel(
         rtol=0.05,
         atol=0.01,
     )
+
+
+class NaiveMatern(kernels.IsotropicMixin, kernels.Kernel):
+    def __init__(self, input_shape, *, nu, lengthscales):
+        super().__init__(input_shape=input_shape)
+
+        self._nu = nu
+        self._lengthscales = lengthscales
+
+    def _evaluate(self, x0, x1):
+        return _matern_bessel(
+            np.sqrt(2 * self._nu)
+            * self._euclidean_distances(x0, x1, scale_factors=1.0 / self._lengthscales),
+            nu=self._nu,
+        )
diff --git a/tests/test_randprocs/test_kernels/test_product_matern.py b/tests/test_randprocs/test_kernels/test_product_matern.py
index 31221b974..2bcf9ef97 100644
--- a/tests/test_randprocs/test_kernels/test_product_matern.py
+++ b/tests/test_randprocs/test_kernels/test_product_matern.py
@@ -12,7 +12,7 @@ def test_kernel_matrix(input_dim, nu):
     """Check that the product Matérn kernel matrix is an elementwise product of 1D
     Matérn kernel matrices."""
     lengthscale = 1.25
-    matern = kernels.Matern(input_shape=(1,), lengthscale=lengthscale, nu=nu)
+    matern = kernels.Matern(input_shape=(1,), lengthscales=lengthscale, nu=nu)
     product_matern = kernels.ProductMatern(
         input_shape=(input_dim,), lengthscales=lengthscale, nus=nu
     )
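
Note for reviewers: the following sketch is not part of the patch; it assumes the patch is applied and uses only the public API introduced above. It illustrates the new keyword-only `lengthscales` parameter and checks the anisotropic-norm semantics documented in `_matern.py`, namely that ARD lengthscales are equivalent to rescaling each input dimension before applying the unit-lengthscale kernel:

```python
import numpy as np

from probnum.randprocs import kernels

rng = np.random.default_rng(42)

# Two batches of 3-dimensional inputs
x0 = rng.standard_normal((5, 3))
x1 = rng.standard_normal((4, 3))

# Anisotropic (ARD) Matérn kernel: one lengthscale per input dimension,
# broadcast against input_shape = (3,)
lengthscales = np.array([0.5, 1.0, 2.0])
k_ard = kernels.Matern((3,), nu=1.5, lengthscales=lengthscales)

# Equivalent isotropic kernel with unit lengthscale on rescaled inputs, since
# ||x0 - x1||_{Λ^{-1}}^2 = Σ_i (x_{0,i} - x_{1,i})^2 / l_i^2
k_iso = kernels.Matern((3,), nu=1.5)

np.testing.assert_allclose(
    k_ard.matrix(x0, x1),
    k_iso.matrix(x0 / lengthscales, x1 / lengthscales),
)
```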
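Similarly, the coefficient recursion in `half_integer_coefficients` can be cross-checked against the closed form stated in its docstring using nothing but the standard library (again a sketch assuming the patch is applied; `fractions` and `math.factorial` keep the comparison exact):

```python
import fractions
import math

from probnum.randprocs.kernels import Matern

for p in range(6):
    coeffs = Matern.half_integer_coefficients(p)

    for degree, c in enumerate(coeffs):
        i = p - degree  # the docstring writes the coefficients as c_{p - i}

        # Closed form: c_{p - i} = p!/(2p)! * (p + i)!/(i! (p - i)!) * 2^(p - i)
        expected = (
            fractions.Fraction(math.factorial(p), math.factorial(2 * p))
            * fractions.Fraction(
                math.factorial(p + i), math.factorial(i) * math.factorial(p - i)
            )
            * 2 ** (p - i)
        )

        assert c == expected
```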