diff --git a/.gitignore b/.gitignore
index 323fee4a40..21e367f7b3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -47,3 +47,5 @@ pytestdebug.log
 # Codespaces
 pythonenv*
 env/
+venv/
+.venv/
diff --git a/pymc/gp/cov.py b/pymc/gp/cov.py
index c910ce93fd..3977033086 100644
--- a/pymc/gp/cov.py
+++ b/pymc/gp/cov.py
@@ -18,7 +18,7 @@
 from collections import Counter
 from functools import reduce
 from operator import add, mul
-from typing import Optional, Sequence
+from typing import Any, Callable, List, Optional, Sequence, Union
 
 import numpy as np
 import pytensor.tensor as pt
@@ -47,13 +47,21 @@
     "Kron",
 ]
 
+TensorLike = Union[np.ndarray, TensorVariable]
+IntSequence = Union[np.ndarray, Sequence[int]]
+
 
 class BaseCovariance:
     """
     Base class for kernels/covariance functions.
     """
 
-    def __call__(self, X, Xs=None, diag=False):
+    def __call__(
+        self,
+        X: TensorLike,
+        Xs: Optional[TensorLike] = None,
+        diag: bool = False,
+    ) -> TensorVariable:
         r"""
         Evaluate the kernel/covariance function.
@@ -71,32 +79,36 @@ def __call__(self, X, Xs=None, diag=False):
         else:
             return self.full(X, Xs)
 
-    def diag(self, X):
+    def diag(self, X: TensorLike) -> TensorVariable:
         raise NotImplementedError
 
-    def full(self, X, Xs=None):
+    def full(self, X: TensorLike, Xs: Optional[TensorLike] = None) -> TensorVariable:
         raise NotImplementedError
 
-    def __add__(self, other):
+    def __add__(self, other) -> "Add":
         # If it's a scalar, cast as Constant covariance. This allows validation for power spectral
         # density calc.
         if isinstance(other, numbers.Real):
             other = Constant(c=other)
         return Add([self, other])
 
-    def __mul__(self, other):
+    def __mul__(self, other) -> "Prod":
         return Prod([self, other])
 
-    def __radd__(self, other):
+    def __radd__(self, other) -> "Add":
         return self.__add__(other)
 
-    def __rmul__(self, other):
+    def __rmul__(self, other) -> "Prod":
         return self.__mul__(other)
 
-    def __pow__(self, other):
+    def __pow__(self, other) -> "Exponentiated":
         other = pt.as_tensor_variable(other).squeeze()
         if not other.ndim == 0:
             raise ValueError("A covariance function can only be exponentiated by a scalar value")
+        if not isinstance(self, Covariance):
+            raise TypeError(
+                "Can only exponentiate covariance functions which inherit from `Covariance`"
+            )
         return Exponentiated(self, other)
 
     def __array_wrap__(self, result):
@@ -129,6 +141,10 @@ def __array_wrap__(self, result):
                 "Known types are `Add` or `Prod`."
             )
 
+    @staticmethod
+    def _alloc(X, *shape: int) -> TensorVariable:
+        return pt.alloc(X, *shape)  # type: ignore
+
 
 class Covariance(BaseCovariance):
     """
@@ -145,7 +161,7 @@ class Covariance(BaseCovariance):
         function operates on.
     """
 
-    def __init__(self, input_dim: int, active_dims: Optional[Sequence[int]] = None):
+    def __init__(self, input_dim: int, active_dims: Optional[IntSequence] = None):
         self.input_dim = input_dim
         if active_dims is None:
             self.active_dims = np.arange(input_dim)
@@ -156,7 +172,7 @@ def __init__(self, input_dim: int, active_dims: Optional[Sequence[int]] = None)
             raise ValueError("Values in `active_dims` can't be larger than `input_dim`.")
 
     @property
-    def n_dims(self):
+    def n_dims(self) -> int:
         """The dimensionality of the input, as taken from the `active_dims`.
 
         """
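The net effect of the new `__pow__` guard, sketched against a pymc build that includes this patch (the kernels and lengthscales below are arbitrary):

```python
import numpy as np
import pymc as pm

pm.gp.cov.ExpQuad(1, ls=0.1) ** 2  # fine: a Covariance subclass with a scalar power

try:
    pm.gp.cov.Constant(2) ** 2  # Constant derives from BaseCovariance only
except TypeError as err:
    print(err)  # Can only exponentiate covariance functions which inherit from `Covariance`

try:
    pm.gp.cov.ExpQuad(1, ls=0.1) ** np.array([[1.0, 2.0]])  # non-scalar power
except ValueError as err:
    print(err)  # A covariance function can only be exponentiated by a scalar value
```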
""" @@ -182,7 +198,7 @@ def _slice(self, X, Xs=None): class Combination(Covariance): - def __init__(self, factor_list): + def __init__(self, factor_list: Sequence): """Use constituent factors to get input_dim and active_dims for the Combination covariance.""" # Check if all input_dim are the same in factor_list @@ -207,11 +223,10 @@ def __init__(self, factor_list): dtype=int, ) ) - super().__init__(input_dim=input_dim, active_dims=active_dims) # Set up combination kernel, flatten out factor_list so that - self._factor_list = [] + self._factor_list: List[Any] = [] for factor in factor_list: if isinstance(factor, self.__class__): self._factor_list.extend(factor._factor_list) @@ -295,35 +310,46 @@ def _merge_factors_psd(self, omega): class Add(Combination): - def __call__(self, X, Xs=None, diag=False): + def __call__( + self, + X: TensorLike, + Xs: Optional[TensorLike] = None, + diag: bool = False, + ) -> TensorVariable: return reduce(add, self._merge_factors_cov(X, Xs, diag)) - def power_spectral_density(self, omega): + def power_spectral_density(self, omega: TensorLike) -> TensorVariable: return reduce(add, self._merge_factors_psd(omega)) class Prod(Combination): - def __call__(self, X, Xs=None, diag=False): + def __call__( + self, + X: TensorLike, + Xs: Optional[TensorLike] = None, + diag: bool = False, + ) -> TensorVariable: return reduce(mul, self._merge_factors_cov(X, Xs, diag)) - def power_spectral_density(self, omega): + def power_spectral_density(self, omega: TensorLike) -> TensorVariable: check = Counter([isinstance(factor, Covariance) for factor in self._factor_list]) - if check.get(True) >= 2: + if check.get(True, 0) >= 2: raise NotImplementedError( "The power spectral density of products of covariance " "functions is not implemented." ) - return reduce(mul, self._merge_factors_psd(omega)) class Exponentiated(Covariance): - def __init__(self, kernel, power): + def __init__(self, kernel: Covariance, power): self.kernel = kernel self.power = power super().__init__(input_dim=self.kernel.input_dim, active_dims=self.kernel.active_dims) - def __call__(self, X, Xs=None, diag=False): + def __call__( + self, X: TensorLike, Xs: Optional[TensorLike] = None, diag: bool = False + ) -> TensorVariable: return self.kernel(X, Xs, diag=diag) ** self.power @@ -343,7 +369,7 @@ class Kron(Covariance): implementations. 
""" - def __init__(self, factor_list): + def __init__(self, factor_list: Sequence[Covariance]): self.input_dims = [factor.input_dim for factor in factor_list] input_dim = sum(self.input_dims) super().__init__(input_dim=input_dim) @@ -358,7 +384,9 @@ def _split(self, X, Xs): Xs_split = [None] * len(X_split) return X_split, Xs_split - def __call__(self, X, Xs=None, diag=False): + def __call__( + self, X: TensorLike, Xs: Optional[TensorLike] = None, diag: bool = False + ) -> TensorVariable: X_split, Xs_split = self._split(X, Xs) covs = [cov(x, xs, diag) for cov, x, xs in zip(self._factor_list, X_split, Xs_split)] return reduce(mul, covs) @@ -376,14 +404,14 @@ class Constant(BaseCovariance): def __init__(self, c): self.c = c - def diag(self, X): - return pt.alloc(self.c, X.shape[0]) + def diag(self, X: TensorLike) -> TensorVariable: + return self._alloc(self.c, X.shape[0]) - def full(self, X, Xs=None): + def full(self, X: TensorLike, Xs: Optional[TensorLike] = None) -> TensorVariable: if Xs is None: - return pt.alloc(self.c, X.shape[0], X.shape[0]) + return self._alloc(self.c, X.shape[0], X.shape[0]) else: - return pt.alloc(self.c, X.shape[0], Xs.shape[0]) + return self._alloc(self.c, X.shape[0], Xs.shape[0]) class WhiteNoise(BaseCovariance): @@ -398,14 +426,14 @@ class WhiteNoise(BaseCovariance): def __init__(self, sigma): self.sigma = sigma - def diag(self, X): - return pt.alloc(pt.square(self.sigma), X.shape[0]) + def diag(self, X: TensorLike) -> TensorVariable: + return self._alloc(pt.square(self.sigma), X.shape[0]) - def full(self, X, Xs=None): + def full(self, X: TensorLike, Xs: Optional[TensorLike] = None) -> TensorVariable: if Xs is None: return pt.diag(self.diag(X)) else: - return pt.alloc(0.0, X.shape[0], Xs.shape[0]) + return self._alloc(0.0, X.shape[0], Xs.shape[0]) class Circular(Covariance): @@ -440,7 +468,13 @@ class Circular(Covariance): https://hal.archives-ouvertes.fr/hal-01119942v1/document """ - def __init__(self, input_dim, period, tau=4, active_dims=None): + def __init__( + self, + input_dim: int, + period, + tau=4, + active_dims: Optional[IntSequence] = None, + ): super().__init__(input_dim, active_dims) self.c = pt.as_tensor_variable(period / 2) self.tau = tau @@ -455,12 +489,12 @@ def dist(self, X, Xs): def weinland(self, t): return (1 + self.tau * t / self.c) * pt.clip(1 - t / self.c, 0, np.inf) ** self.tau - def full(self, X, Xs=None): + def full(self, X: TensorLike, Xs: Optional[TensorLike] = None) -> TensorVariable: X, Xs = self._slice(X, Xs) return self.weinland(self.dist(X, Xs)) - def diag(self, X): - return pt.alloc(1.0, X.shape[0]) + def diag(self, X: TensorLike) -> TensorVariable: + return self._alloc(1.0, X.shape[0]) class Stationary(Covariance): @@ -470,11 +504,17 @@ class Stationary(Covariance): Parameters ---------- ls: Lengthscale. If input_dim > 1, a list or array of scalars or PyMC random - variables. If input_dim == 1, a scalar or PyMC random variable. + variables. If input_dim == 1, a scalar or PyMC random variable. ls_inv: Inverse lengthscale. 1 / ls. One of ls or ls_inv must be provided. 
""" - def __init__(self, input_dim, ls=None, ls_inv=None, active_dims=None): + def __init__( + self, + input_dim: int, + ls=None, + ls_inv=None, + active_dims: Optional[IntSequence] = None, + ): super().__init__(input_dim, active_dims) if (ls is None and ls_inv is None) or (ls is not None and ls_inv is not None): raise ValueError("Only one of 'ls' or 'ls_inv' must be provided") @@ -502,52 +542,21 @@ def square_dist(self, X, Xs): def euclidean_dist(self, X, Xs): r2 = self.square_dist(X, Xs) + return self._sqrt(r2) + + def _sqrt(self, r2): return pt.sqrt(r2 + 1e-12) - def diag(self, X): - return pt.alloc(1.0, X.shape[0]) + def diag(self, X: TensorLike) -> TensorVariable: + return self._alloc(1.0, X.shape[0]) - def full(self, X, Xs=None): + def full(self, X: TensorLike, Xs: Optional[TensorLike] = None) -> TensorVariable: raise NotImplementedError - def power_spectral_density(self, omega): + def power_spectral_density(self, omega: TensorLike) -> TensorVariable: raise NotImplementedError -class Periodic(Stationary): - r""" - The Periodic kernel. - - .. math:: - k(x, x') = \mathrm{exp}\left( -\frac{\mathrm{sin}^2(\pi |x-x'| \frac{1}{T})}{2\ell^2} \right) - - Notes - ----- - Note that the scaling factor for this kernel is different compared to the more common - definition (see [1]_). Here, 0.5 is in the exponent instead of the more common value, 2. - Divide the length-scale by 2 when initializing the kernel to recover the standard definition. - - References - ---------- - .. [1] David Duvenaud, "The Kernel Cookbook" - https://www.cs.toronto.edu/~duvenaud/cookbook/ - """ - - def __init__(self, input_dim, period, ls=None, ls_inv=None, active_dims=None): - super().__init__(input_dim, ls, ls_inv, active_dims) - self.period = period - - def full(self, X, Xs=None): - X, Xs = self._slice(X, Xs) - if Xs is None: - Xs = X - f1 = X.dimshuffle(0, "x", 1) - f2 = Xs.dimshuffle("x", 0, 1) - r = np.pi * (f1 - f2) / self.period - r = pt.sum(pt.square(pt.sin(r) / self.ls), 2) - return pt.exp(-0.5 * r) - - class ExpQuad(Stationary): r""" The Exponentiated Quadratic kernel. 
@@ -559,11 +568,12 @@ class ExpQuad(Stationary):
 
     """
 
-    def full(self, X, Xs=None):
+    def full(self, X: TensorLike, Xs: Optional[TensorLike] = None) -> TensorVariable:
         X, Xs = self._slice(X, Xs)
-        return pt.exp(-0.5 * self.square_dist(X, Xs))
+        r2 = self.square_dist(X, Xs)
+        return pt.exp(-0.5 * r2)
 
-    def power_spectral_density(self, omega):
+    def power_spectral_density(self, omega: TensorLike) -> TensorVariable:
         r"""
         The power spectral density for the ExpQuad kernel is:
@@ -588,14 +598,22 @@ class RatQuad(Stationary):
        k(x, x') = \left(1 + \frac{(x - x')^2}{2\alpha\ell^2} \right)^{-\alpha}
     """
 
-    def __init__(self, input_dim, alpha, ls=None, ls_inv=None, active_dims=None):
+    def __init__(
+        self,
+        input_dim: int,
+        alpha,
+        ls=None,
+        ls_inv=None,
+        active_dims: Optional[IntSequence] = None,
+    ):
         super().__init__(input_dim, ls, ls_inv, active_dims)
         self.alpha = alpha
 
-    def full(self, X, Xs=None):
+    def full(self, X: TensorLike, Xs: Optional[TensorLike] = None) -> TensorVariable:
         X, Xs = self._slice(X, Xs)
+        r2 = self.square_dist(X, Xs)
         return pt.power(
-            (1.0 + 0.5 * self.square_dist(X, Xs) * (1.0 / self.alpha)),
+            (1.0 + 0.5 * r2 * (1.0 / self.alpha)),
             -1.0 * self.alpha,
         )
@@ -611,12 +629,12 @@ class Matern52(Stationary):
        \mathrm{exp}\left[ - \frac{\sqrt{5(x - x')^2}}{\ell} \right]
     """
 
-    def full(self, X, Xs=None):
+    def full(self, X: TensorLike, Xs: Optional[TensorLike] = None) -> TensorVariable:
         X, Xs = self._slice(X, Xs)
         r = self.euclidean_dist(X, Xs)
         return (1.0 + np.sqrt(5.0) * r + 5.0 / 3.0 * pt.square(r)) * pt.exp(-1.0 * np.sqrt(5.0) * r)
 
-    def power_spectral_density(self, omega):
+    def power_spectral_density(self, omega: TensorLike) -> TensorVariable:
         r"""
         The power spectral density for the Matern52 kernel is:
@@ -651,12 +669,12 @@ class Matern32(Stationary):
        \mathrm{exp}\left[ - \frac{\sqrt{3(x - x')^2}}{\ell} \right]
     """
 
-    def full(self, X, Xs=None):
+    def full(self, X: TensorLike, Xs: Optional[TensorLike] = None) -> TensorVariable:
         X, Xs = self._slice(X, Xs)
         r = self.euclidean_dist(X, Xs)
         return (1.0 + np.sqrt(3.0) * r) * pt.exp(-np.sqrt(3.0) * r)
 
-    def power_spectral_density(self, omega):
+    def power_spectral_density(self, omega: TensorLike) -> TensorVariable:
         r"""
         The power spectral density for the Matern32 kernel is:
@@ -690,7 +708,7 @@ class Matern12(Stationary):
        k(x, x') = \mathrm{exp}\left[ -\frac{(x - x')^2}{\ell} \right]
     """
 
-    def full(self, X, Xs=None):
+    def full(self, X: TensorLike, Xs: Optional[TensorLike] = None) -> TensorVariable:
         X, Xs = self._slice(X, Xs)
         r = self.euclidean_dist(X, Xs)
         return pt.exp(-r)
@@ -705,9 +723,10 @@ class Exponential(Stationary):
        k(x, x') = \mathrm{exp}\left[ -\frac{||x - x'||}{2\ell} \right]
     """
 
-    def full(self, X, Xs=None):
+    def full(self, X: TensorLike, Xs: Optional[TensorLike] = None) -> TensorVariable:
         X, Xs = self._slice(X, Xs)
-        return pt.exp(-0.5 * self.euclidean_dist(X, Xs))
+        r = self.euclidean_dist(X, Xs)
+        return pt.exp(-0.5 * r)
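The refactors above (naming `r2`/`r` before use) are behavior-preserving. A spot check on two of the stationary kernels, with an arbitrary lengthscale:

```python
import numpy as np
import pymc as pm

X = np.linspace(0, 1, 4)[:, None]
for kernel in (pm.gp.cov.ExpQuad(1, ls=0.3), pm.gp.cov.Matern52(1, ls=0.3)):
    K = kernel(X).eval()
    print(type(kernel).__name__, np.round(K[0], 3))  # first row of each Gram matrix
```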
 
 
 class Cosine(Stationary):
@@ -718,9 +737,51 @@ class Cosine(Stationary):
        k(x, x') = \mathrm{cos}\left( 2 \pi \frac{||x - x'||}{ \ell^2} \right)
     """
 
-    def full(self, X, Xs=None):
+    def full(self, X: TensorLike, Xs: Optional[TensorLike] = None) -> TensorVariable:
+        X, Xs = self._slice(X, Xs)
+        r = self.euclidean_dist(X, Xs)
+        return pt.cos(2.0 * np.pi * r)
+
+
+class Periodic(Stationary):
+    r"""
+    The Periodic kernel.
+
+    .. math::
+       k(x, x') = \mathrm{exp}\left( -\frac{\mathrm{sin}^2(\pi |x-x'| \frac{1}{T})}{2\ell^2} \right)
+
+    Notes
+    -----
+    Note that the scaling factor for this kernel is different compared to the more common
+    definition (see [1]_). Here, 0.5 is in the exponent instead of the more common value, 2.
+    Divide the length-scale by 2 when initializing the kernel to recover the standard definition.
+
+    References
+    ----------
+    .. [1] David Duvenaud, "The Kernel Cookbook"
+       https://www.cs.toronto.edu/~duvenaud/cookbook/
+    """
+
+    def __init__(
+        self,
+        input_dim: int,
+        period,
+        ls=None,
+        ls_inv=None,
+        active_dims: Optional[IntSequence] = None,
+    ):
+        super().__init__(input_dim, ls, ls_inv, active_dims)
+        self.period = period
+
+    def full(self, X: TensorLike, Xs: Optional[TensorLike] = None) -> TensorVariable:
         X, Xs = self._slice(X, Xs)
-        return pt.cos(2.0 * np.pi * self.euclidean_dist(X, Xs))
+        if Xs is None:
+            Xs = X
+        f1 = pt.expand_dims(X, axis=(0,))
+        f2 = pt.expand_dims(Xs, axis=(1,))
+        r = np.pi * (f1 - f2) / self.period
+        r2 = pt.sum(pt.square(pt.sin(r) / self.ls), 2)
+        return pt.exp(-0.5 * r2)
 
 
 class Linear(Covariance):
@@ -731,7 +792,7 @@ class Linear(Covariance):
        k(x, x') = (x - c)(x' - c)
     """
 
-    def __init__(self, input_dim, c, active_dims=None):
+    def __init__(self, input_dim: int, c, active_dims: Optional[IntSequence] = None):
         super().__init__(input_dim, active_dims)
         self.c = c
 
@@ -740,7 +801,7 @@ def _common(self, X, Xs=None):
         Xc = pt.sub(X, self.c)
         return X, Xc, Xs
 
-    def full(self, X, Xs=None):
+    def full(self, X: TensorLike, Xs: Optional[TensorLike] = None) -> TensorVariable:
         X, Xc, Xs = self._common(X, Xs)
         if Xs is None:
             return pt.dot(Xc, pt.transpose(Xc))
@@ -748,7 +809,7 @@ def full(self, X, Xs=None):
             Xsc = pt.sub(Xs, self.c)
             return pt.dot(Xc, pt.transpose(Xsc))
 
-    def diag(self, X):
+    def diag(self, X: TensorLike) -> TensorVariable:
         X, Xc, _ = self._common(X, None)
         return pt.sum(pt.square(Xc), 1)
 
@@ -761,16 +822,16 @@ class Polynomial(Linear):
        k(x, x') = [(x - c)(x' - c) + \mathrm{offset}]^{d}
     """
 
-    def __init__(self, input_dim, c, d, offset, active_dims=None):
+    def __init__(self, input_dim: int, c, d, offset, active_dims: Optional[IntSequence] = None):
         super().__init__(input_dim, c, active_dims)
         self.d = d
         self.offset = offset
 
-    def full(self, X, Xs=None):
+    def full(self, X: TensorLike, Xs: Optional[TensorLike] = None) -> TensorVariable:
         linear = super().full(X, Xs)
         return pt.power(linear + self.offset, self.d)
 
-    def diag(self, X):
+    def diag(self, X: TensorLike) -> TensorVariable:
         linear = super().diag(X)
         return pt.power(linear + self.offset, self.d)
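Back on the `Periodic.full` hunk above: the `dimshuffle` to `pt.expand_dims` swap can be checked with plain NumPy broadcasting (shapes are what matter here, not values):

```python
import numpy as np

X, Xs = np.ones((5, 1)), np.ones((3, 1))  # n = 5, m = 3 one-dimensional inputs

# Old: X.dimshuffle(0, "x", 1) -> (5, 1, 1); Xs.dimshuffle("x", 0, 1) -> (1, 3, 1)
print((X[:, None, :] - Xs[None, :, :]).shape)                # (5, 3, 1): indexed (X, Xs)

# New: pt.expand_dims(X, (0,)) -> (1, 5, 1); pt.expand_dims(Xs, (1,)) -> (3, 1, 1)
print((np.expand_dims(X, 0) - np.expand_dims(Xs, 1)).shape)  # (3, 5, 1): indexed (Xs, X)
```

Because sin^2 is even, the two orientations hold the same values up to a transpose, so the square case exercised by the tests is identical either way; the (Xs, X) orientation of the new version is worth double-checking for rectangular `cov(X, Xs)` calls.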
""" - def __init__(self, input_dim, cov_func, warp_func, args=None, active_dims=None): + def __init__( + self, + input_dim: int, + cov_func: Covariance, + warp_func: Callable, + args=None, + active_dims: Optional[IntSequence] = None, + ): super().__init__(input_dim, active_dims) if not callable(warp_func): raise TypeError("warp_func must be callable") if not isinstance(cov_func, Covariance): raise TypeError("Must be or inherit from the Covariance class") - self.w = handle_args(warp_func, args) + self.w = handle_args(warp_func) self.args = args self.cov_func = cov_func - def full(self, X, Xs=None): + def full(self, X: TensorLike, Xs: Optional[TensorLike] = None) -> TensorVariable: X, Xs = self._slice(X, Xs) if Xs is None: return self.cov_func(self.w(X, self.args), Xs) else: return self.cov_func(self.w(X, self.args), self.w(Xs, self.args)) - def diag(self, X): + def diag(self, X: TensorLike) -> TensorVariable: X, _ = self._slice(X, None) return self.cov_func(self.w(X, self.args), diag=True) @@ -832,7 +900,13 @@ class Gibbs(Covariance): Additional inputs (besides X or Xs) to lengthscale_func. """ - def __init__(self, input_dim, lengthscale_func, args=None, active_dims=None): + def __init__( + self, + input_dim: int, + lengthscale_func: Callable, + args=None, + active_dims: Optional[IntSequence] = None, + ): super().__init__(input_dim, active_dims) if active_dims is not None: if len(active_dims) > 1: @@ -842,7 +916,7 @@ def __init__(self, input_dim, lengthscale_func, args=None, active_dims=None): raise NotImplementedError(("Higher dimensional inputs ", "are untested")) if not callable(lengthscale_func): raise TypeError("lengthscale_func must be callable") - self.lfunc = handle_args(lengthscale_func, args) + self.lfunc = handle_args(lengthscale_func) self.args = args def square_dist(self, X, Xs=None): @@ -858,7 +932,7 @@ def square_dist(self, X, Xs=None): ) return pt.clip(sqd, 0.0, np.inf) - def full(self, X, Xs=None): + def full(self, X: TensorLike, Xs: Optional[TensorLike] = None) -> TensorVariable: X, Xs = self._slice(X, Xs) rx = self.lfunc(pt.as_tensor_variable(X), self.args) if Xs is None: @@ -871,8 +945,8 @@ def full(self, X, Xs=None): rz2 = pt.reshape(pt.square(rz), (1, -1)) return pt.sqrt((2.0 * pt.outer(rx, rz)) / (rx2 + rz2)) * pt.exp(-1.0 * r2 / (rx2 + rz2)) - def diag(self, X): - return pt.alloc(1.0, X.shape[0]) + def diag(self, X: TensorLike) -> TensorVariable: + return self._alloc(1.0, X.shape[0]) class ScaledCov(Covariance): @@ -894,23 +968,30 @@ class ScaledCov(Covariance): Additional inputs (besides X or Xs) to lengthscale_func. 
""" - def __init__(self, input_dim, cov_func, scaling_func, args=None, active_dims=None): + def __init__( + self, + input_dim: int, + cov_func: Covariance, + scaling_func: Callable, + args=None, + active_dims: Optional[IntSequence] = None, + ): super().__init__(input_dim, active_dims) if not callable(scaling_func): raise TypeError("scaling_func must be callable") if not isinstance(cov_func, Covariance): raise TypeError("Must be or inherit from the Covariance class") self.cov_func = cov_func - self.scaling_func = handle_args(scaling_func, args) + self.scaling_func = handle_args(scaling_func) self.args = args - def diag(self, X): + def diag(self, X: TensorLike) -> TensorVariable: X, _ = self._slice(X, None) cov_diag = self.cov_func(X, diag=True) scf_diag = pt.square(pt.flatten(self.scaling_func(X, self.args))) return cov_diag * scf_diag - def full(self, X, Xs=None): + def full(self, X: TensorLike, Xs: Optional[TensorLike] = None) -> TensorVariable: X, Xs = self._slice(X, Xs) scf_x = self.scaling_func(X, self.args) if Xs is None: @@ -953,7 +1034,14 @@ class Coregion(Covariance): `input_dim != 1`, then `active_dims` must have a length of one. """ - def __init__(self, input_dim, W=None, kappa=None, B=None, active_dims=None): + def __init__( + self, + input_dim: int, + W=None, + kappa=None, + B=None, + active_dims: Optional[IntSequence] = None, + ): super().__init__(input_dim, active_dims) if len(self.active_dims) != 1: raise ValueError("Coregion requires exactly one dimension to be active") @@ -969,7 +1057,7 @@ def __init__(self, input_dim, W=None, kappa=None, B=None, active_dims=None): else: raise ValueError("Exactly one of (W, kappa) and B must be provided to Coregion") - def full(self, X, Xs=None): + def full(self, X: TensorLike, Xs: Optional[TensorLike] = None) -> TensorVariable: X, Xs = self._slice(X, Xs) index = pt.cast(X, "int32") if Xs is None: @@ -978,13 +1066,13 @@ def full(self, X, Xs=None): index2 = pt.cast(Xs, "int32").T return self.B[index, index2] - def diag(self, X): + def diag(self, X: TensorLike) -> TensorVariable: X, _ = self._slice(X, None) index = pt.cast(X, "int32") return pt.diag(self.B)[index.ravel()] -def handle_args(func, args): +def handle_args(func: Callable) -> Callable: def f(x, args): if args is None: return func(x) diff --git a/tests/gp/test_cov.py b/tests/gp/test_cov.py index ba8d41962c..feef63ab80 100644 --- a/tests/gp/test_cov.py +++ b/tests/gp/test_cov.py @@ -302,13 +302,20 @@ def test_covexp_shared(self): npt.assert_allclose(np.diag(K), Kd, atol=1e-5) def test_invalid_covexp(self): - X = np.linspace(0, 1, 10)[:, None] with pytest.raises( ValueError, match=r"A covariance function can only be exponentiated by a scalar value" ): - with pm.Model() as model: + with pm.Model(): a = np.array([[1.0, 2.0]]) - cov = pm.gp.cov.ExpQuad(1, 0.1) ** a + pm.gp.cov.ExpQuad(1, 0.1) ** a + + def test_invalid_covexp_noncov(self): + with pytest.raises( + TypeError, + match=r"Can only exponentiate covariance functions which inherit from `Covariance`", + ): + with pm.Model(): + pm.gp.cov.Constant(2) ** 2 class TestCovKron: @@ -765,9 +772,9 @@ def func_twoarg(x, a, b): x = 100 a = 2 b = 3 - func_noargs2 = pm.gp.cov.handle_args(func_noargs, None) - func_onearg2 = pm.gp.cov.handle_args(func_onearg, a) - func_twoarg2 = pm.gp.cov.handle_args(func_twoarg, args=(a, b)) + func_noargs2 = pm.gp.cov.handle_args(func_noargs) + func_onearg2 = pm.gp.cov.handle_args(func_onearg) + func_twoarg2 = pm.gp.cov.handle_args(func_twoarg) assert func_noargs(x) == func_noargs2(x, args=None) assert 
diff --git a/tests/gp/test_cov.py b/tests/gp/test_cov.py
index ba8d41962c..feef63ab80 100644
--- a/tests/gp/test_cov.py
+++ b/tests/gp/test_cov.py
@@ -302,13 +302,20 @@ def test_covexp_shared(self):
         npt.assert_allclose(np.diag(K), Kd, atol=1e-5)
 
     def test_invalid_covexp(self):
-        X = np.linspace(0, 1, 10)[:, None]
         with pytest.raises(
             ValueError, match=r"A covariance function can only be exponentiated by a scalar value"
         ):
-            with pm.Model() as model:
+            with pm.Model():
                 a = np.array([[1.0, 2.0]])
-                cov = pm.gp.cov.ExpQuad(1, 0.1) ** a
+                pm.gp.cov.ExpQuad(1, 0.1) ** a
+
+    def test_invalid_covexp_noncov(self):
+        with pytest.raises(
+            TypeError,
+            match=r"Can only exponentiate covariance functions which inherit from `Covariance`",
+        ):
+            with pm.Model():
+                pm.gp.cov.Constant(2) ** 2
 
 
 class TestCovKron:
@@ -765,9 +772,9 @@ def func_twoarg(x, a, b):
         x = 100
         a = 2
         b = 3
-        func_noargs2 = pm.gp.cov.handle_args(func_noargs, None)
-        func_onearg2 = pm.gp.cov.handle_args(func_onearg, a)
-        func_twoarg2 = pm.gp.cov.handle_args(func_twoarg, args=(a, b))
+        func_noargs2 = pm.gp.cov.handle_args(func_noargs)
+        func_onearg2 = pm.gp.cov.handle_args(func_onearg)
+        func_twoarg2 = pm.gp.cov.handle_args(func_twoarg)
         assert func_noargs(x) == func_noargs2(x, args=None)
         assert func_onearg(x, a) == func_onearg2(x, args=a)
         assert func_twoarg(x, a, b) == func_twoarg2(x, args=(a, b))
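To exercise the exponentiation tests shown above (paths assume the pymc repository root; the `-k` filter is a convenience, not part of the patch):

```python
import pytest

# Runs test_covexp_shared, test_invalid_covexp, and test_invalid_covexp_noncov
pytest.main(["tests/gp/test_cov.py", "-k", "covexp", "-q"])
```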